diff --git a/.gitattributes b/.gitattributes
index 53af4436490e073b4716c8e1768c112823089ced..bf2ef17384ecb28c62754329dab12e9d2811453b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -36,3 +36,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
mixchain_z_gsm8k.long2short.cot_valve.jsonl filter=lfs diff=lfs merge=lfs -text
mixchain_z_prm12k.long2short.cot_valve.jsonl filter=lfs diff=lfs merge=lfs -text
+tldr-14b-step-832/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+tldr-7b-checkpoint-256/tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/tldr-14b-step-832/config.json b/tldr-14b-step-832/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3bd0cad19fcf826b4934931752f9e864411713a5
--- /dev/null
+++ b/tldr-14b-step-832/config.json
@@ -0,0 +1,30 @@
+{
+ "_name_or_path": "/cpfs/user/lizhongzhi/huggingface_model/huggingface_model/DeepSeek-R1-Distill-Qwen-14B/",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 13824,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 48,
+ "model_type": "qwen2",
+ "num_attention_heads": 40,
+ "num_hidden_layers": 48,
+ "num_key_value_heads": 8,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.46.3",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+}
diff --git a/tldr-14b-step-832/eval_dev_token_acc.json b/tldr-14b-step-832/eval_dev_token_acc.json
new file mode 100644
index 0000000000000000000000000000000000000000..92a4813f89f0f998ecc4f9f1f3538838ac8620a2
--- /dev/null
+++ b/tldr-14b-step-832/eval_dev_token_acc.json
@@ -0,0 +1,8 @@
+{
+ "eval_dev_token": 4446.53125,
+ "eval_dev_acc": 0.513671875,
+ "eval_runtime": 329.208,
+ "eval_samples_per_second": 0.194,
+ "eval_steps_per_second": 0.003,
+ "epoch": 0.416
+}
\ No newline at end of file
diff --git a/tldr-14b-step-832/generation_config.json b/tldr-14b-step-832/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..59e60f99f4acabf5f765a866cb6d7060779fdcdf
--- /dev/null
+++ b/tldr-14b-step-832/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.46.3"
+}
diff --git a/tldr-14b-step-832/latest b/tldr-14b-step-832/latest
new file mode 100644
index 0000000000000000000000000000000000000000..c78b17a422daa3a8d134daf99306985060acd85d
--- /dev/null
+++ b/tldr-14b-step-832/latest
@@ -0,0 +1 @@
+global_step832
\ No newline at end of file
diff --git a/tldr-14b-step-832/long2short_proportions.json b/tldr-14b-step-832/long2short_proportions.json
new file mode 100644
index 0000000000000000000000000000000000000000..10d4bdb2b78f43a6afbd46aeeea1ca0c176bd3f8
--- /dev/null
+++ b/tldr-14b-step-832/long2short_proportions.json
@@ -0,0 +1,299 @@
+[
+ {
+ "global_step": 0,
+ "cot_domain_weight": [
+ 0.8,
+ 0.2
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 32,
+ "cot_domain_weight": [
+ 0.8388444060259401,
+ 0.1611555939740599
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 64,
+ "cot_domain_weight": [
+ 0.8505354307831271,
+ 0.14946456921687287
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 96,
+ "cot_domain_weight": [
+ 0.924430987039901,
+ 0.07556901296009898
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 128,
+ "cot_domain_weight": [
+ 0.9291076840859197,
+ 0.07089231591408032
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 160,
+ "cot_domain_weight": [
+ 0.9551703627379949,
+ 0.044829637262005104
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 192,
+ "cot_domain_weight": [
+ 0.9511838867907558,
+ 0.048816113209244213
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 224,
+ "cot_domain_weight": [
+ 0.959743360270474,
+ 0.040256639729526034
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 256,
+ "cot_domain_weight": [
+ 0.9756809723788156,
+ 0.02431902762118443
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 288,
+ "cot_domain_weight": [
+ 0.9798199552666768,
+ 0.02018004473332324
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 320,
+ "cot_domain_weight": [
+ 0.9872426298703754,
+ 0.01275737012962466
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 352,
+ "cot_domain_weight": [
+ 0.9888833217843287,
+ 0.011116678215671317
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 384,
+ "cot_domain_weight": [
+ 0.9933160784608732,
+ 0.006683921539126801
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 416,
+ "cot_domain_weight": [
+ 0.9891085129978076,
+ 0.010891487002192397
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 448,
+ "cot_domain_weight": [
+ 0.9930044565206148,
+ 0.00699554347938529
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 480,
+ "cot_domain_weight": [
+ 0.9946519011372694,
+ 0.0053480988627305865
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 512,
+ "cot_domain_weight": [
+ 0.9958559065994267,
+ 0.0041440934005732635
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 544,
+ "cot_domain_weight": [
+ 0.9975449034946824,
+ 0.0024550965053175616
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 576,
+ "cot_domain_weight": [
+ 0.9979881855698594,
+ 0.0020118144301406837
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 608,
+ "cot_domain_weight": [
+ 0.9987150533759055,
+ 0.001284946624094461
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 640,
+ "cot_domain_weight": [
+ 0.9985050461672554,
+ 0.001494953832744527
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 672,
+ "cot_domain_weight": [
+ 0.9985948003041136,
+ 0.0014051996958864268
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 704,
+ "cot_domain_weight": [
+ 0.9986131625707226,
+ 0.0013868374292773191
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 736,
+ "cot_domain_weight": [
+ 0.9988695513983467,
+ 0.0011304486016533988
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 768,
+ "cot_domain_weight": [
+ 0.9985235512457586,
+ 0.0014764487542413534
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 800,
+ "cot_domain_weight": [
+ 0.9983893508322773,
+ 0.0016106491677227333
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ },
+ {
+ "global_step": 832,
+ "cot_domain_weight": [
+ 0.9938949698284351,
+ 0.006105030171564917
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/gsm8k_shortcot_outputs_ds_medium_length.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/14b_data/s1_14b_longcot_outputs_ds.jsonl"
+ ]
+ }
+]
\ No newline at end of file
diff --git a/tldr-14b-step-832/model-00001-of-00006.safetensors b/tldr-14b-step-832/model-00001-of-00006.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b4fb5c661a7eaa0595c1c26ae772bd1104e5e563
--- /dev/null
+++ b/tldr-14b-step-832/model-00001-of-00006.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e28af04b1a542da0380afa4e4464d9205f9ac2e7a19d0b03eb45ae5bcb00a659
+size 4986211280
diff --git a/tldr-14b-step-832/model-00002-of-00006.safetensors b/tldr-14b-step-832/model-00002-of-00006.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0b5c97bf04b5c216c03bebcf464be62a6cc08227
--- /dev/null
+++ b/tldr-14b-step-832/model-00002-of-00006.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d6702f09d6287482ba236921497c67f690f04aab565c45b761c8341d87b7400
+size 4954847344
diff --git a/tldr-14b-step-832/model-00003-of-00006.safetensors b/tldr-14b-step-832/model-00003-of-00006.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f9788164b4f2e307e72e631a38fecbcd4ec58cb4
--- /dev/null
+++ b/tldr-14b-step-832/model-00003-of-00006.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab8b5cd3fee9a3ac4db4e91097f51b9963f2644c6dd9004fe51bb6b7480654b7
+size 4954847392
diff --git a/tldr-14b-step-832/model-00004-of-00006.safetensors b/tldr-14b-step-832/model-00004-of-00006.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6a13601263ad2a38b97b21e9dd74dd777d40fd00
--- /dev/null
+++ b/tldr-14b-step-832/model-00004-of-00006.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c5dce47229f535aaad68658c1fb1cbdc1de89c4027ddad03c8f744eb235a7b7
+size 4954847392
diff --git a/tldr-14b-step-832/model-00005-of-00006.safetensors b/tldr-14b-step-832/model-00005-of-00006.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f8d3900fc310868305ad3bca3e82145faeaef1fc
--- /dev/null
+++ b/tldr-14b-step-832/model-00005-of-00006.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68355f8ad433ecdcbd255a71e698c0b1d6f7518a3290fdaf050d7ccd6bcf1711
+size 4954847392
diff --git a/tldr-14b-step-832/model-00006-of-00006.safetensors b/tldr-14b-step-832/model-00006-of-00006.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..79eceb3da3c81920a3558aa782c4938f6a453734
--- /dev/null
+++ b/tldr-14b-step-832/model-00006-of-00006.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8552205d575c3ff6ee6b7ff90f99f5390a65e4cde121a6362c48fde04b17038e
+size 4734533160
diff --git a/tldr-14b-step-832/model.safetensors.index.json b/tldr-14b-step-832/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..0e9b53532550dfddbf42730aeeb499a3bb7a707f
--- /dev/null
+++ b/tldr-14b-step-832/model.safetensors.index.json
@@ -0,0 +1,586 @@
+{
+ "metadata": {
+ "total_size": 29540067328
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00006-of-00006.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00006.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.28.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.29.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.30.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.31.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.31.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.31.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.31.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.31.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.31.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.32.input_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.32.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.32.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.32.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.32.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+ "model.layers.32.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.32.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.32.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.32.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.32.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.32.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.32.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.33.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.33.self_attn.k_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.33.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.33.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.33.self_attn.q_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.33.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.33.self_attn.v_proj.bias": "model-00004-of-00006.safetensors",
+ "model.layers.33.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
+ "model.layers.34.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.34.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.35.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.36.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.37.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.38.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.39.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.39.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.39.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.39.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.39.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.39.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.39.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.39.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.39.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.39.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.39.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.39.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.40.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.40.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.40.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.40.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.40.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.40.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.40.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.40.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.40.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.40.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.40.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.40.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.41.input_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.41.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.41.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.41.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.41.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+ "model.layers.41.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.41.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.41.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.41.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.41.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.41.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.41.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.42.input_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.42.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.42.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.42.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.42.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.42.self_attn.k_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.42.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.42.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.42.self_attn.q_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.42.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.42.self_attn.v_proj.bias": "model-00005-of-00006.safetensors",
+ "model.layers.42.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
+ "model.layers.43.input_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.43.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.43.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.43.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.43.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.43.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.43.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.43.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.43.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.43.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.43.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.43.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.44.input_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.44.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.44.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.44.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.44.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.44.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.44.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.44.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.44.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.44.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.44.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.44.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.45.input_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.45.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.45.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.45.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.45.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.45.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.45.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.45.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.45.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.45.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.45.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.45.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.46.input_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.46.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.46.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.46.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.46.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.46.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.46.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.46.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.46.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.46.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.46.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.46.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.47.input_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.47.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.47.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.47.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.47.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+ "model.layers.47.self_attn.k_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.47.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.47.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.47.self_attn.q_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.47.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.47.self_attn.v_proj.bias": "model-00006-of-00006.safetensors",
+ "model.layers.47.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00006.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00006.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
+ "model.norm.weight": "model-00006-of-00006.safetensors"
+ }
+}
diff --git a/tldr-14b-step-832/rng_state_0.pth b/tldr-14b-step-832/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..32f3ae6ea318e47276edcfd028d3478172dd10bf
--- /dev/null
+++ b/tldr-14b-step-832/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f068e2025a4f00989e9c03d01c1281f7d1906969b0fbe7319192d985b5de4d49
+size 15984
diff --git a/tldr-14b-step-832/rng_state_1.pth b/tldr-14b-step-832/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f87392cf4192ff7b3b5e66d95f0e33718cd80f52
--- /dev/null
+++ b/tldr-14b-step-832/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2120f02ce7a002ef9e970ec506c42eb25f7d875f674566d6ad7c342a0a258acd
+size 15984
diff --git a/tldr-14b-step-832/rng_state_2.pth b/tldr-14b-step-832/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ebf595b2c68ebcdbda3e0ec4ed2acb79adb60d5d
--- /dev/null
+++ b/tldr-14b-step-832/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c78254b86c335260938fb97d27b32bab843db1941a13389d01fefd73eef0201b
+size 15984
diff --git a/tldr-14b-step-832/rng_state_3.pth b/tldr-14b-step-832/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..da10e558e5ebb8173b8d03601c2ce6920e6ccdb3
--- /dev/null
+++ b/tldr-14b-step-832/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:610071c273687a048043135ba6e816c86cd8cbc842496954bb1b35ca01c4c382
+size 15984
diff --git a/tldr-14b-step-832/rng_state_4.pth b/tldr-14b-step-832/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8e26a45d0ef6fc1b7138021e57c7b30b9e0bf6c1
--- /dev/null
+++ b/tldr-14b-step-832/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8edad2884c4e758562a95fef0fcf48f82a9ce39dbace5d1e32bcbfb847c72140
+size 15984
diff --git a/tldr-14b-step-832/rng_state_5.pth b/tldr-14b-step-832/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..de8fda1aa4a62fe184e0720a579156c51adec166
--- /dev/null
+++ b/tldr-14b-step-832/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1a41860bf7ca64b413a603bb6d5f036cb9d3c9a7f940c56733ca7e6bfab8afe
+size 15984
diff --git a/tldr-14b-step-832/rng_state_6.pth b/tldr-14b-step-832/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..cadeb2bf9407db3bd357428f07d7ca520aabaabe
--- /dev/null
+++ b/tldr-14b-step-832/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:258cea0926b92515dd8e0b49a91e9c7c6562597efa3422fce84c690cdf7c126c
+size 15984
diff --git a/tldr-14b-step-832/rng_state_7.pth b/tldr-14b-step-832/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e8e75fe55cffce8c3ace06a83e87ce45f07bcbc5
--- /dev/null
+++ b/tldr-14b-step-832/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca741f170ce54d6b6c5971457ec6506474163b21edaa3fbaa4e5285d89d229b1
+size 15984
diff --git a/tldr-14b-step-832/scheduler.pt b/tldr-14b-step-832/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a87fde8fa287ce640046980702cc5efb1bc5c6d4
--- /dev/null
+++ b/tldr-14b-step-832/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed96bbe4d84037e4c18f95733da1d2d5bb47c4402c4d667156d5ed1ff760dfb0
+size 1064
diff --git a/tldr-14b-step-832/special_tokens_map.json b/tldr-14b-step-832/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/tldr-14b-step-832/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/tldr-14b-step-832/tokenizer.json b/tldr-14b-step-832/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/tldr-14b-step-832/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/tldr-14b-step-832/tokenizer_config.json b/tldr-14b-step-832/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..cb7864ff3d42916223f0a1f0dc6ccae482ff498f
--- /dev/null
+++ b/tldr-14b-step-832/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool 
%}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "legacy": true,
+ "model_max_length": 16000,
+ "pad_token": "<|end▁of▁sentence|>",
+ "padding_side": "left",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/tldr-14b-step-832/trainer_state.json b/tldr-14b-step-832/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..568a693589c2da9c746fc9ca1b07a49c81d5a0b1
--- /dev/null
+++ b/tldr-14b-step-832/trainer_state.json
@@ -0,0 +1,6091 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.416,
+ "eval_steps": 32,
+ "global_step": 832,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0005,
+ "grad_norm": 1.1535558177055725,
+ "learning_rate": 1e-05,
+ "loss": 0.4543,
+ "step": 1
+ },
+ {
+ "epoch": 0.001,
+ "grad_norm": 0.5039215671909039,
+ "learning_rate": 1e-05,
+ "loss": 0.3002,
+ "step": 2
+ },
+ {
+ "epoch": 0.0015,
+ "grad_norm": 0.4111085873239227,
+ "learning_rate": 1e-05,
+ "loss": 0.2138,
+ "step": 3
+ },
+ {
+ "epoch": 0.002,
+ "grad_norm": 0.3258240952011462,
+ "learning_rate": 1e-05,
+ "loss": 0.1761,
+ "step": 4
+ },
+ {
+ "epoch": 0.0025,
+ "grad_norm": 0.25417983371771585,
+ "learning_rate": 1e-05,
+ "loss": 0.1923,
+ "step": 5
+ },
+ {
+ "epoch": 0.003,
+ "grad_norm": 0.2023054568437071,
+ "learning_rate": 1e-05,
+ "loss": 0.2035,
+ "step": 6
+ },
+ {
+ "epoch": 0.0035,
+ "grad_norm": 0.29479546251817934,
+ "learning_rate": 1e-05,
+ "loss": 0.166,
+ "step": 7
+ },
+ {
+ "epoch": 0.004,
+ "grad_norm": 0.2603707531918473,
+ "learning_rate": 1e-05,
+ "loss": 0.1563,
+ "step": 8
+ },
+ {
+ "epoch": 0.0045,
+ "grad_norm": 0.27772543950112105,
+ "learning_rate": 1e-05,
+ "loss": 0.1742,
+ "step": 9
+ },
+ {
+ "epoch": 0.005,
+ "grad_norm": 0.33899668368493546,
+ "learning_rate": 1e-05,
+ "loss": 0.1623,
+ "step": 10
+ },
+ {
+ "epoch": 0.0055,
+ "grad_norm": 0.17293404759978015,
+ "learning_rate": 1e-05,
+ "loss": 0.1902,
+ "step": 11
+ },
+ {
+ "epoch": 0.006,
+ "grad_norm": 0.16812733049304054,
+ "learning_rate": 1e-05,
+ "loss": 0.1505,
+ "step": 12
+ },
+ {
+ "epoch": 0.0065,
+ "grad_norm": 0.24635507048250077,
+ "learning_rate": 1e-05,
+ "loss": 0.1496,
+ "step": 13
+ },
+ {
+ "epoch": 0.007,
+ "grad_norm": 0.23026986852648482,
+ "learning_rate": 1e-05,
+ "loss": 0.1428,
+ "step": 14
+ },
+ {
+ "epoch": 0.0075,
+ "grad_norm": 0.20312493098149043,
+ "learning_rate": 1e-05,
+ "loss": 0.1411,
+ "step": 15
+ },
+ {
+ "epoch": 0.008,
+ "grad_norm": 0.19056569111470645,
+ "learning_rate": 1e-05,
+ "loss": 0.1556,
+ "step": 16
+ },
+ {
+ "epoch": 0.0085,
+ "grad_norm": 0.19180586046592904,
+ "learning_rate": 1e-05,
+ "loss": 0.1448,
+ "step": 17
+ },
+ {
+ "epoch": 0.009,
+ "grad_norm": 0.23670452313674922,
+ "learning_rate": 1e-05,
+ "loss": 0.1272,
+ "step": 18
+ },
+ {
+ "epoch": 0.0095,
+ "grad_norm": 0.2241037709056984,
+ "learning_rate": 1e-05,
+ "loss": 0.1584,
+ "step": 19
+ },
+ {
+ "epoch": 0.01,
+ "grad_norm": 0.1828368631692288,
+ "learning_rate": 1e-05,
+ "loss": 0.1347,
+ "step": 20
+ },
+ {
+ "epoch": 0.0105,
+ "grad_norm": 0.23115912688140353,
+ "learning_rate": 1e-05,
+ "loss": 0.1293,
+ "step": 21
+ },
+ {
+ "epoch": 0.011,
+ "grad_norm": 0.18886903067681482,
+ "learning_rate": 1e-05,
+ "loss": 0.1175,
+ "step": 22
+ },
+ {
+ "epoch": 0.0115,
+ "grad_norm": 0.19723236256202736,
+ "learning_rate": 1e-05,
+ "loss": 0.1665,
+ "step": 23
+ },
+ {
+ "epoch": 0.012,
+ "grad_norm": 0.2141017706399641,
+ "learning_rate": 1e-05,
+ "loss": 0.1491,
+ "step": 24
+ },
+ {
+ "epoch": 0.0125,
+ "grad_norm": 0.19090893978368054,
+ "learning_rate": 1e-05,
+ "loss": 0.1113,
+ "step": 25
+ },
+ {
+ "epoch": 0.013,
+ "grad_norm": 0.2460352826446744,
+ "learning_rate": 1e-05,
+ "loss": 0.1142,
+ "step": 26
+ },
+ {
+ "epoch": 0.0135,
+ "grad_norm": 0.21847319513439203,
+ "learning_rate": 1e-05,
+ "loss": 0.1273,
+ "step": 27
+ },
+ {
+ "epoch": 0.014,
+ "grad_norm": 0.18979686333150375,
+ "learning_rate": 1e-05,
+ "loss": 0.1441,
+ "step": 28
+ },
+ {
+ "epoch": 0.0145,
+ "grad_norm": 0.18886921913659824,
+ "learning_rate": 1e-05,
+ "loss": 0.1481,
+ "step": 29
+ },
+ {
+ "epoch": 0.015,
+ "grad_norm": 0.2115532479997522,
+ "learning_rate": 1e-05,
+ "loss": 0.0792,
+ "step": 30
+ },
+ {
+ "epoch": 0.0155,
+ "grad_norm": 0.20266301051804922,
+ "learning_rate": 1e-05,
+ "loss": 0.1379,
+ "step": 31
+ },
+ {
+ "epoch": 0.016,
+ "grad_norm": 0.1865900767381873,
+ "learning_rate": 1e-05,
+ "loss": 0.1453,
+ "step": 32
+ },
+ {
+ "epoch": 0.016,
+ "eval_dev_acc": 0.61328125,
+ "eval_dev_token": 5204.017578125,
+ "eval_runtime": 351.6008,
+ "eval_samples_per_second": 0.182,
+ "eval_steps_per_second": 0.003,
+ "step": 32
+ },
+ {
+ "epoch": 0.0165,
+ "grad_norm": 0.23402804608211233,
+ "learning_rate": 1e-05,
+ "loss": 0.1461,
+ "step": 33
+ },
+ {
+ "epoch": 0.017,
+ "grad_norm": 0.2191224488373381,
+ "learning_rate": 1e-05,
+ "loss": 0.1076,
+ "step": 34
+ },
+ {
+ "epoch": 0.0175,
+ "grad_norm": 0.18221820677824999,
+ "learning_rate": 1e-05,
+ "loss": 0.138,
+ "step": 35
+ },
+ {
+ "epoch": 0.018,
+ "grad_norm": 0.20326615377722793,
+ "learning_rate": 1e-05,
+ "loss": 0.1021,
+ "step": 36
+ },
+ {
+ "epoch": 0.0185,
+ "grad_norm": 0.2449658821374275,
+ "learning_rate": 1e-05,
+ "loss": 0.0881,
+ "step": 37
+ },
+ {
+ "epoch": 0.019,
+ "grad_norm": 0.2068478649138205,
+ "learning_rate": 1e-05,
+ "loss": 0.1249,
+ "step": 38
+ },
+ {
+ "epoch": 0.0195,
+ "grad_norm": 0.22630770783782977,
+ "learning_rate": 1e-05,
+ "loss": 0.1259,
+ "step": 39
+ },
+ {
+ "epoch": 0.02,
+ "grad_norm": 0.21173641583720768,
+ "learning_rate": 1e-05,
+ "loss": 0.1331,
+ "step": 40
+ },
+ {
+ "epoch": 0.0205,
+ "grad_norm": 0.23616623193061376,
+ "learning_rate": 1e-05,
+ "loss": 0.1192,
+ "step": 41
+ },
+ {
+ "epoch": 0.021,
+ "grad_norm": 0.20789242969830385,
+ "learning_rate": 1e-05,
+ "loss": 0.159,
+ "step": 42
+ },
+ {
+ "epoch": 0.0215,
+ "grad_norm": 0.21662842275351119,
+ "learning_rate": 1e-05,
+ "loss": 0.1455,
+ "step": 43
+ },
+ {
+ "epoch": 0.022,
+ "grad_norm": 0.224914886884455,
+ "learning_rate": 1e-05,
+ "loss": 0.1565,
+ "step": 44
+ },
+ {
+ "epoch": 0.0225,
+ "grad_norm": 0.17642201019062015,
+ "learning_rate": 1e-05,
+ "loss": 0.1022,
+ "step": 45
+ },
+ {
+ "epoch": 0.023,
+ "grad_norm": 0.19476006095917964,
+ "learning_rate": 1e-05,
+ "loss": 0.1738,
+ "step": 46
+ },
+ {
+ "epoch": 0.0235,
+ "grad_norm": 0.2041987726527936,
+ "learning_rate": 1e-05,
+ "loss": 0.1545,
+ "step": 47
+ },
+ {
+ "epoch": 0.024,
+ "grad_norm": 0.15364946520211809,
+ "learning_rate": 1e-05,
+ "loss": 0.141,
+ "step": 48
+ },
+ {
+ "epoch": 0.0245,
+ "grad_norm": 0.1632596100583654,
+ "learning_rate": 1e-05,
+ "loss": 0.1493,
+ "step": 49
+ },
+ {
+ "epoch": 0.025,
+ "grad_norm": 0.23305215604624085,
+ "learning_rate": 1e-05,
+ "loss": 0.1164,
+ "step": 50
+ },
+ {
+ "epoch": 0.0255,
+ "grad_norm": 0.2697225282405861,
+ "learning_rate": 1e-05,
+ "loss": 0.0921,
+ "step": 51
+ },
+ {
+ "epoch": 0.026,
+ "grad_norm": 0.19242004892152365,
+ "learning_rate": 1e-05,
+ "loss": 0.1224,
+ "step": 52
+ },
+ {
+ "epoch": 0.0265,
+ "grad_norm": 0.21422189358330607,
+ "learning_rate": 1e-05,
+ "loss": 0.1084,
+ "step": 53
+ },
+ {
+ "epoch": 0.027,
+ "grad_norm": 0.22368520523252378,
+ "learning_rate": 1e-05,
+ "loss": 0.1018,
+ "step": 54
+ },
+ {
+ "epoch": 0.0275,
+ "grad_norm": 0.15315126193234804,
+ "learning_rate": 1e-05,
+ "loss": 0.117,
+ "step": 55
+ },
+ {
+ "epoch": 0.028,
+ "grad_norm": 0.2042745134173473,
+ "learning_rate": 1e-05,
+ "loss": 0.1355,
+ "step": 56
+ },
+ {
+ "epoch": 0.0285,
+ "grad_norm": 0.23889007522498773,
+ "learning_rate": 1e-05,
+ "loss": 0.1387,
+ "step": 57
+ },
+ {
+ "epoch": 0.029,
+ "grad_norm": 0.17240068749452392,
+ "learning_rate": 1e-05,
+ "loss": 0.1634,
+ "step": 58
+ },
+ {
+ "epoch": 0.0295,
+ "grad_norm": 0.1899912754500666,
+ "learning_rate": 1e-05,
+ "loss": 0.1027,
+ "step": 59
+ },
+ {
+ "epoch": 0.03,
+ "grad_norm": 0.17118724561465037,
+ "learning_rate": 1e-05,
+ "loss": 0.1398,
+ "step": 60
+ },
+ {
+ "epoch": 0.0305,
+ "grad_norm": 0.19950059905690032,
+ "learning_rate": 1e-05,
+ "loss": 0.1118,
+ "step": 61
+ },
+ {
+ "epoch": 0.031,
+ "grad_norm": 0.19928365636144094,
+ "learning_rate": 1e-05,
+ "loss": 0.1146,
+ "step": 62
+ },
+ {
+ "epoch": 0.0315,
+ "grad_norm": 0.23146246196039105,
+ "learning_rate": 1e-05,
+ "loss": 0.0871,
+ "step": 63
+ },
+ {
+ "epoch": 0.032,
+ "grad_norm": 0.22746980065915193,
+ "learning_rate": 1e-05,
+ "loss": 0.1673,
+ "step": 64
+ },
+ {
+ "epoch": 0.032,
+ "eval_dev_acc": 0.541015625,
+ "eval_dev_token": 5677.76171875,
+ "eval_runtime": 358.7405,
+ "eval_samples_per_second": 0.178,
+ "eval_steps_per_second": 0.003,
+ "step": 64
+ },
+ {
+ "epoch": 0.0325,
+ "grad_norm": 0.1992092305273338,
+ "learning_rate": 1e-05,
+ "loss": 0.1292,
+ "step": 65
+ },
+ {
+ "epoch": 0.033,
+ "grad_norm": 0.19429880128063629,
+ "learning_rate": 1e-05,
+ "loss": 0.0911,
+ "step": 66
+ },
+ {
+ "epoch": 0.0335,
+ "grad_norm": 0.1287473705431077,
+ "learning_rate": 1e-05,
+ "loss": 0.1377,
+ "step": 67
+ },
+ {
+ "epoch": 0.034,
+ "grad_norm": 0.2228992327697556,
+ "learning_rate": 1e-05,
+ "loss": 0.0933,
+ "step": 68
+ },
+ {
+ "epoch": 0.0345,
+ "grad_norm": 0.18793882506839266,
+ "learning_rate": 1e-05,
+ "loss": 0.1097,
+ "step": 69
+ },
+ {
+ "epoch": 0.035,
+ "grad_norm": 0.1932965061071618,
+ "learning_rate": 1e-05,
+ "loss": 0.1062,
+ "step": 70
+ },
+ {
+ "epoch": 0.0355,
+ "grad_norm": 0.20585403698562318,
+ "learning_rate": 1e-05,
+ "loss": 0.1279,
+ "step": 71
+ },
+ {
+ "epoch": 0.036,
+ "grad_norm": 0.17833203451544913,
+ "learning_rate": 1e-05,
+ "loss": 0.1353,
+ "step": 72
+ },
+ {
+ "epoch": 0.0365,
+ "grad_norm": 0.21658266347490054,
+ "learning_rate": 1e-05,
+ "loss": 0.0961,
+ "step": 73
+ },
+ {
+ "epoch": 0.037,
+ "grad_norm": 0.2364173046757495,
+ "learning_rate": 1e-05,
+ "loss": 0.1038,
+ "step": 74
+ },
+ {
+ "epoch": 0.0375,
+ "grad_norm": 0.20844999333456934,
+ "learning_rate": 1e-05,
+ "loss": 0.1266,
+ "step": 75
+ },
+ {
+ "epoch": 0.038,
+ "grad_norm": 0.2288998611422715,
+ "learning_rate": 1e-05,
+ "loss": 0.0872,
+ "step": 76
+ },
+ {
+ "epoch": 0.0385,
+ "grad_norm": 0.1878179335817694,
+ "learning_rate": 1e-05,
+ "loss": 0.1038,
+ "step": 77
+ },
+ {
+ "epoch": 0.039,
+ "grad_norm": 0.19984002821227043,
+ "learning_rate": 1e-05,
+ "loss": 0.1376,
+ "step": 78
+ },
+ {
+ "epoch": 0.0395,
+ "grad_norm": 0.20050327741314344,
+ "learning_rate": 1e-05,
+ "loss": 0.135,
+ "step": 79
+ },
+ {
+ "epoch": 0.04,
+ "grad_norm": 0.19353835667751798,
+ "learning_rate": 1e-05,
+ "loss": 0.12,
+ "step": 80
+ },
+ {
+ "epoch": 0.0405,
+ "grad_norm": 0.12986340217496,
+ "learning_rate": 1e-05,
+ "loss": 0.1754,
+ "step": 81
+ },
+ {
+ "epoch": 0.041,
+ "grad_norm": 0.1889393654868388,
+ "learning_rate": 1e-05,
+ "loss": 0.1206,
+ "step": 82
+ },
+ {
+ "epoch": 0.0415,
+ "grad_norm": 0.17201063783314552,
+ "learning_rate": 1e-05,
+ "loss": 0.1401,
+ "step": 83
+ },
+ {
+ "epoch": 0.042,
+ "grad_norm": 0.19004092111820917,
+ "learning_rate": 1e-05,
+ "loss": 0.122,
+ "step": 84
+ },
+ {
+ "epoch": 0.0425,
+ "grad_norm": 0.21797692428743218,
+ "learning_rate": 1e-05,
+ "loss": 0.127,
+ "step": 85
+ },
+ {
+ "epoch": 0.043,
+ "grad_norm": 0.16779726003397347,
+ "learning_rate": 1e-05,
+ "loss": 0.1437,
+ "step": 86
+ },
+ {
+ "epoch": 0.0435,
+ "grad_norm": 0.23214262430834917,
+ "learning_rate": 1e-05,
+ "loss": 0.1187,
+ "step": 87
+ },
+ {
+ "epoch": 0.044,
+ "grad_norm": 0.19415443790822307,
+ "learning_rate": 1e-05,
+ "loss": 0.1063,
+ "step": 88
+ },
+ {
+ "epoch": 0.0445,
+ "grad_norm": 0.19254517148646239,
+ "learning_rate": 1e-05,
+ "loss": 0.1039,
+ "step": 89
+ },
+ {
+ "epoch": 0.045,
+ "grad_norm": 0.17307659461239167,
+ "learning_rate": 1e-05,
+ "loss": 0.1554,
+ "step": 90
+ },
+ {
+ "epoch": 0.0455,
+ "grad_norm": 0.24265029122082277,
+ "learning_rate": 1e-05,
+ "loss": 0.1387,
+ "step": 91
+ },
+ {
+ "epoch": 0.046,
+ "grad_norm": 0.17107218032177454,
+ "learning_rate": 1e-05,
+ "loss": 0.1301,
+ "step": 92
+ },
+ {
+ "epoch": 0.0465,
+ "grad_norm": 0.18075891622609033,
+ "learning_rate": 1e-05,
+ "loss": 0.1174,
+ "step": 93
+ },
+ {
+ "epoch": 0.047,
+ "grad_norm": 0.21595935885391185,
+ "learning_rate": 1e-05,
+ "loss": 0.1129,
+ "step": 94
+ },
+ {
+ "epoch": 0.0475,
+ "grad_norm": 0.23262812126963384,
+ "learning_rate": 1e-05,
+ "loss": 0.0983,
+ "step": 95
+ },
+ {
+ "epoch": 0.048,
+ "grad_norm": 0.20420999614021612,
+ "learning_rate": 1e-05,
+ "loss": 0.1161,
+ "step": 96
+ },
+ {
+ "epoch": 0.048,
+ "eval_dev_acc": 0.63671875,
+ "eval_dev_token": 5515.37109375,
+ "eval_runtime": 348.8656,
+ "eval_samples_per_second": 0.183,
+ "eval_steps_per_second": 0.003,
+ "step": 96
+ },
+ {
+ "epoch": 0.0485,
+ "grad_norm": 0.22980199994832454,
+ "learning_rate": 1e-05,
+ "loss": 0.1,
+ "step": 97
+ },
+ {
+ "epoch": 0.049,
+ "grad_norm": 0.19262135795217625,
+ "learning_rate": 1e-05,
+ "loss": 0.1124,
+ "step": 98
+ },
+ {
+ "epoch": 0.0495,
+ "grad_norm": 0.22869326649776367,
+ "learning_rate": 1e-05,
+ "loss": 0.1094,
+ "step": 99
+ },
+ {
+ "epoch": 0.05,
+ "grad_norm": 0.24173954720541516,
+ "learning_rate": 1e-05,
+ "loss": 0.0847,
+ "step": 100
+ },
+ {
+ "epoch": 0.0505,
+ "grad_norm": 0.2332215752101726,
+ "learning_rate": 1e-05,
+ "loss": 0.0946,
+ "step": 101
+ },
+ {
+ "epoch": 0.051,
+ "grad_norm": 0.26475494931892063,
+ "learning_rate": 1e-05,
+ "loss": 0.1094,
+ "step": 102
+ },
+ {
+ "epoch": 0.0515,
+ "grad_norm": 0.20833383536152603,
+ "learning_rate": 1e-05,
+ "loss": 0.0931,
+ "step": 103
+ },
+ {
+ "epoch": 0.052,
+ "grad_norm": 0.22676431614598225,
+ "learning_rate": 1e-05,
+ "loss": 0.0866,
+ "step": 104
+ },
+ {
+ "epoch": 0.0525,
+ "grad_norm": 0.22204744101927545,
+ "learning_rate": 1e-05,
+ "loss": 0.0984,
+ "step": 105
+ },
+ {
+ "epoch": 0.053,
+ "grad_norm": 0.2012349417230909,
+ "learning_rate": 1e-05,
+ "loss": 0.0899,
+ "step": 106
+ },
+ {
+ "epoch": 0.0535,
+ "grad_norm": 0.20131082411517556,
+ "learning_rate": 1e-05,
+ "loss": 0.095,
+ "step": 107
+ },
+ {
+ "epoch": 0.054,
+ "grad_norm": 0.2501268960199406,
+ "learning_rate": 1e-05,
+ "loss": 0.0872,
+ "step": 108
+ },
+ {
+ "epoch": 0.0545,
+ "grad_norm": 0.1877564425582315,
+ "learning_rate": 1e-05,
+ "loss": 0.1447,
+ "step": 109
+ },
+ {
+ "epoch": 0.055,
+ "grad_norm": 0.26373944955124323,
+ "learning_rate": 1e-05,
+ "loss": 0.1104,
+ "step": 110
+ },
+ {
+ "epoch": 0.0555,
+ "grad_norm": 0.2201267469286863,
+ "learning_rate": 1e-05,
+ "loss": 0.0864,
+ "step": 111
+ },
+ {
+ "epoch": 0.056,
+ "grad_norm": 0.2584324977531668,
+ "learning_rate": 1e-05,
+ "loss": 0.1243,
+ "step": 112
+ },
+ {
+ "epoch": 0.0565,
+ "grad_norm": 0.21251509346212935,
+ "learning_rate": 1e-05,
+ "loss": 0.1287,
+ "step": 113
+ },
+ {
+ "epoch": 0.057,
+ "grad_norm": 0.2391921924682281,
+ "learning_rate": 1e-05,
+ "loss": 0.1174,
+ "step": 114
+ },
+ {
+ "epoch": 0.0575,
+ "grad_norm": 0.2250627442441596,
+ "learning_rate": 1e-05,
+ "loss": 0.0961,
+ "step": 115
+ },
+ {
+ "epoch": 0.058,
+ "grad_norm": 0.21589217619835932,
+ "learning_rate": 1e-05,
+ "loss": 0.1041,
+ "step": 116
+ },
+ {
+ "epoch": 0.0585,
+ "grad_norm": 0.23947622053978754,
+ "learning_rate": 1e-05,
+ "loss": 0.1027,
+ "step": 117
+ },
+ {
+ "epoch": 0.059,
+ "grad_norm": 0.20944871960722214,
+ "learning_rate": 1e-05,
+ "loss": 0.1117,
+ "step": 118
+ },
+ {
+ "epoch": 0.0595,
+ "grad_norm": 0.2025724984657677,
+ "learning_rate": 1e-05,
+ "loss": 0.1102,
+ "step": 119
+ },
+ {
+ "epoch": 0.06,
+ "grad_norm": 0.1801908473330023,
+ "learning_rate": 1e-05,
+ "loss": 0.1072,
+ "step": 120
+ },
+ {
+ "epoch": 0.0605,
+ "grad_norm": 0.1940191661946107,
+ "learning_rate": 1e-05,
+ "loss": 0.0904,
+ "step": 121
+ },
+ {
+ "epoch": 0.061,
+ "grad_norm": 0.21867530545592728,
+ "learning_rate": 1e-05,
+ "loss": 0.1193,
+ "step": 122
+ },
+ {
+ "epoch": 0.0615,
+ "grad_norm": 0.22225462024793874,
+ "learning_rate": 1e-05,
+ "loss": 0.0782,
+ "step": 123
+ },
+ {
+ "epoch": 0.062,
+ "grad_norm": 0.24241920616546134,
+ "learning_rate": 1e-05,
+ "loss": 0.1099,
+ "step": 124
+ },
+ {
+ "epoch": 0.0625,
+ "grad_norm": 0.21360306106975577,
+ "learning_rate": 1e-05,
+ "loss": 0.091,
+ "step": 125
+ },
+ {
+ "epoch": 0.063,
+ "grad_norm": 0.2007423950283759,
+ "learning_rate": 1e-05,
+ "loss": 0.1152,
+ "step": 126
+ },
+ {
+ "epoch": 0.0635,
+ "grad_norm": 0.19343038731295426,
+ "learning_rate": 1e-05,
+ "loss": 0.1181,
+ "step": 127
+ },
+ {
+ "epoch": 0.064,
+ "grad_norm": 0.234786663580031,
+ "learning_rate": 1e-05,
+ "loss": 0.1149,
+ "step": 128
+ },
+ {
+ "epoch": 0.064,
+ "eval_dev_acc": 0.548828125,
+ "eval_dev_token": 5586.20703125,
+ "eval_runtime": 364.0015,
+ "eval_samples_per_second": 0.176,
+ "eval_steps_per_second": 0.003,
+ "step": 128
+ },
+ {
+ "epoch": 0.0645,
+ "grad_norm": 0.20468481909378916,
+ "learning_rate": 1e-05,
+ "loss": 0.0976,
+ "step": 129
+ },
+ {
+ "epoch": 0.065,
+ "grad_norm": 0.25320635754138643,
+ "learning_rate": 1e-05,
+ "loss": 0.1183,
+ "step": 130
+ },
+ {
+ "epoch": 0.0655,
+ "grad_norm": 0.21530698126365438,
+ "learning_rate": 1e-05,
+ "loss": 0.1156,
+ "step": 131
+ },
+ {
+ "epoch": 0.066,
+ "grad_norm": 0.20489101859011527,
+ "learning_rate": 1e-05,
+ "loss": 0.0991,
+ "step": 132
+ },
+ {
+ "epoch": 0.0665,
+ "grad_norm": 0.21113632835377186,
+ "learning_rate": 1e-05,
+ "loss": 0.0838,
+ "step": 133
+ },
+ {
+ "epoch": 0.067,
+ "grad_norm": 0.18341595697478763,
+ "learning_rate": 1e-05,
+ "loss": 0.1036,
+ "step": 134
+ },
+ {
+ "epoch": 0.0675,
+ "grad_norm": 0.1762785527366556,
+ "learning_rate": 1e-05,
+ "loss": 0.1674,
+ "step": 135
+ },
+ {
+ "epoch": 0.068,
+ "grad_norm": 0.2632947258005063,
+ "learning_rate": 1e-05,
+ "loss": 0.1202,
+ "step": 136
+ },
+ {
+ "epoch": 0.0685,
+ "grad_norm": 0.21085919863317307,
+ "learning_rate": 1e-05,
+ "loss": 0.1131,
+ "step": 137
+ },
+ {
+ "epoch": 0.069,
+ "grad_norm": 0.19457697084640746,
+ "learning_rate": 1e-05,
+ "loss": 0.1005,
+ "step": 138
+ },
+ {
+ "epoch": 0.0695,
+ "grad_norm": 0.17119664823512107,
+ "learning_rate": 1e-05,
+ "loss": 0.1389,
+ "step": 139
+ },
+ {
+ "epoch": 0.07,
+ "grad_norm": 0.19084161070023772,
+ "learning_rate": 1e-05,
+ "loss": 0.1527,
+ "step": 140
+ },
+ {
+ "epoch": 0.0705,
+ "grad_norm": 0.19580784724822164,
+ "learning_rate": 1e-05,
+ "loss": 0.1175,
+ "step": 141
+ },
+ {
+ "epoch": 0.071,
+ "grad_norm": 0.2110588281462844,
+ "learning_rate": 1e-05,
+ "loss": 0.1062,
+ "step": 142
+ },
+ {
+ "epoch": 0.0715,
+ "grad_norm": 0.20012341575489243,
+ "learning_rate": 1e-05,
+ "loss": 0.0848,
+ "step": 143
+ },
+ {
+ "epoch": 0.072,
+ "grad_norm": 0.1940479696118561,
+ "learning_rate": 1e-05,
+ "loss": 0.0694,
+ "step": 144
+ },
+ {
+ "epoch": 0.0725,
+ "grad_norm": 0.21593290579494073,
+ "learning_rate": 1e-05,
+ "loss": 0.0766,
+ "step": 145
+ },
+ {
+ "epoch": 0.073,
+ "grad_norm": 0.22638726501654005,
+ "learning_rate": 1e-05,
+ "loss": 0.1084,
+ "step": 146
+ },
+ {
+ "epoch": 0.0735,
+ "grad_norm": 0.20997037800742063,
+ "learning_rate": 1e-05,
+ "loss": 0.0761,
+ "step": 147
+ },
+ {
+ "epoch": 0.074,
+ "grad_norm": 0.2380179005894331,
+ "learning_rate": 1e-05,
+ "loss": 0.0927,
+ "step": 148
+ },
+ {
+ "epoch": 0.0745,
+ "grad_norm": 0.23889516090857615,
+ "learning_rate": 1e-05,
+ "loss": 0.0827,
+ "step": 149
+ },
+ {
+ "epoch": 0.075,
+ "grad_norm": 0.18767850358859223,
+ "learning_rate": 1e-05,
+ "loss": 0.0881,
+ "step": 150
+ },
+ {
+ "epoch": 0.0755,
+ "grad_norm": 0.19463069265385494,
+ "learning_rate": 1e-05,
+ "loss": 0.0917,
+ "step": 151
+ },
+ {
+ "epoch": 0.076,
+ "grad_norm": 0.212834963744102,
+ "learning_rate": 1e-05,
+ "loss": 0.0756,
+ "step": 152
+ },
+ {
+ "epoch": 0.0765,
+ "grad_norm": 0.20584172308777918,
+ "learning_rate": 1e-05,
+ "loss": 0.0762,
+ "step": 153
+ },
+ {
+ "epoch": 0.077,
+ "grad_norm": 0.2371449651260928,
+ "learning_rate": 1e-05,
+ "loss": 0.0978,
+ "step": 154
+ },
+ {
+ "epoch": 0.0775,
+ "grad_norm": 0.2049083024101962,
+ "learning_rate": 1e-05,
+ "loss": 0.0925,
+ "step": 155
+ },
+ {
+ "epoch": 0.078,
+ "grad_norm": 0.21217273061342656,
+ "learning_rate": 1e-05,
+ "loss": 0.0825,
+ "step": 156
+ },
+ {
+ "epoch": 0.0785,
+ "grad_norm": 0.20105825530151383,
+ "learning_rate": 1e-05,
+ "loss": 0.0858,
+ "step": 157
+ },
+ {
+ "epoch": 0.079,
+ "grad_norm": 0.2257052714675071,
+ "learning_rate": 1e-05,
+ "loss": 0.1105,
+ "step": 158
+ },
+ {
+ "epoch": 0.0795,
+ "grad_norm": 0.20210858652943217,
+ "learning_rate": 1e-05,
+ "loss": 0.1022,
+ "step": 159
+ },
+ {
+ "epoch": 0.08,
+ "grad_norm": 0.19482796495242663,
+ "learning_rate": 1e-05,
+ "loss": 0.1262,
+ "step": 160
+ },
+ {
+ "epoch": 0.08,
+ "eval_dev_acc": 0.6015625,
+ "eval_dev_token": 5543.318359375,
+ "eval_runtime": 353.7068,
+ "eval_samples_per_second": 0.181,
+ "eval_steps_per_second": 0.003,
+ "step": 160
+ },
+ {
+ "epoch": 0.0805,
+ "grad_norm": 0.22955090034416561,
+ "learning_rate": 1e-05,
+ "loss": 0.0988,
+ "step": 161
+ },
+ {
+ "epoch": 0.081,
+ "grad_norm": 0.23547588568917174,
+ "learning_rate": 1e-05,
+ "loss": 0.0803,
+ "step": 162
+ },
+ {
+ "epoch": 0.0815,
+ "grad_norm": 0.230658904399123,
+ "learning_rate": 1e-05,
+ "loss": 0.0881,
+ "step": 163
+ },
+ {
+ "epoch": 0.082,
+ "grad_norm": 0.2595571094242936,
+ "learning_rate": 1e-05,
+ "loss": 0.0881,
+ "step": 164
+ },
+ {
+ "epoch": 0.0825,
+ "grad_norm": 0.26763452927239884,
+ "learning_rate": 1e-05,
+ "loss": 0.114,
+ "step": 165
+ },
+ {
+ "epoch": 0.083,
+ "grad_norm": 0.257549186353109,
+ "learning_rate": 1e-05,
+ "loss": 0.1045,
+ "step": 166
+ },
+ {
+ "epoch": 0.0835,
+ "grad_norm": 0.19712751160118708,
+ "learning_rate": 1e-05,
+ "loss": 0.0899,
+ "step": 167
+ },
+ {
+ "epoch": 0.084,
+ "grad_norm": 0.17276675854807147,
+ "learning_rate": 1e-05,
+ "loss": 0.0684,
+ "step": 168
+ },
+ {
+ "epoch": 0.0845,
+ "grad_norm": 0.21103265575626073,
+ "learning_rate": 1e-05,
+ "loss": 0.0821,
+ "step": 169
+ },
+ {
+ "epoch": 0.085,
+ "grad_norm": 0.22292947141761962,
+ "learning_rate": 1e-05,
+ "loss": 0.0983,
+ "step": 170
+ },
+ {
+ "epoch": 0.0855,
+ "grad_norm": 0.21612821069411284,
+ "learning_rate": 1e-05,
+ "loss": 0.0917,
+ "step": 171
+ },
+ {
+ "epoch": 0.086,
+ "grad_norm": 0.20757781370778242,
+ "learning_rate": 1e-05,
+ "loss": 0.0919,
+ "step": 172
+ },
+ {
+ "epoch": 0.0865,
+ "grad_norm": 0.2054200483785948,
+ "learning_rate": 1e-05,
+ "loss": 0.077,
+ "step": 173
+ },
+ {
+ "epoch": 0.087,
+ "grad_norm": 0.24143931624393172,
+ "learning_rate": 1e-05,
+ "loss": 0.0919,
+ "step": 174
+ },
+ {
+ "epoch": 0.0875,
+ "grad_norm": 0.22708368965968964,
+ "learning_rate": 1e-05,
+ "loss": 0.0931,
+ "step": 175
+ },
+ {
+ "epoch": 0.088,
+ "grad_norm": 0.20044838709826737,
+ "learning_rate": 1e-05,
+ "loss": 0.0808,
+ "step": 176
+ },
+ {
+ "epoch": 0.0885,
+ "grad_norm": 0.20148090317828546,
+ "learning_rate": 1e-05,
+ "loss": 0.0905,
+ "step": 177
+ },
+ {
+ "epoch": 0.089,
+ "grad_norm": 0.2090109676571514,
+ "learning_rate": 1e-05,
+ "loss": 0.0801,
+ "step": 178
+ },
+ {
+ "epoch": 0.0895,
+ "grad_norm": 0.19147542578517765,
+ "learning_rate": 1e-05,
+ "loss": 0.0774,
+ "step": 179
+ },
+ {
+ "epoch": 0.09,
+ "grad_norm": 0.22051053694604383,
+ "learning_rate": 1e-05,
+ "loss": 0.0949,
+ "step": 180
+ },
+ {
+ "epoch": 0.0905,
+ "grad_norm": 0.20152485004966214,
+ "learning_rate": 1e-05,
+ "loss": 0.077,
+ "step": 181
+ },
+ {
+ "epoch": 0.091,
+ "grad_norm": 0.18668034234550573,
+ "learning_rate": 1e-05,
+ "loss": 0.0887,
+ "step": 182
+ },
+ {
+ "epoch": 0.0915,
+ "grad_norm": 0.21497572062563422,
+ "learning_rate": 1e-05,
+ "loss": 0.095,
+ "step": 183
+ },
+ {
+ "epoch": 0.092,
+ "grad_norm": 0.21716742542098177,
+ "learning_rate": 1e-05,
+ "loss": 0.0822,
+ "step": 184
+ },
+ {
+ "epoch": 0.0925,
+ "grad_norm": 0.20262525326126424,
+ "learning_rate": 1e-05,
+ "loss": 0.0804,
+ "step": 185
+ },
+ {
+ "epoch": 0.093,
+ "grad_norm": 0.18652482669251277,
+ "learning_rate": 1e-05,
+ "loss": 0.0959,
+ "step": 186
+ },
+ {
+ "epoch": 0.0935,
+ "grad_norm": 0.232270946947485,
+ "learning_rate": 1e-05,
+ "loss": 0.0785,
+ "step": 187
+ },
+ {
+ "epoch": 0.094,
+ "grad_norm": 0.22559277541611453,
+ "learning_rate": 1e-05,
+ "loss": 0.0868,
+ "step": 188
+ },
+ {
+ "epoch": 0.0945,
+ "grad_norm": 0.21772738934026295,
+ "learning_rate": 1e-05,
+ "loss": 0.1384,
+ "step": 189
+ },
+ {
+ "epoch": 0.095,
+ "grad_norm": 0.19366625753900965,
+ "learning_rate": 1e-05,
+ "loss": 0.0962,
+ "step": 190
+ },
+ {
+ "epoch": 0.0955,
+ "grad_norm": 0.2162137483161777,
+ "learning_rate": 1e-05,
+ "loss": 0.0753,
+ "step": 191
+ },
+ {
+ "epoch": 0.096,
+ "grad_norm": 0.2111612755929646,
+ "learning_rate": 1e-05,
+ "loss": 0.0776,
+ "step": 192
+ },
+ {
+ "epoch": 0.096,
+ "eval_dev_acc": 0.546875,
+ "eval_dev_token": 5439.14453125,
+ "eval_runtime": 358.6784,
+ "eval_samples_per_second": 0.178,
+ "eval_steps_per_second": 0.003,
+ "step": 192
+ },
+ {
+ "epoch": 0.0965,
+ "grad_norm": 0.20427587800007568,
+ "learning_rate": 1e-05,
+ "loss": 0.1026,
+ "step": 193
+ },
+ {
+ "epoch": 0.097,
+ "grad_norm": 0.1913558266102393,
+ "learning_rate": 1e-05,
+ "loss": 0.0947,
+ "step": 194
+ },
+ {
+ "epoch": 0.0975,
+ "grad_norm": 0.21823580107793827,
+ "learning_rate": 1e-05,
+ "loss": 0.0852,
+ "step": 195
+ },
+ {
+ "epoch": 0.098,
+ "grad_norm": 0.21947391378003933,
+ "learning_rate": 1e-05,
+ "loss": 0.0834,
+ "step": 196
+ },
+ {
+ "epoch": 0.0985,
+ "grad_norm": 0.2234108965736748,
+ "learning_rate": 1e-05,
+ "loss": 0.0922,
+ "step": 197
+ },
+ {
+ "epoch": 0.099,
+ "grad_norm": 0.20738196532743278,
+ "learning_rate": 1e-05,
+ "loss": 0.0814,
+ "step": 198
+ },
+ {
+ "epoch": 0.0995,
+ "grad_norm": 0.21586171895392783,
+ "learning_rate": 1e-05,
+ "loss": 0.0988,
+ "step": 199
+ },
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.20785742252782521,
+ "learning_rate": 1e-05,
+ "loss": 0.0872,
+ "step": 200
+ },
+ {
+ "epoch": 0.1005,
+ "grad_norm": 0.22570317690675268,
+ "learning_rate": 1e-05,
+ "loss": 0.0937,
+ "step": 201
+ },
+ {
+ "epoch": 0.101,
+ "grad_norm": 0.19453877023547578,
+ "learning_rate": 1e-05,
+ "loss": 0.1083,
+ "step": 202
+ },
+ {
+ "epoch": 0.1015,
+ "grad_norm": 0.20591293588894416,
+ "learning_rate": 1e-05,
+ "loss": 0.0767,
+ "step": 203
+ },
+ {
+ "epoch": 0.102,
+ "grad_norm": 0.1798554117116506,
+ "learning_rate": 1e-05,
+ "loss": 0.1017,
+ "step": 204
+ },
+ {
+ "epoch": 0.1025,
+ "grad_norm": 0.1927231622743242,
+ "learning_rate": 1e-05,
+ "loss": 0.0911,
+ "step": 205
+ },
+ {
+ "epoch": 0.103,
+ "grad_norm": 0.21444122564196544,
+ "learning_rate": 1e-05,
+ "loss": 0.0856,
+ "step": 206
+ },
+ {
+ "epoch": 0.1035,
+ "grad_norm": 0.17259842125017608,
+ "learning_rate": 1e-05,
+ "loss": 0.0904,
+ "step": 207
+ },
+ {
+ "epoch": 0.104,
+ "grad_norm": 0.23979518316140722,
+ "learning_rate": 1e-05,
+ "loss": 0.084,
+ "step": 208
+ },
+ {
+ "epoch": 0.1045,
+ "grad_norm": 0.22442151254111703,
+ "learning_rate": 1e-05,
+ "loss": 0.0949,
+ "step": 209
+ },
+ {
+ "epoch": 0.105,
+ "grad_norm": 0.19615294291751353,
+ "learning_rate": 1e-05,
+ "loss": 0.0864,
+ "step": 210
+ },
+ {
+ "epoch": 0.1055,
+ "grad_norm": 0.18344154651920094,
+ "learning_rate": 1e-05,
+ "loss": 0.0843,
+ "step": 211
+ },
+ {
+ "epoch": 0.106,
+ "grad_norm": 0.21335203803361255,
+ "learning_rate": 1e-05,
+ "loss": 0.077,
+ "step": 212
+ },
+ {
+ "epoch": 0.1065,
+ "grad_norm": 0.252518444172673,
+ "learning_rate": 1e-05,
+ "loss": 0.0888,
+ "step": 213
+ },
+ {
+ "epoch": 0.107,
+ "grad_norm": 0.2116629068211744,
+ "learning_rate": 1e-05,
+ "loss": 0.098,
+ "step": 214
+ },
+ {
+ "epoch": 0.1075,
+ "grad_norm": 0.21920052784491295,
+ "learning_rate": 1e-05,
+ "loss": 0.0823,
+ "step": 215
+ },
+ {
+ "epoch": 0.108,
+ "grad_norm": 0.19779399311082105,
+ "learning_rate": 1e-05,
+ "loss": 0.0752,
+ "step": 216
+ },
+ {
+ "epoch": 0.1085,
+ "grad_norm": 0.19973286372655655,
+ "learning_rate": 1e-05,
+ "loss": 0.074,
+ "step": 217
+ },
+ {
+ "epoch": 0.109,
+ "grad_norm": 0.22343594632856933,
+ "learning_rate": 1e-05,
+ "loss": 0.0907,
+ "step": 218
+ },
+ {
+ "epoch": 0.1095,
+ "grad_norm": 0.25396426928555105,
+ "learning_rate": 1e-05,
+ "loss": 0.1075,
+ "step": 219
+ },
+ {
+ "epoch": 0.11,
+ "grad_norm": 0.1945715284952783,
+ "learning_rate": 1e-05,
+ "loss": 0.1053,
+ "step": 220
+ },
+ {
+ "epoch": 0.1105,
+ "grad_norm": 0.21559958220587308,
+ "learning_rate": 1e-05,
+ "loss": 0.0865,
+ "step": 221
+ },
+ {
+ "epoch": 0.111,
+ "grad_norm": 0.22369755043282374,
+ "learning_rate": 1e-05,
+ "loss": 0.0841,
+ "step": 222
+ },
+ {
+ "epoch": 0.1115,
+ "grad_norm": 0.2097379198995065,
+ "learning_rate": 1e-05,
+ "loss": 0.0753,
+ "step": 223
+ },
+ {
+ "epoch": 0.112,
+ "grad_norm": 0.17224880947033328,
+ "learning_rate": 1e-05,
+ "loss": 0.0699,
+ "step": 224
+ },
+ {
+ "epoch": 0.112,
+ "eval_dev_acc": 0.560546875,
+ "eval_dev_token": 5619.009765625,
+ "eval_runtime": 357.3428,
+ "eval_samples_per_second": 0.179,
+ "eval_steps_per_second": 0.003,
+ "step": 224
+ },
+ {
+ "epoch": 0.1125,
+ "grad_norm": 0.19442868536212735,
+ "learning_rate": 1e-05,
+ "loss": 0.0846,
+ "step": 225
+ },
+ {
+ "epoch": 0.113,
+ "grad_norm": 0.1573515275602218,
+ "learning_rate": 1e-05,
+ "loss": 0.0634,
+ "step": 226
+ },
+ {
+ "epoch": 0.1135,
+ "grad_norm": 0.17680777096637068,
+ "learning_rate": 1e-05,
+ "loss": 0.0761,
+ "step": 227
+ },
+ {
+ "epoch": 0.114,
+ "grad_norm": 0.20935704393341403,
+ "learning_rate": 1e-05,
+ "loss": 0.0549,
+ "step": 228
+ },
+ {
+ "epoch": 0.1145,
+ "grad_norm": 0.19829321187204563,
+ "learning_rate": 1e-05,
+ "loss": 0.05,
+ "step": 229
+ },
+ {
+ "epoch": 0.115,
+ "grad_norm": 0.18833561824346334,
+ "learning_rate": 1e-05,
+ "loss": 0.0656,
+ "step": 230
+ },
+ {
+ "epoch": 0.1155,
+ "grad_norm": 0.17277292328026173,
+ "learning_rate": 1e-05,
+ "loss": 0.08,
+ "step": 231
+ },
+ {
+ "epoch": 0.116,
+ "grad_norm": 0.2083709354078263,
+ "learning_rate": 1e-05,
+ "loss": 0.0628,
+ "step": 232
+ },
+ {
+ "epoch": 0.1165,
+ "grad_norm": 0.18113171413223286,
+ "learning_rate": 1e-05,
+ "loss": 0.0633,
+ "step": 233
+ },
+ {
+ "epoch": 0.117,
+ "grad_norm": 0.19985236934976783,
+ "learning_rate": 1e-05,
+ "loss": 0.0676,
+ "step": 234
+ },
+ {
+ "epoch": 0.1175,
+ "grad_norm": 0.2023196182410912,
+ "learning_rate": 1e-05,
+ "loss": 0.062,
+ "step": 235
+ },
+ {
+ "epoch": 0.118,
+ "grad_norm": 0.19446408540605106,
+ "learning_rate": 1e-05,
+ "loss": 0.0609,
+ "step": 236
+ },
+ {
+ "epoch": 0.1185,
+ "grad_norm": 0.1879635955015942,
+ "learning_rate": 1e-05,
+ "loss": 0.0631,
+ "step": 237
+ },
+ {
+ "epoch": 0.119,
+ "grad_norm": 0.18000098291861588,
+ "learning_rate": 1e-05,
+ "loss": 0.0628,
+ "step": 238
+ },
+ {
+ "epoch": 0.1195,
+ "grad_norm": 0.2091766063587954,
+ "learning_rate": 1e-05,
+ "loss": 0.0642,
+ "step": 239
+ },
+ {
+ "epoch": 0.12,
+ "grad_norm": 0.19781458462773657,
+ "learning_rate": 1e-05,
+ "loss": 0.0551,
+ "step": 240
+ },
+ {
+ "epoch": 0.1205,
+ "grad_norm": 0.20570535681429145,
+ "learning_rate": 1e-05,
+ "loss": 0.062,
+ "step": 241
+ },
+ {
+ "epoch": 0.121,
+ "grad_norm": 0.17479813291913535,
+ "learning_rate": 1e-05,
+ "loss": 0.0657,
+ "step": 242
+ },
+ {
+ "epoch": 0.1215,
+ "grad_norm": 0.20023576215167263,
+ "learning_rate": 1e-05,
+ "loss": 0.0545,
+ "step": 243
+ },
+ {
+ "epoch": 0.122,
+ "grad_norm": 0.21569894284947272,
+ "learning_rate": 1e-05,
+ "loss": 0.0641,
+ "step": 244
+ },
+ {
+ "epoch": 0.1225,
+ "grad_norm": 0.16426082027771785,
+ "learning_rate": 1e-05,
+ "loss": 0.0488,
+ "step": 245
+ },
+ {
+ "epoch": 0.123,
+ "grad_norm": 0.23142807048539513,
+ "learning_rate": 1e-05,
+ "loss": 0.0617,
+ "step": 246
+ },
+ {
+ "epoch": 0.1235,
+ "grad_norm": 0.21054969399806525,
+ "learning_rate": 1e-05,
+ "loss": 0.0566,
+ "step": 247
+ },
+ {
+ "epoch": 0.124,
+ "grad_norm": 0.1533567582820314,
+ "learning_rate": 1e-05,
+ "loss": 0.0559,
+ "step": 248
+ },
+ {
+ "epoch": 0.1245,
+ "grad_norm": 0.19025683614022437,
+ "learning_rate": 1e-05,
+ "loss": 0.051,
+ "step": 249
+ },
+ {
+ "epoch": 0.125,
+ "grad_norm": 0.16411772241541067,
+ "learning_rate": 1e-05,
+ "loss": 0.068,
+ "step": 250
+ },
+ {
+ "epoch": 0.1255,
+ "grad_norm": 0.23699209914417435,
+ "learning_rate": 1e-05,
+ "loss": 0.0845,
+ "step": 251
+ },
+ {
+ "epoch": 0.126,
+ "grad_norm": 0.2047456890167515,
+ "learning_rate": 1e-05,
+ "loss": 0.0588,
+ "step": 252
+ },
+ {
+ "epoch": 0.1265,
+ "grad_norm": 0.20625503133501016,
+ "learning_rate": 1e-05,
+ "loss": 0.0543,
+ "step": 253
+ },
+ {
+ "epoch": 0.127,
+ "grad_norm": 0.16081505489801892,
+ "learning_rate": 1e-05,
+ "loss": 0.0413,
+ "step": 254
+ },
+ {
+ "epoch": 0.1275,
+ "grad_norm": 0.20221086748641462,
+ "learning_rate": 1e-05,
+ "loss": 0.0704,
+ "step": 255
+ },
+ {
+ "epoch": 0.128,
+ "grad_norm": 0.20711399972324054,
+ "learning_rate": 1e-05,
+ "loss": 0.0563,
+ "step": 256
+ },
+ {
+ "epoch": 0.128,
+ "eval_dev_acc": 0.587890625,
+ "eval_dev_token": 5704.3125,
+ "eval_runtime": 356.7726,
+ "eval_samples_per_second": 0.179,
+ "eval_steps_per_second": 0.003,
+ "step": 256
+ },
+ {
+ "epoch": 0.1285,
+ "grad_norm": 0.18942704583355485,
+ "learning_rate": 1e-05,
+ "loss": 0.0692,
+ "step": 257
+ },
+ {
+ "epoch": 0.129,
+ "grad_norm": 0.19171984906136896,
+ "learning_rate": 1e-05,
+ "loss": 0.05,
+ "step": 258
+ },
+ {
+ "epoch": 0.1295,
+ "grad_norm": 0.2125382567332232,
+ "learning_rate": 1e-05,
+ "loss": 0.0615,
+ "step": 259
+ },
+ {
+ "epoch": 0.13,
+ "grad_norm": 0.17877261040661208,
+ "learning_rate": 1e-05,
+ "loss": 0.0597,
+ "step": 260
+ },
+ {
+ "epoch": 0.1305,
+ "grad_norm": 0.1708210973205684,
+ "learning_rate": 1e-05,
+ "loss": 0.067,
+ "step": 261
+ },
+ {
+ "epoch": 0.131,
+ "grad_norm": 0.1850887764718648,
+ "learning_rate": 1e-05,
+ "loss": 0.0544,
+ "step": 262
+ },
+ {
+ "epoch": 0.1315,
+ "grad_norm": 0.2010060822058417,
+ "learning_rate": 1e-05,
+ "loss": 0.0696,
+ "step": 263
+ },
+ {
+ "epoch": 0.132,
+ "grad_norm": 0.18460835555899294,
+ "learning_rate": 1e-05,
+ "loss": 0.0607,
+ "step": 264
+ },
+ {
+ "epoch": 0.1325,
+ "grad_norm": 0.2264686856441524,
+ "learning_rate": 1e-05,
+ "loss": 0.0638,
+ "step": 265
+ },
+ {
+ "epoch": 0.133,
+ "grad_norm": 0.17252712599660533,
+ "learning_rate": 1e-05,
+ "loss": 0.055,
+ "step": 266
+ },
+ {
+ "epoch": 0.1335,
+ "grad_norm": 0.1668268871760919,
+ "learning_rate": 1e-05,
+ "loss": 0.0467,
+ "step": 267
+ },
+ {
+ "epoch": 0.134,
+ "grad_norm": 0.17711472127782535,
+ "learning_rate": 1e-05,
+ "loss": 0.0462,
+ "step": 268
+ },
+ {
+ "epoch": 0.1345,
+ "grad_norm": 0.16354851889499628,
+ "learning_rate": 1e-05,
+ "loss": 0.0687,
+ "step": 269
+ },
+ {
+ "epoch": 0.135,
+ "grad_norm": 0.17844367901102645,
+ "learning_rate": 1e-05,
+ "loss": 0.0455,
+ "step": 270
+ },
+ {
+ "epoch": 0.1355,
+ "grad_norm": 0.19303024902618696,
+ "learning_rate": 1e-05,
+ "loss": 0.0565,
+ "step": 271
+ },
+ {
+ "epoch": 0.136,
+ "grad_norm": 0.19049740233006035,
+ "learning_rate": 1e-05,
+ "loss": 0.0575,
+ "step": 272
+ },
+ {
+ "epoch": 0.1365,
+ "grad_norm": 0.18444934835307936,
+ "learning_rate": 1e-05,
+ "loss": 0.0495,
+ "step": 273
+ },
+ {
+ "epoch": 0.137,
+ "grad_norm": 0.2029153556589725,
+ "learning_rate": 1e-05,
+ "loss": 0.0632,
+ "step": 274
+ },
+ {
+ "epoch": 0.1375,
+ "grad_norm": 0.17742276348080663,
+ "learning_rate": 1e-05,
+ "loss": 0.0591,
+ "step": 275
+ },
+ {
+ "epoch": 0.138,
+ "grad_norm": 0.2086941032177491,
+ "learning_rate": 1e-05,
+ "loss": 0.0453,
+ "step": 276
+ },
+ {
+ "epoch": 0.1385,
+ "grad_norm": 0.1599025673655474,
+ "learning_rate": 1e-05,
+ "loss": 0.0346,
+ "step": 277
+ },
+ {
+ "epoch": 0.139,
+ "grad_norm": 0.20223917188200294,
+ "learning_rate": 1e-05,
+ "loss": 0.0516,
+ "step": 278
+ },
+ {
+ "epoch": 0.1395,
+ "grad_norm": 0.168572629555483,
+ "learning_rate": 1e-05,
+ "loss": 0.0502,
+ "step": 279
+ },
+ {
+ "epoch": 0.14,
+ "grad_norm": 0.21316857087336016,
+ "learning_rate": 1e-05,
+ "loss": 0.0585,
+ "step": 280
+ },
+ {
+ "epoch": 0.1405,
+ "grad_norm": 0.1533009935920478,
+ "learning_rate": 1e-05,
+ "loss": 0.0732,
+ "step": 281
+ },
+ {
+ "epoch": 0.141,
+ "grad_norm": 0.2226592493652288,
+ "learning_rate": 1e-05,
+ "loss": 0.0683,
+ "step": 282
+ },
+ {
+ "epoch": 0.1415,
+ "grad_norm": 0.2005052634299014,
+ "learning_rate": 1e-05,
+ "loss": 0.064,
+ "step": 283
+ },
+ {
+ "epoch": 0.142,
+ "grad_norm": 0.16977898059181232,
+ "learning_rate": 1e-05,
+ "loss": 0.0759,
+ "step": 284
+ },
+ {
+ "epoch": 0.1425,
+ "grad_norm": 0.17622048198257903,
+ "learning_rate": 1e-05,
+ "loss": 0.0546,
+ "step": 285
+ },
+ {
+ "epoch": 0.143,
+ "grad_norm": 0.15734345344681852,
+ "learning_rate": 1e-05,
+ "loss": 0.0371,
+ "step": 286
+ },
+ {
+ "epoch": 0.1435,
+ "grad_norm": 0.16097527322211574,
+ "learning_rate": 1e-05,
+ "loss": 0.0523,
+ "step": 287
+ },
+ {
+ "epoch": 0.144,
+ "grad_norm": 0.18490744056020517,
+ "learning_rate": 1e-05,
+ "loss": 0.0518,
+ "step": 288
+ },
+ {
+ "epoch": 0.144,
+ "eval_dev_acc": 0.5234375,
+ "eval_dev_token": 5945.451171875,
+ "eval_runtime": 402.5197,
+ "eval_samples_per_second": 0.159,
+ "eval_steps_per_second": 0.002,
+ "step": 288
+ },
+ {
+ "epoch": 0.1445,
+ "grad_norm": 0.16803932674344507,
+ "learning_rate": 1e-05,
+ "loss": 0.0564,
+ "step": 289
+ },
+ {
+ "epoch": 0.145,
+ "grad_norm": 0.18279683357896828,
+ "learning_rate": 1e-05,
+ "loss": 0.0604,
+ "step": 290
+ },
+ {
+ "epoch": 0.1455,
+ "grad_norm": 0.1773550481655194,
+ "learning_rate": 1e-05,
+ "loss": 0.0465,
+ "step": 291
+ },
+ {
+ "epoch": 0.146,
+ "grad_norm": 0.37855180864427374,
+ "learning_rate": 1e-05,
+ "loss": 0.0687,
+ "step": 292
+ },
+ {
+ "epoch": 0.1465,
+ "grad_norm": 0.2256173969538139,
+ "learning_rate": 1e-05,
+ "loss": 0.0597,
+ "step": 293
+ },
+ {
+ "epoch": 0.147,
+ "grad_norm": 0.19318315047474646,
+ "learning_rate": 1e-05,
+ "loss": 0.05,
+ "step": 294
+ },
+ {
+ "epoch": 0.1475,
+ "grad_norm": 0.1949573691249157,
+ "learning_rate": 1e-05,
+ "loss": 0.054,
+ "step": 295
+ },
+ {
+ "epoch": 0.148,
+ "grad_norm": 0.1784893290182381,
+ "learning_rate": 1e-05,
+ "loss": 0.0551,
+ "step": 296
+ },
+ {
+ "epoch": 0.1485,
+ "grad_norm": 0.1893518286787237,
+ "learning_rate": 1e-05,
+ "loss": 0.0582,
+ "step": 297
+ },
+ {
+ "epoch": 0.149,
+ "grad_norm": 0.16491423015511872,
+ "learning_rate": 1e-05,
+ "loss": 0.0393,
+ "step": 298
+ },
+ {
+ "epoch": 0.1495,
+ "grad_norm": 0.1762274160177828,
+ "learning_rate": 1e-05,
+ "loss": 0.0485,
+ "step": 299
+ },
+ {
+ "epoch": 0.15,
+ "grad_norm": 0.18398727773770782,
+ "learning_rate": 1e-05,
+ "loss": 0.0573,
+ "step": 300
+ },
+ {
+ "epoch": 0.1505,
+ "grad_norm": 0.18217281656040227,
+ "learning_rate": 1e-05,
+ "loss": 0.0485,
+ "step": 301
+ },
+ {
+ "epoch": 0.151,
+ "grad_norm": 0.17276763991718358,
+ "learning_rate": 1e-05,
+ "loss": 0.0571,
+ "step": 302
+ },
+ {
+ "epoch": 0.1515,
+ "grad_norm": 0.15668609882813492,
+ "learning_rate": 1e-05,
+ "loss": 0.0741,
+ "step": 303
+ },
+ {
+ "epoch": 0.152,
+ "grad_norm": 0.18965888700513778,
+ "learning_rate": 1e-05,
+ "loss": 0.0548,
+ "step": 304
+ },
+ {
+ "epoch": 0.1525,
+ "grad_norm": 0.1514861962476675,
+ "learning_rate": 1e-05,
+ "loss": 0.0408,
+ "step": 305
+ },
+ {
+ "epoch": 0.153,
+ "grad_norm": 0.21027930287961952,
+ "learning_rate": 1e-05,
+ "loss": 0.0473,
+ "step": 306
+ },
+ {
+ "epoch": 0.1535,
+ "grad_norm": 0.20086505297048218,
+ "learning_rate": 1e-05,
+ "loss": 0.0457,
+ "step": 307
+ },
+ {
+ "epoch": 0.154,
+ "grad_norm": 0.1834058060370301,
+ "learning_rate": 1e-05,
+ "loss": 0.0391,
+ "step": 308
+ },
+ {
+ "epoch": 0.1545,
+ "grad_norm": 0.1675035648173745,
+ "learning_rate": 1e-05,
+ "loss": 0.0346,
+ "step": 309
+ },
+ {
+ "epoch": 0.155,
+ "grad_norm": 0.19041217604042332,
+ "learning_rate": 1e-05,
+ "loss": 0.0447,
+ "step": 310
+ },
+ {
+ "epoch": 0.1555,
+ "grad_norm": 0.2063641120441124,
+ "learning_rate": 1e-05,
+ "loss": 0.0454,
+ "step": 311
+ },
+ {
+ "epoch": 0.156,
+ "grad_norm": 0.18205494058640856,
+ "learning_rate": 1e-05,
+ "loss": 0.0402,
+ "step": 312
+ },
+ {
+ "epoch": 0.1565,
+ "grad_norm": 0.18642039675473027,
+ "learning_rate": 1e-05,
+ "loss": 0.0397,
+ "step": 313
+ },
+ {
+ "epoch": 0.157,
+ "grad_norm": 0.16971154384699963,
+ "learning_rate": 1e-05,
+ "loss": 0.0426,
+ "step": 314
+ },
+ {
+ "epoch": 0.1575,
+ "grad_norm": 0.19779499600374534,
+ "learning_rate": 1e-05,
+ "loss": 0.042,
+ "step": 315
+ },
+ {
+ "epoch": 0.158,
+ "grad_norm": 0.1597506922805199,
+ "learning_rate": 1e-05,
+ "loss": 0.0561,
+ "step": 316
+ },
+ {
+ "epoch": 0.1585,
+ "grad_norm": 0.22455898943377722,
+ "learning_rate": 1e-05,
+ "loss": 0.0508,
+ "step": 317
+ },
+ {
+ "epoch": 0.159,
+ "grad_norm": 0.22816888070811367,
+ "learning_rate": 1e-05,
+ "loss": 0.0684,
+ "step": 318
+ },
+ {
+ "epoch": 0.1595,
+ "grad_norm": 0.20829314487846406,
+ "learning_rate": 1e-05,
+ "loss": 0.0558,
+ "step": 319
+ },
+ {
+ "epoch": 0.16,
+ "grad_norm": 0.1726376642892394,
+ "learning_rate": 1e-05,
+ "loss": 0.0382,
+ "step": 320
+ },
+ {
+ "epoch": 0.16,
+ "eval_dev_acc": 0.62109375,
+ "eval_dev_token": 5348.43359375,
+ "eval_runtime": 348.0694,
+ "eval_samples_per_second": 0.184,
+ "eval_steps_per_second": 0.003,
+ "step": 320
+ },
+ {
+ "epoch": 0.1605,
+ "grad_norm": 0.21533911997871472,
+ "learning_rate": 1e-05,
+ "loss": 0.0573,
+ "step": 321
+ },
+ {
+ "epoch": 0.161,
+ "grad_norm": 0.1945926926705701,
+ "learning_rate": 1e-05,
+ "loss": 0.0482,
+ "step": 322
+ },
+ {
+ "epoch": 0.1615,
+ "grad_norm": 0.16282045186801866,
+ "learning_rate": 1e-05,
+ "loss": 0.0395,
+ "step": 323
+ },
+ {
+ "epoch": 0.162,
+ "grad_norm": 0.21192465805738625,
+ "learning_rate": 1e-05,
+ "loss": 0.0534,
+ "step": 324
+ },
+ {
+ "epoch": 0.1625,
+ "grad_norm": 0.18412473332196624,
+ "learning_rate": 1e-05,
+ "loss": 0.0457,
+ "step": 325
+ },
+ {
+ "epoch": 0.163,
+ "grad_norm": 0.1760683716395308,
+ "learning_rate": 1e-05,
+ "loss": 0.0621,
+ "step": 326
+ },
+ {
+ "epoch": 0.1635,
+ "grad_norm": 0.1977120327808734,
+ "learning_rate": 1e-05,
+ "loss": 0.0443,
+ "step": 327
+ },
+ {
+ "epoch": 0.164,
+ "grad_norm": 0.17834928432327893,
+ "learning_rate": 1e-05,
+ "loss": 0.0474,
+ "step": 328
+ },
+ {
+ "epoch": 0.1645,
+ "grad_norm": 0.19810362807929732,
+ "learning_rate": 1e-05,
+ "loss": 0.0472,
+ "step": 329
+ },
+ {
+ "epoch": 0.165,
+ "grad_norm": 0.17306554655229037,
+ "learning_rate": 1e-05,
+ "loss": 0.0514,
+ "step": 330
+ },
+ {
+ "epoch": 0.1655,
+ "grad_norm": 0.1732660830513622,
+ "learning_rate": 1e-05,
+ "loss": 0.0487,
+ "step": 331
+ },
+ {
+ "epoch": 0.166,
+ "grad_norm": 0.15153749458139032,
+ "learning_rate": 1e-05,
+ "loss": 0.044,
+ "step": 332
+ },
+ {
+ "epoch": 0.1665,
+ "grad_norm": 0.20187085045913772,
+ "learning_rate": 1e-05,
+ "loss": 0.0551,
+ "step": 333
+ },
+ {
+ "epoch": 0.167,
+ "grad_norm": 0.16579582791974742,
+ "learning_rate": 1e-05,
+ "loss": 0.0497,
+ "step": 334
+ },
+ {
+ "epoch": 0.1675,
+ "grad_norm": 0.19316064563692958,
+ "learning_rate": 1e-05,
+ "loss": 0.0549,
+ "step": 335
+ },
+ {
+ "epoch": 0.168,
+ "grad_norm": 0.16491714800111232,
+ "learning_rate": 1e-05,
+ "loss": 0.0371,
+ "step": 336
+ },
+ {
+ "epoch": 0.1685,
+ "grad_norm": 0.17551519178449135,
+ "learning_rate": 1e-05,
+ "loss": 0.041,
+ "step": 337
+ },
+ {
+ "epoch": 0.169,
+ "grad_norm": 0.1734781492111704,
+ "learning_rate": 1e-05,
+ "loss": 0.0441,
+ "step": 338
+ },
+ {
+ "epoch": 0.1695,
+ "grad_norm": 0.18684315556716974,
+ "learning_rate": 1e-05,
+ "loss": 0.0488,
+ "step": 339
+ },
+ {
+ "epoch": 0.17,
+ "grad_norm": 0.16917376679576818,
+ "learning_rate": 1e-05,
+ "loss": 0.0437,
+ "step": 340
+ },
+ {
+ "epoch": 0.1705,
+ "grad_norm": 0.1501957333162884,
+ "learning_rate": 1e-05,
+ "loss": 0.0393,
+ "step": 341
+ },
+ {
+ "epoch": 0.171,
+ "grad_norm": 0.2205121105977978,
+ "learning_rate": 1e-05,
+ "loss": 0.0579,
+ "step": 342
+ },
+ {
+ "epoch": 0.1715,
+ "grad_norm": 0.18041963938373073,
+ "learning_rate": 1e-05,
+ "loss": 0.0492,
+ "step": 343
+ },
+ {
+ "epoch": 0.172,
+ "grad_norm": 0.17449103579952038,
+ "learning_rate": 1e-05,
+ "loss": 0.0401,
+ "step": 344
+ },
+ {
+ "epoch": 0.1725,
+ "grad_norm": 0.15528177663303303,
+ "learning_rate": 1e-05,
+ "loss": 0.0379,
+ "step": 345
+ },
+ {
+ "epoch": 0.173,
+ "grad_norm": 0.20473861699417553,
+ "learning_rate": 1e-05,
+ "loss": 0.0499,
+ "step": 346
+ },
+ {
+ "epoch": 0.1735,
+ "grad_norm": 0.22954222855804748,
+ "learning_rate": 1e-05,
+ "loss": 0.066,
+ "step": 347
+ },
+ {
+ "epoch": 0.174,
+ "grad_norm": 0.18144976711639327,
+ "learning_rate": 1e-05,
+ "loss": 0.0417,
+ "step": 348
+ },
+ {
+ "epoch": 0.1745,
+ "grad_norm": 0.1647885822020398,
+ "learning_rate": 1e-05,
+ "loss": 0.0406,
+ "step": 349
+ },
+ {
+ "epoch": 0.175,
+ "grad_norm": 0.13381930823784724,
+ "learning_rate": 1e-05,
+ "loss": 0.039,
+ "step": 350
+ },
+ {
+ "epoch": 0.1755,
+ "grad_norm": 0.17233840695200286,
+ "learning_rate": 1e-05,
+ "loss": 0.0504,
+ "step": 351
+ },
+ {
+ "epoch": 0.176,
+ "grad_norm": 0.18220085493590332,
+ "learning_rate": 1e-05,
+ "loss": 0.0411,
+ "step": 352
+ },
+ {
+ "epoch": 0.176,
+ "eval_dev_acc": 0.53515625,
+ "eval_dev_token": 5787.59375,
+ "eval_runtime": 364.0085,
+ "eval_samples_per_second": 0.176,
+ "eval_steps_per_second": 0.003,
+ "step": 352
+ },
+ {
+ "epoch": 0.1765,
+ "grad_norm": 0.20744185724682074,
+ "learning_rate": 1e-05,
+ "loss": 0.0606,
+ "step": 353
+ },
+ {
+ "epoch": 0.177,
+ "grad_norm": 0.18600570998480834,
+ "learning_rate": 1e-05,
+ "loss": 0.0416,
+ "step": 354
+ },
+ {
+ "epoch": 0.1775,
+ "grad_norm": 0.1776469292641811,
+ "learning_rate": 1e-05,
+ "loss": 0.042,
+ "step": 355
+ },
+ {
+ "epoch": 0.178,
+ "grad_norm": 0.1783568782453835,
+ "learning_rate": 1e-05,
+ "loss": 0.0457,
+ "step": 356
+ },
+ {
+ "epoch": 0.1785,
+ "grad_norm": 0.1981729227656145,
+ "learning_rate": 1e-05,
+ "loss": 0.0578,
+ "step": 357
+ },
+ {
+ "epoch": 0.179,
+ "grad_norm": 0.18984703197303243,
+ "learning_rate": 1e-05,
+ "loss": 0.0427,
+ "step": 358
+ },
+ {
+ "epoch": 0.1795,
+ "grad_norm": 0.21799846739281004,
+ "learning_rate": 1e-05,
+ "loss": 0.0492,
+ "step": 359
+ },
+ {
+ "epoch": 0.18,
+ "grad_norm": 0.2263278306065525,
+ "learning_rate": 1e-05,
+ "loss": 0.0708,
+ "step": 360
+ },
+ {
+ "epoch": 0.1805,
+ "grad_norm": 0.16612125798758726,
+ "learning_rate": 1e-05,
+ "loss": 0.0447,
+ "step": 361
+ },
+ {
+ "epoch": 0.181,
+ "grad_norm": 0.15311766872023147,
+ "learning_rate": 1e-05,
+ "loss": 0.0369,
+ "step": 362
+ },
+ {
+ "epoch": 0.1815,
+ "grad_norm": 0.18614757917185834,
+ "learning_rate": 1e-05,
+ "loss": 0.0484,
+ "step": 363
+ },
+ {
+ "epoch": 0.182,
+ "grad_norm": 0.18253431030668557,
+ "learning_rate": 1e-05,
+ "loss": 0.044,
+ "step": 364
+ },
+ {
+ "epoch": 0.1825,
+ "grad_norm": 0.19238661256236803,
+ "learning_rate": 1e-05,
+ "loss": 0.0615,
+ "step": 365
+ },
+ {
+ "epoch": 0.183,
+ "grad_norm": 0.1808155264273284,
+ "learning_rate": 1e-05,
+ "loss": 0.0444,
+ "step": 366
+ },
+ {
+ "epoch": 0.1835,
+ "grad_norm": 0.1743762662626829,
+ "learning_rate": 1e-05,
+ "loss": 0.0501,
+ "step": 367
+ },
+ {
+ "epoch": 0.184,
+ "grad_norm": 0.16508774246157967,
+ "learning_rate": 1e-05,
+ "loss": 0.0448,
+ "step": 368
+ },
+ {
+ "epoch": 0.1845,
+ "grad_norm": 0.1546243168773746,
+ "learning_rate": 1e-05,
+ "loss": 0.0366,
+ "step": 369
+ },
+ {
+ "epoch": 0.185,
+ "grad_norm": 0.1746189186464954,
+ "learning_rate": 1e-05,
+ "loss": 0.0471,
+ "step": 370
+ },
+ {
+ "epoch": 0.1855,
+ "grad_norm": 0.17995461422580256,
+ "learning_rate": 1e-05,
+ "loss": 0.0405,
+ "step": 371
+ },
+ {
+ "epoch": 0.186,
+ "grad_norm": 0.16745033647841967,
+ "learning_rate": 1e-05,
+ "loss": 0.0371,
+ "step": 372
+ },
+ {
+ "epoch": 0.1865,
+ "grad_norm": 0.14177227347565124,
+ "learning_rate": 1e-05,
+ "loss": 0.0336,
+ "step": 373
+ },
+ {
+ "epoch": 0.187,
+ "grad_norm": 0.19568633642105135,
+ "learning_rate": 1e-05,
+ "loss": 0.0419,
+ "step": 374
+ },
+ {
+ "epoch": 0.1875,
+ "grad_norm": 0.1694809590901385,
+ "learning_rate": 1e-05,
+ "loss": 0.0365,
+ "step": 375
+ },
+ {
+ "epoch": 0.188,
+ "grad_norm": 0.16086017791775223,
+ "learning_rate": 1e-05,
+ "loss": 0.0382,
+ "step": 376
+ },
+ {
+ "epoch": 0.1885,
+ "grad_norm": 0.14863922525565496,
+ "learning_rate": 1e-05,
+ "loss": 0.039,
+ "step": 377
+ },
+ {
+ "epoch": 0.189,
+ "grad_norm": 0.16084357735487792,
+ "learning_rate": 1e-05,
+ "loss": 0.0312,
+ "step": 378
+ },
+ {
+ "epoch": 0.1895,
+ "grad_norm": 0.19070881724879324,
+ "learning_rate": 1e-05,
+ "loss": 0.0491,
+ "step": 379
+ },
+ {
+ "epoch": 0.19,
+ "grad_norm": 0.17240390839318184,
+ "learning_rate": 1e-05,
+ "loss": 0.0455,
+ "step": 380
+ },
+ {
+ "epoch": 0.1905,
+ "grad_norm": 0.13331277326103189,
+ "learning_rate": 1e-05,
+ "loss": 0.0289,
+ "step": 381
+ },
+ {
+ "epoch": 0.191,
+ "grad_norm": 0.18756729894366522,
+ "learning_rate": 1e-05,
+ "loss": 0.0468,
+ "step": 382
+ },
+ {
+ "epoch": 0.1915,
+ "grad_norm": 0.1660248717735821,
+ "learning_rate": 1e-05,
+ "loss": 0.0424,
+ "step": 383
+ },
+ {
+ "epoch": 0.192,
+ "grad_norm": 0.16346974130070938,
+ "learning_rate": 1e-05,
+ "loss": 0.0311,
+ "step": 384
+ },
+ {
+ "epoch": 0.192,
+ "eval_dev_acc": 0.599609375,
+ "eval_dev_token": 5596.130859375,
+ "eval_runtime": 352.4793,
+ "eval_samples_per_second": 0.182,
+ "eval_steps_per_second": 0.003,
+ "step": 384
+ },
+ {
+ "epoch": 0.1925,
+ "grad_norm": 0.16858862798497806,
+ "learning_rate": 1e-05,
+ "loss": 0.0389,
+ "step": 385
+ },
+ {
+ "epoch": 0.193,
+ "grad_norm": 0.1484958580298565,
+ "learning_rate": 1e-05,
+ "loss": 0.0397,
+ "step": 386
+ },
+ {
+ "epoch": 0.1935,
+ "grad_norm": 0.17660261356555002,
+ "learning_rate": 1e-05,
+ "loss": 0.0515,
+ "step": 387
+ },
+ {
+ "epoch": 0.194,
+ "grad_norm": 0.1783517215939047,
+ "learning_rate": 1e-05,
+ "loss": 0.0431,
+ "step": 388
+ },
+ {
+ "epoch": 0.1945,
+ "grad_norm": 0.14136150090913457,
+ "learning_rate": 1e-05,
+ "loss": 0.0323,
+ "step": 389
+ },
+ {
+ "epoch": 0.195,
+ "grad_norm": 0.16595913921658337,
+ "learning_rate": 1e-05,
+ "loss": 0.0394,
+ "step": 390
+ },
+ {
+ "epoch": 0.1955,
+ "grad_norm": 0.17788297569443248,
+ "learning_rate": 1e-05,
+ "loss": 0.0698,
+ "step": 391
+ },
+ {
+ "epoch": 0.196,
+ "grad_norm": 0.14755167079389797,
+ "learning_rate": 1e-05,
+ "loss": 0.0308,
+ "step": 392
+ },
+ {
+ "epoch": 0.1965,
+ "grad_norm": 0.20681855290430337,
+ "learning_rate": 1e-05,
+ "loss": 0.0494,
+ "step": 393
+ },
+ {
+ "epoch": 0.197,
+ "grad_norm": 0.19060439020439998,
+ "learning_rate": 1e-05,
+ "loss": 0.0445,
+ "step": 394
+ },
+ {
+ "epoch": 0.1975,
+ "grad_norm": 0.17199443698076167,
+ "learning_rate": 1e-05,
+ "loss": 0.0414,
+ "step": 395
+ },
+ {
+ "epoch": 0.198,
+ "grad_norm": 0.15210077373082737,
+ "learning_rate": 1e-05,
+ "loss": 0.0296,
+ "step": 396
+ },
+ {
+ "epoch": 0.1985,
+ "grad_norm": 0.17482591540638856,
+ "learning_rate": 1e-05,
+ "loss": 0.044,
+ "step": 397
+ },
+ {
+ "epoch": 0.199,
+ "grad_norm": 0.15501601608099658,
+ "learning_rate": 1e-05,
+ "loss": 0.0376,
+ "step": 398
+ },
+ {
+ "epoch": 0.1995,
+ "grad_norm": 0.17142493205422682,
+ "learning_rate": 1e-05,
+ "loss": 0.0386,
+ "step": 399
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.1921162644413309,
+ "learning_rate": 1e-05,
+ "loss": 0.0469,
+ "step": 400
+ },
+ {
+ "epoch": 0.2005,
+ "grad_norm": 0.15938080403417312,
+ "learning_rate": 1e-05,
+ "loss": 0.0496,
+ "step": 401
+ },
+ {
+ "epoch": 0.201,
+ "grad_norm": 0.14786848292294155,
+ "learning_rate": 1e-05,
+ "loss": 0.0426,
+ "step": 402
+ },
+ {
+ "epoch": 0.2015,
+ "grad_norm": 0.18628997533329272,
+ "learning_rate": 1e-05,
+ "loss": 0.0581,
+ "step": 403
+ },
+ {
+ "epoch": 0.202,
+ "grad_norm": 0.16058096254934043,
+ "learning_rate": 1e-05,
+ "loss": 0.0336,
+ "step": 404
+ },
+ {
+ "epoch": 0.2025,
+ "grad_norm": 0.19319024386507233,
+ "learning_rate": 1e-05,
+ "loss": 0.047,
+ "step": 405
+ },
+ {
+ "epoch": 0.203,
+ "grad_norm": 0.17328115011013,
+ "learning_rate": 1e-05,
+ "loss": 0.049,
+ "step": 406
+ },
+ {
+ "epoch": 0.2035,
+ "grad_norm": 0.13258378170371796,
+ "learning_rate": 1e-05,
+ "loss": 0.0286,
+ "step": 407
+ },
+ {
+ "epoch": 0.204,
+ "grad_norm": 0.17945245697241183,
+ "learning_rate": 1e-05,
+ "loss": 0.0518,
+ "step": 408
+ },
+ {
+ "epoch": 0.2045,
+ "grad_norm": 0.16689764407399071,
+ "learning_rate": 1e-05,
+ "loss": 0.0458,
+ "step": 409
+ },
+ {
+ "epoch": 0.205,
+ "grad_norm": 0.18446815699746041,
+ "learning_rate": 1e-05,
+ "loss": 0.0408,
+ "step": 410
+ },
+ {
+ "epoch": 0.2055,
+ "grad_norm": 0.1489326060726689,
+ "learning_rate": 1e-05,
+ "loss": 0.0656,
+ "step": 411
+ },
+ {
+ "epoch": 0.206,
+ "grad_norm": 0.14974593012017515,
+ "learning_rate": 1e-05,
+ "loss": 0.0297,
+ "step": 412
+ },
+ {
+ "epoch": 0.2065,
+ "grad_norm": 0.1918114395748189,
+ "learning_rate": 1e-05,
+ "loss": 0.0439,
+ "step": 413
+ },
+ {
+ "epoch": 0.207,
+ "grad_norm": 0.1689953495305046,
+ "learning_rate": 1e-05,
+ "loss": 0.04,
+ "step": 414
+ },
+ {
+ "epoch": 0.2075,
+ "grad_norm": 0.1403733317703667,
+ "learning_rate": 1e-05,
+ "loss": 0.0472,
+ "step": 415
+ },
+ {
+ "epoch": 0.208,
+ "grad_norm": 0.173982074128614,
+ "learning_rate": 1e-05,
+ "loss": 0.0395,
+ "step": 416
+ },
+ {
+ "epoch": 0.208,
+ "eval_dev_acc": 0.49609375,
+ "eval_dev_token": 5482.95703125,
+ "eval_runtime": 374.469,
+ "eval_samples_per_second": 0.171,
+ "eval_steps_per_second": 0.003,
+ "step": 416
+ },
+ {
+ "epoch": 0.2085,
+ "grad_norm": 0.16717306009352031,
+ "learning_rate": 1e-05,
+ "loss": 0.0369,
+ "step": 417
+ },
+ {
+ "epoch": 0.209,
+ "grad_norm": 0.17317803700896214,
+ "learning_rate": 1e-05,
+ "loss": 0.0581,
+ "step": 418
+ },
+ {
+ "epoch": 0.2095,
+ "grad_norm": 0.14729050118039705,
+ "learning_rate": 1e-05,
+ "loss": 0.0469,
+ "step": 419
+ },
+ {
+ "epoch": 0.21,
+ "grad_norm": 0.14599122830811173,
+ "learning_rate": 1e-05,
+ "loss": 0.0571,
+ "step": 420
+ },
+ {
+ "epoch": 0.2105,
+ "grad_norm": 0.16285142688584706,
+ "learning_rate": 1e-05,
+ "loss": 0.0291,
+ "step": 421
+ },
+ {
+ "epoch": 0.211,
+ "grad_norm": 0.19044973230329837,
+ "learning_rate": 1e-05,
+ "loss": 0.0599,
+ "step": 422
+ },
+ {
+ "epoch": 0.2115,
+ "grad_norm": 0.188861960333507,
+ "learning_rate": 1e-05,
+ "loss": 0.0471,
+ "step": 423
+ },
+ {
+ "epoch": 0.212,
+ "grad_norm": 0.19188548951756218,
+ "learning_rate": 1e-05,
+ "loss": 0.0529,
+ "step": 424
+ },
+ {
+ "epoch": 0.2125,
+ "grad_norm": 0.16267402517673002,
+ "learning_rate": 1e-05,
+ "loss": 0.0305,
+ "step": 425
+ },
+ {
+ "epoch": 0.213,
+ "grad_norm": 0.1447850696130614,
+ "learning_rate": 1e-05,
+ "loss": 0.0324,
+ "step": 426
+ },
+ {
+ "epoch": 0.2135,
+ "grad_norm": 0.15248164794588065,
+ "learning_rate": 1e-05,
+ "loss": 0.0388,
+ "step": 427
+ },
+ {
+ "epoch": 0.214,
+ "grad_norm": 0.1661241871100943,
+ "learning_rate": 1e-05,
+ "loss": 0.0328,
+ "step": 428
+ },
+ {
+ "epoch": 0.2145,
+ "grad_norm": 0.16566625624023265,
+ "learning_rate": 1e-05,
+ "loss": 0.031,
+ "step": 429
+ },
+ {
+ "epoch": 0.215,
+ "grad_norm": 0.15249287061514458,
+ "learning_rate": 1e-05,
+ "loss": 0.0319,
+ "step": 430
+ },
+ {
+ "epoch": 0.2155,
+ "grad_norm": 0.12995530917181783,
+ "learning_rate": 1e-05,
+ "loss": 0.0233,
+ "step": 431
+ },
+ {
+ "epoch": 0.216,
+ "grad_norm": 0.1704276552962093,
+ "learning_rate": 1e-05,
+ "loss": 0.0405,
+ "step": 432
+ },
+ {
+ "epoch": 0.2165,
+ "grad_norm": 0.17386329346754434,
+ "learning_rate": 1e-05,
+ "loss": 0.0336,
+ "step": 433
+ },
+ {
+ "epoch": 0.217,
+ "grad_norm": 0.15704760833763615,
+ "learning_rate": 1e-05,
+ "loss": 0.0325,
+ "step": 434
+ },
+ {
+ "epoch": 0.2175,
+ "grad_norm": 0.1495524799308763,
+ "learning_rate": 1e-05,
+ "loss": 0.0341,
+ "step": 435
+ },
+ {
+ "epoch": 0.218,
+ "grad_norm": 0.1686891909288217,
+ "learning_rate": 1e-05,
+ "loss": 0.0343,
+ "step": 436
+ },
+ {
+ "epoch": 0.2185,
+ "grad_norm": 0.13995459985426573,
+ "learning_rate": 1e-05,
+ "loss": 0.0398,
+ "step": 437
+ },
+ {
+ "epoch": 0.219,
+ "grad_norm": 0.15473569116081692,
+ "learning_rate": 1e-05,
+ "loss": 0.0412,
+ "step": 438
+ },
+ {
+ "epoch": 0.2195,
+ "grad_norm": 0.1801609077983992,
+ "learning_rate": 1e-05,
+ "loss": 0.0534,
+ "step": 439
+ },
+ {
+ "epoch": 0.22,
+ "grad_norm": 0.17809364795872226,
+ "learning_rate": 1e-05,
+ "loss": 0.0548,
+ "step": 440
+ },
+ {
+ "epoch": 0.2205,
+ "grad_norm": 0.1535032114151188,
+ "learning_rate": 1e-05,
+ "loss": 0.0593,
+ "step": 441
+ },
+ {
+ "epoch": 0.221,
+ "grad_norm": 0.20882248884544774,
+ "learning_rate": 1e-05,
+ "loss": 0.0402,
+ "step": 442
+ },
+ {
+ "epoch": 0.2215,
+ "grad_norm": 0.14517381058327564,
+ "learning_rate": 1e-05,
+ "loss": 0.0436,
+ "step": 443
+ },
+ {
+ "epoch": 0.222,
+ "grad_norm": 0.17014179155102424,
+ "learning_rate": 1e-05,
+ "loss": 0.0333,
+ "step": 444
+ },
+ {
+ "epoch": 0.2225,
+ "grad_norm": 0.1729306341614305,
+ "learning_rate": 1e-05,
+ "loss": 0.0301,
+ "step": 445
+ },
+ {
+ "epoch": 0.223,
+ "grad_norm": 0.1686712423851483,
+ "learning_rate": 1e-05,
+ "loss": 0.0406,
+ "step": 446
+ },
+ {
+ "epoch": 0.2235,
+ "grad_norm": 0.1535287640721648,
+ "learning_rate": 1e-05,
+ "loss": 0.0282,
+ "step": 447
+ },
+ {
+ "epoch": 0.224,
+ "grad_norm": 0.1406783148617548,
+ "learning_rate": 1e-05,
+ "loss": 0.0264,
+ "step": 448
+ },
+ {
+ "epoch": 0.224,
+ "eval_dev_acc": 0.576171875,
+ "eval_dev_token": 5738.91015625,
+ "eval_runtime": 360.4892,
+ "eval_samples_per_second": 0.178,
+ "eval_steps_per_second": 0.003,
+ "step": 448
+ },
+ {
+ "epoch": 0.2245,
+ "grad_norm": 0.15218760519443988,
+ "learning_rate": 1e-05,
+ "loss": 0.0227,
+ "step": 449
+ },
+ {
+ "epoch": 0.225,
+ "grad_norm": 0.13165646820927943,
+ "learning_rate": 1e-05,
+ "loss": 0.0244,
+ "step": 450
+ },
+ {
+ "epoch": 0.2255,
+ "grad_norm": 0.13658776890249372,
+ "learning_rate": 1e-05,
+ "loss": 0.0235,
+ "step": 451
+ },
+ {
+ "epoch": 0.226,
+ "grad_norm": 0.1595102880182028,
+ "learning_rate": 1e-05,
+ "loss": 0.0361,
+ "step": 452
+ },
+ {
+ "epoch": 0.2265,
+ "grad_norm": 0.18272272767076744,
+ "learning_rate": 1e-05,
+ "loss": 0.0341,
+ "step": 453
+ },
+ {
+ "epoch": 0.227,
+ "grad_norm": 0.15970250529787045,
+ "learning_rate": 1e-05,
+ "loss": 0.0277,
+ "step": 454
+ },
+ {
+ "epoch": 0.2275,
+ "grad_norm": 0.1641447638351716,
+ "learning_rate": 1e-05,
+ "loss": 0.0284,
+ "step": 455
+ },
+ {
+ "epoch": 0.228,
+ "grad_norm": 0.1294308434226962,
+ "learning_rate": 1e-05,
+ "loss": 0.022,
+ "step": 456
+ },
+ {
+ "epoch": 0.2285,
+ "grad_norm": 0.11954195360401737,
+ "learning_rate": 1e-05,
+ "loss": 0.0202,
+ "step": 457
+ },
+ {
+ "epoch": 0.229,
+ "grad_norm": 0.16068276912989043,
+ "learning_rate": 1e-05,
+ "loss": 0.0251,
+ "step": 458
+ },
+ {
+ "epoch": 0.2295,
+ "grad_norm": 0.15307414897001792,
+ "learning_rate": 1e-05,
+ "loss": 0.027,
+ "step": 459
+ },
+ {
+ "epoch": 0.23,
+ "grad_norm": 0.15979117725555442,
+ "learning_rate": 1e-05,
+ "loss": 0.0375,
+ "step": 460
+ },
+ {
+ "epoch": 0.2305,
+ "grad_norm": 0.2020059964338148,
+ "learning_rate": 1e-05,
+ "loss": 0.043,
+ "step": 461
+ },
+ {
+ "epoch": 0.231,
+ "grad_norm": 0.15074817260440432,
+ "learning_rate": 1e-05,
+ "loss": 0.0241,
+ "step": 462
+ },
+ {
+ "epoch": 0.2315,
+ "grad_norm": 0.16521962645814686,
+ "learning_rate": 1e-05,
+ "loss": 0.0447,
+ "step": 463
+ },
+ {
+ "epoch": 0.232,
+ "grad_norm": 0.14710027259702427,
+ "learning_rate": 1e-05,
+ "loss": 0.0322,
+ "step": 464
+ },
+ {
+ "epoch": 0.2325,
+ "grad_norm": 0.1879875645942318,
+ "learning_rate": 1e-05,
+ "loss": 0.0324,
+ "step": 465
+ },
+ {
+ "epoch": 0.233,
+ "grad_norm": 0.16624112738581265,
+ "learning_rate": 1e-05,
+ "loss": 0.0308,
+ "step": 466
+ },
+ {
+ "epoch": 0.2335,
+ "grad_norm": 0.19050875236463863,
+ "learning_rate": 1e-05,
+ "loss": 0.0342,
+ "step": 467
+ },
+ {
+ "epoch": 0.234,
+ "grad_norm": 0.11998525830480301,
+ "learning_rate": 1e-05,
+ "loss": 0.021,
+ "step": 468
+ },
+ {
+ "epoch": 0.2345,
+ "grad_norm": 0.1656701890014892,
+ "learning_rate": 1e-05,
+ "loss": 0.027,
+ "step": 469
+ },
+ {
+ "epoch": 0.235,
+ "grad_norm": 0.15307753326429366,
+ "learning_rate": 1e-05,
+ "loss": 0.0452,
+ "step": 470
+ },
+ {
+ "epoch": 0.2355,
+ "grad_norm": 0.1897260442883158,
+ "learning_rate": 1e-05,
+ "loss": 0.0245,
+ "step": 471
+ },
+ {
+ "epoch": 0.236,
+ "grad_norm": 0.11931107646228578,
+ "learning_rate": 1e-05,
+ "loss": 0.017,
+ "step": 472
+ },
+ {
+ "epoch": 0.2365,
+ "grad_norm": 0.18498950301005707,
+ "learning_rate": 1e-05,
+ "loss": 0.0342,
+ "step": 473
+ },
+ {
+ "epoch": 0.237,
+ "grad_norm": 0.14355239628818517,
+ "learning_rate": 1e-05,
+ "loss": 0.0221,
+ "step": 474
+ },
+ {
+ "epoch": 0.2375,
+ "grad_norm": 0.16525653932908532,
+ "learning_rate": 1e-05,
+ "loss": 0.029,
+ "step": 475
+ },
+ {
+ "epoch": 0.238,
+ "grad_norm": 0.1518153688638394,
+ "learning_rate": 1e-05,
+ "loss": 0.0267,
+ "step": 476
+ },
+ {
+ "epoch": 0.2385,
+ "grad_norm": 0.15987321641272437,
+ "learning_rate": 1e-05,
+ "loss": 0.0279,
+ "step": 477
+ },
+ {
+ "epoch": 0.239,
+ "grad_norm": 0.1442274823944727,
+ "learning_rate": 1e-05,
+ "loss": 0.0349,
+ "step": 478
+ },
+ {
+ "epoch": 0.2395,
+ "grad_norm": 0.11710766704672448,
+ "learning_rate": 1e-05,
+ "loss": 0.0179,
+ "step": 479
+ },
+ {
+ "epoch": 0.24,
+ "grad_norm": 0.15497604683020938,
+ "learning_rate": 1e-05,
+ "loss": 0.023,
+ "step": 480
+ },
+ {
+ "epoch": 0.24,
+ "eval_dev_acc": 0.544921875,
+ "eval_dev_token": 5840.61328125,
+ "eval_runtime": 373.4708,
+ "eval_samples_per_second": 0.171,
+ "eval_steps_per_second": 0.003,
+ "step": 480
+ },
+ {
+ "epoch": 0.2405,
+ "grad_norm": 0.13492229768745556,
+ "learning_rate": 1e-05,
+ "loss": 0.0205,
+ "step": 481
+ },
+ {
+ "epoch": 0.241,
+ "grad_norm": 0.1704648731314998,
+ "learning_rate": 1e-05,
+ "loss": 0.0353,
+ "step": 482
+ },
+ {
+ "epoch": 0.2415,
+ "grad_norm": 0.1491861836462168,
+ "learning_rate": 1e-05,
+ "loss": 0.0365,
+ "step": 483
+ },
+ {
+ "epoch": 0.242,
+ "grad_norm": 0.17050828891525746,
+ "learning_rate": 1e-05,
+ "loss": 0.0277,
+ "step": 484
+ },
+ {
+ "epoch": 0.2425,
+ "grad_norm": 0.17980691606220936,
+ "learning_rate": 1e-05,
+ "loss": 0.0335,
+ "step": 485
+ },
+ {
+ "epoch": 0.243,
+ "grad_norm": 0.16998825524724584,
+ "learning_rate": 1e-05,
+ "loss": 0.0362,
+ "step": 486
+ },
+ {
+ "epoch": 0.2435,
+ "grad_norm": 0.11641133365996917,
+ "learning_rate": 1e-05,
+ "loss": 0.0181,
+ "step": 487
+ },
+ {
+ "epoch": 0.244,
+ "grad_norm": 0.14362674831456992,
+ "learning_rate": 1e-05,
+ "loss": 0.0365,
+ "step": 488
+ },
+ {
+ "epoch": 0.2445,
+ "grad_norm": 0.14488123923452778,
+ "learning_rate": 1e-05,
+ "loss": 0.024,
+ "step": 489
+ },
+ {
+ "epoch": 0.245,
+ "grad_norm": 0.1517003378019991,
+ "learning_rate": 1e-05,
+ "loss": 0.0271,
+ "step": 490
+ },
+ {
+ "epoch": 0.2455,
+ "grad_norm": 0.14967074987714707,
+ "learning_rate": 1e-05,
+ "loss": 0.0294,
+ "step": 491
+ },
+ {
+ "epoch": 0.246,
+ "grad_norm": 0.15791993394836015,
+ "learning_rate": 1e-05,
+ "loss": 0.0283,
+ "step": 492
+ },
+ {
+ "epoch": 0.2465,
+ "grad_norm": 0.13495006239387555,
+ "learning_rate": 1e-05,
+ "loss": 0.0251,
+ "step": 493
+ },
+ {
+ "epoch": 0.247,
+ "grad_norm": 0.18930054102351096,
+ "learning_rate": 1e-05,
+ "loss": 0.0373,
+ "step": 494
+ },
+ {
+ "epoch": 0.2475,
+ "grad_norm": 0.13152234060084034,
+ "learning_rate": 1e-05,
+ "loss": 0.0233,
+ "step": 495
+ },
+ {
+ "epoch": 0.248,
+ "grad_norm": 0.1341531691510106,
+ "learning_rate": 1e-05,
+ "loss": 0.0269,
+ "step": 496
+ },
+ {
+ "epoch": 0.2485,
+ "grad_norm": 0.13741586371551992,
+ "learning_rate": 1e-05,
+ "loss": 0.0277,
+ "step": 497
+ },
+ {
+ "epoch": 0.249,
+ "grad_norm": 0.1554051684617337,
+ "learning_rate": 1e-05,
+ "loss": 0.0276,
+ "step": 498
+ },
+ {
+ "epoch": 0.2495,
+ "grad_norm": 0.14814577647609775,
+ "learning_rate": 1e-05,
+ "loss": 0.0235,
+ "step": 499
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.14930720560226657,
+ "learning_rate": 1e-05,
+ "loss": 0.039,
+ "step": 500
+ },
+ {
+ "epoch": 0.2505,
+ "grad_norm": 0.1244942117603243,
+ "learning_rate": 1e-05,
+ "loss": 0.0202,
+ "step": 501
+ },
+ {
+ "epoch": 0.251,
+ "grad_norm": 0.14244145658079232,
+ "learning_rate": 1e-05,
+ "loss": 0.0209,
+ "step": 502
+ },
+ {
+ "epoch": 0.2515,
+ "grad_norm": 0.146145096145696,
+ "learning_rate": 1e-05,
+ "loss": 0.024,
+ "step": 503
+ },
+ {
+ "epoch": 0.252,
+ "grad_norm": 0.13594585715406687,
+ "learning_rate": 1e-05,
+ "loss": 0.0269,
+ "step": 504
+ },
+ {
+ "epoch": 0.2525,
+ "grad_norm": 0.1490412459954878,
+ "learning_rate": 1e-05,
+ "loss": 0.0345,
+ "step": 505
+ },
+ {
+ "epoch": 0.253,
+ "grad_norm": 0.11950170266380834,
+ "learning_rate": 1e-05,
+ "loss": 0.0181,
+ "step": 506
+ },
+ {
+ "epoch": 0.2535,
+ "grad_norm": 0.18548215823845707,
+ "learning_rate": 1e-05,
+ "loss": 0.0275,
+ "step": 507
+ },
+ {
+ "epoch": 0.254,
+ "grad_norm": 0.15108980653404058,
+ "learning_rate": 1e-05,
+ "loss": 0.0197,
+ "step": 508
+ },
+ {
+ "epoch": 0.2545,
+ "grad_norm": 0.16504836098536718,
+ "learning_rate": 1e-05,
+ "loss": 0.0289,
+ "step": 509
+ },
+ {
+ "epoch": 0.255,
+ "grad_norm": 0.15746261920489785,
+ "learning_rate": 1e-05,
+ "loss": 0.0253,
+ "step": 510
+ },
+ {
+ "epoch": 0.2555,
+ "grad_norm": 0.14071771991438595,
+ "learning_rate": 1e-05,
+ "loss": 0.0219,
+ "step": 511
+ },
+ {
+ "epoch": 0.256,
+ "grad_norm": 0.16079872072377113,
+ "learning_rate": 1e-05,
+ "loss": 0.0204,
+ "step": 512
+ },
+ {
+ "epoch": 0.256,
+ "eval_dev_acc": 0.56640625,
+ "eval_dev_token": 5634.1015625,
+ "eval_runtime": 361.9891,
+ "eval_samples_per_second": 0.177,
+ "eval_steps_per_second": 0.003,
+ "step": 512
+ },
+ {
+ "epoch": 0.2565,
+ "grad_norm": 0.13549471484008802,
+ "learning_rate": 1e-05,
+ "loss": 0.0164,
+ "step": 513
+ },
+ {
+ "epoch": 0.257,
+ "grad_norm": 0.12072963489745359,
+ "learning_rate": 1e-05,
+ "loss": 0.0302,
+ "step": 514
+ },
+ {
+ "epoch": 0.2575,
+ "grad_norm": 0.14026647684897994,
+ "learning_rate": 1e-05,
+ "loss": 0.0194,
+ "step": 515
+ },
+ {
+ "epoch": 0.258,
+ "grad_norm": 0.1634484411344168,
+ "learning_rate": 1e-05,
+ "loss": 0.0277,
+ "step": 516
+ },
+ {
+ "epoch": 0.2585,
+ "grad_norm": 0.15844211231505426,
+ "learning_rate": 1e-05,
+ "loss": 0.0214,
+ "step": 517
+ },
+ {
+ "epoch": 0.259,
+ "grad_norm": 0.1567910605652928,
+ "learning_rate": 1e-05,
+ "loss": 0.024,
+ "step": 518
+ },
+ {
+ "epoch": 0.2595,
+ "grad_norm": 0.17902606156745304,
+ "learning_rate": 1e-05,
+ "loss": 0.033,
+ "step": 519
+ },
+ {
+ "epoch": 0.26,
+ "grad_norm": 0.12339744538286439,
+ "learning_rate": 1e-05,
+ "loss": 0.0172,
+ "step": 520
+ },
+ {
+ "epoch": 0.2605,
+ "grad_norm": 0.13532209502494125,
+ "learning_rate": 1e-05,
+ "loss": 0.0206,
+ "step": 521
+ },
+ {
+ "epoch": 0.261,
+ "grad_norm": 0.15623082886780087,
+ "learning_rate": 1e-05,
+ "loss": 0.0213,
+ "step": 522
+ },
+ {
+ "epoch": 0.2615,
+ "grad_norm": 0.14428427308597647,
+ "learning_rate": 1e-05,
+ "loss": 0.0201,
+ "step": 523
+ },
+ {
+ "epoch": 0.262,
+ "grad_norm": 0.14835567545470982,
+ "learning_rate": 1e-05,
+ "loss": 0.0244,
+ "step": 524
+ },
+ {
+ "epoch": 0.2625,
+ "grad_norm": 0.14068070672711747,
+ "learning_rate": 1e-05,
+ "loss": 0.0239,
+ "step": 525
+ },
+ {
+ "epoch": 0.263,
+ "grad_norm": 0.1460843289248216,
+ "learning_rate": 1e-05,
+ "loss": 0.0223,
+ "step": 526
+ },
+ {
+ "epoch": 0.2635,
+ "grad_norm": 0.13777430449621855,
+ "learning_rate": 1e-05,
+ "loss": 0.0229,
+ "step": 527
+ },
+ {
+ "epoch": 0.264,
+ "grad_norm": 0.15161607294549337,
+ "learning_rate": 1e-05,
+ "loss": 0.0272,
+ "step": 528
+ },
+ {
+ "epoch": 0.2645,
+ "grad_norm": 0.13410519048089503,
+ "learning_rate": 1e-05,
+ "loss": 0.0181,
+ "step": 529
+ },
+ {
+ "epoch": 0.265,
+ "grad_norm": 0.15931617673254456,
+ "learning_rate": 1e-05,
+ "loss": 0.0244,
+ "step": 530
+ },
+ {
+ "epoch": 0.2655,
+ "grad_norm": 0.1410700523457689,
+ "learning_rate": 1e-05,
+ "loss": 0.0251,
+ "step": 531
+ },
+ {
+ "epoch": 0.266,
+ "grad_norm": 0.11388951846034073,
+ "learning_rate": 1e-05,
+ "loss": 0.0144,
+ "step": 532
+ },
+ {
+ "epoch": 0.2665,
+ "grad_norm": 0.12253780956369799,
+ "learning_rate": 1e-05,
+ "loss": 0.0177,
+ "step": 533
+ },
+ {
+ "epoch": 0.267,
+ "grad_norm": 0.15575473599510573,
+ "learning_rate": 1e-05,
+ "loss": 0.0192,
+ "step": 534
+ },
+ {
+ "epoch": 0.2675,
+ "grad_norm": 0.14690747155640696,
+ "learning_rate": 1e-05,
+ "loss": 0.0222,
+ "step": 535
+ },
+ {
+ "epoch": 0.268,
+ "grad_norm": 0.13584546405544728,
+ "learning_rate": 1e-05,
+ "loss": 0.0237,
+ "step": 536
+ },
+ {
+ "epoch": 0.2685,
+ "grad_norm": 0.13430763220790742,
+ "learning_rate": 1e-05,
+ "loss": 0.0291,
+ "step": 537
+ },
+ {
+ "epoch": 0.269,
+ "grad_norm": 0.14208572873353734,
+ "learning_rate": 1e-05,
+ "loss": 0.0187,
+ "step": 538
+ },
+ {
+ "epoch": 0.2695,
+ "grad_norm": 0.14058928149963162,
+ "learning_rate": 1e-05,
+ "loss": 0.0199,
+ "step": 539
+ },
+ {
+ "epoch": 0.27,
+ "grad_norm": 0.15100703501541832,
+ "learning_rate": 1e-05,
+ "loss": 0.0348,
+ "step": 540
+ },
+ {
+ "epoch": 0.2705,
+ "grad_norm": 0.12269452397268416,
+ "learning_rate": 1e-05,
+ "loss": 0.0247,
+ "step": 541
+ },
+ {
+ "epoch": 0.271,
+ "grad_norm": 0.1364796501674048,
+ "learning_rate": 1e-05,
+ "loss": 0.0227,
+ "step": 542
+ },
+ {
+ "epoch": 0.2715,
+ "grad_norm": 0.13163932605554884,
+ "learning_rate": 1e-05,
+ "loss": 0.0262,
+ "step": 543
+ },
+ {
+ "epoch": 0.272,
+ "grad_norm": 0.13497428740182482,
+ "learning_rate": 1e-05,
+ "loss": 0.0206,
+ "step": 544
+ },
+ {
+ "epoch": 0.272,
+ "eval_dev_acc": 0.59765625,
+ "eval_dev_token": 5639.98828125,
+ "eval_runtime": 359.2369,
+ "eval_samples_per_second": 0.178,
+ "eval_steps_per_second": 0.003,
+ "step": 544
+ },
+ {
+ "epoch": 0.2725,
+ "grad_norm": 0.13930735859181714,
+ "learning_rate": 1e-05,
+ "loss": 0.0234,
+ "step": 545
+ },
+ {
+ "epoch": 0.273,
+ "grad_norm": 0.11985280096835198,
+ "learning_rate": 1e-05,
+ "loss": 0.0269,
+ "step": 546
+ },
+ {
+ "epoch": 0.2735,
+ "grad_norm": 0.17031723198491708,
+ "learning_rate": 1e-05,
+ "loss": 0.028,
+ "step": 547
+ },
+ {
+ "epoch": 0.274,
+ "grad_norm": 0.17166197772315975,
+ "learning_rate": 1e-05,
+ "loss": 0.0281,
+ "step": 548
+ },
+ {
+ "epoch": 0.2745,
+ "grad_norm": 0.1167335581681914,
+ "learning_rate": 1e-05,
+ "loss": 0.022,
+ "step": 549
+ },
+ {
+ "epoch": 0.275,
+ "grad_norm": 0.1443441971157384,
+ "learning_rate": 1e-05,
+ "loss": 0.0198,
+ "step": 550
+ },
+ {
+ "epoch": 0.2755,
+ "grad_norm": 0.1268787923602722,
+ "learning_rate": 1e-05,
+ "loss": 0.017,
+ "step": 551
+ },
+ {
+ "epoch": 0.276,
+ "grad_norm": 0.11065296478824395,
+ "learning_rate": 1e-05,
+ "loss": 0.0157,
+ "step": 552
+ },
+ {
+ "epoch": 0.2765,
+ "grad_norm": 0.12047907824944362,
+ "learning_rate": 1e-05,
+ "loss": 0.016,
+ "step": 553
+ },
+ {
+ "epoch": 0.277,
+ "grad_norm": 0.13956303855472266,
+ "learning_rate": 1e-05,
+ "loss": 0.0216,
+ "step": 554
+ },
+ {
+ "epoch": 0.2775,
+ "grad_norm": 0.10533407777378404,
+ "learning_rate": 1e-05,
+ "loss": 0.0137,
+ "step": 555
+ },
+ {
+ "epoch": 0.278,
+ "grad_norm": 0.11532013491755984,
+ "learning_rate": 1e-05,
+ "loss": 0.0282,
+ "step": 556
+ },
+ {
+ "epoch": 0.2785,
+ "grad_norm": 0.11921463919727264,
+ "learning_rate": 1e-05,
+ "loss": 0.0163,
+ "step": 557
+ },
+ {
+ "epoch": 0.279,
+ "grad_norm": 0.15645731769207732,
+ "learning_rate": 1e-05,
+ "loss": 0.0241,
+ "step": 558
+ },
+ {
+ "epoch": 0.2795,
+ "grad_norm": 0.12096274696840706,
+ "learning_rate": 1e-05,
+ "loss": 0.0167,
+ "step": 559
+ },
+ {
+ "epoch": 0.28,
+ "grad_norm": 0.149157783124579,
+ "learning_rate": 1e-05,
+ "loss": 0.0193,
+ "step": 560
+ },
+ {
+ "epoch": 0.2805,
+ "grad_norm": 0.16982490839988412,
+ "learning_rate": 1e-05,
+ "loss": 0.0283,
+ "step": 561
+ },
+ {
+ "epoch": 0.281,
+ "grad_norm": 0.12038107977310454,
+ "learning_rate": 1e-05,
+ "loss": 0.0154,
+ "step": 562
+ },
+ {
+ "epoch": 0.2815,
+ "grad_norm": 0.16469919524412158,
+ "learning_rate": 1e-05,
+ "loss": 0.0214,
+ "step": 563
+ },
+ {
+ "epoch": 0.282,
+ "grad_norm": 0.15827423056846177,
+ "learning_rate": 1e-05,
+ "loss": 0.0216,
+ "step": 564
+ },
+ {
+ "epoch": 0.2825,
+ "grad_norm": 0.12058245559465251,
+ "learning_rate": 1e-05,
+ "loss": 0.0141,
+ "step": 565
+ },
+ {
+ "epoch": 0.283,
+ "grad_norm": 0.163789727088167,
+ "learning_rate": 1e-05,
+ "loss": 0.0241,
+ "step": 566
+ },
+ {
+ "epoch": 0.2835,
+ "grad_norm": 0.1390884369932456,
+ "learning_rate": 1e-05,
+ "loss": 0.0221,
+ "step": 567
+ },
+ {
+ "epoch": 0.284,
+ "grad_norm": 0.14472941005878595,
+ "learning_rate": 1e-05,
+ "loss": 0.0164,
+ "step": 568
+ },
+ {
+ "epoch": 0.2845,
+ "grad_norm": 0.15437454042645973,
+ "learning_rate": 1e-05,
+ "loss": 0.024,
+ "step": 569
+ },
+ {
+ "epoch": 0.285,
+ "grad_norm": 0.1207487307624573,
+ "learning_rate": 1e-05,
+ "loss": 0.0172,
+ "step": 570
+ },
+ {
+ "epoch": 0.2855,
+ "grad_norm": 0.1502409849611173,
+ "learning_rate": 1e-05,
+ "loss": 0.0298,
+ "step": 571
+ },
+ {
+ "epoch": 0.286,
+ "grad_norm": 0.16401355690597133,
+ "learning_rate": 1e-05,
+ "loss": 0.0225,
+ "step": 572
+ },
+ {
+ "epoch": 0.2865,
+ "grad_norm": 0.15181464752177645,
+ "learning_rate": 1e-05,
+ "loss": 0.0189,
+ "step": 573
+ },
+ {
+ "epoch": 0.287,
+ "grad_norm": 0.14560432645081878,
+ "learning_rate": 1e-05,
+ "loss": 0.0186,
+ "step": 574
+ },
+ {
+ "epoch": 0.2875,
+ "grad_norm": 0.12603042660981642,
+ "learning_rate": 1e-05,
+ "loss": 0.0155,
+ "step": 575
+ },
+ {
+ "epoch": 0.288,
+ "grad_norm": 0.11638577111126014,
+ "learning_rate": 1e-05,
+ "loss": 0.0164,
+ "step": 576
+ },
+ {
+ "epoch": 0.288,
+ "eval_dev_acc": 0.54296875,
+ "eval_dev_token": 5801.126953125,
+ "eval_runtime": 373.1703,
+ "eval_samples_per_second": 0.172,
+ "eval_steps_per_second": 0.003,
+ "step": 576
+ },
+ {
+ "epoch": 0.2885,
+ "grad_norm": 0.13420942083968396,
+ "learning_rate": 1e-05,
+ "loss": 0.0181,
+ "step": 577
+ },
+ {
+ "epoch": 0.289,
+ "grad_norm": 0.12122809121871923,
+ "learning_rate": 1e-05,
+ "loss": 0.0134,
+ "step": 578
+ },
+ {
+ "epoch": 0.2895,
+ "grad_norm": 0.13114866603642533,
+ "learning_rate": 1e-05,
+ "loss": 0.0174,
+ "step": 579
+ },
+ {
+ "epoch": 0.29,
+ "grad_norm": 0.1498609312158644,
+ "learning_rate": 1e-05,
+ "loss": 0.0214,
+ "step": 580
+ },
+ {
+ "epoch": 0.2905,
+ "grad_norm": 0.1527812218308566,
+ "learning_rate": 1e-05,
+ "loss": 0.0194,
+ "step": 581
+ },
+ {
+ "epoch": 0.291,
+ "grad_norm": 0.14711876695343454,
+ "learning_rate": 1e-05,
+ "loss": 0.018,
+ "step": 582
+ },
+ {
+ "epoch": 0.2915,
+ "grad_norm": 0.11529220604038168,
+ "learning_rate": 1e-05,
+ "loss": 0.0144,
+ "step": 583
+ },
+ {
+ "epoch": 0.292,
+ "grad_norm": 0.16180671831014115,
+ "learning_rate": 1e-05,
+ "loss": 0.0165,
+ "step": 584
+ },
+ {
+ "epoch": 0.2925,
+ "grad_norm": 0.13621545825638848,
+ "learning_rate": 1e-05,
+ "loss": 0.015,
+ "step": 585
+ },
+ {
+ "epoch": 0.293,
+ "grad_norm": 0.15473239935591382,
+ "learning_rate": 1e-05,
+ "loss": 0.0164,
+ "step": 586
+ },
+ {
+ "epoch": 0.2935,
+ "grad_norm": 0.15716799171541335,
+ "learning_rate": 1e-05,
+ "loss": 0.0194,
+ "step": 587
+ },
+ {
+ "epoch": 0.294,
+ "grad_norm": 0.1684941322847538,
+ "learning_rate": 1e-05,
+ "loss": 0.022,
+ "step": 588
+ },
+ {
+ "epoch": 0.2945,
+ "grad_norm": 0.15453918821249785,
+ "learning_rate": 1e-05,
+ "loss": 0.0188,
+ "step": 589
+ },
+ {
+ "epoch": 0.295,
+ "grad_norm": 0.140163345657633,
+ "learning_rate": 1e-05,
+ "loss": 0.0208,
+ "step": 590
+ },
+ {
+ "epoch": 0.2955,
+ "grad_norm": 0.15010258665645038,
+ "learning_rate": 1e-05,
+ "loss": 0.0215,
+ "step": 591
+ },
+ {
+ "epoch": 0.296,
+ "grad_norm": 0.14661643221841641,
+ "learning_rate": 1e-05,
+ "loss": 0.0191,
+ "step": 592
+ },
+ {
+ "epoch": 0.2965,
+ "grad_norm": 0.15435066476462508,
+ "learning_rate": 1e-05,
+ "loss": 0.0231,
+ "step": 593
+ },
+ {
+ "epoch": 0.297,
+ "grad_norm": 0.17094702806791945,
+ "learning_rate": 1e-05,
+ "loss": 0.0251,
+ "step": 594
+ },
+ {
+ "epoch": 0.2975,
+ "grad_norm": 0.1371139566901347,
+ "learning_rate": 1e-05,
+ "loss": 0.0186,
+ "step": 595
+ },
+ {
+ "epoch": 0.298,
+ "grad_norm": 0.11779673830033237,
+ "learning_rate": 1e-05,
+ "loss": 0.0269,
+ "step": 596
+ },
+ {
+ "epoch": 0.2985,
+ "grad_norm": 0.11853976704548681,
+ "learning_rate": 1e-05,
+ "loss": 0.0154,
+ "step": 597
+ },
+ {
+ "epoch": 0.299,
+ "grad_norm": 0.14881574569113099,
+ "learning_rate": 1e-05,
+ "loss": 0.0246,
+ "step": 598
+ },
+ {
+ "epoch": 0.2995,
+ "grad_norm": 0.11792409287393274,
+ "learning_rate": 1e-05,
+ "loss": 0.0164,
+ "step": 599
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.13831559531762572,
+ "learning_rate": 1e-05,
+ "loss": 0.0257,
+ "step": 600
+ },
+ {
+ "epoch": 0.3005,
+ "grad_norm": 0.13756632720301187,
+ "learning_rate": 1e-05,
+ "loss": 0.0214,
+ "step": 601
+ },
+ {
+ "epoch": 0.301,
+ "grad_norm": 0.10998907458045305,
+ "learning_rate": 1e-05,
+ "loss": 0.015,
+ "step": 602
+ },
+ {
+ "epoch": 0.3015,
+ "grad_norm": 0.135955562101373,
+ "learning_rate": 1e-05,
+ "loss": 0.0211,
+ "step": 603
+ },
+ {
+ "epoch": 0.302,
+ "grad_norm": 0.1214956422000124,
+ "learning_rate": 1e-05,
+ "loss": 0.0219,
+ "step": 604
+ },
+ {
+ "epoch": 0.3025,
+ "grad_norm": 0.15757702522309963,
+ "learning_rate": 1e-05,
+ "loss": 0.018,
+ "step": 605
+ },
+ {
+ "epoch": 0.303,
+ "grad_norm": 0.1350858708023801,
+ "learning_rate": 1e-05,
+ "loss": 0.0211,
+ "step": 606
+ },
+ {
+ "epoch": 0.3035,
+ "grad_norm": 0.10610433140412452,
+ "learning_rate": 1e-05,
+ "loss": 0.0147,
+ "step": 607
+ },
+ {
+ "epoch": 0.304,
+ "grad_norm": 0.11514647079357257,
+ "learning_rate": 1e-05,
+ "loss": 0.0192,
+ "step": 608
+ },
+ {
+ "epoch": 0.304,
+ "eval_dev_acc": 0.62109375,
+ "eval_dev_token": 5367.916015625,
+ "eval_runtime": 354.8095,
+ "eval_samples_per_second": 0.18,
+ "eval_steps_per_second": 0.003,
+ "step": 608
+ },
+ {
+ "epoch": 0.3045,
+ "grad_norm": 0.12603567923188372,
+ "learning_rate": 1e-05,
+ "loss": 0.0188,
+ "step": 609
+ },
+ {
+ "epoch": 0.305,
+ "grad_norm": 0.14277125754270012,
+ "learning_rate": 1e-05,
+ "loss": 0.022,
+ "step": 610
+ },
+ {
+ "epoch": 0.3055,
+ "grad_norm": 0.12862855181841676,
+ "learning_rate": 1e-05,
+ "loss": 0.021,
+ "step": 611
+ },
+ {
+ "epoch": 0.306,
+ "grad_norm": 0.1227953424403543,
+ "learning_rate": 1e-05,
+ "loss": 0.018,
+ "step": 612
+ },
+ {
+ "epoch": 0.3065,
+ "grad_norm": 0.11646820367498804,
+ "learning_rate": 1e-05,
+ "loss": 0.0131,
+ "step": 613
+ },
+ {
+ "epoch": 0.307,
+ "grad_norm": 0.14701145754992329,
+ "learning_rate": 1e-05,
+ "loss": 0.0186,
+ "step": 614
+ },
+ {
+ "epoch": 0.3075,
+ "grad_norm": 0.1493073818813876,
+ "learning_rate": 1e-05,
+ "loss": 0.0254,
+ "step": 615
+ },
+ {
+ "epoch": 0.308,
+ "grad_norm": 0.1352952895732537,
+ "learning_rate": 1e-05,
+ "loss": 0.0181,
+ "step": 616
+ },
+ {
+ "epoch": 0.3085,
+ "grad_norm": 0.13007743097982305,
+ "learning_rate": 1e-05,
+ "loss": 0.0171,
+ "step": 617
+ },
+ {
+ "epoch": 0.309,
+ "grad_norm": 0.1665432351262121,
+ "learning_rate": 1e-05,
+ "loss": 0.0228,
+ "step": 618
+ },
+ {
+ "epoch": 0.3095,
+ "grad_norm": 0.16442931730443322,
+ "learning_rate": 1e-05,
+ "loss": 0.0238,
+ "step": 619
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.16320986192220768,
+ "learning_rate": 1e-05,
+ "loss": 0.0238,
+ "step": 620
+ },
+ {
+ "epoch": 0.3105,
+ "grad_norm": 0.13880254871235365,
+ "learning_rate": 1e-05,
+ "loss": 0.0179,
+ "step": 621
+ },
+ {
+ "epoch": 0.311,
+ "grad_norm": 0.13609379700738453,
+ "learning_rate": 1e-05,
+ "loss": 0.0195,
+ "step": 622
+ },
+ {
+ "epoch": 0.3115,
+ "grad_norm": 0.1368415516519621,
+ "learning_rate": 1e-05,
+ "loss": 0.024,
+ "step": 623
+ },
+ {
+ "epoch": 0.312,
+ "grad_norm": 0.12821586481120512,
+ "learning_rate": 1e-05,
+ "loss": 0.0191,
+ "step": 624
+ },
+ {
+ "epoch": 0.3125,
+ "grad_norm": 0.13644927854222083,
+ "learning_rate": 1e-05,
+ "loss": 0.0154,
+ "step": 625
+ },
+ {
+ "epoch": 0.313,
+ "grad_norm": 0.15158164143556496,
+ "learning_rate": 1e-05,
+ "loss": 0.0318,
+ "step": 626
+ },
+ {
+ "epoch": 0.3135,
+ "grad_norm": 0.12404550422721679,
+ "learning_rate": 1e-05,
+ "loss": 0.0202,
+ "step": 627
+ },
+ {
+ "epoch": 0.314,
+ "grad_norm": 0.1235074023832298,
+ "learning_rate": 1e-05,
+ "loss": 0.0167,
+ "step": 628
+ },
+ {
+ "epoch": 0.3145,
+ "grad_norm": 0.16094487436899907,
+ "learning_rate": 1e-05,
+ "loss": 0.0227,
+ "step": 629
+ },
+ {
+ "epoch": 0.315,
+ "grad_norm": 0.11086598912590964,
+ "learning_rate": 1e-05,
+ "loss": 0.0158,
+ "step": 630
+ },
+ {
+ "epoch": 0.3155,
+ "grad_norm": 0.1147741974179167,
+ "learning_rate": 1e-05,
+ "loss": 0.0136,
+ "step": 631
+ },
+ {
+ "epoch": 0.316,
+ "grad_norm": 0.12346095617438974,
+ "learning_rate": 1e-05,
+ "loss": 0.017,
+ "step": 632
+ },
+ {
+ "epoch": 0.3165,
+ "grad_norm": 0.1235267138232638,
+ "learning_rate": 1e-05,
+ "loss": 0.0131,
+ "step": 633
+ },
+ {
+ "epoch": 0.317,
+ "grad_norm": 0.11979162262432065,
+ "learning_rate": 1e-05,
+ "loss": 0.0194,
+ "step": 634
+ },
+ {
+ "epoch": 0.3175,
+ "grad_norm": 0.12253729986288973,
+ "learning_rate": 1e-05,
+ "loss": 0.0205,
+ "step": 635
+ },
+ {
+ "epoch": 0.318,
+ "grad_norm": 0.1374736081434109,
+ "learning_rate": 1e-05,
+ "loss": 0.0207,
+ "step": 636
+ },
+ {
+ "epoch": 0.3185,
+ "grad_norm": 0.11667911740285354,
+ "learning_rate": 1e-05,
+ "loss": 0.0164,
+ "step": 637
+ },
+ {
+ "epoch": 0.319,
+ "grad_norm": 0.13725799823509804,
+ "learning_rate": 1e-05,
+ "loss": 0.0177,
+ "step": 638
+ },
+ {
+ "epoch": 0.3195,
+ "grad_norm": 0.1461325036101512,
+ "learning_rate": 1e-05,
+ "loss": 0.0233,
+ "step": 639
+ },
+ {
+ "epoch": 0.32,
+ "grad_norm": 0.1486586288765987,
+ "learning_rate": 1e-05,
+ "loss": 0.022,
+ "step": 640
+ },
+ {
+ "epoch": 0.32,
+ "eval_dev_acc": 0.52734375,
+ "eval_dev_token": 5585.80859375,
+ "eval_runtime": 360.5581,
+ "eval_samples_per_second": 0.178,
+ "eval_steps_per_second": 0.003,
+ "step": 640
+ },
+ {
+ "epoch": 0.3205,
+ "grad_norm": 0.13037027619165104,
+ "learning_rate": 1e-05,
+ "loss": 0.0181,
+ "step": 641
+ },
+ {
+ "epoch": 0.321,
+ "grad_norm": 0.1384777662387777,
+ "learning_rate": 1e-05,
+ "loss": 0.0284,
+ "step": 642
+ },
+ {
+ "epoch": 0.3215,
+ "grad_norm": 0.11731142200376247,
+ "learning_rate": 1e-05,
+ "loss": 0.0136,
+ "step": 643
+ },
+ {
+ "epoch": 0.322,
+ "grad_norm": 0.13199000719975476,
+ "learning_rate": 1e-05,
+ "loss": 0.0147,
+ "step": 644
+ },
+ {
+ "epoch": 0.3225,
+ "grad_norm": 0.145679314202878,
+ "learning_rate": 1e-05,
+ "loss": 0.0227,
+ "step": 645
+ },
+ {
+ "epoch": 0.323,
+ "grad_norm": 0.13813521110883425,
+ "learning_rate": 1e-05,
+ "loss": 0.0173,
+ "step": 646
+ },
+ {
+ "epoch": 0.3235,
+ "grad_norm": 0.11216370610734963,
+ "learning_rate": 1e-05,
+ "loss": 0.0125,
+ "step": 647
+ },
+ {
+ "epoch": 0.324,
+ "grad_norm": 0.09898218700430327,
+ "learning_rate": 1e-05,
+ "loss": 0.0165,
+ "step": 648
+ },
+ {
+ "epoch": 0.3245,
+ "grad_norm": 0.13555813085878698,
+ "learning_rate": 1e-05,
+ "loss": 0.0163,
+ "step": 649
+ },
+ {
+ "epoch": 0.325,
+ "grad_norm": 0.11552480540546263,
+ "learning_rate": 1e-05,
+ "loss": 0.0161,
+ "step": 650
+ },
+ {
+ "epoch": 0.3255,
+ "grad_norm": 0.12028253181729011,
+ "learning_rate": 1e-05,
+ "loss": 0.014,
+ "step": 651
+ },
+ {
+ "epoch": 0.326,
+ "grad_norm": 0.11773491790468957,
+ "learning_rate": 1e-05,
+ "loss": 0.0143,
+ "step": 652
+ },
+ {
+ "epoch": 0.3265,
+ "grad_norm": 0.11052002031571376,
+ "learning_rate": 1e-05,
+ "loss": 0.0177,
+ "step": 653
+ },
+ {
+ "epoch": 0.327,
+ "grad_norm": 0.1251016102612686,
+ "learning_rate": 1e-05,
+ "loss": 0.0134,
+ "step": 654
+ },
+ {
+ "epoch": 0.3275,
+ "grad_norm": 0.13329112874236815,
+ "learning_rate": 1e-05,
+ "loss": 0.0148,
+ "step": 655
+ },
+ {
+ "epoch": 0.328,
+ "grad_norm": 0.11382501473889628,
+ "learning_rate": 1e-05,
+ "loss": 0.0145,
+ "step": 656
+ },
+ {
+ "epoch": 0.3285,
+ "grad_norm": 0.1319495587226548,
+ "learning_rate": 1e-05,
+ "loss": 0.0139,
+ "step": 657
+ },
+ {
+ "epoch": 0.329,
+ "grad_norm": 0.12070785233460224,
+ "learning_rate": 1e-05,
+ "loss": 0.014,
+ "step": 658
+ },
+ {
+ "epoch": 0.3295,
+ "grad_norm": 0.12142680030465443,
+ "learning_rate": 1e-05,
+ "loss": 0.0159,
+ "step": 659
+ },
+ {
+ "epoch": 0.33,
+ "grad_norm": 0.12182666413282645,
+ "learning_rate": 1e-05,
+ "loss": 0.0114,
+ "step": 660
+ },
+ {
+ "epoch": 0.3305,
+ "grad_norm": 0.13190056217824564,
+ "learning_rate": 1e-05,
+ "loss": 0.0163,
+ "step": 661
+ },
+ {
+ "epoch": 0.331,
+ "grad_norm": 0.12804397999806036,
+ "learning_rate": 1e-05,
+ "loss": 0.016,
+ "step": 662
+ },
+ {
+ "epoch": 0.3315,
+ "grad_norm": 0.1461414693983946,
+ "learning_rate": 1e-05,
+ "loss": 0.0179,
+ "step": 663
+ },
+ {
+ "epoch": 0.332,
+ "grad_norm": 0.1467248720157808,
+ "learning_rate": 1e-05,
+ "loss": 0.0171,
+ "step": 664
+ },
+ {
+ "epoch": 0.3325,
+ "grad_norm": 0.1393703645756879,
+ "learning_rate": 1e-05,
+ "loss": 0.017,
+ "step": 665
+ },
+ {
+ "epoch": 0.333,
+ "grad_norm": 0.1677851073130961,
+ "learning_rate": 1e-05,
+ "loss": 0.033,
+ "step": 666
+ },
+ {
+ "epoch": 0.3335,
+ "grad_norm": 0.13245384270982133,
+ "learning_rate": 1e-05,
+ "loss": 0.0163,
+ "step": 667
+ },
+ {
+ "epoch": 0.334,
+ "grad_norm": 0.12289205645457782,
+ "learning_rate": 1e-05,
+ "loss": 0.015,
+ "step": 668
+ },
+ {
+ "epoch": 0.3345,
+ "grad_norm": 0.13290806824539741,
+ "learning_rate": 1e-05,
+ "loss": 0.0131,
+ "step": 669
+ },
+ {
+ "epoch": 0.335,
+ "grad_norm": 0.12256221521384754,
+ "learning_rate": 1e-05,
+ "loss": 0.0138,
+ "step": 670
+ },
+ {
+ "epoch": 0.3355,
+ "grad_norm": 0.11871572464334247,
+ "learning_rate": 1e-05,
+ "loss": 0.0156,
+ "step": 671
+ },
+ {
+ "epoch": 0.336,
+ "grad_norm": 0.1425273432040885,
+ "learning_rate": 1e-05,
+ "loss": 0.0132,
+ "step": 672
+ },
+ {
+ "epoch": 0.336,
+ "eval_dev_acc": 0.5625,
+ "eval_dev_token": 5492.04296875,
+ "eval_runtime": 348.043,
+ "eval_samples_per_second": 0.184,
+ "eval_steps_per_second": 0.003,
+ "step": 672
+ },
+ {
+ "epoch": 0.3365,
+ "grad_norm": 0.12612838576017849,
+ "learning_rate": 1e-05,
+ "loss": 0.0127,
+ "step": 673
+ },
+ {
+ "epoch": 0.337,
+ "grad_norm": 0.1294118778329942,
+ "learning_rate": 1e-05,
+ "loss": 0.0161,
+ "step": 674
+ },
+ {
+ "epoch": 0.3375,
+ "grad_norm": 0.11771619437889824,
+ "learning_rate": 1e-05,
+ "loss": 0.0142,
+ "step": 675
+ },
+ {
+ "epoch": 0.338,
+ "grad_norm": 0.13434389137713848,
+ "learning_rate": 1e-05,
+ "loss": 0.0157,
+ "step": 676
+ },
+ {
+ "epoch": 0.3385,
+ "grad_norm": 0.2579148520419769,
+ "learning_rate": 1e-05,
+ "loss": 0.0322,
+ "step": 677
+ },
+ {
+ "epoch": 0.339,
+ "grad_norm": 0.12876645046050272,
+ "learning_rate": 1e-05,
+ "loss": 0.0127,
+ "step": 678
+ },
+ {
+ "epoch": 0.3395,
+ "grad_norm": 0.12336988858308351,
+ "learning_rate": 1e-05,
+ "loss": 0.0114,
+ "step": 679
+ },
+ {
+ "epoch": 0.34,
+ "grad_norm": 0.11038801232074134,
+ "learning_rate": 1e-05,
+ "loss": 0.0112,
+ "step": 680
+ },
+ {
+ "epoch": 0.3405,
+ "grad_norm": 0.13782079916676085,
+ "learning_rate": 1e-05,
+ "loss": 0.0156,
+ "step": 681
+ },
+ {
+ "epoch": 0.341,
+ "grad_norm": 0.12593807949317973,
+ "learning_rate": 1e-05,
+ "loss": 0.0179,
+ "step": 682
+ },
+ {
+ "epoch": 0.3415,
+ "grad_norm": 0.13416291611922937,
+ "learning_rate": 1e-05,
+ "loss": 0.0167,
+ "step": 683
+ },
+ {
+ "epoch": 0.342,
+ "grad_norm": 0.14107509427243767,
+ "learning_rate": 1e-05,
+ "loss": 0.0182,
+ "step": 684
+ },
+ {
+ "epoch": 0.3425,
+ "grad_norm": 0.13829290958101634,
+ "learning_rate": 1e-05,
+ "loss": 0.0133,
+ "step": 685
+ },
+ {
+ "epoch": 0.343,
+ "grad_norm": 0.12072602797225344,
+ "learning_rate": 1e-05,
+ "loss": 0.0135,
+ "step": 686
+ },
+ {
+ "epoch": 0.3435,
+ "grad_norm": 0.09808985286323638,
+ "learning_rate": 1e-05,
+ "loss": 0.0109,
+ "step": 687
+ },
+ {
+ "epoch": 0.344,
+ "grad_norm": 0.1237155662701831,
+ "learning_rate": 1e-05,
+ "loss": 0.0137,
+ "step": 688
+ },
+ {
+ "epoch": 0.3445,
+ "grad_norm": 0.11512509686864711,
+ "learning_rate": 1e-05,
+ "loss": 0.0131,
+ "step": 689
+ },
+ {
+ "epoch": 0.345,
+ "grad_norm": 0.10310189822258317,
+ "learning_rate": 1e-05,
+ "loss": 0.011,
+ "step": 690
+ },
+ {
+ "epoch": 0.3455,
+ "grad_norm": 0.17008589258309467,
+ "learning_rate": 1e-05,
+ "loss": 0.0219,
+ "step": 691
+ },
+ {
+ "epoch": 0.346,
+ "grad_norm": 0.12175425158539896,
+ "learning_rate": 1e-05,
+ "loss": 0.014,
+ "step": 692
+ },
+ {
+ "epoch": 0.3465,
+ "grad_norm": 0.11242731302801981,
+ "learning_rate": 1e-05,
+ "loss": 0.0142,
+ "step": 693
+ },
+ {
+ "epoch": 0.347,
+ "grad_norm": 0.13975508336225442,
+ "learning_rate": 1e-05,
+ "loss": 0.0149,
+ "step": 694
+ },
+ {
+ "epoch": 0.3475,
+ "grad_norm": 0.08820258618918772,
+ "learning_rate": 1e-05,
+ "loss": 0.008,
+ "step": 695
+ },
+ {
+ "epoch": 0.348,
+ "grad_norm": 0.1343553646574964,
+ "learning_rate": 1e-05,
+ "loss": 0.014,
+ "step": 696
+ },
+ {
+ "epoch": 0.3485,
+ "grad_norm": 0.13274844121268298,
+ "learning_rate": 1e-05,
+ "loss": 0.0143,
+ "step": 697
+ },
+ {
+ "epoch": 0.349,
+ "grad_norm": 0.1412865550899799,
+ "learning_rate": 1e-05,
+ "loss": 0.0164,
+ "step": 698
+ },
+ {
+ "epoch": 0.3495,
+ "grad_norm": 0.1471608978417448,
+ "learning_rate": 1e-05,
+ "loss": 0.0144,
+ "step": 699
+ },
+ {
+ "epoch": 0.35,
+ "grad_norm": 0.13861936123681107,
+ "learning_rate": 1e-05,
+ "loss": 0.0151,
+ "step": 700
+ },
+ {
+ "epoch": 0.3505,
+ "grad_norm": 0.11790173787959472,
+ "learning_rate": 1e-05,
+ "loss": 0.0252,
+ "step": 701
+ },
+ {
+ "epoch": 0.351,
+ "grad_norm": 0.12166501219045824,
+ "learning_rate": 1e-05,
+ "loss": 0.0145,
+ "step": 702
+ },
+ {
+ "epoch": 0.3515,
+ "grad_norm": 0.1082871078184047,
+ "learning_rate": 1e-05,
+ "loss": 0.0095,
+ "step": 703
+ },
+ {
+ "epoch": 0.352,
+ "grad_norm": 0.12226846848831563,
+ "learning_rate": 1e-05,
+ "loss": 0.0141,
+ "step": 704
+ },
+ {
+ "epoch": 0.352,
+ "eval_dev_acc": 0.52734375,
+ "eval_dev_token": 5760.470703125,
+ "eval_runtime": 408.3746,
+ "eval_samples_per_second": 0.157,
+ "eval_steps_per_second": 0.002,
+ "step": 704
+ },
+ {
+ "epoch": 0.3525,
+ "grad_norm": 0.09666366634628189,
+ "learning_rate": 1e-05,
+ "loss": 0.0097,
+ "step": 705
+ },
+ {
+ "epoch": 0.353,
+ "grad_norm": 0.11283217042776503,
+ "learning_rate": 1e-05,
+ "loss": 0.0143,
+ "step": 706
+ },
+ {
+ "epoch": 0.3535,
+ "grad_norm": 0.12289011751838193,
+ "learning_rate": 1e-05,
+ "loss": 0.0173,
+ "step": 707
+ },
+ {
+ "epoch": 0.354,
+ "grad_norm": 0.11335657340497375,
+ "learning_rate": 1e-05,
+ "loss": 0.0193,
+ "step": 708
+ },
+ {
+ "epoch": 0.3545,
+ "grad_norm": 0.12878402333619293,
+ "learning_rate": 1e-05,
+ "loss": 0.0198,
+ "step": 709
+ },
+ {
+ "epoch": 0.355,
+ "grad_norm": 0.09768170830344952,
+ "learning_rate": 1e-05,
+ "loss": 0.0123,
+ "step": 710
+ },
+ {
+ "epoch": 0.3555,
+ "grad_norm": 0.11142711749521704,
+ "learning_rate": 1e-05,
+ "loss": 0.0142,
+ "step": 711
+ },
+ {
+ "epoch": 0.356,
+ "grad_norm": 0.12263582497303413,
+ "learning_rate": 1e-05,
+ "loss": 0.0125,
+ "step": 712
+ },
+ {
+ "epoch": 0.3565,
+ "grad_norm": 0.10602173709981424,
+ "learning_rate": 1e-05,
+ "loss": 0.013,
+ "step": 713
+ },
+ {
+ "epoch": 0.357,
+ "grad_norm": 0.09797092044857854,
+ "learning_rate": 1e-05,
+ "loss": 0.0097,
+ "step": 714
+ },
+ {
+ "epoch": 0.3575,
+ "grad_norm": 0.10267028455310084,
+ "learning_rate": 1e-05,
+ "loss": 0.0153,
+ "step": 715
+ },
+ {
+ "epoch": 0.358,
+ "grad_norm": 0.11247269902696218,
+ "learning_rate": 1e-05,
+ "loss": 0.0124,
+ "step": 716
+ },
+ {
+ "epoch": 0.3585,
+ "grad_norm": 0.10192090643527008,
+ "learning_rate": 1e-05,
+ "loss": 0.01,
+ "step": 717
+ },
+ {
+ "epoch": 0.359,
+ "grad_norm": 0.15447030723754146,
+ "learning_rate": 1e-05,
+ "loss": 0.0191,
+ "step": 718
+ },
+ {
+ "epoch": 0.3595,
+ "grad_norm": 0.12653173296866044,
+ "learning_rate": 1e-05,
+ "loss": 0.0144,
+ "step": 719
+ },
+ {
+ "epoch": 0.36,
+ "grad_norm": 0.09798115770372441,
+ "learning_rate": 1e-05,
+ "loss": 0.0113,
+ "step": 720
+ },
+ {
+ "epoch": 0.3605,
+ "grad_norm": 0.13673705245890774,
+ "learning_rate": 1e-05,
+ "loss": 0.0176,
+ "step": 721
+ },
+ {
+ "epoch": 0.361,
+ "grad_norm": 0.10484924187213118,
+ "learning_rate": 1e-05,
+ "loss": 0.011,
+ "step": 722
+ },
+ {
+ "epoch": 0.3615,
+ "grad_norm": 0.12026390551992476,
+ "learning_rate": 1e-05,
+ "loss": 0.0161,
+ "step": 723
+ },
+ {
+ "epoch": 0.362,
+ "grad_norm": 0.12248845158519388,
+ "learning_rate": 1e-05,
+ "loss": 0.0135,
+ "step": 724
+ },
+ {
+ "epoch": 0.3625,
+ "grad_norm": 0.08242254382606763,
+ "learning_rate": 1e-05,
+ "loss": 0.0103,
+ "step": 725
+ },
+ {
+ "epoch": 0.363,
+ "grad_norm": 0.1310711546429888,
+ "learning_rate": 1e-05,
+ "loss": 0.0213,
+ "step": 726
+ },
+ {
+ "epoch": 0.3635,
+ "grad_norm": 0.11504216894290854,
+ "learning_rate": 1e-05,
+ "loss": 0.0122,
+ "step": 727
+ },
+ {
+ "epoch": 0.364,
+ "grad_norm": 0.12368075631500317,
+ "learning_rate": 1e-05,
+ "loss": 0.0184,
+ "step": 728
+ },
+ {
+ "epoch": 0.3645,
+ "grad_norm": 0.12437061544598803,
+ "learning_rate": 1e-05,
+ "loss": 0.0152,
+ "step": 729
+ },
+ {
+ "epoch": 0.365,
+ "grad_norm": 0.13397286445240938,
+ "learning_rate": 1e-05,
+ "loss": 0.0158,
+ "step": 730
+ },
+ {
+ "epoch": 0.3655,
+ "grad_norm": 0.0982171426550068,
+ "learning_rate": 1e-05,
+ "loss": 0.0096,
+ "step": 731
+ },
+ {
+ "epoch": 0.366,
+ "grad_norm": 0.10415731829601732,
+ "learning_rate": 1e-05,
+ "loss": 0.0105,
+ "step": 732
+ },
+ {
+ "epoch": 0.3665,
+ "grad_norm": 0.13524982542339864,
+ "learning_rate": 1e-05,
+ "loss": 0.0183,
+ "step": 733
+ },
+ {
+ "epoch": 0.367,
+ "grad_norm": 0.09664339538491498,
+ "learning_rate": 1e-05,
+ "loss": 0.0115,
+ "step": 734
+ },
+ {
+ "epoch": 0.3675,
+ "grad_norm": 0.0851015567710096,
+ "learning_rate": 1e-05,
+ "loss": 0.0112,
+ "step": 735
+ },
+ {
+ "epoch": 0.368,
+ "grad_norm": 0.10062979284816816,
+ "learning_rate": 1e-05,
+ "loss": 0.0113,
+ "step": 736
+ },
+ {
+ "epoch": 0.368,
+ "eval_dev_acc": 0.53515625,
+ "eval_dev_token": 5898.23046875,
+ "eval_runtime": 365.57,
+ "eval_samples_per_second": 0.175,
+ "eval_steps_per_second": 0.003,
+ "step": 736
+ },
+ {
+ "epoch": 0.3685,
+ "grad_norm": 0.11264161967062039,
+ "learning_rate": 1e-05,
+ "loss": 0.0108,
+ "step": 737
+ },
+ {
+ "epoch": 0.369,
+ "grad_norm": 0.11222433927286389,
+ "learning_rate": 1e-05,
+ "loss": 0.0104,
+ "step": 738
+ },
+ {
+ "epoch": 0.3695,
+ "grad_norm": 0.20055167671089585,
+ "learning_rate": 1e-05,
+ "loss": 0.013,
+ "step": 739
+ },
+ {
+ "epoch": 0.37,
+ "grad_norm": 0.10491538007846005,
+ "learning_rate": 1e-05,
+ "loss": 0.0131,
+ "step": 740
+ },
+ {
+ "epoch": 0.3705,
+ "grad_norm": 0.09174967069041229,
+ "learning_rate": 1e-05,
+ "loss": 0.0095,
+ "step": 741
+ },
+ {
+ "epoch": 0.371,
+ "grad_norm": 0.10582756451393364,
+ "learning_rate": 1e-05,
+ "loss": 0.012,
+ "step": 742
+ },
+ {
+ "epoch": 0.3715,
+ "grad_norm": 0.09194993895839906,
+ "learning_rate": 1e-05,
+ "loss": 0.0097,
+ "step": 743
+ },
+ {
+ "epoch": 0.372,
+ "grad_norm": 0.08858743590625234,
+ "learning_rate": 1e-05,
+ "loss": 0.01,
+ "step": 744
+ },
+ {
+ "epoch": 0.3725,
+ "grad_norm": 0.10826764845042156,
+ "learning_rate": 1e-05,
+ "loss": 0.0145,
+ "step": 745
+ },
+ {
+ "epoch": 0.373,
+ "grad_norm": 0.11033032167695773,
+ "learning_rate": 1e-05,
+ "loss": 0.0113,
+ "step": 746
+ },
+ {
+ "epoch": 0.3735,
+ "grad_norm": 0.15107498232603372,
+ "learning_rate": 1e-05,
+ "loss": 0.0157,
+ "step": 747
+ },
+ {
+ "epoch": 0.374,
+ "grad_norm": 0.11411131491498813,
+ "learning_rate": 1e-05,
+ "loss": 0.0114,
+ "step": 748
+ },
+ {
+ "epoch": 0.3745,
+ "grad_norm": 0.12425138196940645,
+ "learning_rate": 1e-05,
+ "loss": 0.0138,
+ "step": 749
+ },
+ {
+ "epoch": 0.375,
+ "grad_norm": 0.10647093971928946,
+ "learning_rate": 1e-05,
+ "loss": 0.0107,
+ "step": 750
+ },
+ {
+ "epoch": 0.3755,
+ "grad_norm": 0.12413448325714936,
+ "learning_rate": 1e-05,
+ "loss": 0.014,
+ "step": 751
+ },
+ {
+ "epoch": 0.376,
+ "grad_norm": 0.105340917878943,
+ "learning_rate": 1e-05,
+ "loss": 0.0103,
+ "step": 752
+ },
+ {
+ "epoch": 0.3765,
+ "grad_norm": 0.11802541191692037,
+ "learning_rate": 1e-05,
+ "loss": 0.0127,
+ "step": 753
+ },
+ {
+ "epoch": 0.377,
+ "grad_norm": 0.09864810460683521,
+ "learning_rate": 1e-05,
+ "loss": 0.0128,
+ "step": 754
+ },
+ {
+ "epoch": 0.3775,
+ "grad_norm": 0.12491513055109611,
+ "learning_rate": 1e-05,
+ "loss": 0.0102,
+ "step": 755
+ },
+ {
+ "epoch": 0.378,
+ "grad_norm": 0.11778790720208179,
+ "learning_rate": 1e-05,
+ "loss": 0.0093,
+ "step": 756
+ },
+ {
+ "epoch": 0.3785,
+ "grad_norm": 0.11902239371415295,
+ "learning_rate": 1e-05,
+ "loss": 0.0115,
+ "step": 757
+ },
+ {
+ "epoch": 0.379,
+ "grad_norm": 0.1025498591924567,
+ "learning_rate": 1e-05,
+ "loss": 0.0099,
+ "step": 758
+ },
+ {
+ "epoch": 0.3795,
+ "grad_norm": 0.14120840112868438,
+ "learning_rate": 1e-05,
+ "loss": 0.0123,
+ "step": 759
+ },
+ {
+ "epoch": 0.38,
+ "grad_norm": 0.08964665828244849,
+ "learning_rate": 1e-05,
+ "loss": 0.0116,
+ "step": 760
+ },
+ {
+ "epoch": 0.3805,
+ "grad_norm": 0.13533754381134197,
+ "learning_rate": 1e-05,
+ "loss": 0.0142,
+ "step": 761
+ },
+ {
+ "epoch": 0.381,
+ "grad_norm": 0.11151852649444051,
+ "learning_rate": 1e-05,
+ "loss": 0.0122,
+ "step": 762
+ },
+ {
+ "epoch": 0.3815,
+ "grad_norm": 0.12448616858204287,
+ "learning_rate": 1e-05,
+ "loss": 0.0148,
+ "step": 763
+ },
+ {
+ "epoch": 0.382,
+ "grad_norm": 0.10160526390201502,
+ "learning_rate": 1e-05,
+ "loss": 0.0102,
+ "step": 764
+ },
+ {
+ "epoch": 0.3825,
+ "grad_norm": 0.12329039775788013,
+ "learning_rate": 1e-05,
+ "loss": 0.0226,
+ "step": 765
+ },
+ {
+ "epoch": 0.383,
+ "grad_norm": 0.1706851101549876,
+ "learning_rate": 1e-05,
+ "loss": 0.013,
+ "step": 766
+ },
+ {
+ "epoch": 0.3835,
+ "grad_norm": 0.11518698920716465,
+ "learning_rate": 1e-05,
+ "loss": 0.0136,
+ "step": 767
+ },
+ {
+ "epoch": 0.384,
+ "grad_norm": 0.1130084278865893,
+ "learning_rate": 1e-05,
+ "loss": 0.0151,
+ "step": 768
+ },
+ {
+ "epoch": 0.384,
+ "eval_dev_acc": 0.505859375,
+ "eval_dev_token": 5660.353515625,
+ "eval_runtime": 362.2613,
+ "eval_samples_per_second": 0.177,
+ "eval_steps_per_second": 0.003,
+ "step": 768
+ },
+ {
+ "epoch": 0.3845,
+ "grad_norm": 0.12092852490034717,
+ "learning_rate": 1e-05,
+ "loss": 0.0111,
+ "step": 769
+ },
+ {
+ "epoch": 0.385,
+ "grad_norm": 0.12041159291779986,
+ "learning_rate": 1e-05,
+ "loss": 0.0157,
+ "step": 770
+ },
+ {
+ "epoch": 0.3855,
+ "grad_norm": 0.13135896730332378,
+ "learning_rate": 1e-05,
+ "loss": 0.0149,
+ "step": 771
+ },
+ {
+ "epoch": 0.386,
+ "grad_norm": 0.09838622926936438,
+ "learning_rate": 1e-05,
+ "loss": 0.0113,
+ "step": 772
+ },
+ {
+ "epoch": 0.3865,
+ "grad_norm": 0.1118823935585986,
+ "learning_rate": 1e-05,
+ "loss": 0.016,
+ "step": 773
+ },
+ {
+ "epoch": 0.387,
+ "grad_norm": 0.10549287704509497,
+ "learning_rate": 1e-05,
+ "loss": 0.0097,
+ "step": 774
+ },
+ {
+ "epoch": 0.3875,
+ "grad_norm": 0.1232106501038667,
+ "learning_rate": 1e-05,
+ "loss": 0.013,
+ "step": 775
+ },
+ {
+ "epoch": 0.388,
+ "grad_norm": 0.12975165037101072,
+ "learning_rate": 1e-05,
+ "loss": 0.0121,
+ "step": 776
+ },
+ {
+ "epoch": 0.3885,
+ "grad_norm": 0.13595262265606586,
+ "learning_rate": 1e-05,
+ "loss": 0.0126,
+ "step": 777
+ },
+ {
+ "epoch": 0.389,
+ "grad_norm": 0.1371181387315353,
+ "learning_rate": 1e-05,
+ "loss": 0.0123,
+ "step": 778
+ },
+ {
+ "epoch": 0.3895,
+ "grad_norm": 0.13359565264106144,
+ "learning_rate": 1e-05,
+ "loss": 0.0153,
+ "step": 779
+ },
+ {
+ "epoch": 0.39,
+ "grad_norm": 0.11480062436980976,
+ "learning_rate": 1e-05,
+ "loss": 0.0107,
+ "step": 780
+ },
+ {
+ "epoch": 0.3905,
+ "grad_norm": 0.09947922362782227,
+ "learning_rate": 1e-05,
+ "loss": 0.0086,
+ "step": 781
+ },
+ {
+ "epoch": 0.391,
+ "grad_norm": 0.08734624126570777,
+ "learning_rate": 1e-05,
+ "loss": 0.0076,
+ "step": 782
+ },
+ {
+ "epoch": 0.3915,
+ "grad_norm": 0.10267346744796824,
+ "learning_rate": 1e-05,
+ "loss": 0.0097,
+ "step": 783
+ },
+ {
+ "epoch": 0.392,
+ "grad_norm": 0.09576192367034056,
+ "learning_rate": 1e-05,
+ "loss": 0.0083,
+ "step": 784
+ },
+ {
+ "epoch": 0.3925,
+ "grad_norm": 0.14141610877259214,
+ "learning_rate": 1e-05,
+ "loss": 0.0139,
+ "step": 785
+ },
+ {
+ "epoch": 0.393,
+ "grad_norm": 0.12071117391984963,
+ "learning_rate": 1e-05,
+ "loss": 0.0115,
+ "step": 786
+ },
+ {
+ "epoch": 0.3935,
+ "grad_norm": 0.13172358788988653,
+ "learning_rate": 1e-05,
+ "loss": 0.0129,
+ "step": 787
+ },
+ {
+ "epoch": 0.394,
+ "grad_norm": 0.1345424657278948,
+ "learning_rate": 1e-05,
+ "loss": 0.014,
+ "step": 788
+ },
+ {
+ "epoch": 0.3945,
+ "grad_norm": 0.1166885723717584,
+ "learning_rate": 1e-05,
+ "loss": 0.0125,
+ "step": 789
+ },
+ {
+ "epoch": 0.395,
+ "grad_norm": 0.1513194581227132,
+ "learning_rate": 1e-05,
+ "loss": 0.0145,
+ "step": 790
+ },
+ {
+ "epoch": 0.3955,
+ "grad_norm": 0.11105132556458801,
+ "learning_rate": 1e-05,
+ "loss": 0.011,
+ "step": 791
+ },
+ {
+ "epoch": 0.396,
+ "grad_norm": 0.0989302250949553,
+ "learning_rate": 1e-05,
+ "loss": 0.0086,
+ "step": 792
+ },
+ {
+ "epoch": 0.3965,
+ "grad_norm": 0.1384321782028591,
+ "learning_rate": 1e-05,
+ "loss": 0.0122,
+ "step": 793
+ },
+ {
+ "epoch": 0.397,
+ "grad_norm": 0.1439582094147518,
+ "learning_rate": 1e-05,
+ "loss": 0.0142,
+ "step": 794
+ },
+ {
+ "epoch": 0.3975,
+ "grad_norm": 0.11905534302900607,
+ "learning_rate": 1e-05,
+ "loss": 0.0102,
+ "step": 795
+ },
+ {
+ "epoch": 0.398,
+ "grad_norm": 0.1353943654211476,
+ "learning_rate": 1e-05,
+ "loss": 0.015,
+ "step": 796
+ },
+ {
+ "epoch": 0.3985,
+ "grad_norm": 0.1227008050574365,
+ "learning_rate": 1e-05,
+ "loss": 0.0159,
+ "step": 797
+ },
+ {
+ "epoch": 0.399,
+ "grad_norm": 0.10084479670557596,
+ "learning_rate": 1e-05,
+ "loss": 0.0103,
+ "step": 798
+ },
+ {
+ "epoch": 0.3995,
+ "grad_norm": 0.11430101131806415,
+ "learning_rate": 1e-05,
+ "loss": 0.0128,
+ "step": 799
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.12720193634725216,
+ "learning_rate": 1e-05,
+ "loss": 0.0166,
+ "step": 800
+ },
+ {
+ "epoch": 0.4,
+ "eval_dev_acc": 0.49609375,
+ "eval_dev_token": 5933.87109375,
+ "eval_runtime": 371.4205,
+ "eval_samples_per_second": 0.172,
+ "eval_steps_per_second": 0.003,
+ "step": 800
+ },
+ {
+ "epoch": 0.4005,
+ "grad_norm": 0.10747534966369286,
+ "learning_rate": 1e-05,
+ "loss": 0.0113,
+ "step": 801
+ },
+ {
+ "epoch": 0.401,
+ "grad_norm": 0.09755157150747987,
+ "learning_rate": 1e-05,
+ "loss": 0.0094,
+ "step": 802
+ },
+ {
+ "epoch": 0.4015,
+ "grad_norm": 0.1093754763997552,
+ "learning_rate": 1e-05,
+ "loss": 0.0136,
+ "step": 803
+ },
+ {
+ "epoch": 0.402,
+ "grad_norm": 0.12186788180512477,
+ "learning_rate": 1e-05,
+ "loss": 0.0112,
+ "step": 804
+ },
+ {
+ "epoch": 0.4025,
+ "grad_norm": 0.12341274893925132,
+ "learning_rate": 1e-05,
+ "loss": 0.0126,
+ "step": 805
+ },
+ {
+ "epoch": 0.403,
+ "grad_norm": 0.1000051489528446,
+ "learning_rate": 1e-05,
+ "loss": 0.0171,
+ "step": 806
+ },
+ {
+ "epoch": 0.4035,
+ "grad_norm": 0.09210699513663904,
+ "learning_rate": 1e-05,
+ "loss": 0.0097,
+ "step": 807
+ },
+ {
+ "epoch": 0.404,
+ "grad_norm": 0.12629053026155362,
+ "learning_rate": 1e-05,
+ "loss": 0.0126,
+ "step": 808
+ },
+ {
+ "epoch": 0.4045,
+ "grad_norm": 0.11979707428750866,
+ "learning_rate": 1e-05,
+ "loss": 0.0129,
+ "step": 809
+ },
+ {
+ "epoch": 0.405,
+ "grad_norm": 0.13240620090939892,
+ "learning_rate": 1e-05,
+ "loss": 0.0173,
+ "step": 810
+ },
+ {
+ "epoch": 0.4055,
+ "grad_norm": 0.11509826968311627,
+ "learning_rate": 1e-05,
+ "loss": 0.0133,
+ "step": 811
+ },
+ {
+ "epoch": 0.406,
+ "grad_norm": 0.09930674216692448,
+ "learning_rate": 1e-05,
+ "loss": 0.0103,
+ "step": 812
+ },
+ {
+ "epoch": 0.4065,
+ "grad_norm": 0.12486374889735856,
+ "learning_rate": 1e-05,
+ "loss": 0.0152,
+ "step": 813
+ },
+ {
+ "epoch": 0.407,
+ "grad_norm": 0.09439307719243419,
+ "learning_rate": 1e-05,
+ "loss": 0.0113,
+ "step": 814
+ },
+ {
+ "epoch": 0.4075,
+ "grad_norm": 0.09010069214916468,
+ "learning_rate": 1e-05,
+ "loss": 0.0101,
+ "step": 815
+ },
+ {
+ "epoch": 0.408,
+ "grad_norm": 0.10807091465236611,
+ "learning_rate": 1e-05,
+ "loss": 0.0136,
+ "step": 816
+ },
+ {
+ "epoch": 0.4085,
+ "grad_norm": 0.09320554728801374,
+ "learning_rate": 1e-05,
+ "loss": 0.0102,
+ "step": 817
+ },
+ {
+ "epoch": 0.409,
+ "grad_norm": 0.09578310039513883,
+ "learning_rate": 1e-05,
+ "loss": 0.0105,
+ "step": 818
+ },
+ {
+ "epoch": 0.4095,
+ "grad_norm": 0.10795646602867415,
+ "learning_rate": 1e-05,
+ "loss": 0.0141,
+ "step": 819
+ },
+ {
+ "epoch": 0.41,
+ "grad_norm": 0.11260765818079684,
+ "learning_rate": 1e-05,
+ "loss": 0.0123,
+ "step": 820
+ },
+ {
+ "epoch": 0.4105,
+ "grad_norm": 0.13162780397133028,
+ "learning_rate": 1e-05,
+ "loss": 0.0141,
+ "step": 821
+ },
+ {
+ "epoch": 0.411,
+ "grad_norm": 0.10884500486087925,
+ "learning_rate": 1e-05,
+ "loss": 0.0122,
+ "step": 822
+ },
+ {
+ "epoch": 0.4115,
+ "grad_norm": 0.12472839162847292,
+ "learning_rate": 1e-05,
+ "loss": 0.0135,
+ "step": 823
+ },
+ {
+ "epoch": 0.412,
+ "grad_norm": 0.13289735991638021,
+ "learning_rate": 1e-05,
+ "loss": 0.0112,
+ "step": 824
+ },
+ {
+ "epoch": 0.4125,
+ "grad_norm": 0.12509974441211302,
+ "learning_rate": 1e-05,
+ "loss": 0.0256,
+ "step": 825
+ },
+ {
+ "epoch": 0.413,
+ "grad_norm": 0.12014632147622897,
+ "learning_rate": 1e-05,
+ "loss": 0.0113,
+ "step": 826
+ },
+ {
+ "epoch": 0.4135,
+ "grad_norm": 0.09172916013688245,
+ "learning_rate": 1e-05,
+ "loss": 0.0082,
+ "step": 827
+ },
+ {
+ "epoch": 0.414,
+ "grad_norm": 0.09305774811224422,
+ "learning_rate": 1e-05,
+ "loss": 0.0074,
+ "step": 828
+ },
+ {
+ "epoch": 0.4145,
+ "grad_norm": 0.12720506111352092,
+ "learning_rate": 1e-05,
+ "loss": 0.0091,
+ "step": 829
+ },
+ {
+ "epoch": 0.415,
+ "grad_norm": 0.0815065287803298,
+ "learning_rate": 1e-05,
+ "loss": 0.0061,
+ "step": 830
+ },
+ {
+ "epoch": 0.4155,
+ "grad_norm": 0.09730425117259746,
+ "learning_rate": 1e-05,
+ "loss": 0.0079,
+ "step": 831
+ },
+ {
+ "epoch": 0.416,
+ "grad_norm": 0.09579694307116488,
+ "learning_rate": 1e-05,
+ "loss": 0.0098,
+ "step": 832
+ },
+ {
+ "epoch": 0.416,
+ "eval_dev_acc": 0.513671875,
+ "eval_dev_token": 4446.53125,
+ "eval_runtime": 329.208,
+ "eval_samples_per_second": 0.194,
+ "eval_steps_per_second": 0.003,
+ "step": 832
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 2000,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 9223372036854775807,
+ "save_steps": 32,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 224786117410816.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/tldr-14b-step-832/training_args.bin b/tldr-14b-step-832/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..4f5d02ddd18e1788b01766ecb83eb47b67f647d9
--- /dev/null
+++ b/tldr-14b-step-832/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e58fa949caa5f03669bd0d8bb18548b753852f3f732f36df28a00d4b854effa3
+size 8440
diff --git a/tldr-14b-step-832/zero_to_fp32.py b/tldr-14b-step-832/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69ecd9acb5a235ffbf927091051106d902b3d39
--- /dev/null
+++ b/tldr-14b-step-832/zero_to_fp32.py
@@ -0,0 +1,674 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import json
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    """Per-rank model-state payload parsed from a ``*_model_states.pt`` file.

    The lowercase class name is kept for backward compatibility with the
    original DeepSpeed script. The original annotated fields with ``dict()``
    (a dict *instance*, not a type); fixed to real type annotations.
    """
    buffers: dict            # buffer name -> fp32 tensor
    param_shapes: list       # one {param_name: shape} dict per param group
    shared_params: list      # [alias_name, source_name] pairs
    ds_version: object       # deepspeed version recorded in the checkpoint (string in practice)
    frozen_param_shapes: dict       # name -> shape for frozen params, or None
    frozen_param_fragments: dict    # name -> this rank's tensor fragment, or None
+
+
# Module-level debug switch; flipped on by the -d/--debug CLI flag in __main__.
debug = 0

# All checkpoint tensors are loaded to CPU so conversion works without GPUs.
device = torch.device('cpu')
+
+
def atoi(text):
    """Return *text* as an ``int`` when it is all digits, else return it unchanged."""
    if text.isdigit():
        return int(text)
    return text
+
+
def natural_keys(text):
    '''
    Sort key producing human ("natural") order, so e.g. "step9" < "step10".

    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    # Capturing group keeps the digit runs in the split output; each run is
    # converted to int by atoi so numeric chunks compare numerically.
    return list(map(atoi, re.split(r'(\d+)', text)))
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of the (single) rank-0 model-states file for *zero_stage*.

    Args:
        checkpoint_dir: directory holding the per-rank checkpoint files.
        zero_stage: ZeRO optimization stage recorded in the checkpoint (1, 2 or 3).

    Raises:
        FileNotFoundError: if the directory or the expected file is missing.
        ValueError: for an unrecognized ``zero_stage`` (the original code would
            have crashed with ``UnboundLocalError`` on ``file`` instead).
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # same message style as parse_optim_states for consistency
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return all files under *checkpoint_dir* matching *glob_pattern*, naturally sorted."""
    # XXX: need to test that this simple glob rule works for multi-node setup too
    matches = glob.glob(os.path.join(checkpoint_dir, glob_pattern))
    ckpt_files = sorted(matches, key=natural_keys)

    if not ckpt_files:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return ckpt_files
+
+
def get_optim_files(checkpoint_dir):
    """Return the naturally-sorted per-rank optimizer state files."""
    pattern = "*_optim_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def get_model_state_files(checkpoint_dir):
    """Return the naturally-sorted per-rank model state files."""
    pattern = "*_model_states.pt"
    return get_checkpoint_files(checkpoint_dir, pattern)
+
+
def parse_model_states(files):
    """Parse each per-rank ``*_model_states.pt`` file into a ``zero_model_state``.

    Only buffers, parameter shapes, frozen-param info, shared-param aliases and
    the deepspeed version are kept here; the weights themselves are
    reconstructed later from the optimizer files' fp32 master copies.

    Args:
        files: paths to the per-rank model-states files.

    Returns:
        list of ``zero_model_state``, one per file/rank.

    Raises:
        ValueError: if a file lacks the buffer-names key and therefore is not a
            model-states checkpoint.
    """
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        # ds_version may be absent in very old checkpoints, hence the .get
        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load all per-rank ``*_optim_states.pt`` files and extract the fp32 partitions.

    Args:
        files: per-rank optimizer-state file paths (one per dp rank).
        ds_checkpoint_dir: checkpoint directory (used only for error messages).

    Returns:
        ``(zero_stage, world_size, fp32_flat_groups)`` where ``fp32_flat_groups``
        holds each rank's flattened fp32 master-weight partition(s).

    Raises:
        ValueError: if the files are not a zero checkpoint, the file count does
            not match the recorded world size, or the zero stage is unknown.
    """
    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        # zero-1/2: keep the per-group partitions as-is for each rank
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
        - ``exclude_frozen_parameters``: when True, frozen (non-trainable) params are left out

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # stage 1 uses the same checkpoint layout as stage 2, hence the <= 2 dispatch
    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen (non-trainable) params into *state_dict* for a zero-1/2 checkpoint.

    Rank 0's fragments are used directly as the full tensors (no concatenation,
    unlike the zero-3 path). No-op when the checkpoint has no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # in zero-1/2 the rank-0 fragment already is the full param tensor
        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reconstruct full fp32 trainable params from zero-1/2 flattened partitions.

    Each rank holds a contiguous partition of every param group's flattened fp32
    master weights; per group, the partitions are concatenated and then carved
    back into individual params by shape, in declaration order.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shapes may be torch.Size (has .numel) or plain tuples/lists
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a zero-1/2 checkpoint:
    buffers, then (optionally) frozen params, then trainable params, then
    shared-param aliases."""
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(state_dict, zero_model_states)

    _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: alias pair[0] points at the same tensor as pair[1]
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return ``(partitioned_numel, padding_numel)`` for a zero-3 sharded param.

    ``partitioned_numel`` is the per-rank slice size; ``padding_numel`` is how
    many filler elements the last rank carries when the param doesn't divide
    evenly across ``world_size``.
    """
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    leftover = unpartitioned_numel % world_size
    padding_numel = world_size - leftover if leftover else 0
    return partitioned_numel, padding_numel
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Rebuild frozen (non-trainable) params for a zero-3 checkpoint.

    Frozen fragments are sharded across ranks, so each param is reassembled by
    concatenating every rank's fragment and trimming the alignment padding.
    No-op when the checkpoint has no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    # every rank holds an equal-sized fragment, hence * world_size
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concatenate all ranks' fragments, then drop trailing padding via narrow
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Reconstruct full fp32 trainable params from zero-3 sharded partitions.

    Every param is split across all ranks at its own boundary, so each param is
    rebuilt by concatenating each rank's slice at the running offset, then
    trimming per-param alignment padding with ``narrow``.
    """
    param_shapes = zero_model_states[0].param_shapes
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in tqdm(param_shapes.items(), desc='Gathering Sharded Weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    # offset advanced per-rank; scale to total elements consumed across all ranks
    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a zero-3 checkpoint:
    buffers, then (optionally) frozen params, then trainable params, then
    shared-param aliases."""
    state_dict = OrderedDict()

    # buffers
    buffers = zero_model_states[0].buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)

    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)

    # recover shared parameters: alias pair[0] points at the same tensor as pair[1]
    for pair in zero_model_states[0].shared_params:
        if pair[1] in state_dict:
            state_dict[pair[0]] = state_dict[pair[1]]

    return state_dict
+
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
    via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        # resolve the tag from the 'latest' file saved alongside the checkpoint
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    # the actual per-rank files live in the tag-named subfolder (e.g. global_step14)
    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory to the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """
    # Dependency pre-check: fail fast with a helpful message before doing the
    # (potentially very long) checkpoint reconstruction
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        state_dict_split = split_torch_state_dict_into_shards(state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        # single-file fallback that mimics the shape of huggingface_hub's split result
        from collections import namedtuple
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        # .contiguous() because narrow/view during reconstruction may yield non-contiguous tensors
        shard = {tensor: state_dict[tensor].contiguous() for tensor in tensors}
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard, output_path)

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
+
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    # NOTE(review): strict=False presumably tolerates keys absent from the
    # reconstructed state_dict (e.g. excluded frozen params) — verify against callers
    model.load_state_dict(state_dict, strict=False)

    return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files"
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/tldr-7b-checkpoint-256/adapter_model/config.json b/tldr-7b-checkpoint-256/adapter_model/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..302a300f7b516601023158ee87709cc00f22298e
--- /dev/null
+++ b/tldr-7b-checkpoint-256/adapter_model/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "/cpfs/user/lizhongzhi/huggingface_model/huggingface_model/DeepSeek-R1-Distill-Qwen-7B/",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 3584,
+ "initializer_range": 0.02,
+ "intermediate_size": 18944,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 28,
+ "model_type": "qwen2",
+ "num_attention_heads": 28,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 4,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.46.3",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+}
diff --git a/tldr-7b-checkpoint-256/adapter_model/generation_config.json b/tldr-7b-checkpoint-256/adapter_model/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..59e60f99f4acabf5f765a866cb6d7060779fdcdf
--- /dev/null
+++ b/tldr-7b-checkpoint-256/adapter_model/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.46.3"
+}
diff --git a/tldr-7b-checkpoint-256/adapter_model/model.safetensors b/tldr-7b-checkpoint-256/adapter_model/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9dbadae1e8fdc500705b641ecdd022147b2b651b
--- /dev/null
+++ b/tldr-7b-checkpoint-256/adapter_model/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c395ebed7ddc70201e0286c0c4e97807244814db18d40bf2bd3b6dd88c08f7b
+size 701024
diff --git a/tldr-7b-checkpoint-256/config.json b/tldr-7b-checkpoint-256/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..302a300f7b516601023158ee87709cc00f22298e
--- /dev/null
+++ b/tldr-7b-checkpoint-256/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "/cpfs/user/lizhongzhi/huggingface_model/huggingface_model/DeepSeek-R1-Distill-Qwen-7B/",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 3584,
+ "initializer_range": 0.02,
+ "intermediate_size": 18944,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 28,
+ "model_type": "qwen2",
+ "num_attention_heads": 28,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 4,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.46.3",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+}
diff --git a/tldr-7b-checkpoint-256/generation_config.json b/tldr-7b-checkpoint-256/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..59e60f99f4acabf5f765a866cb6d7060779fdcdf
--- /dev/null
+++ b/tldr-7b-checkpoint-256/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.46.3"
+}
diff --git a/tldr-7b-checkpoint-256/latest b/tldr-7b-checkpoint-256/latest
new file mode 100644
index 0000000000000000000000000000000000000000..b747f9725067064e241a7a3bed90583971af8ad1
--- /dev/null
+++ b/tldr-7b-checkpoint-256/latest
@@ -0,0 +1 @@
+global_step256
\ No newline at end of file
diff --git a/tldr-7b-checkpoint-256/long2short_proportions.json b/tldr-7b-checkpoint-256/long2short_proportions.json
new file mode 100644
index 0000000000000000000000000000000000000000..b92c68e811ab70ad5401991a9e596fa895341726
--- /dev/null
+++ b/tldr-7b-checkpoint-256/long2short_proportions.json
@@ -0,0 +1,365 @@
+[
+ {
+ "global_step": 0,
+ "cot_domain_weight": [
+ 0.8,
+ 0.2
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 8,
+ "cot_domain_weight": [
+ 0.81966233253479,
+ 0.18033766746520996
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 16,
+ "cot_domain_weight": [
+ 0.7872583270072937,
+ 0.2127416729927063
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 24,
+ "cot_domain_weight": [
+ 0.7460198998451233,
+ 0.2539801001548767
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 32,
+ "cot_domain_weight": [
+ 0.6744258410715319,
+ 0.32557415892846814
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 40,
+ "cot_domain_weight": [
+ 0.5970645546913147,
+ 0.4029354453086853
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 48,
+ "cot_domain_weight": [
+ 0.3999738454834315,
+ 0.6000261545165685
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 56,
+ "cot_domain_weight": [
+ 0.2720071835522165,
+ 0.7279928164477835
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 64,
+ "cot_domain_weight": [
+ 0.2883644063798553,
+ 0.7116355936201447
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 72,
+ "cot_domain_weight": [
+ 0.3323897124180455,
+ 0.6676102875819545
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 80,
+ "cot_domain_weight": [
+ 0.3198286903057673,
+ 0.6801713096942327
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 88,
+ "cot_domain_weight": [
+ 0.30956872162632476,
+ 0.6904312783736752
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 96,
+ "cot_domain_weight": [
+ 0.28148205765974865,
+ 0.7185179423402513
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 104,
+ "cot_domain_weight": [
+ 0.19276975382521383,
+ 0.8072302461747862
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 112,
+ "cot_domain_weight": [
+ 0.11667832421803193,
+ 0.8833216757819681
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 120,
+ "cot_domain_weight": [
+ 0.10653018285729127,
+ 0.8934698171427087
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 128,
+ "cot_domain_weight": [
+ 0.08691881046262705,
+ 0.9130811895373729
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 136,
+ "cot_domain_weight": [
+ 0.07620099413993937,
+ 0.9237990058600606
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 144,
+ "cot_domain_weight": [
+ 0.09678315443384017,
+ 0.9032168455661598
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 152,
+ "cot_domain_weight": [
+ 0.06039547920227051,
+ 0.9396045207977295
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 160,
+ "cot_domain_weight": [
+ 0.04663034069109073,
+ 0.9533696593089093
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 168,
+ "cot_domain_weight": [
+ 0.026384488927624624,
+ 0.9736155110723754
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 176,
+ "cot_domain_weight": [
+ 0.01369204708991822,
+ 0.9863079529100818
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 184,
+ "cot_domain_weight": [
+ 0.008808859025084685,
+ 0.9911911409749153
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 192,
+ "cot_domain_weight": [
+ 0.008046488434985519,
+ 0.9919535115650144
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 200,
+ "cot_domain_weight": [
+ 0.006645676632023577,
+ 0.9933543233679765
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 208,
+ "cot_domain_weight": [
+ 0.006015583141017519,
+ 0.9939844168589825
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 216,
+ "cot_domain_weight": [
+ 0.004511566495152915,
+ 0.9954884335048471
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 224,
+ "cot_domain_weight": [
+ 0.002732270716591211,
+ 0.9972677292834088
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 232,
+ "cot_domain_weight": [
+ 0.0021831512748239114,
+ 0.9978168487251761
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 240,
+ "cot_domain_weight": [
+ 0.0022364268058571715,
+ 0.9977635731941429
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 248,
+ "cot_domain_weight": [
+ 0.003314645357360453,
+ 0.9966853546426395
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ },
+ {
+ "global_step": 256,
+ "cot_domain_weight": [
+ 0.0036412973637667506,
+ 0.9963587026362333
+ ],
+ "cot_domain_name": [
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/long2short_prm12k_gsm8k_merge_data/dynamic_long_short_data/gsm8k_prm12k_s1/prm12k_gsm8k_short_shortest.jsonl",
+ "/cpfs/user/lizhongzhi/data/data/gsm8k/liang/s1_7b_r1_prompt.jsonl"
+ ]
+ }
+]
\ No newline at end of file
diff --git a/tldr-7b-checkpoint-256/model-00001-of-00004.safetensors b/tldr-7b-checkpoint-256/model-00001-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dc6c4437d6047ca8d68e0cf79772b4af2dcc4f8d
--- /dev/null
+++ b/tldr-7b-checkpoint-256/model-00001-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd3a2c81fafdb17a408b824be2275f058cb26c4e480e3e735a0f15aa19e14151
+size 4877660776
diff --git a/tldr-7b-checkpoint-256/model-00002-of-00004.safetensors b/tldr-7b-checkpoint-256/model-00002-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..40dc5321a7a336ad4444b655b5b1508533052e5b
--- /dev/null
+++ b/tldr-7b-checkpoint-256/model-00002-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3dd211dd8f37c904872ea4ed8153c6bd012a34630da2a664d8ebdbaf76d4d15e
+size 4932751008
diff --git a/tldr-7b-checkpoint-256/model-00003-of-00004.safetensors b/tldr-7b-checkpoint-256/model-00003-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..860c0ec9471a5c55c4c6232e70ba6893a0c17235
--- /dev/null
+++ b/tldr-7b-checkpoint-256/model-00003-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a2faecbc251c1baceeadb56f43a7fc3f7224b771a9665ad74609069f5c9a934
+size 4330865200
diff --git a/tldr-7b-checkpoint-256/model-00004-of-00004.safetensors b/tldr-7b-checkpoint-256/model-00004-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..594d524977c529a7257f70e6ce50380c952a318a
--- /dev/null
+++ b/tldr-7b-checkpoint-256/model-00004-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1e94c33e6f48f80370536173c5059e771a69081642e879e48f40fe67bcb990a
+size 1089994880
diff --git a/tldr-7b-checkpoint-256/model.safetensors.index.json b/tldr-7b-checkpoint-256/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..6ca5084b88f1a95fef37f1b94c6e87ff69422bb7
--- /dev/null
+++ b/tldr-7b-checkpoint-256/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 15231233024
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00004-of-00004.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.norm.weight": "model-00003-of-00004.safetensors"
+ }
+}
diff --git a/tldr-7b-checkpoint-256/rng_state_0.pth b/tldr-7b-checkpoint-256/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7577efaa8690b667d5be6dc7081597fca6d05418
--- /dev/null
+++ b/tldr-7b-checkpoint-256/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99597ef7442635c9fc7e33f58ab65a1f097883076cda723846953702c5b1bb41
+size 15920
diff --git a/tldr-7b-checkpoint-256/rng_state_1.pth b/tldr-7b-checkpoint-256/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e2acc4b52c2fd5341e236a599394a92e9439e434
--- /dev/null
+++ b/tldr-7b-checkpoint-256/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9986a5b4d0c0854c31b0a552b36c8d61c4166799896370911745287d59eb2f1
+size 15920
diff --git a/tldr-7b-checkpoint-256/rng_state_2.pth b/tldr-7b-checkpoint-256/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7eb35633e784fe907ab4362e0bf1aa58ec5278a8
--- /dev/null
+++ b/tldr-7b-checkpoint-256/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62d0a03a6899b1d9c6471730a5f27a61c6d5decea4fa364e33de7339d74afa6a
+size 15920
diff --git a/tldr-7b-checkpoint-256/rng_state_3.pth b/tldr-7b-checkpoint-256/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c1e144f3ae453cec3ad456f9b835b08bb304b911
--- /dev/null
+++ b/tldr-7b-checkpoint-256/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5d2deda76dfce536554e9a353390020e1434a02190925740d534a1f14df2db5
+size 15920
diff --git a/tldr-7b-checkpoint-256/rng_state_4.pth b/tldr-7b-checkpoint-256/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..2ca3b5614575ca55699acf0c75e8ffce6b66f5df
--- /dev/null
+++ b/tldr-7b-checkpoint-256/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ea03f5389460add8ac4bb962ff352abda89dc8283120a1b75a0b08a8701c3d0
+size 15920
diff --git a/tldr-7b-checkpoint-256/rng_state_5.pth b/tldr-7b-checkpoint-256/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0c053487eec8542fed1a43e14a7fc28de8e514b6
--- /dev/null
+++ b/tldr-7b-checkpoint-256/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0efd6bdda2dc5f3a2dc45fe493b81426aefc76e034baaf43d92cd400d55abbf
+size 15920
diff --git a/tldr-7b-checkpoint-256/rng_state_6.pth b/tldr-7b-checkpoint-256/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c6fd7fb3dae2ea67456cc5f1b4a27cfca728711d
--- /dev/null
+++ b/tldr-7b-checkpoint-256/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15abd0cbcd121c12ef3480b67be50888f282d5deb1f190c5739144faa876b79e
+size 15920
diff --git a/tldr-7b-checkpoint-256/rng_state_7.pth b/tldr-7b-checkpoint-256/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..5341ca9ae1d22502999bfae1f88178bd73e95e2d
--- /dev/null
+++ b/tldr-7b-checkpoint-256/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfbfa4971e4f22128a2a3938f9e09ee46a74b13ba2c990c12f6badfb97ceb345
+size 15920
diff --git a/tldr-7b-checkpoint-256/scheduler.pt b/tldr-7b-checkpoint-256/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..51c73463fced862c9e3af7b2d86f814e4e517fc7
--- /dev/null
+++ b/tldr-7b-checkpoint-256/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fb3eaf688658712768ae645e4a2b5f778a59201f17641a26e210365d8c2ef6f
+size 1064
diff --git a/tldr-7b-checkpoint-256/special_tokens_map.json b/tldr-7b-checkpoint-256/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/tldr-7b-checkpoint-256/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/tldr-7b-checkpoint-256/tokenizer.json b/tldr-7b-checkpoint-256/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/tldr-7b-checkpoint-256/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/tldr-7b-checkpoint-256/tokenizer_config.json b/tldr-7b-checkpoint-256/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..d36d867d39875eaa6078ba7c77be919699f19889
--- /dev/null
+++ b/tldr-7b-checkpoint-256/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool 
%}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "legacy": true,
+ "model_max_length": 8192,
+ "pad_token": "<|end▁of▁sentence|>",
+ "padding_side": "left",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/tldr-7b-checkpoint-256/trainer_state.json b/tldr-7b-checkpoint-256/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..3a18f191c1cda758a652765a591c76d124cad5d6
--- /dev/null
+++ b/tldr-7b-checkpoint-256/trainer_state.json
@@ -0,0 +1,2113 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0,
+ "eval_steps": 8,
+ "global_step": 256,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.00390625,
+ "grad_norm": 3.380525042530954,
+ "learning_rate": 1e-05,
+ "loss": 0.2859,
+ "step": 1
+ },
+ {
+ "epoch": 0.0078125,
+ "grad_norm": 2.6901012326349156,
+ "learning_rate": 1e-05,
+ "loss": 0.2117,
+ "step": 2
+ },
+ {
+ "epoch": 0.01171875,
+ "grad_norm": 3.191447237922227,
+ "learning_rate": 1e-05,
+ "loss": 0.2602,
+ "step": 3
+ },
+ {
+ "epoch": 0.015625,
+ "grad_norm": 2.204083519446381,
+ "learning_rate": 1e-05,
+ "loss": 0.1972,
+ "step": 4
+ },
+ {
+ "epoch": 0.01953125,
+ "grad_norm": 2.0481149317155687,
+ "learning_rate": 1e-05,
+ "loss": 0.2338,
+ "step": 5
+ },
+ {
+ "epoch": 0.0234375,
+ "grad_norm": 1.6269814174466988,
+ "learning_rate": 1e-05,
+ "loss": 0.214,
+ "step": 6
+ },
+ {
+ "epoch": 0.02734375,
+ "grad_norm": 1.6908703624878527,
+ "learning_rate": 1e-05,
+ "loss": 0.2088,
+ "step": 7
+ },
+ {
+ "epoch": 0.03125,
+ "grad_norm": 1.2059719622160197,
+ "learning_rate": 1e-05,
+ "loss": 0.1975,
+ "step": 8
+ },
+ {
+ "epoch": 0.03125,
+ "eval_dev_acc": 0.515625,
+ "eval_dev_token": 4849.7578125,
+ "eval_runtime": 168.4394,
+ "eval_samples_per_second": 0.095,
+ "eval_steps_per_second": 0.006,
+ "step": 8
+ },
+ {
+ "epoch": 0.03515625,
+ "grad_norm": 1.6837720712641369,
+ "learning_rate": 1e-05,
+ "loss": 0.1873,
+ "step": 9
+ },
+ {
+ "epoch": 0.0390625,
+ "grad_norm": 1.2504651087103098,
+ "learning_rate": 1e-05,
+ "loss": 0.1959,
+ "step": 10
+ },
+ {
+ "epoch": 0.04296875,
+ "grad_norm": 1.3187603751382884,
+ "learning_rate": 1e-05,
+ "loss": 0.2135,
+ "step": 11
+ },
+ {
+ "epoch": 0.046875,
+ "grad_norm": 1.3545446581007174,
+ "learning_rate": 1e-05,
+ "loss": 0.2428,
+ "step": 12
+ },
+ {
+ "epoch": 0.05078125,
+ "grad_norm": 1.6286051945906104,
+ "learning_rate": 1e-05,
+ "loss": 0.1708,
+ "step": 13
+ },
+ {
+ "epoch": 0.0546875,
+ "grad_norm": 1.6081830921647842,
+ "learning_rate": 1e-05,
+ "loss": 0.1471,
+ "step": 14
+ },
+ {
+ "epoch": 0.05859375,
+ "grad_norm": 1.4305460955933824,
+ "learning_rate": 1e-05,
+ "loss": 0.1837,
+ "step": 15
+ },
+ {
+ "epoch": 0.0625,
+ "grad_norm": 1.3961670104174644,
+ "learning_rate": 1e-05,
+ "loss": 0.1352,
+ "step": 16
+ },
+ {
+ "epoch": 0.0625,
+ "eval_dev_acc": 0.4296875,
+ "eval_dev_token": 5067.265625,
+ "eval_runtime": 167.2848,
+ "eval_samples_per_second": 0.096,
+ "eval_steps_per_second": 0.006,
+ "step": 16
+ },
+ {
+ "epoch": 0.06640625,
+ "grad_norm": 1.5507019702345457,
+ "learning_rate": 1e-05,
+ "loss": 0.1657,
+ "step": 17
+ },
+ {
+ "epoch": 0.0703125,
+ "grad_norm": 1.3395286968352729,
+ "learning_rate": 1e-05,
+ "loss": 0.1824,
+ "step": 18
+ },
+ {
+ "epoch": 0.07421875,
+ "grad_norm": 2.201219146342779,
+ "learning_rate": 1e-05,
+ "loss": 0.1391,
+ "step": 19
+ },
+ {
+ "epoch": 0.078125,
+ "grad_norm": 1.75559779570709,
+ "learning_rate": 1e-05,
+ "loss": 0.1351,
+ "step": 20
+ },
+ {
+ "epoch": 0.08203125,
+ "grad_norm": 2.0359121335172428,
+ "learning_rate": 1e-05,
+ "loss": 0.1748,
+ "step": 21
+ },
+ {
+ "epoch": 0.0859375,
+ "grad_norm": 1.6822343317370052,
+ "learning_rate": 1e-05,
+ "loss": 0.1582,
+ "step": 22
+ },
+ {
+ "epoch": 0.08984375,
+ "grad_norm": 1.9664935447837442,
+ "learning_rate": 1e-05,
+ "loss": 0.1338,
+ "step": 23
+ },
+ {
+ "epoch": 0.09375,
+ "grad_norm": 1.1463903797363937,
+ "learning_rate": 1e-05,
+ "loss": 0.1139,
+ "step": 24
+ },
+ {
+ "epoch": 0.09375,
+ "eval_dev_acc": 0.4296875,
+ "eval_dev_token": 4994.296875,
+ "eval_runtime": 168.4043,
+ "eval_samples_per_second": 0.095,
+ "eval_steps_per_second": 0.006,
+ "step": 24
+ },
+ {
+ "epoch": 0.09765625,
+ "grad_norm": 2.1728621095149627,
+ "learning_rate": 1e-05,
+ "loss": 0.1471,
+ "step": 25
+ },
+ {
+ "epoch": 0.1015625,
+ "grad_norm": 1.6714738223766954,
+ "learning_rate": 1e-05,
+ "loss": 0.1349,
+ "step": 26
+ },
+ {
+ "epoch": 0.10546875,
+ "grad_norm": 1.5574316583381629,
+ "learning_rate": 1e-05,
+ "loss": 0.1356,
+ "step": 27
+ },
+ {
+ "epoch": 0.109375,
+ "grad_norm": 1.4728847084572547,
+ "learning_rate": 1e-05,
+ "loss": 0.1509,
+ "step": 28
+ },
+ {
+ "epoch": 0.11328125,
+ "grad_norm": 1.4769394661942852,
+ "learning_rate": 1e-05,
+ "loss": 0.1294,
+ "step": 29
+ },
+ {
+ "epoch": 0.1171875,
+ "grad_norm": 1.8550097520759188,
+ "learning_rate": 1e-05,
+ "loss": 0.1208,
+ "step": 30
+ },
+ {
+ "epoch": 0.12109375,
+ "grad_norm": 1.75157088447911,
+ "learning_rate": 1e-05,
+ "loss": 0.0993,
+ "step": 31
+ },
+ {
+ "epoch": 0.125,
+ "grad_norm": 1.6233472727407252,
+ "learning_rate": 1e-05,
+ "loss": 0.1412,
+ "step": 32
+ },
+ {
+ "epoch": 0.125,
+ "eval_dev_acc": 0.4609375,
+ "eval_dev_token": 4228.15625,
+ "eval_runtime": 159.0398,
+ "eval_samples_per_second": 0.101,
+ "eval_steps_per_second": 0.006,
+ "step": 32
+ },
+ {
+ "epoch": 0.12890625,
+ "grad_norm": 1.5246001678514782,
+ "learning_rate": 1e-05,
+ "loss": 0.1268,
+ "step": 33
+ },
+ {
+ "epoch": 0.1328125,
+ "grad_norm": 1.020147996755851,
+ "learning_rate": 1e-05,
+ "loss": 0.166,
+ "step": 34
+ },
+ {
+ "epoch": 0.13671875,
+ "grad_norm": 0.9795032964583498,
+ "learning_rate": 1e-05,
+ "loss": 0.1223,
+ "step": 35
+ },
+ {
+ "epoch": 0.140625,
+ "grad_norm": 1.0328587053324862,
+ "learning_rate": 1e-05,
+ "loss": 0.0889,
+ "step": 36
+ },
+ {
+ "epoch": 0.14453125,
+ "grad_norm": 0.8587530858129762,
+ "learning_rate": 1e-05,
+ "loss": 0.1618,
+ "step": 37
+ },
+ {
+ "epoch": 0.1484375,
+ "grad_norm": 1.0451234874371433,
+ "learning_rate": 1e-05,
+ "loss": 0.1973,
+ "step": 38
+ },
+ {
+ "epoch": 0.15234375,
+ "grad_norm": 1.032741287831154,
+ "learning_rate": 1e-05,
+ "loss": 0.1999,
+ "step": 39
+ },
+ {
+ "epoch": 0.15625,
+ "grad_norm": 1.0128010813738295,
+ "learning_rate": 1e-05,
+ "loss": 0.1314,
+ "step": 40
+ },
+ {
+ "epoch": 0.15625,
+ "eval_dev_acc": 0.40625,
+ "eval_dev_token": 5015.7421875,
+ "eval_runtime": 167.9354,
+ "eval_samples_per_second": 0.095,
+ "eval_steps_per_second": 0.006,
+ "step": 40
+ },
+ {
+ "epoch": 0.16015625,
+ "grad_norm": 0.7085331860395175,
+ "learning_rate": 1e-05,
+ "loss": 0.1424,
+ "step": 41
+ },
+ {
+ "epoch": 0.1640625,
+ "grad_norm": 0.8522197113830303,
+ "learning_rate": 1e-05,
+ "loss": 0.1523,
+ "step": 42
+ },
+ {
+ "epoch": 0.16796875,
+ "grad_norm": 0.9700458234990689,
+ "learning_rate": 1e-05,
+ "loss": 0.1655,
+ "step": 43
+ },
+ {
+ "epoch": 0.171875,
+ "grad_norm": 2.0713947251278855,
+ "learning_rate": 1e-05,
+ "loss": 0.2946,
+ "step": 44
+ },
+ {
+ "epoch": 0.17578125,
+ "grad_norm": 1.6441862242379885,
+ "learning_rate": 1e-05,
+ "loss": 0.2547,
+ "step": 45
+ },
+ {
+ "epoch": 0.1796875,
+ "grad_norm": 1.7959964112861366,
+ "learning_rate": 1e-05,
+ "loss": 0.3009,
+ "step": 46
+ },
+ {
+ "epoch": 0.18359375,
+ "grad_norm": 1.3449858551505456,
+ "learning_rate": 1e-05,
+ "loss": 0.2094,
+ "step": 47
+ },
+ {
+ "epoch": 0.1875,
+ "grad_norm": 1.2087309569022056,
+ "learning_rate": 1e-05,
+ "loss": 0.1908,
+ "step": 48
+ },
+ {
+ "epoch": 0.1875,
+ "eval_dev_acc": 0.34375,
+ "eval_dev_token": 4538.84375,
+ "eval_runtime": 161.6976,
+ "eval_samples_per_second": 0.099,
+ "eval_steps_per_second": 0.006,
+ "step": 48
+ },
+ {
+ "epoch": 0.19140625,
+ "grad_norm": 1.1559146316352948,
+ "learning_rate": 1e-05,
+ "loss": 0.3036,
+ "step": 49
+ },
+ {
+ "epoch": 0.1953125,
+ "grad_norm": 1.131769529502962,
+ "learning_rate": 1e-05,
+ "loss": 0.2441,
+ "step": 50
+ },
+ {
+ "epoch": 0.19921875,
+ "grad_norm": 1.4116452844735226,
+ "learning_rate": 1e-05,
+ "loss": 0.2028,
+ "step": 51
+ },
+ {
+ "epoch": 0.203125,
+ "grad_norm": 0.7550364491986332,
+ "learning_rate": 1e-05,
+ "loss": 0.215,
+ "step": 52
+ },
+ {
+ "epoch": 0.20703125,
+ "grad_norm": 1.3915284765850489,
+ "learning_rate": 1e-05,
+ "loss": 0.2878,
+ "step": 53
+ },
+ {
+ "epoch": 0.2109375,
+ "grad_norm": 1.6351241901381652,
+ "learning_rate": 1e-05,
+ "loss": 0.2446,
+ "step": 54
+ },
+ {
+ "epoch": 0.21484375,
+ "grad_norm": 1.6083218458029132,
+ "learning_rate": 1e-05,
+ "loss": 0.2088,
+ "step": 55
+ },
+ {
+ "epoch": 0.21875,
+ "grad_norm": 0.7434150303822764,
+ "learning_rate": 1e-05,
+ "loss": 0.2262,
+ "step": 56
+ },
+ {
+ "epoch": 0.21875,
+ "eval_dev_acc": 0.30708661675453186,
+ "eval_dev_token": 5670.251953125,
+ "eval_runtime": 174.7692,
+ "eval_samples_per_second": 0.092,
+ "eval_steps_per_second": 0.006,
+ "step": 56
+ },
+ {
+ "epoch": 0.22265625,
+ "grad_norm": 1.0769799759099778,
+ "learning_rate": 1e-05,
+ "loss": 0.208,
+ "step": 57
+ },
+ {
+ "epoch": 0.2265625,
+ "grad_norm": 0.9298141621627772,
+ "learning_rate": 1e-05,
+ "loss": 0.1687,
+ "step": 58
+ },
+ {
+ "epoch": 0.23046875,
+ "grad_norm": 1.285492123129724,
+ "learning_rate": 1e-05,
+ "loss": 0.2427,
+ "step": 59
+ },
+ {
+ "epoch": 0.234375,
+ "grad_norm": 0.8346778861730894,
+ "learning_rate": 1e-05,
+ "loss": 0.219,
+ "step": 60
+ },
+ {
+ "epoch": 0.23828125,
+ "grad_norm": 0.9873196942775492,
+ "learning_rate": 1e-05,
+ "loss": 0.242,
+ "step": 61
+ },
+ {
+ "epoch": 0.2421875,
+ "grad_norm": 0.9596507860915271,
+ "learning_rate": 1e-05,
+ "loss": 0.2148,
+ "step": 62
+ },
+ {
+ "epoch": 0.24609375,
+ "grad_norm": 1.0988562593647762,
+ "learning_rate": 1e-05,
+ "loss": 0.2396,
+ "step": 63
+ },
+ {
+ "epoch": 0.25,
+ "grad_norm": 0.9707635131928222,
+ "learning_rate": 1e-05,
+ "loss": 0.238,
+ "step": 64
+ },
+ {
+ "epoch": 0.25,
+ "eval_dev_acc": 0.5390625,
+ "eval_dev_token": 4394.921875,
+ "eval_runtime": 161.3481,
+ "eval_samples_per_second": 0.099,
+ "eval_steps_per_second": 0.006,
+ "step": 64
+ },
+ {
+ "epoch": 0.25390625,
+ "grad_norm": 0.8083595053544823,
+ "learning_rate": 1e-05,
+ "loss": 0.293,
+ "step": 65
+ },
+ {
+ "epoch": 0.2578125,
+ "grad_norm": 0.6893947679382126,
+ "learning_rate": 1e-05,
+ "loss": 0.2866,
+ "step": 66
+ },
+ {
+ "epoch": 0.26171875,
+ "grad_norm": 1.0271679359276198,
+ "learning_rate": 1e-05,
+ "loss": 0.2276,
+ "step": 67
+ },
+ {
+ "epoch": 0.265625,
+ "grad_norm": 1.1776528602190077,
+ "learning_rate": 1e-05,
+ "loss": 0.1887,
+ "step": 68
+ },
+ {
+ "epoch": 0.26953125,
+ "grad_norm": 1.163717423684938,
+ "learning_rate": 1e-05,
+ "loss": 0.2147,
+ "step": 69
+ },
+ {
+ "epoch": 0.2734375,
+ "grad_norm": 0.8134427746893115,
+ "learning_rate": 1e-05,
+ "loss": 0.2342,
+ "step": 70
+ },
+ {
+ "epoch": 0.27734375,
+ "grad_norm": 1.4269332848478926,
+ "learning_rate": 1e-05,
+ "loss": 0.1919,
+ "step": 71
+ },
+ {
+ "epoch": 0.28125,
+ "grad_norm": 0.8200789264174901,
+ "learning_rate": 1e-05,
+ "loss": 0.2175,
+ "step": 72
+ },
+ {
+ "epoch": 0.28125,
+ "eval_dev_acc": 0.53125,
+ "eval_dev_token": 4859.7421875,
+ "eval_runtime": 166.6197,
+ "eval_samples_per_second": 0.096,
+ "eval_steps_per_second": 0.006,
+ "step": 72
+ },
+ {
+ "epoch": 0.28515625,
+ "grad_norm": 1.007316679088458,
+ "learning_rate": 1e-05,
+ "loss": 0.3108,
+ "step": 73
+ },
+ {
+ "epoch": 0.2890625,
+ "grad_norm": 0.6637709768510952,
+ "learning_rate": 1e-05,
+ "loss": 0.1794,
+ "step": 74
+ },
+ {
+ "epoch": 0.29296875,
+ "grad_norm": 1.0144512803754202,
+ "learning_rate": 1e-05,
+ "loss": 0.1905,
+ "step": 75
+ },
+ {
+ "epoch": 0.296875,
+ "grad_norm": 1.2499777112248354,
+ "learning_rate": 1e-05,
+ "loss": 0.2014,
+ "step": 76
+ },
+ {
+ "epoch": 0.30078125,
+ "grad_norm": 1.0642239482819718,
+ "learning_rate": 1e-05,
+ "loss": 0.1648,
+ "step": 77
+ },
+ {
+ "epoch": 0.3046875,
+ "grad_norm": 0.8739614674360524,
+ "learning_rate": 1e-05,
+ "loss": 0.1537,
+ "step": 78
+ },
+ {
+ "epoch": 0.30859375,
+ "grad_norm": 0.5320613340314281,
+ "learning_rate": 1e-05,
+ "loss": 0.2128,
+ "step": 79
+ },
+ {
+ "epoch": 0.3125,
+ "grad_norm": 1.2802208673828028,
+ "learning_rate": 1e-05,
+ "loss": 0.1939,
+ "step": 80
+ },
+ {
+ "epoch": 0.3125,
+ "eval_dev_acc": 0.4609375,
+ "eval_dev_token": 5065.421875,
+ "eval_runtime": 168.4523,
+ "eval_samples_per_second": 0.095,
+ "eval_steps_per_second": 0.006,
+ "step": 80
+ },
+ {
+ "epoch": 0.31640625,
+ "grad_norm": 1.1564057868614226,
+ "learning_rate": 1e-05,
+ "loss": 0.2215,
+ "step": 81
+ },
+ {
+ "epoch": 0.3203125,
+ "grad_norm": 0.7104999594850884,
+ "learning_rate": 1e-05,
+ "loss": 0.1224,
+ "step": 82
+ },
+ {
+ "epoch": 0.32421875,
+ "grad_norm": 0.6466657594813067,
+ "learning_rate": 1e-05,
+ "loss": 0.145,
+ "step": 83
+ },
+ {
+ "epoch": 0.328125,
+ "grad_norm": 1.3499118701284736,
+ "learning_rate": 1e-05,
+ "loss": 0.1963,
+ "step": 84
+ },
+ {
+ "epoch": 0.33203125,
+ "grad_norm": 0.6363338361760021,
+ "learning_rate": 1e-05,
+ "loss": 0.1781,
+ "step": 85
+ },
+ {
+ "epoch": 0.3359375,
+ "grad_norm": 0.8807906150832371,
+ "learning_rate": 1e-05,
+ "loss": 0.1426,
+ "step": 86
+ },
+ {
+ "epoch": 0.33984375,
+ "grad_norm": 0.7466707582875238,
+ "learning_rate": 1e-05,
+ "loss": 0.1629,
+ "step": 87
+ },
+ {
+ "epoch": 0.34375,
+ "grad_norm": 0.7773292125565866,
+ "learning_rate": 1e-05,
+ "loss": 0.181,
+ "step": 88
+ },
+ {
+ "epoch": 0.34375,
+ "eval_dev_acc": 0.4609375,
+ "eval_dev_token": 5092.8984375,
+ "eval_runtime": 168.9275,
+ "eval_samples_per_second": 0.095,
+ "eval_steps_per_second": 0.006,
+ "step": 88
+ },
+ {
+ "epoch": 0.34765625,
+ "grad_norm": 0.9798290139606278,
+ "learning_rate": 1e-05,
+ "loss": 0.1725,
+ "step": 89
+ },
+ {
+ "epoch": 0.3515625,
+ "grad_norm": 1.2761428002675261,
+ "learning_rate": 1e-05,
+ "loss": 0.175,
+ "step": 90
+ },
+ {
+ "epoch": 0.35546875,
+ "grad_norm": 0.5042091805859357,
+ "learning_rate": 1e-05,
+ "loss": 0.218,
+ "step": 91
+ },
+ {
+ "epoch": 0.359375,
+ "grad_norm": 1.017358230975041,
+ "learning_rate": 1e-05,
+ "loss": 0.2502,
+ "step": 92
+ },
+ {
+ "epoch": 0.36328125,
+ "grad_norm": 0.7366049175316091,
+ "learning_rate": 1e-05,
+ "loss": 0.1656,
+ "step": 93
+ },
+ {
+ "epoch": 0.3671875,
+ "grad_norm": 0.9422427666318486,
+ "learning_rate": 1e-05,
+ "loss": 0.1455,
+ "step": 94
+ },
+ {
+ "epoch": 0.37109375,
+ "grad_norm": 0.7689775552730859,
+ "learning_rate": 1e-05,
+ "loss": 0.1485,
+ "step": 95
+ },
+ {
+ "epoch": 0.375,
+ "grad_norm": 0.9090457524355386,
+ "learning_rate": 1e-05,
+ "loss": 0.1411,
+ "step": 96
+ },
+ {
+ "epoch": 0.375,
+ "eval_dev_acc": 0.453125,
+ "eval_dev_token": 4948.8359375,
+ "eval_runtime": 165.5377,
+ "eval_samples_per_second": 0.097,
+ "eval_steps_per_second": 0.006,
+ "step": 96
+ },
+ {
+ "epoch": 0.37890625,
+ "grad_norm": 0.7235724828873173,
+ "learning_rate": 1e-05,
+ "loss": 0.2193,
+ "step": 97
+ },
+ {
+ "epoch": 0.3828125,
+ "grad_norm": 0.7200445685294068,
+ "learning_rate": 1e-05,
+ "loss": 0.1985,
+ "step": 98
+ },
+ {
+ "epoch": 0.38671875,
+ "grad_norm": 0.6060156821220763,
+ "learning_rate": 1e-05,
+ "loss": 0.2096,
+ "step": 99
+ },
+ {
+ "epoch": 0.390625,
+ "grad_norm": 0.7114968462244617,
+ "learning_rate": 1e-05,
+ "loss": 0.1928,
+ "step": 100
+ },
+ {
+ "epoch": 0.39453125,
+ "grad_norm": 0.6397518359548336,
+ "learning_rate": 1e-05,
+ "loss": 0.2165,
+ "step": 101
+ },
+ {
+ "epoch": 0.3984375,
+ "grad_norm": 0.7027126137819094,
+ "learning_rate": 1e-05,
+ "loss": 0.2263,
+ "step": 102
+ },
+ {
+ "epoch": 0.40234375,
+ "grad_norm": 0.8648981933002193,
+ "learning_rate": 1e-05,
+ "loss": 0.2874,
+ "step": 103
+ },
+ {
+ "epoch": 0.40625,
+ "grad_norm": 0.9742992968412495,
+ "learning_rate": 1e-05,
+ "loss": 0.1755,
+ "step": 104
+ },
+ {
+ "epoch": 0.40625,
+ "eval_dev_acc": 0.3515625,
+ "eval_dev_token": 5303.1796875,
+ "eval_runtime": 173.9477,
+ "eval_samples_per_second": 0.092,
+ "eval_steps_per_second": 0.006,
+ "step": 104
+ },
+ {
+ "epoch": 0.41015625,
+ "grad_norm": 0.6358933759276069,
+ "learning_rate": 1e-05,
+ "loss": 0.1907,
+ "step": 105
+ },
+ {
+ "epoch": 0.4140625,
+ "grad_norm": 0.7859972506268991,
+ "learning_rate": 1e-05,
+ "loss": 0.1731,
+ "step": 106
+ },
+ {
+ "epoch": 0.41796875,
+ "grad_norm": 0.6429885607052577,
+ "learning_rate": 1e-05,
+ "loss": 0.187,
+ "step": 107
+ },
+ {
+ "epoch": 0.421875,
+ "grad_norm": 0.6314004528855494,
+ "learning_rate": 1e-05,
+ "loss": 0.2185,
+ "step": 108
+ },
+ {
+ "epoch": 0.42578125,
+ "grad_norm": 0.8243656111706104,
+ "learning_rate": 1e-05,
+ "loss": 0.1384,
+ "step": 109
+ },
+ {
+ "epoch": 0.4296875,
+ "grad_norm": 0.7310074535827911,
+ "learning_rate": 1e-05,
+ "loss": 0.1724,
+ "step": 110
+ },
+ {
+ "epoch": 0.43359375,
+ "grad_norm": 1.8710293554497974,
+ "learning_rate": 1e-05,
+ "loss": 0.273,
+ "step": 111
+ },
+ {
+ "epoch": 0.4375,
+ "grad_norm": 1.3308164398688347,
+ "learning_rate": 1e-05,
+ "loss": 0.2852,
+ "step": 112
+ },
+ {
+ "epoch": 0.4375,
+ "eval_dev_acc": 0.296875,
+ "eval_dev_token": 5770.9375,
+ "eval_runtime": 175.5918,
+ "eval_samples_per_second": 0.091,
+ "eval_steps_per_second": 0.006,
+ "step": 112
+ },
+ {
+ "epoch": 0.44140625,
+ "grad_norm": 0.4499041384963393,
+ "learning_rate": 1e-05,
+ "loss": 0.1845,
+ "step": 113
+ },
+ {
+ "epoch": 0.4453125,
+ "grad_norm": 0.5818915994231291,
+ "learning_rate": 1e-05,
+ "loss": 0.2709,
+ "step": 114
+ },
+ {
+ "epoch": 0.44921875,
+ "grad_norm": 0.6130904000526848,
+ "learning_rate": 1e-05,
+ "loss": 0.231,
+ "step": 115
+ },
+ {
+ "epoch": 0.453125,
+ "grad_norm": 0.7266034880537791,
+ "learning_rate": 1e-05,
+ "loss": 0.1555,
+ "step": 116
+ },
+ {
+ "epoch": 0.45703125,
+ "grad_norm": 0.425032745279421,
+ "learning_rate": 1e-05,
+ "loss": 0.1733,
+ "step": 117
+ },
+ {
+ "epoch": 0.4609375,
+ "grad_norm": 0.41408811254876093,
+ "learning_rate": 1e-05,
+ "loss": 0.1793,
+ "step": 118
+ },
+ {
+ "epoch": 0.46484375,
+ "grad_norm": 0.8433491024471641,
+ "learning_rate": 1e-05,
+ "loss": 0.2335,
+ "step": 119
+ },
+ {
+ "epoch": 0.46875,
+ "grad_norm": 0.5585183306922875,
+ "learning_rate": 1e-05,
+ "loss": 0.2515,
+ "step": 120
+ },
+ {
+ "epoch": 0.46875,
+ "eval_dev_acc": 0.4724409580230713,
+ "eval_dev_token": 4777.55126953125,
+ "eval_runtime": 165.1485,
+ "eval_samples_per_second": 0.097,
+ "eval_steps_per_second": 0.006,
+ "step": 120
+ },
+ {
+ "epoch": 0.47265625,
+ "grad_norm": 0.9520218462259554,
+ "learning_rate": 1e-05,
+ "loss": 0.2613,
+ "step": 121
+ },
+ {
+ "epoch": 0.4765625,
+ "grad_norm": 0.4858585527334522,
+ "learning_rate": 1e-05,
+ "loss": 0.2379,
+ "step": 122
+ },
+ {
+ "epoch": 0.48046875,
+ "grad_norm": 0.5772160567620949,
+ "learning_rate": 1e-05,
+ "loss": 0.241,
+ "step": 123
+ },
+ {
+ "epoch": 0.484375,
+ "grad_norm": 0.731954162407159,
+ "learning_rate": 1e-05,
+ "loss": 0.2482,
+ "step": 124
+ },
+ {
+ "epoch": 0.48828125,
+ "grad_norm": 0.49226621710163243,
+ "learning_rate": 1e-05,
+ "loss": 0.2333,
+ "step": 125
+ },
+ {
+ "epoch": 0.4921875,
+ "grad_norm": 0.43779404197089106,
+ "learning_rate": 1e-05,
+ "loss": 0.185,
+ "step": 126
+ },
+ {
+ "epoch": 0.49609375,
+ "grad_norm": 0.6856986141306837,
+ "learning_rate": 1e-05,
+ "loss": 0.1943,
+ "step": 127
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.6558122415773976,
+ "learning_rate": 1e-05,
+ "loss": 0.2185,
+ "step": 128
+ },
+ {
+ "epoch": 0.5,
+ "eval_dev_acc": 0.4765625,
+ "eval_dev_token": 4368.859375,
+ "eval_runtime": 161.9718,
+ "eval_samples_per_second": 0.099,
+ "eval_steps_per_second": 0.006,
+ "step": 128
+ },
+ {
+ "epoch": 0.50390625,
+ "grad_norm": 0.4099906022533745,
+ "learning_rate": 1e-05,
+ "loss": 0.2113,
+ "step": 129
+ },
+ {
+ "epoch": 0.5078125,
+ "grad_norm": 0.49752415105495956,
+ "learning_rate": 1e-05,
+ "loss": 0.2217,
+ "step": 130
+ },
+ {
+ "epoch": 0.51171875,
+ "grad_norm": 0.8912790018467623,
+ "learning_rate": 1e-05,
+ "loss": 0.3422,
+ "step": 131
+ },
+ {
+ "epoch": 0.515625,
+ "grad_norm": 0.6764829647253893,
+ "learning_rate": 1e-05,
+ "loss": 0.2055,
+ "step": 132
+ },
+ {
+ "epoch": 0.51953125,
+ "grad_norm": 0.8399641090693946,
+ "learning_rate": 1e-05,
+ "loss": 0.2087,
+ "step": 133
+ },
+ {
+ "epoch": 0.5234375,
+ "grad_norm": 0.4594160953603203,
+ "learning_rate": 1e-05,
+ "loss": 0.2093,
+ "step": 134
+ },
+ {
+ "epoch": 0.52734375,
+ "grad_norm": 0.7432138703184232,
+ "learning_rate": 1e-05,
+ "loss": 0.1969,
+ "step": 135
+ },
+ {
+ "epoch": 0.53125,
+ "grad_norm": 0.4584467325236011,
+ "learning_rate": 1e-05,
+ "loss": 0.1806,
+ "step": 136
+ },
+ {
+ "epoch": 0.53125,
+ "eval_dev_acc": 0.4765625,
+ "eval_dev_token": 4603.53125,
+ "eval_runtime": 164.3452,
+ "eval_samples_per_second": 0.097,
+ "eval_steps_per_second": 0.006,
+ "step": 136
+ },
+ {
+ "epoch": 0.53515625,
+ "grad_norm": 0.6458588312529675,
+ "learning_rate": 1e-05,
+ "loss": 0.2087,
+ "step": 137
+ },
+ {
+ "epoch": 0.5390625,
+ "grad_norm": 0.7370624067340756,
+ "learning_rate": 1e-05,
+ "loss": 0.1854,
+ "step": 138
+ },
+ {
+ "epoch": 0.54296875,
+ "grad_norm": 0.7141604462138248,
+ "learning_rate": 1e-05,
+ "loss": 0.2535,
+ "step": 139
+ },
+ {
+ "epoch": 0.546875,
+ "grad_norm": 0.8212814690178184,
+ "learning_rate": 1e-05,
+ "loss": 0.1668,
+ "step": 140
+ },
+ {
+ "epoch": 0.55078125,
+ "grad_norm": 0.5799692948316157,
+ "learning_rate": 1e-05,
+ "loss": 0.2375,
+ "step": 141
+ },
+ {
+ "epoch": 0.5546875,
+ "grad_norm": 0.5333639624775814,
+ "learning_rate": 1e-05,
+ "loss": 0.1737,
+ "step": 142
+ },
+ {
+ "epoch": 0.55859375,
+ "grad_norm": 0.4076841439195106,
+ "learning_rate": 1e-05,
+ "loss": 0.1627,
+ "step": 143
+ },
+ {
+ "epoch": 0.5625,
+ "grad_norm": 0.4118175478201596,
+ "learning_rate": 1e-05,
+ "loss": 0.1576,
+ "step": 144
+ },
+ {
+ "epoch": 0.5625,
+ "eval_dev_acc": 0.5234375,
+ "eval_dev_token": 5125.0703125,
+ "eval_runtime": 168.804,
+ "eval_samples_per_second": 0.095,
+ "eval_steps_per_second": 0.006,
+ "step": 144
+ },
+ {
+ "epoch": 0.56640625,
+ "grad_norm": 0.5988381099011506,
+ "learning_rate": 1e-05,
+ "loss": 0.1656,
+ "step": 145
+ },
+ {
+ "epoch": 0.5703125,
+ "grad_norm": 0.9328153493065982,
+ "learning_rate": 1e-05,
+ "loss": 0.1788,
+ "step": 146
+ },
+ {
+ "epoch": 0.57421875,
+ "grad_norm": 0.8013592126955402,
+ "learning_rate": 1e-05,
+ "loss": 0.2009,
+ "step": 147
+ },
+ {
+ "epoch": 0.578125,
+ "grad_norm": 0.4868159061171701,
+ "learning_rate": 1e-05,
+ "loss": 0.217,
+ "step": 148
+ },
+ {
+ "epoch": 0.58203125,
+ "grad_norm": 0.6758953539585006,
+ "learning_rate": 1e-05,
+ "loss": 0.2344,
+ "step": 149
+ },
+ {
+ "epoch": 0.5859375,
+ "grad_norm": 0.8609458752061137,
+ "learning_rate": 1e-05,
+ "loss": 0.1939,
+ "step": 150
+ },
+ {
+ "epoch": 0.58984375,
+ "grad_norm": 0.45913847739444186,
+ "learning_rate": 1e-05,
+ "loss": 0.1691,
+ "step": 151
+ },
+ {
+ "epoch": 0.59375,
+ "grad_norm": 0.8064977044716175,
+ "learning_rate": 1e-05,
+ "loss": 0.1949,
+ "step": 152
+ },
+ {
+ "epoch": 0.59375,
+ "eval_dev_acc": 0.40625,
+ "eval_dev_token": 4508.484375,
+ "eval_runtime": 160.3398,
+ "eval_samples_per_second": 0.1,
+ "eval_steps_per_second": 0.006,
+ "step": 152
+ },
+ {
+ "epoch": 0.59765625,
+ "grad_norm": 0.9904042315049291,
+ "learning_rate": 1e-05,
+ "loss": 0.2253,
+ "step": 153
+ },
+ {
+ "epoch": 0.6015625,
+ "grad_norm": 0.5524318414569037,
+ "learning_rate": 1e-05,
+ "loss": 0.2535,
+ "step": 154
+ },
+ {
+ "epoch": 0.60546875,
+ "grad_norm": 0.418186463867415,
+ "learning_rate": 1e-05,
+ "loss": 0.1884,
+ "step": 155
+ },
+ {
+ "epoch": 0.609375,
+ "grad_norm": 0.6311027708045368,
+ "learning_rate": 1e-05,
+ "loss": 0.2408,
+ "step": 156
+ },
+ {
+ "epoch": 0.61328125,
+ "grad_norm": 0.4550696199781805,
+ "learning_rate": 1e-05,
+ "loss": 0.173,
+ "step": 157
+ },
+ {
+ "epoch": 0.6171875,
+ "grad_norm": 0.4596598696608727,
+ "learning_rate": 1e-05,
+ "loss": 0.1592,
+ "step": 158
+ },
+ {
+ "epoch": 0.62109375,
+ "grad_norm": 0.5573937890044522,
+ "learning_rate": 1e-05,
+ "loss": 0.1748,
+ "step": 159
+ },
+ {
+ "epoch": 0.625,
+ "grad_norm": 1.0862165315332113,
+ "learning_rate": 1e-05,
+ "loss": 0.2369,
+ "step": 160
+ },
+ {
+ "epoch": 0.625,
+ "eval_dev_acc": 0.4296875,
+ "eval_dev_token": 4869.8828125,
+ "eval_runtime": 167.2914,
+ "eval_samples_per_second": 0.096,
+ "eval_steps_per_second": 0.006,
+ "step": 160
+ },
+ {
+ "epoch": 0.62890625,
+ "grad_norm": 0.46051384064237827,
+ "learning_rate": 1e-05,
+ "loss": 0.2086,
+ "step": 161
+ },
+ {
+ "epoch": 0.6328125,
+ "grad_norm": 0.7125397532570018,
+ "learning_rate": 1e-05,
+ "loss": 0.2212,
+ "step": 162
+ },
+ {
+ "epoch": 0.63671875,
+ "grad_norm": 0.564820498711706,
+ "learning_rate": 1e-05,
+ "loss": 0.3019,
+ "step": 163
+ },
+ {
+ "epoch": 0.640625,
+ "grad_norm": 0.5218656690400247,
+ "learning_rate": 1e-05,
+ "loss": 0.1324,
+ "step": 164
+ },
+ {
+ "epoch": 0.64453125,
+ "grad_norm": 0.4994022980399308,
+ "learning_rate": 1e-05,
+ "loss": 0.1438,
+ "step": 165
+ },
+ {
+ "epoch": 0.6484375,
+ "grad_norm": 0.7016809849517179,
+ "learning_rate": 1e-05,
+ "loss": 0.2791,
+ "step": 166
+ },
+ {
+ "epoch": 0.65234375,
+ "grad_norm": 0.597463304680723,
+ "learning_rate": 1e-05,
+ "loss": 0.1749,
+ "step": 167
+ },
+ {
+ "epoch": 0.65625,
+ "grad_norm": 0.5536855781273838,
+ "learning_rate": 1e-05,
+ "loss": 0.2391,
+ "step": 168
+ },
+ {
+ "epoch": 0.65625,
+ "eval_dev_acc": 0.3203125,
+ "eval_dev_token": 5451.3671875,
+ "eval_runtime": 172.7574,
+ "eval_samples_per_second": 0.093,
+ "eval_steps_per_second": 0.006,
+ "step": 168
+ },
+ {
+ "epoch": 0.66015625,
+ "grad_norm": 0.9103508979108635,
+ "learning_rate": 1e-05,
+ "loss": 0.2613,
+ "step": 169
+ },
+ {
+ "epoch": 0.6640625,
+ "grad_norm": 0.4928845564740678,
+ "learning_rate": 1e-05,
+ "loss": 0.215,
+ "step": 170
+ },
+ {
+ "epoch": 0.66796875,
+ "grad_norm": 0.8690405638773996,
+ "learning_rate": 1e-05,
+ "loss": 0.2355,
+ "step": 171
+ },
+ {
+ "epoch": 0.671875,
+ "grad_norm": 0.5511255682147113,
+ "learning_rate": 1e-05,
+ "loss": 0.2406,
+ "step": 172
+ },
+ {
+ "epoch": 0.67578125,
+ "grad_norm": 0.44346107905460214,
+ "learning_rate": 1e-05,
+ "loss": 0.1867,
+ "step": 173
+ },
+ {
+ "epoch": 0.6796875,
+ "grad_norm": 0.4019557678019079,
+ "learning_rate": 1e-05,
+ "loss": 0.1488,
+ "step": 174
+ },
+ {
+ "epoch": 0.68359375,
+ "grad_norm": 0.4139658009208469,
+ "learning_rate": 1e-05,
+ "loss": 0.1666,
+ "step": 175
+ },
+ {
+ "epoch": 0.6875,
+ "grad_norm": 0.45363011716779816,
+ "learning_rate": 1e-05,
+ "loss": 0.2006,
+ "step": 176
+ },
+ {
+ "epoch": 0.6875,
+ "eval_dev_acc": 0.3385826647281647,
+ "eval_dev_token": 4971.81884765625,
+ "eval_runtime": 166.9967,
+ "eval_samples_per_second": 0.096,
+ "eval_steps_per_second": 0.006,
+ "step": 176
+ },
+ {
+ "epoch": 0.69140625,
+ "grad_norm": 0.46674698673244774,
+ "learning_rate": 1e-05,
+ "loss": 0.1788,
+ "step": 177
+ },
+ {
+ "epoch": 0.6953125,
+ "grad_norm": 0.5396579551057291,
+ "learning_rate": 1e-05,
+ "loss": 0.1857,
+ "step": 178
+ },
+ {
+ "epoch": 0.69921875,
+ "grad_norm": 0.42472472699800484,
+ "learning_rate": 1e-05,
+ "loss": 0.1707,
+ "step": 179
+ },
+ {
+ "epoch": 0.703125,
+ "grad_norm": 0.4208916108378261,
+ "learning_rate": 1e-05,
+ "loss": 0.1736,
+ "step": 180
+ },
+ {
+ "epoch": 0.70703125,
+ "grad_norm": 0.5161632347165661,
+ "learning_rate": 1e-05,
+ "loss": 0.2074,
+ "step": 181
+ },
+ {
+ "epoch": 0.7109375,
+ "grad_norm": 0.4851147968745633,
+ "learning_rate": 1e-05,
+ "loss": 0.2183,
+ "step": 182
+ },
+ {
+ "epoch": 0.71484375,
+ "grad_norm": 0.5286494967968609,
+ "learning_rate": 1e-05,
+ "loss": 0.1877,
+ "step": 183
+ },
+ {
+ "epoch": 0.71875,
+ "grad_norm": 0.5399316089624949,
+ "learning_rate": 1e-05,
+ "loss": 0.209,
+ "step": 184
+ },
+ {
+ "epoch": 0.71875,
+ "eval_dev_acc": 0.3984375,
+ "eval_dev_token": 4787.84375,
+ "eval_runtime": 166.2574,
+ "eval_samples_per_second": 0.096,
+ "eval_steps_per_second": 0.006,
+ "step": 184
+ },
+ {
+ "epoch": 0.72265625,
+ "grad_norm": 0.7188938790166789,
+ "learning_rate": 1e-05,
+ "loss": 0.2065,
+ "step": 185
+ },
+ {
+ "epoch": 0.7265625,
+ "grad_norm": 0.5843767003652576,
+ "learning_rate": 1e-05,
+ "loss": 0.2356,
+ "step": 186
+ },
+ {
+ "epoch": 0.73046875,
+ "grad_norm": 0.4904003204685076,
+ "learning_rate": 1e-05,
+ "loss": 0.201,
+ "step": 187
+ },
+ {
+ "epoch": 0.734375,
+ "grad_norm": 0.485266158116283,
+ "learning_rate": 1e-05,
+ "loss": 0.1869,
+ "step": 188
+ },
+ {
+ "epoch": 0.73828125,
+ "grad_norm": 0.5242977395658632,
+ "learning_rate": 1e-05,
+ "loss": 0.2122,
+ "step": 189
+ },
+ {
+ "epoch": 0.7421875,
+ "grad_norm": 0.5417537780138298,
+ "learning_rate": 1e-05,
+ "loss": 0.2799,
+ "step": 190
+ },
+ {
+ "epoch": 0.74609375,
+ "grad_norm": 0.48949419193338123,
+ "learning_rate": 1e-05,
+ "loss": 0.212,
+ "step": 191
+ },
+ {
+ "epoch": 0.75,
+ "grad_norm": 0.48118963817889204,
+ "learning_rate": 1e-05,
+ "loss": 0.2195,
+ "step": 192
+ },
+ {
+ "epoch": 0.75,
+ "eval_dev_acc": 0.453125,
+ "eval_dev_token": 5056.7421875,
+ "eval_runtime": 168.273,
+ "eval_samples_per_second": 0.095,
+ "eval_steps_per_second": 0.006,
+ "step": 192
+ },
+ {
+ "epoch": 0.75390625,
+ "grad_norm": 0.6844465372064547,
+ "learning_rate": 1e-05,
+ "loss": 0.1645,
+ "step": 193
+ },
+ {
+ "epoch": 0.7578125,
+ "grad_norm": 0.49653100043792153,
+ "learning_rate": 1e-05,
+ "loss": 0.2023,
+ "step": 194
+ },
+ {
+ "epoch": 0.76171875,
+ "grad_norm": 0.5539027026151374,
+ "learning_rate": 1e-05,
+ "loss": 0.2348,
+ "step": 195
+ },
+ {
+ "epoch": 0.765625,
+ "grad_norm": 0.5003270709383194,
+ "learning_rate": 1e-05,
+ "loss": 0.2545,
+ "step": 196
+ },
+ {
+ "epoch": 0.76953125,
+ "grad_norm": 0.5666703162116131,
+ "learning_rate": 1e-05,
+ "loss": 0.2739,
+ "step": 197
+ },
+ {
+ "epoch": 0.7734375,
+ "grad_norm": 0.5281121627729704,
+ "learning_rate": 1e-05,
+ "loss": 0.1927,
+ "step": 198
+ },
+ {
+ "epoch": 0.77734375,
+ "grad_norm": 0.4691586351966124,
+ "learning_rate": 1e-05,
+ "loss": 0.2101,
+ "step": 199
+ },
+ {
+ "epoch": 0.78125,
+ "grad_norm": 0.43348894899907703,
+ "learning_rate": 1e-05,
+ "loss": 0.1636,
+ "step": 200
+ },
+ {
+ "epoch": 0.78125,
+ "eval_dev_acc": 0.4296875,
+ "eval_dev_token": 5082.265625,
+ "eval_runtime": 169.7777,
+ "eval_samples_per_second": 0.094,
+ "eval_steps_per_second": 0.006,
+ "step": 200
+ },
+ {
+ "epoch": 0.78515625,
+ "grad_norm": 0.4995118305726593,
+ "learning_rate": 1e-05,
+ "loss": 0.2149,
+ "step": 201
+ },
+ {
+ "epoch": 0.7890625,
+ "grad_norm": 0.3958721084761467,
+ "learning_rate": 1e-05,
+ "loss": 0.1732,
+ "step": 202
+ },
+ {
+ "epoch": 0.79296875,
+ "grad_norm": 0.4883258744044862,
+ "learning_rate": 1e-05,
+ "loss": 0.219,
+ "step": 203
+ },
+ {
+ "epoch": 0.796875,
+ "grad_norm": 0.45472746506302575,
+ "learning_rate": 1e-05,
+ "loss": 0.2187,
+ "step": 204
+ },
+ {
+ "epoch": 0.80078125,
+ "grad_norm": 0.45006095039367805,
+ "learning_rate": 1e-05,
+ "loss": 0.1924,
+ "step": 205
+ },
+ {
+ "epoch": 0.8046875,
+ "grad_norm": 0.4127537232406072,
+ "learning_rate": 1e-05,
+ "loss": 0.1736,
+ "step": 206
+ },
+ {
+ "epoch": 0.80859375,
+ "grad_norm": 0.4669392415601201,
+ "learning_rate": 1e-05,
+ "loss": 0.1847,
+ "step": 207
+ },
+ {
+ "epoch": 0.8125,
+ "grad_norm": 0.41469363114093816,
+ "learning_rate": 1e-05,
+ "loss": 0.1556,
+ "step": 208
+ },
+ {
+ "epoch": 0.8125,
+ "eval_dev_acc": 0.4609375,
+ "eval_dev_token": 4918.28125,
+ "eval_runtime": 166.5675,
+ "eval_samples_per_second": 0.096,
+ "eval_steps_per_second": 0.006,
+ "step": 208
+ },
+ {
+ "epoch": 0.81640625,
+ "grad_norm": 0.4433576280938302,
+ "learning_rate": 1e-05,
+ "loss": 0.1934,
+ "step": 209
+ },
+ {
+ "epoch": 0.8203125,
+ "grad_norm": 0.4355305023653351,
+ "learning_rate": 1e-05,
+ "loss": 0.1742,
+ "step": 210
+ },
+ {
+ "epoch": 0.82421875,
+ "grad_norm": 0.44938618579632195,
+ "learning_rate": 1e-05,
+ "loss": 0.1902,
+ "step": 211
+ },
+ {
+ "epoch": 0.828125,
+ "grad_norm": 0.5351771463999816,
+ "learning_rate": 1e-05,
+ "loss": 0.2148,
+ "step": 212
+ },
+ {
+ "epoch": 0.83203125,
+ "grad_norm": 0.5839350362138708,
+ "learning_rate": 1e-05,
+ "loss": 0.275,
+ "step": 213
+ },
+ {
+ "epoch": 0.8359375,
+ "grad_norm": 0.6964110745693202,
+ "learning_rate": 1e-05,
+ "loss": 0.2179,
+ "step": 214
+ },
+ {
+ "epoch": 0.83984375,
+ "grad_norm": 0.4337830660702992,
+ "learning_rate": 1e-05,
+ "loss": 0.2152,
+ "step": 215
+ },
+ {
+ "epoch": 0.84375,
+ "grad_norm": 0.46223312750006246,
+ "learning_rate": 1e-05,
+ "loss": 0.2405,
+ "step": 216
+ },
+ {
+ "epoch": 0.84375,
+ "eval_dev_acc": 0.3828125,
+ "eval_dev_token": 5435.3046875,
+ "eval_runtime": 173.8173,
+ "eval_samples_per_second": 0.092,
+ "eval_steps_per_second": 0.006,
+ "step": 216
+ },
+ {
+ "epoch": 0.84765625,
+ "grad_norm": 0.5541820526606585,
+ "learning_rate": 1e-05,
+ "loss": 0.2751,
+ "step": 217
+ },
+ {
+ "epoch": 0.8515625,
+ "grad_norm": 0.4662570041545537,
+ "learning_rate": 1e-05,
+ "loss": 0.2142,
+ "step": 218
+ },
+ {
+ "epoch": 0.85546875,
+ "grad_norm": 0.7737037625157579,
+ "learning_rate": 1e-05,
+ "loss": 0.2397,
+ "step": 219
+ },
+ {
+ "epoch": 0.859375,
+ "grad_norm": 0.5572195616624243,
+ "learning_rate": 1e-05,
+ "loss": 0.2421,
+ "step": 220
+ },
+ {
+ "epoch": 0.86328125,
+ "grad_norm": 0.5088509372691609,
+ "learning_rate": 1e-05,
+ "loss": 0.1875,
+ "step": 221
+ },
+ {
+ "epoch": 0.8671875,
+ "grad_norm": 0.508699458613964,
+ "learning_rate": 1e-05,
+ "loss": 0.1927,
+ "step": 222
+ },
+ {
+ "epoch": 0.87109375,
+ "grad_norm": 0.5150091482241945,
+ "learning_rate": 1e-05,
+ "loss": 0.2536,
+ "step": 223
+ },
+ {
+ "epoch": 0.875,
+ "grad_norm": 0.5203627078659161,
+ "learning_rate": 1e-05,
+ "loss": 0.2571,
+ "step": 224
+ },
+ {
+ "epoch": 0.875,
+ "eval_dev_acc": 0.3515625,
+ "eval_dev_token": 5227.0859375,
+ "eval_runtime": 170.2355,
+ "eval_samples_per_second": 0.094,
+ "eval_steps_per_second": 0.006,
+ "step": 224
+ },
+ {
+ "epoch": 0.87890625,
+ "grad_norm": 0.5279392216696818,
+ "learning_rate": 1e-05,
+ "loss": 0.2278,
+ "step": 225
+ },
+ {
+ "epoch": 0.8828125,
+ "grad_norm": 0.45017131620724865,
+ "learning_rate": 1e-05,
+ "loss": 0.2132,
+ "step": 226
+ },
+ {
+ "epoch": 0.88671875,
+ "grad_norm": 0.48915211275869575,
+ "learning_rate": 1e-05,
+ "loss": 0.2627,
+ "step": 227
+ },
+ {
+ "epoch": 0.890625,
+ "grad_norm": 0.4606618945421734,
+ "learning_rate": 1e-05,
+ "loss": 0.1528,
+ "step": 228
+ },
+ {
+ "epoch": 0.89453125,
+ "grad_norm": 0.5072593200666395,
+ "learning_rate": 1e-05,
+ "loss": 0.2148,
+ "step": 229
+ },
+ {
+ "epoch": 0.8984375,
+ "grad_norm": 0.5513069869439534,
+ "learning_rate": 1e-05,
+ "loss": 0.2319,
+ "step": 230
+ },
+ {
+ "epoch": 0.90234375,
+ "grad_norm": 0.4917083878550277,
+ "learning_rate": 1e-05,
+ "loss": 0.1989,
+ "step": 231
+ },
+ {
+ "epoch": 0.90625,
+ "grad_norm": 0.4027028580105545,
+ "learning_rate": 1e-05,
+ "loss": 0.1398,
+ "step": 232
+ },
+ {
+ "epoch": 0.90625,
+ "eval_dev_acc": 0.3779527544975281,
+ "eval_dev_token": 5651.6455078125,
+ "eval_runtime": 175.5543,
+ "eval_samples_per_second": 0.091,
+ "eval_steps_per_second": 0.006,
+ "step": 232
+ },
+ {
+ "epoch": 0.91015625,
+ "grad_norm": 0.4098440727615931,
+ "learning_rate": 1e-05,
+ "loss": 0.1481,
+ "step": 233
+ },
+ {
+ "epoch": 0.9140625,
+ "grad_norm": 0.4379253949500134,
+ "learning_rate": 1e-05,
+ "loss": 0.172,
+ "step": 234
+ },
+ {
+ "epoch": 0.91796875,
+ "grad_norm": 0.6161974608496972,
+ "learning_rate": 1e-05,
+ "loss": 0.2234,
+ "step": 235
+ },
+ {
+ "epoch": 0.921875,
+ "grad_norm": 0.6431694552333217,
+ "learning_rate": 1e-05,
+ "loss": 0.2928,
+ "step": 236
+ },
+ {
+ "epoch": 0.92578125,
+ "grad_norm": 0.7524837454023333,
+ "learning_rate": 1e-05,
+ "loss": 0.3518,
+ "step": 237
+ },
+ {
+ "epoch": 0.9296875,
+ "grad_norm": 0.5137794157548315,
+ "learning_rate": 1e-05,
+ "loss": 0.2371,
+ "step": 238
+ },
+ {
+ "epoch": 0.93359375,
+ "grad_norm": 0.42726761741926383,
+ "learning_rate": 1e-05,
+ "loss": 0.1349,
+ "step": 239
+ },
+ {
+ "epoch": 0.9375,
+ "grad_norm": 0.50721507122848,
+ "learning_rate": 1e-05,
+ "loss": 0.147,
+ "step": 240
+ },
+ {
+ "epoch": 0.9375,
+ "eval_dev_acc": 0.4375,
+ "eval_dev_token": 5554.34375,
+ "eval_runtime": 173.4206,
+ "eval_samples_per_second": 0.092,
+ "eval_steps_per_second": 0.006,
+ "step": 240
+ },
+ {
+ "epoch": 0.94140625,
+ "grad_norm": 0.5085504060972834,
+ "learning_rate": 1e-05,
+ "loss": 0.2115,
+ "step": 241
+ },
+ {
+ "epoch": 0.9453125,
+ "grad_norm": 0.5245333395138617,
+ "learning_rate": 1e-05,
+ "loss": 0.2203,
+ "step": 242
+ },
+ {
+ "epoch": 0.94921875,
+ "grad_norm": 0.5149241747645703,
+ "learning_rate": 1e-05,
+ "loss": 0.1935,
+ "step": 243
+ },
+ {
+ "epoch": 0.953125,
+ "grad_norm": 0.45199967311107936,
+ "learning_rate": 1e-05,
+ "loss": 0.1875,
+ "step": 244
+ },
+ {
+ "epoch": 0.95703125,
+ "grad_norm": 0.6017279864923942,
+ "learning_rate": 1e-05,
+ "loss": 0.1964,
+ "step": 245
+ },
+ {
+ "epoch": 0.9609375,
+ "grad_norm": 0.541548647166723,
+ "learning_rate": 1e-05,
+ "loss": 0.2029,
+ "step": 246
+ },
+ {
+ "epoch": 0.96484375,
+ "grad_norm": 0.7095706252744872,
+ "learning_rate": 1e-05,
+ "loss": 0.1824,
+ "step": 247
+ },
+ {
+ "epoch": 0.96875,
+ "grad_norm": 0.6630534512223186,
+ "learning_rate": 1e-05,
+ "loss": 0.2346,
+ "step": 248
+ },
+ {
+ "epoch": 0.96875,
+ "eval_dev_acc": 0.5234375,
+ "eval_dev_token": 5464.203125,
+ "eval_runtime": 173.0858,
+ "eval_samples_per_second": 0.092,
+ "eval_steps_per_second": 0.006,
+ "step": 248
+ },
+ {
+ "epoch": 0.97265625,
+ "grad_norm": 0.7470938668923351,
+ "learning_rate": 1e-05,
+ "loss": 0.3028,
+ "step": 249
+ },
+ {
+ "epoch": 0.9765625,
+ "grad_norm": 0.534162369114681,
+ "learning_rate": 1e-05,
+ "loss": 0.243,
+ "step": 250
+ },
+ {
+ "epoch": 0.98046875,
+ "grad_norm": 0.5240149993617814,
+ "learning_rate": 1e-05,
+ "loss": 0.2475,
+ "step": 251
+ },
+ {
+ "epoch": 0.984375,
+ "grad_norm": 0.48058164633897993,
+ "learning_rate": 1e-05,
+ "loss": 0.2234,
+ "step": 252
+ },
+ {
+ "epoch": 0.98828125,
+ "grad_norm": 0.5427424821749397,
+ "learning_rate": 1e-05,
+ "loss": 0.2338,
+ "step": 253
+ },
+ {
+ "epoch": 0.9921875,
+ "grad_norm": 0.5309304323745797,
+ "learning_rate": 1e-05,
+ "loss": 0.2751,
+ "step": 254
+ },
+ {
+ "epoch": 0.99609375,
+ "grad_norm": 0.4961154954055658,
+ "learning_rate": 1e-05,
+ "loss": 0.2329,
+ "step": 255
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.519835488758917,
+ "learning_rate": 1e-05,
+ "loss": 0.2182,
+ "step": 256
+ },
+ {
+ "epoch": 1.0,
+ "eval_dev_acc": 0.4453125,
+ "eval_dev_token": 5674.0546875,
+ "eval_runtime": 175.8662,
+ "eval_samples_per_second": 0.091,
+ "eval_steps_per_second": 0.006,
+ "step": 256
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 256,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 9223372036854775807,
+ "save_steps": 64,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 31380919492608.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/tldr-7b-checkpoint-256/training_args.bin b/tldr-7b-checkpoint-256/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..c23f084720f8fb46f8ff9e8bef5175480d3cce40
--- /dev/null
+++ b/tldr-7b-checkpoint-256/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:293697045c82976ebdb828b71e8c654446c5ec0cc96c6a95e6cd39036cbaa551
+size 8376
diff --git a/tldr-7b-checkpoint-256/zero_to_fp32.py b/tldr-7b-checkpoint-256/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69ecd9acb5a235ffbf927091051106d902b3d39
--- /dev/null
+++ b/tldr-7b-checkpoint-256/zero_to_fp32.py
@@ -0,0 +1,674 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import json
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+# DeepSpeed data structures it has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
@dataclass
class zero_model_state:
    """Distilled view of one rank's ``*_model_states.pt`` checkpoint file.

    Fix: the original annotated fields with ``dict()`` — a *call* producing an
    empty dict instance as the annotation object — instead of the ``dict``
    type.  Bare types are used here; dataclass field behavior is unchanged.
    """
    # fp32 non-parameter buffers, keyed by buffer name
    buffers: dict
    # parameter name -> shape bookkeeping taken from the checkpoint
    param_shapes: dict
    # [alias_name, source_name] pairs for tied/shared parameters
    shared_params: list
    # deepspeed version recorded in the checkpoint (may be None when absent)
    ds_version: int
    # frozen-parameter bookkeeping (None when the checkpoint has none)
    frozen_param_shapes: dict
    frozen_param_fragments: dict
+
+
# Module-level debug switch; overridden by the -d/--debug CLI flag in __main__.
debug = 0

# load to cpu — the whole conversion runs on CPU, no GPU is required
device = torch.device('cpu')
+
+
def atoi(text):
    """Return *text* converted to int when it is all digits, else unchanged."""
    if text.isdigit():
        return int(text)
    return text
+
+
def natural_keys(text):
    """Sort key producing human/natural ordering.

    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    # split keeps the digit runs (capturing group), which are compared as ints
    pieces = re.split(r'(\d+)', text)
    return [int(piece) if piece.isdigit() else piece for piece in pieces]
+
+
def get_model_state_file(checkpoint_dir, zero_stage):
    """Return the path of the single model-states file for *zero_stage*.

    Args:
        checkpoint_dir: directory holding the per-rank checkpoint files.
        zero_stage: ZeRO stage recorded in the checkpoint (1, 2 or 3).

    Raises:
        FileNotFoundError: when the directory or the expected file is missing.
        ValueError: for an unrecognized stage.  (The original fell through and
            crashed with ``UnboundLocalError`` on ``file`` instead.)
    """
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage <= 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # mirrors the wording used by parse_optim_states for consistency
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file
+
+
def get_checkpoint_files(checkpoint_dir, glob_pattern):
    """Return files in *checkpoint_dir* matching *glob_pattern*, naturally sorted."""
    # XXX: need to test that this simple glob rule works for multi-node setup too
    matches = glob.glob(os.path.join(checkpoint_dir, glob_pattern))
    # natural (human) sort so rank 10 comes after rank 2, not before
    matches.sort(key=lambda path: [int(part) if part.isdigit() else part
                                   for part in re.split(r'(\d+)', path)])

    if not matches:
        raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")

    return matches
+
+
def get_optim_files(checkpoint_dir):
    """Return the naturally-sorted ``*_optim_states.pt`` shards in *checkpoint_dir*."""
    return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
def get_model_state_files(checkpoint_dir):
    """Return the naturally-sorted ``*_model_states.pt`` files in *checkpoint_dir*."""
    return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
def parse_model_states(files):
    """Distill each ``*_model_states.pt`` file into a ``zero_model_state``.

    Only buffers, parameter shape bookkeeping, frozen-param info and
    shared-param links are retained; trainable weights are reconstructed
    later from the optimizer shards, not from these files.

    Raises ValueError if a file lacks the buffer-names key and therefore is
    not a model-states checkpoint.
    """
    zero_model_states = []
    for file in files:
        state_dict = torch.load(file, map_location=device)

        if BUFFER_NAMES not in state_dict:
            raise ValueError(f"{file} is not a model state checkpoint")
        buffer_names = state_dict[BUFFER_NAMES]
        if debug:
            print("Found buffers:", buffer_names)

        # recover just the buffers while restoring them to fp32 if they were saved in fp16
        buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
        param_shapes = state_dict[PARAM_SHAPES]

        # collect parameters that are included in param_shapes
        # NOTE(review): param_names is built up here but never read afterwards
        param_names = []
        for s in param_shapes:
            for name in s.keys():
                param_names.append(name)

        # update with frozen parameters
        frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
        if frozen_param_shapes is not None:
            if debug:
                print(f"Found frozen_param_shapes: {frozen_param_shapes}")
            param_names += list(frozen_param_shapes.keys())

        # handle shared params
        shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]

        ds_version = state_dict.get(DS_VERSION, None)

        frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)

        z_model_state = zero_model_state(buffers=buffers,
                                         param_shapes=param_shapes,
                                         shared_params=shared_params,
                                         ds_version=ds_version,
                                         frozen_param_shapes=frozen_param_shapes,
                                         frozen_param_fragments=frozen_param_fragments)
        zero_model_states.append(z_model_state)

    return zero_model_states
+
+
def parse_optim_states(files, ds_checkpoint_dir):
    """Load all ``*_optim_states.pt`` shards and extract the fp32 master weights.

    Returns ``(zero_stage, world_size, fp32_flat_groups)`` where
    ``fp32_flat_groups[rank]`` is a list of flat fp32 tensors (one per param
    group) for ZeRO-1/2, or a single concatenated flat tensor for ZeRO-3.

    Raises:
        ValueError: when the files are not ZeRO checkpoints, when the shard
            count does not match the recorded partition count, or for an
            unknown zero stage.
    """
    total_files = len(files)
    state_dicts = []
    for f in files:
        state_dict = torch.load(f, map_location=device)
        # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
        # and also handle the case where it was already removed by another helper script
        state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
        state_dicts.append(state_dict)

    if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
        raise ValueError(f"{files[0]} is not a zero checkpoint")
    zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
    world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]

    # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
    # parameters can be different from data parallelism for non-expert parameters. So we can just
    # use the max of the partition_count to get the dp world_size.

    if type(world_size) is list:
        world_size = max(world_size)

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
            "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage <= 2:
        fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
    elif zero_stage == 3:
        fp32_groups_key = FP32_FLAT_GROUPS
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    if zero_stage <= 2:
        fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
    elif zero_stage == 3:
        # if there is more than one param group, there will be multiple flattened tensors - one
        # flattened tensor per group - for simplicity merge them into a single tensor
        #
        # XXX: could make the script more memory efficient for when there are multiple groups - it
        # will require matching the sub-lists of param_shapes for each param group flattened tensor

        fp32_flat_groups = [
            torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
        ]

    return zero_stage, world_size, fp32_flat_groups
+
+
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
        - ``exclude_frozen_parameters``: when True, frozen parameters are left out of the result

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    # the optimizer shards determine the stage and data-parallel world size
    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_files = get_model_state_files(ds_checkpoint_dir)

    zero_model_states = parse_model_states(model_files)
    print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')

    # stage-specific reconstruction; parse_optim_states already rejected unknown stages
    if zero_stage <= 2:
        return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
    elif zero_stage == 3:
        return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                                          exclude_frozen_parameters)
+
+
def _zero2_merge_frozen_params(state_dict, zero_model_states):
    """Copy frozen (non-trainable) params into *state_dict* (ZeRO-1/2).

    Rank 0's stored fragments are used directly — no cross-rank merging is
    performed here.  No-op when the checkpoint holds no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    frozen_param_fragments = zero_model_states[0].frozen_param_fragments

    if debug:
        num_elem = sum(s.numel() for s in frozen_param_shapes.values())
        print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # fragment is stored whole for zero-1/2, so it is the full tensor
        state_dict[name] = frozen_param_fragments[name]

        if debug:
            print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Carve named parameters out of merged flat fp32 vectors (ZeRO-1/2).

    For every param group the per-rank partitions are concatenated into one
    flat fp32 vector, which is then sliced sequentially according to the
    recorded ``param_shapes``.  Up to ``2 * world_size`` elements of
    alignment padding per group are tolerated by the final sanity check.
    """
    param_shapes = zero_model_states[0].param_shapes

    # Reconstruction protocol:
    #
    # XXX: document this

    if debug:
        for i in range(world_size):
            for j in range(len(fp32_flat_groups[0])):
                print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")

    # XXX: memory usage doubles here (zero2)
    num_param_groups = len(fp32_flat_groups[0])
    merged_single_partition_of_fp32_groups = []
    for i in range(num_param_groups):
        merged_partitions = [sd[i] for sd in fp32_flat_groups]
        full_single_fp32_vector = torch.cat(merged_partitions, 0)
        merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
    avail_numel = sum(
        [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])

    if debug:
        wanted_params = sum([len(shapes) for shapes in param_shapes])
        wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    total_numel = 0
    total_params = 0
    for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
        offset = 0
        avail_numel = full_single_fp32_vector.numel()
        for name, shape in shapes.items():

            # shape may be a torch.Size (has .numel) or a plain list/tuple
            unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
            total_numel += unpartitioned_numel
            total_params += 1

            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
        # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
        # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
        # live optimizer object, so we are checking that the numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned offset={offset}, avail_numel={avail_numel}")

        # Sanity check
        if offset != avail_numel:
            raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a ZeRO-1/2 checkpoint."""
    rank0 = zero_model_states[0]

    consolidated = OrderedDict()

    # non-parameter buffers come straight from rank 0's model states
    consolidated.update(rank0.buffers)
    if debug:
        print(f"added {len(rank0.buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero2_merge_frozen_params(consolidated, zero_model_states)

    _zero2_merge_trainable_params(consolidated, world_size, fp32_flat_groups, zero_model_states)

    # re-link shared parameters to the tensors they alias
    for dst_name, src_name in rank0.shared_params:
        if src_name in consolidated:
            consolidated[dst_name] = consolidated[src_name]

    return consolidated
+
+
def zero3_partitioned_param_info(unpartitioned_numel, world_size):
    """Return ``(per-rank shard numel, padding numel)`` for a ZeRO-3 partition."""
    leftover = unpartitioned_numel % world_size
    pad = world_size - leftover if leftover else 0
    return math.ceil(unpartitioned_numel / world_size), pad
+
+
def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
    """Merge per-rank frozen-param fragments into full tensors (ZeRO-3).

    Under ZeRO-3 frozen params are partitioned as well: every rank's fragment
    is concatenated, trailing partition padding is dropped with ``narrow``
    and the result reshaped to the recorded shape.  No-op when the checkpoint
    holds no frozen params.
    """
    if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
        return

    if debug:
        for i in range(world_size):
            num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
            print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')

    frozen_param_shapes = zero_model_states[0].frozen_param_shapes
    wanted_params = len(frozen_param_shapes)
    wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
    avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
    print(f'Frozen params: Have {avail_numel} numels to process.')
    print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')

    total_params = 0
    total_numel = 0
    for name, shape in zero_model_states[0].frozen_param_shapes.items():
        total_params += 1
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel

        # concatenate all ranks' fragments, then trim the final rank's padding
        param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
        state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)

        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

    print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
    """Re-consolidate trainable params from per-rank flat fp32 shards (ZeRO-3).

    Every param occupies the same offset range in each rank's flat tensor;
    the per-rank slices are concatenated, trailing partition padding dropped
    with ``narrow`` and the result reshaped to the recorded shape.
    """
    param_shapes = zero_model_states[0].param_shapes
    # NOTE(review): recomputed identically below before first use
    avail_numel = fp32_flat_groups[0].numel() * world_size
    # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
    # param, re-consolidating each param, while dealing with padding if any

    # merge list of dicts, preserving order
    param_shapes = {k: v for d in param_shapes for k, v in d.items()}

    if debug:
        for i in range(world_size):
            print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")

    wanted_params = len(param_shapes)
    wanted_numel = sum(shape.numel() for shape in param_shapes.values())
    # not asserting if there is a mismatch due to possible padding
    avail_numel = fp32_flat_groups[0].numel() * world_size
    print(f"Trainable params: Have {avail_numel} numels to process.")
    print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
    # out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in tqdm(param_shapes.items(), desc='Gathering Sharded Weights'):
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1
        partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

        if debug:
            print(
                f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
            )

        # XXX: memory usage doubles here
        state_dict[name] = torch.cat(
            tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
            0).narrow(0, 0, unpartitioned_numel).view(shape)
        offset += partitioned_numel

    offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
                                               exclude_frozen_parameters):
    """Assemble the consolidated fp32 state_dict for a ZeRO-3 checkpoint."""
    rank0 = zero_model_states[0]

    consolidated = OrderedDict()

    # non-parameter buffers come straight from rank 0's model states
    consolidated.update(rank0.buffers)
    if debug:
        print(f"added {len(rank0.buffers)} buffers")

    if not exclude_frozen_parameters:
        _zero3_merge_frozen_params(consolidated, world_size, zero_model_states)

    _zero3_merge_trainable_params(consolidated, world_size, fp32_flat_groups, zero_model_states)

    # re-link shared parameters to the tensors they alias
    for dst_name, src_name in rank0.shared_params:
        if src_name in consolidated:
            consolidated[dst_name] = consolidated[src_name]

    return consolidated
+
+
def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None, exclude_frozen_parameters=False):
    """Reconstruct a consolidated fp32 ``state_dict`` from a ZeRO 2/3 checkpoint.

    The result can be loaded with ``load_state_dict()`` and used for training
    without DeepSpeed or shared with others, for example via a model hub.

    Args:
        checkpoint_dir: path to the desired checkpoint folder.
        tag: checkpoint tag used as a unique identifier for the checkpoint,
            e.g. ``global_step14``.  When omitted, the tag is read from the
            ``latest`` file inside ``checkpoint_dir``.
        exclude_frozen_parameters: skip frozen parameters when True.

    Returns:
        A pytorch ``state_dict`` (already on CPU).

    Note: the whole model is materialized in host RAM; if there is not enough
    free CPU memory, use the offline approach via the ``zero_to_fp32.py``
    script that is saved with the checkpoint instead.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed
    context of the same application. i.e. you will need to re-initialize the
    deepspeed engine, since ``model.load_state_dict(state_dict)`` will remove
    all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
    """
    if tag is None:
        # resolve the tag from the 'latest' marker file written by DeepSpeed
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if not os.path.isfile(latest_path):
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
        with open(latest_path, 'r') as fd:
            tag = fd.read().strip()

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+
+
def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
                                               output_dir,
                                               max_shard_size="5GB",
                                               safe_serialization=False,
                                               tag=None,
                                               exclude_frozen_parameters=False):
    """
    Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_dir``: directory to the pytorch fp32 state_dict output files
        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
        - ``exclude_frozen_parameters``: exclude frozen parameters
    """
    # Dependency pre-check: the optional third-party packages are imported
    # lazily so the script stays usable without them
    if safe_serialization:
        try:
            from safetensors.torch import save_file
        except ImportError:
            print('If you want to use `safe_serialization`, please `pip install safetensors`')
            raise
    if max_shard_size is not None:
        try:
            from huggingface_hub import split_torch_state_dict_into_shards
        except ImportError:
            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
            raise

    # Convert zero checkpoint to state_dict
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag, exclude_frozen_parameters)

    # Shard the model if it is too big.
    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
    if max_shard_size is not None:
        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
        state_dict_split = split_torch_state_dict_into_shards(state_dict,
                                                              filename_pattern=filename_pattern,
                                                              max_shard_size=max_shard_size)
    else:
        # single-file fallback mimics the hub helper's interface; it carries
        # no ``metadata``/``tensor_to_filename``, but with ``is_sharded=False``
        # the index-writing branch below is never taken
        from collections import namedtuple
        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
        state_dict_split = StateDictSplit(is_sharded=False,
                                          filename_to_tensors={weights_name: list(state_dict.keys())})

    # Save the model
    filename_to_tensors = state_dict_split.filename_to_tensors.items()
    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
        shard = {tensor: state_dict[tensor].contiguous() for tensor in tensors}
        output_path = os.path.join(output_dir, shard_file)
        if safe_serialization:
            save_file(shard, output_path, metadata={"format": "pt"})
        else:
            torch.save(shard, output_path)

    # Save index if sharded
    if state_dict_split.is_sharded:
        index = {
            "metadata": state_dict_split.metadata,
            "weight_map": state_dict_split.tensor_to_filename,
        }
        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
        save_index_file = os.path.join(output_dir, save_index_file)
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
+
+
def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model to cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
    of the same application. i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
    # NOTE(review): strict=False silently ignores missing/unexpected keys
    model.load_state_dict(state_dict, strict=False)

    return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files"
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)