loubnabnl (HF Staff) committed
Commit 4056064 · verified · 1 parent: 8b66889

Model save

README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ base_model: HuggingFaceTB/135M-lc-100k-rope-12B
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ datasets:
+ - generator
+ model-index:
+ - name: smollm2-135M-8k-lc100k-mix1-ep2
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/loubnabnl/huggingface/runs/bxdnlpsh)
+ # smollm2-135M-8k-lc100k-mix1-ep2
+
+ This model is a fine-tuned version of [HuggingFaceTB/135M-lc-100k-rope-12B](https://huggingface.co/HuggingFaceTB/135M-lc-100k-rope-12B) on the generator dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.8390
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.001
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 2
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 1.2705        | 1.0   | 392  | 1.8649          |
+ | 1.1867        | 2.0   | 784  | 1.8390          |
+
+
+ ### Framework versions
+
+ - Transformers 4.42.3
+ - Pytorch 2.1.2
+ - Datasets 2.20.0
+ - Tokenizers 0.19.1
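
A minimal loading-and-generation sketch for the checkpoint described in the card above (the repository path is a placeholder for wherever this model is hosted; the `<|im_start|>`/`<|im_end|>` prompt format comes from the tokenizer files in this commit):

```python
# Sketch: load the fine-tuned checkpoint and generate from a ChatML-style prompt.
# "<namespace>/smollm2-135M-8k-lc100k-mix1-ep2" is a placeholder repo id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "<namespace>/smollm2-135M-8k-lc100k-mix1-ep2"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

prompt = "<|im_start|>user\nWhat is long-context fine-tuning?<|im_end|>\n<|im_start|>assistant\n"
inputs = tokenizer(prompt, return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=128, eos_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```

The hyperparameter list maps directly onto `transformers.TrainingArguments`; a hedged reconstruction of the run configuration (output directory, dataset handling and the bf16 flag are assumptions; the `trl`/`sft` tags suggest the run used TRL's `SFTTrainer`):

```python
# Hedged reconstruction of the training arguments listed in the card.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="smollm2-135M-8k-lc100k-mix1-ep2",  # assumed
    learning_rate=1e-3,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=4,   # 8 GPUs x 4 per device x 4 accumulation = 128 effective
    num_train_epochs=2,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    bf16=True,                       # assumed from torch_dtype: bfloat16 in config.json
)
```
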
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 2.0,
+     "total_flos": 173253108695040.0,
+     "train_loss": 1.2838877389321521,
+     "train_runtime": 2377.8575,
+     "train_samples": 456544,
+     "train_samples_per_second": 42.195,
+     "train_steps_per_second": 0.33
+ }
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "HuggingFaceTB/135M-lc-100k-rope-12B",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 576,
+   "initializer_range": 0.041666666666666664,
+   "intermediate_size": 1536,
+   "is_llama_config": true,
+   "max_position_embeddings": 8192,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 9,
+   "num_hidden_layers": 30,
+   "num_key_value_heads": 3,
+   "pad_token_id": 2,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_interleaved": false,
+   "rope_scaling": null,
+   "rope_theta": 100000,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.42.3",
+   "use_cache": false,
+   "vocab_size": 49152
+ }
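
The config describes a small Llama-style network: 30 layers, hidden size 576, 9 query heads with 3 key/value heads (grouped-query attention, head dim 64), SwiGLU MLP of width 1536, RoPE theta 100000 over an 8192-token context, and tied input/output embeddings. A quick sketch of the parameter count these fields imply:

```python
# Rough parameter count implied by config.json (tied embeddings, no attention/MLP biases).
hidden, inter, layers, vocab = 576, 1536, 30, 49152
heads, kv_heads = 9, 3
head_dim = hidden // heads                       # 64

embed = vocab * hidden                           # shared with the LM head (tie_word_embeddings)
attn = hidden * hidden                           # q_proj
attn += 2 * hidden * (kv_heads * head_dim)       # k_proj + v_proj (grouped-query attention)
attn += hidden * hidden                          # o_proj
mlp = 3 * hidden * inter                         # gate_proj, up_proj, down_proj
norms = 2 * hidden                               # input + post-attention RMSNorm
per_layer = attn + mlp + norms

total = embed + layers * per_layer + hidden      # + final RMSNorm
print(f"{total / 1e6:.1f}M parameters")          # ~134.5M, matching the "135M" name
```
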
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 2,
+   "transformers_version": "4.42.3"
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f99f5839f46e26ab68c01f37588a532970fbbb7cbd29013710429dc111262bdd
+ size 269060552
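
This is a Git LFS pointer rather than the weights themselves. A small verification sketch for the downloaded file (the local filename is an assumption); the 269,060,552-byte size is also consistent with roughly 134.5M bfloat16 parameters at 2 bytes each plus a small safetensors header:

```python
# Check a downloaded model.safetensors against the LFS pointer above.
import hashlib

expected_oid = "f99f5839f46e26ab68c01f37588a532970fbbb7cbd29013710429dc111262bdd"
expected_size = 269060552

h, size = hashlib.sha256(), 0
with open("model.safetensors", "rb") as f:           # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
        size += len(chunk)

assert size == expected_size, f"size mismatch: {size}"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("pointer matches the downloaded weights")
```
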
runs/Oct31_07-14-36_ip-26-0-170-143/events.out.tfevents.1730358945.ip-26-0-170-143.2039241.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01d6eeebcab588bd48b8d1fa39e2fcda79afa26611ef0b9852118025079ed322
+ size 39085
special_tokens_map.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     },
+     {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": "<|im_start|>",
+   "eos_token": "<|im_end|>",
+   "pad_token": "<|im_end|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,154 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<repo_name>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "<reponame>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "5": {
+       "content": "<file_sep>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "6": {
+       "content": "<filename>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "7": {
+       "content": "<gh_stars>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "8": {
+       "content": "<issue_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "9": {
+       "content": "<issue_comment>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "10": {
+       "content": "<issue_closed>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "11": {
+       "content": "<jupyter_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "12": {
+       "content": "<jupyter_text>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "13": {
+       "content": "<jupyter_code>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "14": {
+       "content": "<jupyter_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "15": {
+       "content": "<jupyter_script>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "16": {
+       "content": "<empty_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": "<|im_start|>",
+   "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "model_max_length": 2048,
+   "pad_token": "<|im_end|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>",
+   "vocab_size": 49152
+ }
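
The `chat_template` above is a ChatML-style format built from the `<|im_start|>`/`<|im_end|>` special tokens. A plain-Python mirror of what the Jinja template renders (once the tokenizer is loaded, `tokenizer.apply_chat_template` produces the same text):

```python
# Plain-Python mirror of the Jinja chat_template in tokenizer_config.json.
def render_chatml(messages, add_generation_prompt=True):
    text = ""
    for m in messages:
        text += "<|im_start|>" + m["role"] + "\n" + m["content"] + "<|im_end|>" + "\n"
    if add_generation_prompt:
        text += "<|im_start|>assistant\n"
    return text

print(render_chatml([{"role": "user", "content": "Hello!"}]))
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```

Note that `model_max_length` is 2048 here even though `config.json` sets `max_position_embeddings` to 8192, so longer prompts may require overriding it when loading the tokenizer.
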
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 2.0,
+     "total_flos": 173253108695040.0,
+     "train_loss": 1.2838877389321521,
+     "train_runtime": 2377.8575,
+     "train_samples": 456544,
+     "train_samples_per_second": 42.195,
+     "train_steps_per_second": 0.33
+ }
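
A quick consistency check of these figures against the run configuration (784 optimizer steps at an effective batch of 128 sequences, from the README and trainer_state.json):

```python
# Sanity-check train_results.json against the run configuration.
steps, effective_batch = 784, 128
runtime = 2377.8575             # seconds
samples_per_s = 42.195

print(runtime * samples_per_s)  # ~100,334 sequences processed over 2 epochs
print(steps * effective_batch)  # 100,352 = steps x effective batch, consistent
print(steps / runtime)          # ~0.33, matching train_steps_per_second
```

The gap between that figure and `train_samples` (456,544) is presumably because raw examples are packed into long sequences before batching.
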
trainer_state.json ADDED
@@ -0,0 +1,1157 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.0,
5
+ "eval_steps": 500,
6
+ "global_step": 784,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.002551020408163265,
13
+ "grad_norm": 1.138124187948155,
14
+ "learning_rate": 1.2658227848101267e-05,
15
+ "loss": 1.7551,
16
+ "step": 1
17
+ },
18
+ {
19
+ "epoch": 0.012755102040816327,
20
+ "grad_norm": 0.7551830131389933,
21
+ "learning_rate": 6.329113924050633e-05,
22
+ "loss": 1.7707,
23
+ "step": 5
24
+ },
25
+ {
26
+ "epoch": 0.025510204081632654,
27
+ "grad_norm": 1.379764699722761,
28
+ "learning_rate": 0.00012658227848101267,
29
+ "loss": 1.7257,
30
+ "step": 10
31
+ },
32
+ {
33
+ "epoch": 0.03826530612244898,
34
+ "grad_norm": 0.4560275436076655,
35
+ "learning_rate": 0.000189873417721519,
36
+ "loss": 1.6272,
37
+ "step": 15
38
+ },
39
+ {
40
+ "epoch": 0.05102040816326531,
41
+ "grad_norm": 0.3675085939167979,
42
+ "learning_rate": 0.00025316455696202533,
43
+ "loss": 1.5808,
44
+ "step": 20
45
+ },
46
+ {
47
+ "epoch": 0.06377551020408163,
48
+ "grad_norm": 0.20864289230413074,
49
+ "learning_rate": 0.00031645569620253165,
50
+ "loss": 1.5291,
51
+ "step": 25
52
+ },
53
+ {
54
+ "epoch": 0.07653061224489796,
55
+ "grad_norm": 0.18719394627139643,
56
+ "learning_rate": 0.000379746835443038,
57
+ "loss": 1.5338,
58
+ "step": 30
59
+ },
60
+ {
61
+ "epoch": 0.08928571428571429,
62
+ "grad_norm": 0.1717343346810603,
63
+ "learning_rate": 0.0004430379746835443,
64
+ "loss": 1.5007,
65
+ "step": 35
66
+ },
67
+ {
68
+ "epoch": 0.10204081632653061,
69
+ "grad_norm": 0.15610479873752922,
70
+ "learning_rate": 0.0005063291139240507,
71
+ "loss": 1.4652,
72
+ "step": 40
73
+ },
74
+ {
75
+ "epoch": 0.11479591836734694,
76
+ "grad_norm": 0.13214894352385953,
77
+ "learning_rate": 0.000569620253164557,
78
+ "loss": 1.4388,
79
+ "step": 45
80
+ },
81
+ {
82
+ "epoch": 0.12755102040816327,
83
+ "grad_norm": 0.12168972456032588,
84
+ "learning_rate": 0.0006329113924050633,
85
+ "loss": 1.4225,
86
+ "step": 50
87
+ },
88
+ {
89
+ "epoch": 0.14030612244897958,
90
+ "grad_norm": 0.1323077190059568,
91
+ "learning_rate": 0.0006962025316455697,
92
+ "loss": 1.409,
93
+ "step": 55
94
+ },
95
+ {
96
+ "epoch": 0.15306122448979592,
97
+ "grad_norm": 0.37430096940846036,
98
+ "learning_rate": 0.000759493670886076,
99
+ "loss": 1.3951,
100
+ "step": 60
101
+ },
102
+ {
103
+ "epoch": 0.16581632653061223,
104
+ "grad_norm": 0.3814386987087306,
105
+ "learning_rate": 0.0008227848101265824,
106
+ "loss": 1.3972,
107
+ "step": 65
108
+ },
109
+ {
110
+ "epoch": 0.17857142857142858,
111
+ "grad_norm": 0.40216892416078964,
112
+ "learning_rate": 0.0008860759493670886,
113
+ "loss": 1.3864,
114
+ "step": 70
115
+ },
116
+ {
117
+ "epoch": 0.1913265306122449,
118
+ "grad_norm": 0.20332439264842958,
119
+ "learning_rate": 0.0009493670886075949,
120
+ "loss": 1.3861,
121
+ "step": 75
122
+ },
123
+ {
124
+ "epoch": 0.20408163265306123,
125
+ "grad_norm": 0.2924926145777638,
126
+ "learning_rate": 0.0009999950356681913,
127
+ "loss": 1.3891,
128
+ "step": 80
129
+ },
130
+ {
131
+ "epoch": 0.21683673469387754,
132
+ "grad_norm": 0.29723891288444315,
133
+ "learning_rate": 0.000999821294405392,
134
+ "loss": 1.3982,
135
+ "step": 85
136
+ },
137
+ {
138
+ "epoch": 0.22959183673469388,
139
+ "grad_norm": 0.23043075033731064,
140
+ "learning_rate": 0.0009993994351217151,
141
+ "loss": 1.3821,
142
+ "step": 90
143
+ },
144
+ {
145
+ "epoch": 0.2423469387755102,
146
+ "grad_norm": 0.15934631204828656,
147
+ "learning_rate": 0.00099872966723379,
148
+ "loss": 1.3937,
149
+ "step": 95
150
+ },
151
+ {
152
+ "epoch": 0.25510204081632654,
153
+ "grad_norm": 0.19636795516281652,
154
+ "learning_rate": 0.0009978123232234147,
155
+ "loss": 1.3393,
156
+ "step": 100
157
+ },
158
+ {
159
+ "epoch": 0.26785714285714285,
160
+ "grad_norm": 0.3357008076473798,
161
+ "learning_rate": 0.0009966478584725086,
162
+ "loss": 1.3873,
163
+ "step": 105
164
+ },
165
+ {
166
+ "epoch": 0.28061224489795916,
167
+ "grad_norm": 0.2979357511561018,
168
+ "learning_rate": 0.0009952368510370538,
169
+ "loss": 1.3575,
170
+ "step": 110
171
+ },
172
+ {
173
+ "epoch": 0.29336734693877553,
174
+ "grad_norm": 0.23158016835002,
175
+ "learning_rate": 0.0009935800013601416,
176
+ "loss": 1.3614,
177
+ "step": 115
178
+ },
179
+ {
180
+ "epoch": 0.30612244897959184,
181
+ "grad_norm": 0.19086278999416814,
182
+ "learning_rate": 0.0009916781319242614,
183
+ "loss": 1.3609,
184
+ "step": 120
185
+ },
186
+ {
187
+ "epoch": 0.31887755102040816,
188
+ "grad_norm": 0.2238120900111794,
189
+ "learning_rate": 0.0009895321868430113,
190
+ "loss": 1.3436,
191
+ "step": 125
192
+ },
193
+ {
194
+ "epoch": 0.33163265306122447,
195
+ "grad_norm": 0.22212987754769067,
196
+ "learning_rate": 0.0009871432313924254,
197
+ "loss": 1.3356,
198
+ "step": 130
199
+ },
200
+ {
201
+ "epoch": 0.34438775510204084,
202
+ "grad_norm": 0.18172190875737002,
203
+ "learning_rate": 0.000984512451482158,
204
+ "loss": 1.3428,
205
+ "step": 135
206
+ },
207
+ {
208
+ "epoch": 0.35714285714285715,
209
+ "grad_norm": 0.1965103083782127,
210
+ "learning_rate": 0.0009816411530667814,
211
+ "loss": 1.3053,
212
+ "step": 140
213
+ },
214
+ {
215
+ "epoch": 0.36989795918367346,
216
+ "grad_norm": 0.14562927658303307,
217
+ "learning_rate": 0.000978530761497492,
218
+ "loss": 1.3233,
219
+ "step": 145
220
+ },
221
+ {
222
+ "epoch": 0.3826530612244898,
223
+ "grad_norm": 0.19370329245107856,
224
+ "learning_rate": 0.0009751828208145482,
225
+ "loss": 1.3309,
226
+ "step": 150
227
+ },
228
+ {
229
+ "epoch": 0.39540816326530615,
230
+ "grad_norm": 0.25874954807518885,
231
+ "learning_rate": 0.0009715989929807862,
232
+ "loss": 1.3386,
233
+ "step": 155
234
+ },
235
+ {
236
+ "epoch": 0.40816326530612246,
237
+ "grad_norm": 0.32074900532811484,
238
+ "learning_rate": 0.000967781057056601,
239
+ "loss": 1.3197,
240
+ "step": 160
241
+ },
242
+ {
243
+ "epoch": 0.42091836734693877,
244
+ "grad_norm": 0.2345834124220609,
245
+ "learning_rate": 0.0009637309083167956,
246
+ "loss": 1.3353,
247
+ "step": 165
248
+ },
249
+ {
250
+ "epoch": 0.4336734693877551,
251
+ "grad_norm": 0.19716142253846314,
252
+ "learning_rate": 0.0009594505573097414,
253
+ "loss": 1.3148,
254
+ "step": 170
255
+ },
256
+ {
257
+ "epoch": 0.44642857142857145,
258
+ "grad_norm": 1.6838448936670876,
259
+ "learning_rate": 0.0009549421288593157,
260
+ "loss": 1.4963,
261
+ "step": 175
262
+ },
263
+ {
264
+ "epoch": 0.45918367346938777,
265
+ "grad_norm": 0.5400080339132108,
266
+ "learning_rate": 0.0009502078610101092,
267
+ "loss": 1.3763,
268
+ "step": 180
269
+ },
270
+ {
271
+ "epoch": 0.4719387755102041,
272
+ "grad_norm": 0.5636053179876761,
273
+ "learning_rate": 0.0009452501039164315,
274
+ "loss": 1.3562,
275
+ "step": 185
276
+ },
277
+ {
278
+ "epoch": 0.4846938775510204,
279
+ "grad_norm": 0.26363289666703416,
280
+ "learning_rate": 0.0009400713186756625,
281
+ "loss": 1.3671,
282
+ "step": 190
283
+ },
284
+ {
285
+ "epoch": 0.49744897959183676,
286
+ "grad_norm": 0.2063220498775075,
287
+ "learning_rate": 0.0009346740761065305,
288
+ "loss": 1.337,
289
+ "step": 195
290
+ },
291
+ {
292
+ "epoch": 0.5102040816326531,
293
+ "grad_norm": 0.851877134215801,
294
+ "learning_rate": 0.0009290610554729234,
295
+ "loss": 1.3404,
296
+ "step": 200
297
+ },
298
+ {
299
+ "epoch": 0.5229591836734694,
300
+ "grad_norm": 0.17433687232665127,
301
+ "learning_rate": 0.0009232350431538657,
302
+ "loss": 1.3278,
303
+ "step": 205
304
+ },
305
+ {
306
+ "epoch": 0.5357142857142857,
307
+ "grad_norm": 0.2552513397179755,
308
+ "learning_rate": 0.0009171989312603226,
309
+ "loss": 1.3621,
310
+ "step": 210
311
+ },
312
+ {
313
+ "epoch": 0.548469387755102,
314
+ "grad_norm": 0.18498941711074082,
315
+ "learning_rate": 0.0009109557161995172,
316
+ "loss": 1.3365,
317
+ "step": 215
318
+ },
319
+ {
320
+ "epoch": 0.5612244897959183,
321
+ "grad_norm": 0.19451053194971357,
322
+ "learning_rate": 0.0009045084971874737,
323
+ "loss": 1.3329,
324
+ "step": 220
325
+ },
326
+ {
327
+ "epoch": 0.5739795918367347,
328
+ "grad_norm": 0.14628015588879814,
329
+ "learning_rate": 0.0008978604747105246,
330
+ "loss": 1.3133,
331
+ "step": 225
332
+ },
333
+ {
334
+ "epoch": 0.5867346938775511,
335
+ "grad_norm": 0.2028020969079187,
336
+ "learning_rate": 0.000891014948936546,
337
+ "loss": 1.3337,
338
+ "step": 230
339
+ },
340
+ {
341
+ "epoch": 0.5994897959183674,
342
+ "grad_norm": 0.18332572064094557,
343
+ "learning_rate": 0.0008839753180767108,
344
+ "loss": 1.3132,
345
+ "step": 235
346
+ },
347
+ {
348
+ "epoch": 0.6122448979591837,
349
+ "grad_norm": 0.14205229763128208,
350
+ "learning_rate": 0.0008767450766985694,
351
+ "loss": 1.29,
352
+ "step": 240
353
+ },
354
+ {
355
+ "epoch": 0.625,
356
+ "grad_norm": 0.30410202409557807,
357
+ "learning_rate": 0.000869327813991301,
358
+ "loss": 1.3057,
359
+ "step": 245
360
+ },
361
+ {
362
+ "epoch": 0.6377551020408163,
363
+ "grad_norm": 0.19577309506455626,
364
+ "learning_rate": 0.0008617272119839903,
365
+ "loss": 1.332,
366
+ "step": 250
367
+ },
368
+ {
369
+ "epoch": 0.6505102040816326,
370
+ "grad_norm": 0.1272875527576836,
371
+ "learning_rate": 0.0008539470437178196,
372
+ "loss": 1.3206,
373
+ "step": 255
374
+ },
375
+ {
376
+ "epoch": 0.6632653061224489,
377
+ "grad_norm": 0.15194088157912253,
378
+ "learning_rate": 0.0008459911713730799,
379
+ "loss": 1.2914,
380
+ "step": 260
381
+ },
382
+ {
383
+ "epoch": 0.6760204081632653,
384
+ "grad_norm": 0.12663044164256262,
385
+ "learning_rate": 0.0008378635443519327,
386
+ "loss": 1.2917,
387
+ "step": 265
388
+ },
389
+ {
390
+ "epoch": 0.6887755102040817,
391
+ "grad_norm": 0.1344562065693353,
392
+ "learning_rate": 0.0008295681973178737,
393
+ "loss": 1.2994,
394
+ "step": 270
395
+ },
396
+ {
397
+ "epoch": 0.701530612244898,
398
+ "grad_norm": 0.13392241997034995,
399
+ "learning_rate": 0.0008211092481928716,
400
+ "loss": 1.297,
401
+ "step": 275
402
+ },
403
+ {
404
+ "epoch": 0.7142857142857143,
405
+ "grad_norm": 0.14371429407675881,
406
+ "learning_rate": 0.0008124908961131759,
407
+ "loss": 1.3095,
408
+ "step": 280
409
+ },
410
+ {
411
+ "epoch": 0.7270408163265306,
412
+ "grad_norm": 0.15013152147820535,
413
+ "learning_rate": 0.0008037174193448089,
414
+ "loss": 1.2991,
415
+ "step": 285
416
+ },
417
+ {
418
+ "epoch": 0.7397959183673469,
419
+ "grad_norm": 0.13870289504861724,
420
+ "learning_rate": 0.000794793173159778,
421
+ "loss": 1.3066,
422
+ "step": 290
423
+ },
424
+ {
425
+ "epoch": 0.7525510204081632,
426
+ "grad_norm": 0.11463233811669157,
427
+ "learning_rate": 0.0007857225876740584,
428
+ "loss": 1.2914,
429
+ "step": 295
430
+ },
431
+ {
432
+ "epoch": 0.7653061224489796,
433
+ "grad_norm": 0.16118564397614718,
434
+ "learning_rate": 0.000776510165648425,
435
+ "loss": 1.286,
436
+ "step": 300
437
+ },
438
+ {
439
+ "epoch": 0.7780612244897959,
440
+ "grad_norm": 0.16328954399209802,
441
+ "learning_rate": 0.000767160480253221,
442
+ "loss": 1.3065,
443
+ "step": 305
444
+ },
445
+ {
446
+ "epoch": 0.7908163265306123,
447
+ "grad_norm": 0.3891017035371994,
448
+ "learning_rate": 0.000757678172798175,
449
+ "loss": 1.2978,
450
+ "step": 310
451
+ },
452
+ {
453
+ "epoch": 0.8035714285714286,
454
+ "grad_norm": 0.1374369729620582,
455
+ "learning_rate": 0.0007480679504283911,
456
+ "loss": 1.2737,
457
+ "step": 315
458
+ },
459
+ {
460
+ "epoch": 0.8163265306122449,
461
+ "grad_norm": 0.10483812728418156,
462
+ "learning_rate": 0.00073833458378766,
463
+ "loss": 1.2638,
464
+ "step": 320
465
+ },
466
+ {
467
+ "epoch": 0.8290816326530612,
468
+ "grad_norm": 0.20381163662422036,
469
+ "learning_rate": 0.0007284829046502467,
470
+ "loss": 1.2888,
471
+ "step": 325
472
+ },
473
+ {
474
+ "epoch": 0.8418367346938775,
475
+ "grad_norm": 0.11728927247718034,
476
+ "learning_rate": 0.0007185178035223327,
477
+ "loss": 1.2988,
478
+ "step": 330
479
+ },
480
+ {
481
+ "epoch": 0.8545918367346939,
482
+ "grad_norm": 0.10959677714791609,
483
+ "learning_rate": 0.0007084442272143026,
484
+ "loss": 1.2722,
485
+ "step": 335
486
+ },
487
+ {
488
+ "epoch": 0.8673469387755102,
489
+ "grad_norm": 0.1156080310279316,
490
+ "learning_rate": 0.0006982671763850814,
491
+ "loss": 1.2831,
492
+ "step": 340
493
+ },
494
+ {
495
+ "epoch": 0.8801020408163265,
496
+ "grad_norm": 0.1401568063137037,
497
+ "learning_rate": 0.0006879917030597397,
498
+ "loss": 1.2766,
499
+ "step": 345
500
+ },
501
+ {
502
+ "epoch": 0.8928571428571429,
503
+ "grad_norm": 0.1267536193584464,
504
+ "learning_rate": 0.0006776229081216001,
505
+ "loss": 1.2909,
506
+ "step": 350
507
+ },
508
+ {
509
+ "epoch": 0.9056122448979592,
510
+ "grad_norm": 0.1276445520009822,
511
+ "learning_rate": 0.0006671659387800909,
512
+ "loss": 1.271,
513
+ "step": 355
514
+ },
515
+ {
516
+ "epoch": 0.9183673469387755,
517
+ "grad_norm": 0.12028703336124712,
518
+ "learning_rate": 0.0006566259860156014,
519
+ "loss": 1.2681,
520
+ "step": 360
521
+ },
522
+ {
523
+ "epoch": 0.9311224489795918,
524
+ "grad_norm": 0.10748771516704643,
525
+ "learning_rate": 0.0006460082820026094,
526
+ "loss": 1.2634,
527
+ "step": 365
528
+ },
529
+ {
530
+ "epoch": 0.9438775510204082,
531
+ "grad_norm": 0.09058225138342332,
532
+ "learning_rate": 0.0006353180975123595,
533
+ "loss": 1.2785,
534
+ "step": 370
535
+ },
536
+ {
537
+ "epoch": 0.9566326530612245,
538
+ "grad_norm": 0.1471646323105399,
539
+ "learning_rate": 0.000624560739296381,
540
+ "loss": 1.2537,
541
+ "step": 375
542
+ },
543
+ {
544
+ "epoch": 0.9693877551020408,
545
+ "grad_norm": 0.12205622985562622,
546
+ "learning_rate": 0.0006137415474521454,
547
+ "loss": 1.2834,
548
+ "step": 380
549
+ },
550
+ {
551
+ "epoch": 0.9821428571428571,
552
+ "grad_norm": 0.1198227865731359,
553
+ "learning_rate": 0.0006028658927721697,
554
+ "loss": 1.268,
555
+ "step": 385
556
+ },
557
+ {
558
+ "epoch": 0.9948979591836735,
559
+ "grad_norm": 0.10689994479443513,
560
+ "learning_rate": 0.0005919391740778833,
561
+ "loss": 1.2705,
562
+ "step": 390
563
+ },
564
+ {
565
+ "epoch": 1.0,
566
+ "eval_loss": 1.864872694015503,
567
+ "eval_runtime": 85.1881,
568
+ "eval_samples_per_second": 152.568,
569
+ "eval_steps_per_second": 4.778,
570
+ "step": 392
571
+ },
572
+ {
573
+ "epoch": 1.0076530612244898,
574
+ "grad_norm": 0.11143268558344442,
575
+ "learning_rate": 0.0005809668155395793,
576
+ "loss": 1.2488,
577
+ "step": 395
578
+ },
579
+ {
580
+ "epoch": 1.0204081632653061,
581
+ "grad_norm": 0.10688715702524573,
582
+ "learning_rate": 0.0005699542639837844,
583
+ "loss": 1.2129,
584
+ "step": 400
585
+ },
586
+ {
587
+ "epoch": 1.0331632653061225,
588
+ "grad_norm": 0.09930208201709802,
589
+ "learning_rate": 0.0005589069861893798,
590
+ "loss": 1.2152,
591
+ "step": 405
592
+ },
593
+ {
594
+ "epoch": 1.0459183673469388,
595
+ "grad_norm": 0.14907362895999168,
596
+ "learning_rate": 0.0005478304661738199,
597
+ "loss": 1.2204,
598
+ "step": 410
599
+ },
600
+ {
601
+ "epoch": 1.058673469387755,
602
+ "grad_norm": 0.12329968075769418,
603
+ "learning_rate": 0.000536730202470791,
604
+ "loss": 1.248,
605
+ "step": 415
606
+ },
607
+ {
608
+ "epoch": 1.0714285714285714,
609
+ "grad_norm": 0.10929541376542931,
610
+ "learning_rate": 0.000525611705400666,
611
+ "loss": 1.2511,
612
+ "step": 420
613
+ },
614
+ {
615
+ "epoch": 1.0841836734693877,
616
+ "grad_norm": 0.12233961076169748,
617
+ "learning_rate": 0.000514480494335106,
618
+ "loss": 1.2165,
619
+ "step": 425
620
+ },
621
+ {
622
+ "epoch": 1.096938775510204,
623
+ "grad_norm": 0.11362833380867698,
624
+ "learning_rate": 0.0005033420949571712,
625
+ "loss": 1.2353,
626
+ "step": 430
627
+ },
628
+ {
629
+ "epoch": 1.1096938775510203,
630
+ "grad_norm": 0.09720826313265056,
631
+ "learning_rate": 0.0004922020365182968,
632
+ "loss": 1.209,
633
+ "step": 435
634
+ },
635
+ {
636
+ "epoch": 1.1224489795918366,
637
+ "grad_norm": 0.1184275991715705,
638
+ "learning_rate": 0.0004810658490934979,
639
+ "loss": 1.2218,
640
+ "step": 440
641
+ },
642
+ {
643
+ "epoch": 1.135204081632653,
644
+ "grad_norm": 0.13198322755570885,
645
+ "learning_rate": 0.0004699390608361665,
646
+ "loss": 1.2035,
647
+ "step": 445
648
+ },
649
+ {
650
+ "epoch": 1.1479591836734695,
651
+ "grad_norm": 0.10874477518300978,
652
+ "learning_rate": 0.0004588271952338212,
653
+ "loss": 1.2172,
654
+ "step": 450
655
+ },
656
+ {
657
+ "epoch": 1.1607142857142858,
658
+ "grad_norm": 0.08852135264878797,
659
+ "learning_rate": 0.00044773576836617336,
660
+ "loss": 1.221,
661
+ "step": 455
662
+ },
663
+ {
664
+ "epoch": 1.1734693877551021,
665
+ "grad_norm": 0.10770572502518004,
666
+ "learning_rate": 0.0004366702861668716,
667
+ "loss": 1.2192,
668
+ "step": 460
669
+ },
670
+ {
671
+ "epoch": 1.1862244897959184,
672
+ "grad_norm": 0.15699433228433193,
673
+ "learning_rate": 0.0004256362416902817,
674
+ "loss": 1.2204,
675
+ "step": 465
676
+ },
677
+ {
678
+ "epoch": 1.1989795918367347,
679
+ "grad_norm": 0.09204241599916607,
680
+ "learning_rate": 0.0004146391123846606,
681
+ "loss": 1.2338,
682
+ "step": 470
683
+ },
684
+ {
685
+ "epoch": 1.211734693877551,
686
+ "grad_norm": 0.08836564479064991,
687
+ "learning_rate": 0.00040368435737307733,
688
+ "loss": 1.2248,
689
+ "step": 475
690
+ },
691
+ {
692
+ "epoch": 1.2244897959183674,
693
+ "grad_norm": 0.09509702477272594,
694
+ "learning_rate": 0.00039277741474343054,
695
+ "loss": 1.2168,
696
+ "step": 480
697
+ },
698
+ {
699
+ "epoch": 1.2372448979591837,
700
+ "grad_norm": 0.0894046781603559,
701
+ "learning_rate": 0.00038192369884890886,
702
+ "loss": 1.2232,
703
+ "step": 485
704
+ },
705
+ {
706
+ "epoch": 1.25,
707
+ "grad_norm": 0.09858037446272845,
708
+ "learning_rate": 0.0003711285976202331,
709
+ "loss": 1.2159,
710
+ "step": 490
711
+ },
712
+ {
713
+ "epoch": 1.2627551020408163,
714
+ "grad_norm": 0.10699315355393832,
715
+ "learning_rate": 0.0003603974698910139,
716
+ "loss": 1.2324,
717
+ "step": 495
718
+ },
719
+ {
720
+ "epoch": 1.2755102040816326,
721
+ "grad_norm": 0.09869982370664032,
722
+ "learning_rate": 0.0003497356427375562,
723
+ "loss": 1.2252,
724
+ "step": 500
725
+ },
726
+ {
727
+ "epoch": 1.288265306122449,
728
+ "grad_norm": 0.0845449914129218,
729
+ "learning_rate": 0.0003391484088344257,
730
+ "loss": 1.231,
731
+ "step": 505
732
+ },
733
+ {
734
+ "epoch": 1.3010204081632653,
735
+ "grad_norm": 0.09770167031364653,
736
+ "learning_rate": 0.00032864102382709374,
737
+ "loss": 1.2108,
738
+ "step": 510
739
+ },
740
+ {
741
+ "epoch": 1.3137755102040816,
742
+ "grad_norm": 0.08930615418315077,
743
+ "learning_rate": 0.0003182187037229653,
744
+ "loss": 1.2234,
745
+ "step": 515
746
+ },
747
+ {
748
+ "epoch": 1.3265306122448979,
749
+ "grad_norm": 0.11628427629387307,
750
+ "learning_rate": 0.0003078866223020815,
751
+ "loss": 1.2504,
752
+ "step": 520
753
+ },
754
+ {
755
+ "epoch": 1.3392857142857144,
756
+ "grad_norm": 0.08943286693904606,
757
+ "learning_rate": 0.0002976499085487862,
758
+ "loss": 1.2265,
759
+ "step": 525
760
+ },
761
+ {
762
+ "epoch": 1.3520408163265305,
763
+ "grad_norm": 0.10296423428005826,
764
+ "learning_rate": 0.0002875136441056286,
765
+ "loss": 1.2096,
766
+ "step": 530
767
+ },
768
+ {
769
+ "epoch": 1.364795918367347,
770
+ "grad_norm": 0.0881663874621136,
771
+ "learning_rate": 0.00027748286075076836,
772
+ "loss": 1.2155,
773
+ "step": 535
774
+ },
775
+ {
776
+ "epoch": 1.3775510204081631,
777
+ "grad_norm": 0.08112211915193616,
778
+ "learning_rate": 0.00026756253790013193,
779
+ "loss": 1.2279,
780
+ "step": 540
781
+ },
782
+ {
783
+ "epoch": 1.3903061224489797,
784
+ "grad_norm": 0.090452876583666,
785
+ "learning_rate": 0.00025775760013556424,
786
+ "loss": 1.2176,
787
+ "step": 545
788
+ },
789
+ {
790
+ "epoch": 1.403061224489796,
791
+ "grad_norm": 0.08241049413771702,
792
+ "learning_rate": 0.00024807291476019994,
793
+ "loss": 1.2235,
794
+ "step": 550
795
+ },
796
+ {
797
+ "epoch": 1.4158163265306123,
798
+ "grad_norm": 0.0930330073695628,
799
+ "learning_rate": 0.00023851328938226808,
800
+ "loss": 1.2039,
801
+ "step": 555
802
+ },
803
+ {
804
+ "epoch": 1.4285714285714286,
805
+ "grad_norm": 0.07990040558164058,
806
+ "learning_rate": 0.0002290834695285316,
807
+ "loss": 1.2134,
808
+ "step": 560
809
+ },
810
+ {
811
+ "epoch": 1.441326530612245,
812
+ "grad_norm": 0.08688446519906046,
813
+ "learning_rate": 0.0002197881362885426,
814
+ "loss": 1.1858,
815
+ "step": 565
816
+ },
817
+ {
818
+ "epoch": 1.4540816326530612,
819
+ "grad_norm": 0.08906821862405218,
820
+ "learning_rate": 0.0002106319039908879,
821
+ "loss": 1.2142,
822
+ "step": 570
823
+ },
824
+ {
825
+ "epoch": 1.4668367346938775,
826
+ "grad_norm": 0.09353827272017026,
827
+ "learning_rate": 0.000201619317912573,
828
+ "loss": 1.2176,
829
+ "step": 575
830
+ },
831
+ {
832
+ "epoch": 1.4795918367346939,
833
+ "grad_norm": 0.09869151116487236,
834
+ "learning_rate": 0.00019275485202268573,
835
+ "loss": 1.2111,
836
+ "step": 580
837
+ },
838
+ {
839
+ "epoch": 1.4923469387755102,
840
+ "grad_norm": 0.07871336680332862,
841
+ "learning_rate": 0.00018404290676145857,
842
+ "loss": 1.2009,
843
+ "step": 585
844
+ },
845
+ {
846
+ "epoch": 1.5051020408163265,
847
+ "grad_norm": 0.08210906329432778,
848
+ "learning_rate": 0.00017548780685582949,
849
+ "loss": 1.1945,
850
+ "step": 590
851
+ },
852
+ {
853
+ "epoch": 1.5178571428571428,
854
+ "grad_norm": 0.07607843649816663,
855
+ "learning_rate": 0.00016709379917259027,
856
+ "loss": 1.2013,
857
+ "step": 595
858
+ },
859
+ {
860
+ "epoch": 1.5306122448979593,
861
+ "grad_norm": 0.07966793194282133,
862
+ "learning_rate": 0.00015886505061018413,
863
+ "loss": 1.2372,
864
+ "step": 600
865
+ },
866
+ {
867
+ "epoch": 1.5433673469387754,
868
+ "grad_norm": 0.07879844228820579,
869
+ "learning_rate": 0.00015080564603020142,
870
+ "loss": 1.2051,
871
+ "step": 605
872
+ },
873
+ {
874
+ "epoch": 1.556122448979592,
875
+ "grad_norm": 0.07337572605792314,
876
+ "learning_rate": 0.00014291958622959973,
877
+ "loss": 1.2224,
878
+ "step": 610
879
+ },
880
+ {
881
+ "epoch": 1.568877551020408,
882
+ "grad_norm": 0.08717534192433574,
883
+ "learning_rate": 0.0001352107859546533,
884
+ "loss": 1.1978,
885
+ "step": 615
886
+ },
887
+ {
888
+ "epoch": 1.5816326530612246,
889
+ "grad_norm": 0.08338962851545824,
890
+ "learning_rate": 0.00012768307195762168,
891
+ "loss": 1.1933,
892
+ "step": 620
893
+ },
894
+ {
895
+ "epoch": 1.5943877551020407,
896
+ "grad_norm": 0.07500681809171242,
897
+ "learning_rate": 0.00012034018109709716,
898
+ "loss": 1.1924,
899
+ "step": 625
900
+ },
901
+ {
902
+ "epoch": 1.6071428571428572,
903
+ "grad_norm": 0.07959205675721448,
904
+ "learning_rate": 0.0001131857584829783,
905
+ "loss": 1.2101,
906
+ "step": 630
907
+ },
908
+ {
909
+ "epoch": 1.6198979591836735,
910
+ "grad_norm": 0.06971085806957752,
911
+ "learning_rate": 0.00010622335566698877,
912
+ "loss": 1.2142,
913
+ "step": 635
914
+ },
915
+ {
916
+ "epoch": 1.6326530612244898,
917
+ "grad_norm": 0.06828459321037969,
918
+ "learning_rate": 9.94564288796384e-05,
919
+ "loss": 1.2119,
920
+ "step": 640
921
+ },
922
+ {
923
+ "epoch": 1.6454081632653061,
924
+ "grad_norm": 0.07198545650406166,
925
+ "learning_rate": 9.288833731450419e-05,
926
+ "loss": 1.203,
927
+ "step": 645
928
+ },
929
+ {
930
+ "epoch": 1.6581632653061225,
931
+ "grad_norm": 0.0765334557855611,
932
+ "learning_rate": 8.652234146068206e-05,
933
+ "loss": 1.1987,
934
+ "step": 650
935
+ },
936
+ {
937
+ "epoch": 1.6709183673469388,
938
+ "grad_norm": 0.06996249721515567,
939
+ "learning_rate": 8.036160148423449e-05,
940
+ "loss": 1.205,
941
+ "step": 655
942
+ },
943
+ {
944
+ "epoch": 1.683673469387755,
945
+ "grad_norm": 0.07068726506681239,
946
+ "learning_rate": 7.440917565944349e-05,
947
+ "loss": 1.2168,
948
+ "step": 660
949
+ },
950
+ {
951
+ "epoch": 1.6964285714285714,
952
+ "grad_norm": 0.06352753939218142,
953
+ "learning_rate": 6.866801885064056e-05,
954
+ "loss": 1.1967,
955
+ "step": 665
956
+ },
957
+ {
958
+ "epoch": 1.7091836734693877,
959
+ "grad_norm": 0.06967499508626017,
960
+ "learning_rate": 6.314098104537324e-05,
961
+ "loss": 1.1747,
962
+ "step": 670
963
+ },
964
+ {
965
+ "epoch": 1.7219387755102042,
966
+ "grad_norm": 0.06527614945695559,
967
+ "learning_rate": 5.783080593963219e-05,
968
+ "loss": 1.1991,
969
+ "step": 675
970
+ },
971
+ {
972
+ "epoch": 1.7346938775510203,
973
+ "grad_norm": 0.06301111901417586,
974
+ "learning_rate": 5.27401295758439e-05,
975
+ "loss": 1.1996,
976
+ "step": 680
977
+ },
978
+ {
979
+ "epoch": 1.7474489795918369,
980
+ "grad_norm": 0.06944052501466691,
981
+ "learning_rate": 4.787147903430383e-05,
982
+ "loss": 1.1968,
983
+ "step": 685
984
+ },
985
+ {
986
+ "epoch": 1.760204081632653,
987
+ "grad_norm": 0.07413044994704315,
988
+ "learning_rate": 4.322727117869951e-05,
989
+ "loss": 1.1943,
990
+ "step": 690
991
+ },
992
+ {
993
+ "epoch": 1.7729591836734695,
994
+ "grad_norm": 0.06365480540319339,
995
+ "learning_rate": 3.880981145634704e-05,
996
+ "loss": 1.2191,
997
+ "step": 695
998
+ },
999
+ {
1000
+ "epoch": 1.7857142857142856,
1001
+ "grad_norm": 0.0713239761471288,
1002
+ "learning_rate": 3.462129275373577e-05,
1003
+ "loss": 1.2286,
1004
+ "step": 700
1005
+ },
1006
+ {
1007
+ "epoch": 1.7984693877551021,
1008
+ "grad_norm": 0.08049708826438332,
1009
+ "learning_rate": 3.066379430795002e-05,
1010
+ "loss": 1.2161,
1011
+ "step": 705
1012
+ },
1013
+ {
1014
+ "epoch": 1.8112244897959182,
1015
+ "grad_norm": 0.061716360746827195,
1016
+ "learning_rate": 2.6939280674508016e-05,
1017
+ "loss": 1.1999,
1018
+ "step": 710
1019
+ },
1020
+ {
1021
+ "epoch": 1.8239795918367347,
1022
+ "grad_norm": 0.0631321070521143,
1023
+ "learning_rate": 2.3449600752129597e-05,
1024
+ "loss": 1.2079,
1025
+ "step": 715
1026
+ },
1027
+ {
1028
+ "epoch": 1.836734693877551,
1029
+ "grad_norm": 0.0635532701163778,
1030
+ "learning_rate": 2.019648686491865e-05,
1031
+ "loss": 1.191,
1032
+ "step": 720
1033
+ },
1034
+ {
1035
+ "epoch": 1.8494897959183674,
1036
+ "grad_norm": 0.06640321579684394,
1037
+ "learning_rate": 1.7181553902413438e-05,
1038
+ "loss": 1.2255,
1039
+ "step": 725
1040
+ },
1041
+ {
1042
+ "epoch": 1.8622448979591837,
1043
+ "grad_norm": 0.06524200918052049,
1044
+ "learning_rate": 1.4406298517934068e-05,
1045
+ "loss": 1.2206,
1046
+ "step": 730
1047
+ },
1048
+ {
1049
+ "epoch": 1.875,
1050
+ "grad_norm": 0.06477805270118185,
1051
+ "learning_rate": 1.1872098385623586e-05,
1052
+ "loss": 1.1887,
1053
+ "step": 735
1054
+ },
1055
+ {
1056
+ "epoch": 1.8877551020408163,
1057
+ "grad_norm": 0.06497765016187285,
1058
+ "learning_rate": 9.580211516551862e-06,
1059
+ "loss": 1.2036,
1060
+ "step": 740
1061
+ },
1062
+ {
1063
+ "epoch": 1.9005102040816326,
1064
+ "grad_norm": 0.06544507847669666,
1065
+ "learning_rate": 7.531775634222138e-06,
1066
+ "loss": 1.2224,
1067
+ "step": 745
1068
+ },
1069
+ {
1070
+ "epoch": 1.913265306122449,
1071
+ "grad_norm": 0.06400465123455064,
1072
+ "learning_rate": 5.727807609789471e-06,
1073
+ "loss": 1.2099,
1074
+ "step": 750
1075
+ },
1076
+ {
1077
+ "epoch": 1.9260204081632653,
1078
+ "grad_norm": 0.06135587896382674,
1079
+ "learning_rate": 4.169202957272522e-06,
1080
+ "loss": 1.1876,
1081
+ "step": 755
1082
+ },
1083
+ {
1084
+ "epoch": 1.9387755102040818,
1085
+ "grad_norm": 0.06395336761154996,
1086
+ "learning_rate": 2.856735389008269e-06,
1087
+ "loss": 1.2315,
1088
+ "step": 760
1089
+ },
1090
+ {
1091
+ "epoch": 1.9515306122448979,
1092
+ "grad_norm": 0.06406624485413773,
1093
+ "learning_rate": 1.7910564315704035e-06,
1094
+ "loss": 1.1941,
1095
+ "step": 765
1096
+ },
1097
+ {
1098
+ "epoch": 1.9642857142857144,
1099
+ "grad_norm": 0.06823262080551822,
1100
+ "learning_rate": 9.726951023434348e-07,
1101
+ "loss": 1.2067,
1102
+ "step": 770
1103
+ },
1104
+ {
1105
+ "epoch": 1.9770408163265305,
1106
+ "grad_norm": 0.06268227825632704,
1107
+ "learning_rate": 4.020576469108139e-07,
1108
+ "loss": 1.2145,
1109
+ "step": 775
1110
+ },
1111
+ {
1112
+ "epoch": 1.989795918367347,
1113
+ "grad_norm": 0.06573014422017058,
1114
+ "learning_rate": 7.942733738924845e-08,
1115
+ "loss": 1.1867,
1116
+ "step": 780
1117
+ },
1118
+ {
1119
+ "epoch": 2.0,
1120
+ "eval_loss": 1.8389793634414673,
1121
+ "eval_runtime": 82.5875,
1122
+ "eval_samples_per_second": 157.373,
1123
+ "eval_steps_per_second": 4.928,
1124
+ "step": 784
1125
+ },
1126
+ {
1127
+ "epoch": 2.0,
1128
+ "step": 784,
1129
+ "total_flos": 173253108695040.0,
1130
+ "train_loss": 1.2838877389321521,
1131
+ "train_runtime": 2377.8575,
1132
+ "train_samples_per_second": 42.195,
1133
+ "train_steps_per_second": 0.33
1134
+ }
1135
+ ],
1136
+ "logging_steps": 5,
1137
+ "max_steps": 784,
1138
+ "num_input_tokens_seen": 0,
1139
+ "num_train_epochs": 2,
1140
+ "save_steps": 500,
1141
+ "stateful_callbacks": {
1142
+ "TrainerControl": {
1143
+ "args": {
1144
+ "should_epoch_stop": false,
1145
+ "should_evaluate": false,
1146
+ "should_log": false,
1147
+ "should_save": false,
1148
+ "should_training_stop": false
1149
+ },
1150
+ "attributes": {}
1151
+ }
1152
+ },
1153
+ "total_flos": 173253108695040.0,
1154
+ "train_batch_size": 4,
1155
+ "trial_name": null,
1156
+ "trial_params": null
1157
+ }
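
trainer_state.json records the full `log_history`: training-loss entries every 5 steps plus the two epoch-end evaluations. A small sketch for extracting the loss curve, assuming the file has been downloaded locally:

```python
# Extract training and evaluation loss curves from trainer_state.json.
import json

with open("trainer_state.json") as f:   # assumed local path
    state = json.load(f)

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(train[0], train[-1])  # (1, 1.7551) ... (780, 1.1867)
print(evals)                # [(392, 1.8648...), (784, 1.8389...)]
```
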
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa9272d31cbb7e50ff8390c0c8f09fa8653fc9540630831a7cbda918e9decb98
+ size 6456
vocab.json ADDED
The diff for this file is too large to render. See raw diff