ahmedheakl committed (verified)
Commit f21f539 · 1 Parent(s): 70ec237

Model save

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,60 @@
+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: Qwen/Qwen2.5-Coder-1.5B-Instruct
+ tags:
+ - llama-factory
+ - generated_from_trainer
+ model-index:
+ - name: ex45_qwen2.5_1.5b_61k_source_16kcw_2ep_cuda_amd_os
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # ex45_qwen2.5_1.5b_61k_source_16kcw_2ep_cuda_amd_os
+
+ This model is a fine-tuned version of [Qwen/Qwen2.5-Coder-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 256
+ - total_eval_batch_size: 32
+ - optimizer: AdamW (torch) with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 2.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.51.3
+ - PyTorch 2.6.0+cu124
+ - Datasets 3.2.0
+ - Tokenizers 0.21.0
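
For completeness, here is a minimal inference sketch for this checkpoint. The repo id below is an assumption inferred from the committer and model name above (substitute a local path if the weights are only stored locally), and the prompt is purely illustrative.

```python
# Minimal inference sketch for this checkpoint (repo id is assumed, not confirmed).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ahmedheakl/ex45_qwen2.5_1.5b_61k_source_16kcw_2ep_cuda_amd_os"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    device_map="auto",
)

messages = [{"role": "user", "content": "Write a CUDA kernel that adds two float vectors."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output_ids = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```

Sampling defaults (temperature, top-p, top-k, repetition penalty) are picked up automatically from generation_config.json below, so `generate()` needs no extra decoding arguments here.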
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
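
The map above pairs each added token string with its id. As a quick sanity-check sketch (assuming the repo has been cloned locally and the same assumed repo id as in the earlier example), the loaded tokenizer should report exactly these ids:

```python
# Sketch: verify the added-token ids above against the loaded tokenizer.
# Assumes added_tokens.json is in the current directory and the repo id is correct.
import json
from transformers import AutoTokenizer

model_id = "ahmedheakl/ex45_qwen2.5_1.5b_61k_source_16kcw_2ep_cuda_amd_os"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)

with open("added_tokens.json") as f:
    added = json.load(f)

for token, expected_id in added.items():
    assert tokenizer.convert_tokens_to_ids(token) == expected_id, token

print(f"{len(added)} added tokens map to the expected ids")  # 22 entries here
```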
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 1536,
+   "initializer_range": 0.02,
+   "intermediate_size": 8960,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 28,
+   "model_type": "qwen2",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 2,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.51.3",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
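
A rough back-of-envelope from these fields gives the expected parameter count. The sketch below ignores attention biases and RMSNorm weights, so it slightly undercounts; it is only meant as a sanity check against the safetensors shard size recorded further down.

```python
# Back-of-envelope parameter count from the config.json fields above
# (biases and RMSNorm weights are ignored, so this slightly undercounts).
hidden, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads = 12, 2
head_dim = hidden // heads            # 128
kv_dim = kv_heads * head_dim          # 256 (grouped-query attention)

attn = hidden * hidden * 2 + hidden * kv_dim * 2   # q/o projections + k/v projections
mlp = hidden * inter * 3                           # gate, up, down projections
embed = vocab * hidden                             # tied with the LM head, counted once

total = layers * (attn + mlp) + embed
print(f"~{total / 1e9:.2f}B parameters")           # ~1.54B
# Cross-check: model.safetensors below is 3,087,467,144 bytes; at 2 bytes per
# bfloat16 weight that is also ~1.54B parameters.
```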
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.1,
+   "temperature": 0.7,
+   "top_k": 20,
+   "top_p": 0.8,
+   "transformers_version": "4.51.3"
+ }
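
`generate()` applies these defaults automatically when the model is loaded from this repo; the sketch below (reusing `model` and `input_ids` from the example under the README) only spells out the mapping and is not required in practice.

```python
# Sketch: the decoding defaults above made explicit (values copied from the file).
output_ids = model.generate(
    input_ids,                        # token ids from the earlier inference sketch
    do_sample=True,
    temperature=0.7,
    top_p=0.8,
    top_k=20,
    repetition_penalty=1.1,
    eos_token_id=[151645, 151643],    # <|im_end|>, <|endoftext|>
    pad_token_id=151643,
    max_new_tokens=256,
)
```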
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59b21f25c33e9dd00bb9c1a4be9e0683c1a5d86bac206135f46e179221a857d4
+ size 3087467144
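
This is a Git LFS pointer rather than the weights themselves. After pulling the real file with `git lfs pull`, it can be checked against the pointer's oid and size; a small sketch, assuming model.safetensors sits in the current directory:

```python
# Sketch: verify a downloaded model.safetensors against the LFS pointer above.
import hashlib
import os

expected_oid = "59b21f25c33e9dd00bb9c1a4be9e0683c1a5d86bac206135f46e179221a857d4"
expected_size = 3087467144

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize("model.safetensors") == expected_size
assert h.hexdigest() == expected_oid
print("model.safetensors matches the LFS pointer")
```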
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
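
The chat_template above is the standard Qwen2.5 ChatML template: an optional system turn, then `<|im_start|>`/`<|im_end|>`-delimited turns, with tool-call handling when tools are passed. A short sketch of rendering it into the prompt string the model was trained on, assuming the same assumed repo id as before:

```python
# Sketch: render the ChatML-style template above into the exact prompt string.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "ahmedheakl/ex45_qwen2.5_1.5b_61k_source_16kcw_2ep_cuda_amd_os"  # assumed repo id
)

messages = [
    {"role": "system", "content": "You are a helpful coding assistant."},
    {"role": "user", "content": "Port this CUDA kernel to HIP."},
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# <|im_start|>system
# You are a helpful coding assistant.<|im_end|>
# <|im_start|>user
# Port this CUDA kernel to HIP.<|im_end|>
# <|im_start|>assistant
```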
trainer_log.jsonl ADDED
@@ -0,0 +1,48 @@
+ {"current_steps": 10, "total_steps": 478, "loss": 0.0243, "lr": 3.7500000000000005e-06, "epoch": 0.04184100418410042, "percentage": 2.09, "elapsed_time": "0:07:21", "remaining_time": "5:44:15"}
+ {"current_steps": 20, "total_steps": 478, "loss": 0.0066, "lr": 7.916666666666667e-06, "epoch": 0.08368200836820083, "percentage": 4.18, "elapsed_time": "0:14:37", "remaining_time": "5:34:55"}
+ {"current_steps": 30, "total_steps": 478, "loss": 0.0044, "lr": 1.2083333333333333e-05, "epoch": 0.12552301255230125, "percentage": 6.28, "elapsed_time": "0:21:43", "remaining_time": "5:24:28"}
+ {"current_steps": 40, "total_steps": 478, "loss": 0.0028, "lr": 1.6250000000000002e-05, "epoch": 0.16736401673640167, "percentage": 8.37, "elapsed_time": "0:28:35", "remaining_time": "5:13:08"}
+ {"current_steps": 50, "total_steps": 478, "loss": 0.0025, "lr": 1.9999733110857237e-05, "epoch": 0.20920502092050208, "percentage": 10.46, "elapsed_time": "0:34:55", "remaining_time": "4:59:01"}
+ {"current_steps": 60, "total_steps": 478, "loss": 0.0018, "lr": 1.9967723647752463e-05, "epoch": 0.2510460251046025, "percentage": 12.55, "elapsed_time": "0:41:26", "remaining_time": "4:48:39"}
+ {"current_steps": 70, "total_steps": 478, "loss": 0.0019, "lr": 1.988253206622306e-05, "epoch": 0.2928870292887029, "percentage": 14.64, "elapsed_time": "0:47:43", "remaining_time": "4:38:09"}
+ {"current_steps": 80, "total_steps": 478, "loss": 0.0026, "lr": 1.9744612900216588e-05, "epoch": 0.33472803347280333, "percentage": 16.74, "elapsed_time": "0:54:25", "remaining_time": "4:30:47"}
+ {"current_steps": 90, "total_steps": 478, "loss": 0.0017, "lr": 1.9554702008157567e-05, "epoch": 0.37656903765690375, "percentage": 18.83, "elapsed_time": "1:00:54", "remaining_time": "4:22:32"}
+ {"current_steps": 100, "total_steps": 478, "loss": 0.0013, "lr": 1.9313812646824432e-05, "epoch": 0.41841004184100417, "percentage": 20.92, "elapsed_time": "1:07:51", "remaining_time": "4:16:30"}
+ {"current_steps": 110, "total_steps": 478, "loss": 0.0031, "lr": 1.9023230065186192e-05, "epoch": 0.4602510460251046, "percentage": 23.01, "elapsed_time": "1:14:52", "remaining_time": "4:10:28"}
+ {"current_steps": 120, "total_steps": 478, "loss": 0.0022, "lr": 1.8684504647043093e-05, "epoch": 0.502092050209205, "percentage": 25.1, "elapsed_time": "1:21:09", "remaining_time": "4:02:06"}
+ {"current_steps": 130, "total_steps": 478, "loss": 0.0024, "lr": 1.8299443639058238e-05, "epoch": 0.5439330543933054, "percentage": 27.2, "elapsed_time": "1:27:42", "remaining_time": "3:54:47"}
+ {"current_steps": 140, "total_steps": 478, "loss": 0.0008, "lr": 1.7870101508314686e-05, "epoch": 0.5857740585774058, "percentage": 29.29, "elapsed_time": "1:33:57", "remaining_time": "3:46:50"}
+ {"current_steps": 150, "total_steps": 478, "loss": 0.002, "lr": 1.7398768980844664e-05, "epoch": 0.6276150627615062, "percentage": 31.38, "elapsed_time": "1:40:34", "remaining_time": "3:39:55"}
+ {"current_steps": 160, "total_steps": 478, "loss": 0.0011, "lr": 1.6887960819615025e-05, "epoch": 0.6694560669456067, "percentage": 33.47, "elapsed_time": "1:46:20", "remaining_time": "3:31:21"}
+ {"current_steps": 170, "total_steps": 478, "loss": 0.0008, "lr": 1.634040240717878e-05, "epoch": 0.7112970711297071, "percentage": 35.56, "elapsed_time": "1:53:05", "remaining_time": "3:24:53"}
+ {"current_steps": 180, "total_steps": 478, "loss": 0.0012, "lr": 1.5759015204579958e-05, "epoch": 0.7531380753138075, "percentage": 37.66, "elapsed_time": "1:59:19", "remaining_time": "3:17:32"}
+ {"current_steps": 190, "total_steps": 478, "loss": 0.0013, "lr": 1.5146901164094914e-05, "epoch": 0.7949790794979079, "percentage": 39.75, "elapsed_time": "2:06:06", "remaining_time": "3:11:09"}
+ {"current_steps": 200, "total_steps": 478, "loss": 0.0021, "lr": 1.4507326178974789e-05, "epoch": 0.8368200836820083, "percentage": 41.84, "elapsed_time": "2:13:11", "remaining_time": "3:05:08"}
+ {"current_steps": 210, "total_steps": 478, "loss": 0.0014, "lr": 1.3843702658491961e-05, "epoch": 0.8786610878661087, "percentage": 43.93, "elapsed_time": "2:20:09", "remaining_time": "2:58:51"}
+ {"current_steps": 220, "total_steps": 478, "loss": 0.0007, "lr": 1.3159571321260114e-05, "epoch": 0.9205020920502092, "percentage": 46.03, "elapsed_time": "2:27:05", "remaining_time": "2:52:30"}
+ {"current_steps": 230, "total_steps": 478, "loss": 0.0011, "lr": 1.2458582303968466e-05, "epoch": 0.9623430962343096, "percentage": 48.12, "elapsed_time": "2:33:44", "remaining_time": "2:45:46"}
+ {"current_steps": 240, "total_steps": 478, "loss": 0.0018, "lr": 1.1744475686323225e-05, "epoch": 1.00418410041841, "percentage": 50.21, "elapsed_time": "2:40:34", "remaining_time": "2:39:14"}
+ {"current_steps": 250, "total_steps": 478, "loss": 0.0007, "lr": 1.1021061536104093e-05, "epoch": 1.0460251046025104, "percentage": 52.3, "elapsed_time": "2:46:55", "remaining_time": "2:32:13"}
+ {"current_steps": 260, "total_steps": 478, "loss": 0.0008, "lr": 1.02921995808042e-05, "epoch": 1.0878661087866108, "percentage": 54.39, "elapsed_time": "2:53:40", "remaining_time": "2:25:36"}
+ {"current_steps": 270, "total_steps": 478, "loss": 0.0005, "lr": 9.561778614313876e-06, "epoch": 1.1297071129707112, "percentage": 56.49, "elapsed_time": "3:00:05", "remaining_time": "2:18:44"}
+ {"current_steps": 280, "total_steps": 478, "loss": 0.0011, "lr": 8.833695748522702e-06, "epoch": 1.1715481171548117, "percentage": 58.58, "elapsed_time": "3:06:54", "remaining_time": "2:12:09"}
+ {"current_steps": 290, "total_steps": 478, "loss": 0.0008, "lr": 8.111835620541397e-06, "epoch": 1.213389121338912, "percentage": 60.67, "elapsed_time": "3:14:02", "remaining_time": "2:05:47"}
+ {"current_steps": 300, "total_steps": 478, "loss": 0.0005, "lr": 7.400049666482061e-06, "epoch": 1.2552301255230125, "percentage": 62.76, "elapsed_time": "3:20:28", "remaining_time": "1:58:56"}
+ {"current_steps": 310, "total_steps": 478, "loss": 0.001, "lr": 6.702135572380078e-06, "epoch": 1.297071129707113, "percentage": 64.85, "elapsed_time": "3:27:18", "remaining_time": "1:52:20"}
+ {"current_steps": 320, "total_steps": 478, "loss": 0.0014, "lr": 6.021817011896004e-06, "epoch": 1.3389121338912133, "percentage": 66.95, "elapsed_time": "3:34:03", "remaining_time": "1:45:41"}
+ {"current_steps": 330, "total_steps": 478, "loss": 0.001, "lr": 5.362723778905427e-06, "epoch": 1.3807531380753137, "percentage": 69.04, "elapsed_time": "3:40:34", "remaining_time": "1:38:55"}
+ {"current_steps": 340, "total_steps": 478, "loss": 0.0012, "lr": 4.728372420978119e-06, "epoch": 1.4225941422594142, "percentage": 71.13, "elapsed_time": "3:46:53", "remaining_time": "1:32:05"}
+ {"current_steps": 350, "total_steps": 478, "loss": 0.0014, "lr": 4.12214747707527e-06, "epoch": 1.4644351464435146, "percentage": 73.22, "elapsed_time": "3:53:12", "remaining_time": "1:25:17"}
+ {"current_steps": 360, "total_steps": 478, "loss": 0.0005, "lr": 3.5472834195697017e-06, "epoch": 1.506276150627615, "percentage": 75.31, "elapsed_time": "3:59:48", "remaining_time": "1:18:36"}
+ {"current_steps": 370, "total_steps": 478, "loss": 0.0016, "lr": 3.0068473969362998e-06, "epoch": 1.5481171548117154, "percentage": 77.41, "elapsed_time": "4:06:13", "remaining_time": "1:11:52"}
+ {"current_steps": 380, "total_steps": 478, "loss": 0.0011, "lr": 2.5037228691878424e-06, "epoch": 1.5899581589958158, "percentage": 79.5, "elapsed_time": "4:13:06", "remaining_time": "1:05:16"}
+ {"current_steps": 390, "total_steps": 478, "loss": 0.0011, "lr": 2.0405942233682017e-06, "epoch": 1.6317991631799162, "percentage": 81.59, "elapsed_time": "4:20:00", "remaining_time": "0:58:40"}
+ {"current_steps": 400, "total_steps": 478, "loss": 0.0019, "lr": 1.619932451186048e-06, "epoch": 1.6736401673640167, "percentage": 83.68, "elapsed_time": "4:27:18", "remaining_time": "0:52:07"}
+ {"current_steps": 410, "total_steps": 478, "loss": 0.0008, "lr": 1.2439819652049178e-06, "epoch": 1.715481171548117, "percentage": 85.77, "elapsed_time": "4:33:52", "remaining_time": "0:45:25"}
+ {"current_steps": 420, "total_steps": 478, "loss": 0.001, "lr": 9.147486239311032e-07, "epoch": 1.7573221757322175, "percentage": 87.87, "elapsed_time": "4:40:21", "remaining_time": "0:38:43"}
+ {"current_steps": 430, "total_steps": 478, "loss": 0.0013, "lr": 6.339890296906493e-07, "epoch": 1.799163179916318, "percentage": 89.96, "elapsed_time": "4:46:28", "remaining_time": "0:31:58"}
+ {"current_steps": 440, "total_steps": 478, "loss": 0.0014, "lr": 4.032011563958893e-07, "epoch": 1.8410041841004183, "percentage": 92.05, "elapsed_time": "4:52:51", "remaining_time": "0:25:17"}
+ {"current_steps": 450, "total_steps": 478, "loss": 0.0012, "lr": 2.2361635720651199e-07, "epoch": 1.8828451882845187, "percentage": 94.14, "elapsed_time": "4:59:55", "remaining_time": "0:18:39"}
+ {"current_steps": 460, "total_steps": 478, "loss": 0.0008, "lr": 9.619279472766863e-08, "epoch": 1.9246861924686192, "percentage": 96.23, "elapsed_time": "5:06:12", "remaining_time": "0:11:58"}
+ {"current_steps": 470, "total_steps": 478, "loss": 0.002, "lr": 2.1610328797904145e-08, "epoch": 1.9665271966527196, "percentage": 98.33, "elapsed_time": "5:12:31", "remaining_time": "0:05:19"}
+ {"current_steps": 478, "total_steps": 478, "epoch": 2.0, "percentage": 100.0, "elapsed_time": "5:17:51", "remaining_time": "0:00:00"}
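
The log above is plain JSONL: one record every 10 optimizer steps plus a final summary line that has no loss field. A small sketch that summarizes it, assuming trainer_log.jsonl is available locally:

```python
# Sketch: summarise the trainer_log.jsonl entries above (loss and lr per step).
import json

steps, losses, lrs = [], [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        rec = json.loads(line)
        if "loss" in rec:                 # the final summary line has no loss entry
            steps.append(rec["current_steps"])
            losses.append(rec["loss"])
            lrs.append(rec["lr"])

print(f"{len(steps)} logged steps, final loss {losses[-1]:.4f} at step {steps[-1]}")
print(f"peak lr {max(lrs):.2e}, final lr {lrs[-1]:.2e}")
# With the log above this reports 47 logged steps, final loss 0.0020 at step 470,
# peak lr ~2.00e-05 (cosine schedule with 10% warmup), final lr ~2.16e-08.
```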
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbffd93a57199e48d8d1ed0bfaae02b24812e001033d4640b6d8a1856b735c93
+ size 7864
vocab.json ADDED
The diff for this file is too large to render. See raw diff