fpadovani committed · verified
Commit 17eb9e9 · 1 Parent(s): 86649f2

Training in progress, step 36000, checkpoint

checkpoint-36000/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f86283dae9ba101b51bf4687a90467c86c992f386fe1ea7dcf75b3e7f4cd28db
+oid sha256:3f8b1563b095c6564a53ebf57a86c5fa081e4578472b661ade29b2847c968ea5
 size 51007160
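
Note that model.safetensors (like the optimizer.pt, scheduler.pt, rng_state.pth, and training_args.bin entries below) is a Git LFS pointer file, so the diff only swaps the sha256 object id naming the binary blob; the tensors themselves live in LFS storage. Below is a minimal sketch of fetching and opening the updated weights, assuming the checkpoint sits in a Hugging Face Hub repo (the repo id is hypothetical, not part of this commit) and that huggingface_hub and safetensors are installed.

from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# Hypothetical repo id -- replace with the repository this commit belongs to.
REPO_ID = "fpadovani/your-checkpoint-repo"

# Resolves the LFS pointer to the actual ~51 MB blob whose sha256 oid
# matches the "+" line in the diff above.
weights_path = hf_hub_download(
    repo_id=REPO_ID,
    filename="checkpoint-36000/model.safetensors",
)

state_dict = load_file(weights_path)  # maps tensor names to torch tensors
print(f"loaded {len(state_dict)} tensors from {weights_path}")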
checkpoint-36000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9ebecee161128f28ab16045ef2ea95aca273dd82460d4d02523a6f6203c78573
+oid sha256:56fed14a7fea8f37166069dc5205f9417f25b2b8b2e453e7bf9725a66225e4fa
 size 102078202
checkpoint-36000/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5fe2b99ee3a067945f1c5e5d5628eaca48afa133232a2b048af5a9e763657905
+oid sha256:a7d8c917b9ad51a785fac0b31fb7212397baa76f85c31b90a359fd1d5b0df059
 size 14308
checkpoint-36000/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a7a94742019e388a00e70658ffa5860ecfa004552c4aa7680c6333c5e602c1f2
+oid sha256:3b94c01e7189f8b6a312a4d5000bca76bf860ddf8b29514c3b3be65a1d7ea2d2
 size 1000
checkpoint-36000/tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
checkpoint-36000/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 4.3944478034973145,
-  "best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained/en_clm/wikipedia_30/checkpoint-36000",
-  "epoch": 18.93242177228504,
+  "best_metric": 3.5649592876434326,
+  "best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained/fr_clm/wikipedia_30/checkpoint-36000",
+  "epoch": 38.64734299516908,
   "eval_steps": 2000,
   "global_step": 36000,
   "is_hyper_param_search": false,
@@ -9,217 +9,217 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 1.0518012095713911,
-      "eval_loss": 7.518920421600342,
-      "eval_runtime": 2.2399,
-      "eval_samples_per_second": 1422.808,
-      "eval_steps_per_second": 89.288,
+      "epoch": 2.1470746108427265,
+      "eval_loss": 7.138043403625488,
+      "eval_runtime": 0.8415,
+      "eval_samples_per_second": 1273.93,
+      "eval_steps_per_second": 79.621,
       "step": 2000
     },
     {
-      "epoch": 2.1036024191427822,
-      "grad_norm": 1.2947496175765991,
+      "epoch": 4.294149221685453,
+      "grad_norm": 1.455039381980896,
       "learning_rate": 1e-05,
-      "loss": 7.6141,
+      "loss": 7.213,
       "step": 4000
     },
     {
-      "epoch": 2.1036024191427822,
-      "eval_loss": 6.568221092224121,
-      "eval_runtime": 2.1236,
-      "eval_samples_per_second": 1500.783,
-      "eval_steps_per_second": 94.182,
+      "epoch": 4.294149221685453,
+      "eval_loss": 5.8541717529296875,
+      "eval_runtime": 0.7753,
+      "eval_samples_per_second": 1382.676,
+      "eval_steps_per_second": 86.417,
       "step": 4000
     },
     {
-      "epoch": 3.155403628714173,
-      "eval_loss": 6.16925573348999,
-      "eval_runtime": 2.1624,
-      "eval_samples_per_second": 1473.825,
-      "eval_steps_per_second": 92.49,
+      "epoch": 6.4412238325281805,
+      "eval_loss": 5.403579235076904,
+      "eval_runtime": 0.7542,
+      "eval_samples_per_second": 1421.437,
+      "eval_steps_per_second": 88.84,
       "step": 6000
     },
     {
-      "epoch": 4.2072048382855645,
-      "grad_norm": 2.330080270767212,
-      "learning_rate": 2e-05,
-      "loss": 6.2396,
+      "epoch": 8.588298443370906,
+      "grad_norm": 3.117489814758301,
+      "learning_rate": 1.9997500000000003e-05,
+      "loss": 5.4304,
       "step": 8000
     },
     {
-      "epoch": 4.2072048382855645,
-      "eval_loss": 5.901575088500977,
-      "eval_runtime": 2.1326,
-      "eval_samples_per_second": 1494.388,
-      "eval_steps_per_second": 93.78,
+      "epoch": 8.588298443370906,
+      "eval_loss": 5.049880504608154,
+      "eval_runtime": 0.7597,
+      "eval_samples_per_second": 1411.127,
+      "eval_steps_per_second": 88.195,
       "step": 8000
     },
     {
-      "epoch": 5.259006047856955,
-      "eval_loss": 5.67369270324707,
-      "eval_runtime": 2.1364,
-      "eval_samples_per_second": 1491.788,
-      "eval_steps_per_second": 93.617,
+      "epoch": 10.735373054213634,
+      "eval_loss": 4.760603427886963,
+      "eval_runtime": 0.7624,
+      "eval_samples_per_second": 1406.053,
+      "eval_steps_per_second": 87.878,
       "step": 10000
     },
     {
-      "epoch": 6.310807257428346,
-      "grad_norm": 2.565220594406128,
-      "learning_rate": 2.99925e-05,
-      "loss": 5.7217,
+      "epoch": 12.882447665056361,
+      "grad_norm": 2.9511141777038574,
+      "learning_rate": 2.9995e-05,
+      "loss": 4.771,
       "step": 12000
     },
     {
-      "epoch": 6.310807257428346,
-      "eval_loss": 5.460005760192871,
-      "eval_runtime": 2.1835,
-      "eval_samples_per_second": 1459.563,
-      "eval_steps_per_second": 91.595,
+      "epoch": 12.882447665056361,
+      "eval_loss": 4.517208576202393,
+      "eval_runtime": 0.764,
+      "eval_samples_per_second": 1403.097,
+      "eval_steps_per_second": 87.694,
       "step": 12000
     },
     {
-      "epoch": 7.362608466999737,
-      "eval_loss": 5.272489547729492,
-      "eval_runtime": 2.1833,
-      "eval_samples_per_second": 1459.691,
-      "eval_steps_per_second": 91.603,
+      "epoch": 15.029522275899087,
+      "eval_loss": 4.3206024169921875,
+      "eval_runtime": 0.7595,
+      "eval_samples_per_second": 1411.435,
+      "eval_steps_per_second": 88.215,
       "step": 14000
     },
     {
-      "epoch": 8.414409676571129,
-      "grad_norm": 2.7334115505218506,
-      "learning_rate": 3.99875e-05,
-      "loss": 5.3064,
+      "epoch": 17.176596886741816,
+      "grad_norm": 2.955515146255493,
+      "learning_rate": 3.999e-05,
+      "loss": 4.2888,
       "step": 16000
     },
     {
-      "epoch": 8.414409676571129,
-      "eval_loss": 5.1074652671813965,
-      "eval_runtime": 2.2402,
-      "eval_samples_per_second": 1422.652,
-      "eval_steps_per_second": 89.278,
+      "epoch": 17.176596886741816,
+      "eval_loss": 4.152973175048828,
+      "eval_runtime": 0.8247,
+      "eval_samples_per_second": 1299.91,
+      "eval_steps_per_second": 81.244,
       "step": 16000
     },
     {
-      "epoch": 9.46621088614252,
-      "eval_loss": 4.9716596603393555,
-      "eval_runtime": 2.1736,
-      "eval_samples_per_second": 1466.258,
-      "eval_steps_per_second": 92.015,
+      "epoch": 19.32367149758454,
+      "eval_loss": 4.015476703643799,
+      "eval_runtime": 0.7501,
+      "eval_samples_per_second": 1429.237,
+      "eval_steps_per_second": 89.327,
       "step": 18000
     },
     {
-      "epoch": 10.51801209571391,
-      "grad_norm": 2.6722376346588135,
-      "learning_rate": 4.9985e-05,
-      "loss": 4.9744,
+      "epoch": 21.470746108427267,
+      "grad_norm": 3.0531320571899414,
+      "learning_rate": 4.99875e-05,
+      "loss": 3.9141,
       "step": 20000
     },
     {
-      "epoch": 10.51801209571391,
-      "eval_loss": 4.864916801452637,
-      "eval_runtime": 2.2031,
-      "eval_samples_per_second": 1446.585,
-      "eval_steps_per_second": 90.78,
+      "epoch": 21.470746108427267,
+      "eval_loss": 3.896575927734375,
+      "eval_runtime": 0.768,
+      "eval_samples_per_second": 1395.843,
+      "eval_steps_per_second": 87.24,
       "step": 20000
     },
     {
-      "epoch": 11.569813305285301,
-      "eval_loss": 4.763542652130127,
-      "eval_runtime": 2.4382,
-      "eval_samples_per_second": 1307.131,
-      "eval_steps_per_second": 82.029,
+      "epoch": 23.617820719269993,
+      "eval_loss": 3.8046905994415283,
+      "eval_runtime": 0.7601,
+      "eval_samples_per_second": 1410.272,
+      "eval_steps_per_second": 88.142,
       "step": 22000
     },
     {
-      "epoch": 12.621614514856692,
-      "grad_norm": 2.4539408683776855,
-      "learning_rate": 5.9985e-05,
-      "loss": 4.7273,
+      "epoch": 25.764895330112722,
+      "grad_norm": 3.089989185333252,
+      "learning_rate": 5.998250000000001e-05,
+      "loss": 3.6154,
       "step": 24000
     },
     {
-      "epoch": 12.621614514856692,
-      "eval_loss": 4.683297634124756,
-      "eval_runtime": 2.452,
-      "eval_samples_per_second": 1299.733,
-      "eval_steps_per_second": 81.565,
+      "epoch": 25.764895330112722,
+      "eval_loss": 3.7359092235565186,
+      "eval_runtime": 0.7778,
+      "eval_samples_per_second": 1378.238,
+      "eval_steps_per_second": 86.14,
       "step": 24000
     },
     {
-      "epoch": 13.673415724428082,
-      "eval_loss": 4.621574401855469,
-      "eval_runtime": 2.1825,
-      "eval_samples_per_second": 1460.226,
-      "eval_steps_per_second": 91.636,
+      "epoch": 27.911969940955448,
+      "eval_loss": 3.6784231662750244,
+      "eval_runtime": 0.7541,
+      "eval_samples_per_second": 1421.491,
+      "eval_steps_per_second": 88.843,
       "step": 26000
     },
     {
-      "epoch": 14.725216933999475,
-      "grad_norm": 2.4913346767425537,
-      "learning_rate": 6.998e-05,
-      "loss": 4.5397,
+      "epoch": 30.059044551798173,
+      "grad_norm": 3.3495922088623047,
+      "learning_rate": 6.997500000000001e-05,
+      "loss": 3.3661,
       "step": 28000
     },
     {
-      "epoch": 14.725216933999475,
-      "eval_loss": 4.562623977661133,
-      "eval_runtime": 2.201,
-      "eval_samples_per_second": 1447.965,
-      "eval_steps_per_second": 90.867,
+      "epoch": 30.059044551798173,
+      "eval_loss": 3.6360318660736084,
+      "eval_runtime": 0.7603,
+      "eval_samples_per_second": 1409.913,
+      "eval_steps_per_second": 88.12,
       "step": 28000
     },
     {
-      "epoch": 15.777018143570865,
-      "eval_loss": 4.506168842315674,
-      "eval_runtime": 2.2491,
-      "eval_samples_per_second": 1416.988,
-      "eval_steps_per_second": 88.923,
+      "epoch": 32.2061191626409,
+      "eval_loss": 3.6019041538238525,
+      "eval_runtime": 0.7638,
+      "eval_samples_per_second": 1403.547,
+      "eval_steps_per_second": 87.722,
       "step": 30000
     },
     {
-      "epoch": 16.828819353142258,
-      "grad_norm": 2.2661523818969727,
-      "learning_rate": 7.99775e-05,
-      "loss": 4.3839,
+      "epoch": 34.35319377348363,
+      "grad_norm": 3.1256051063537598,
+      "learning_rate": 7.997250000000001e-05,
+      "loss": 3.1473,
       "step": 32000
     },
     {
-      "epoch": 16.828819353142258,
-      "eval_loss": 4.464449882507324,
-      "eval_runtime": 2.253,
-      "eval_samples_per_second": 1414.55,
-      "eval_steps_per_second": 88.77,
+      "epoch": 34.35319377348363,
+      "eval_loss": 3.5816054344177246,
+      "eval_runtime": 0.7628,
+      "eval_samples_per_second": 1405.391,
+      "eval_steps_per_second": 87.837,
       "step": 32000
     },
     {
-      "epoch": 17.88062056271365,
-      "eval_loss": 4.426318168640137,
-      "eval_runtime": 2.2688,
-      "eval_samples_per_second": 1404.709,
-      "eval_steps_per_second": 88.152,
+      "epoch": 36.500268384326354,
+      "eval_loss": 3.5698702335357666,
+      "eval_runtime": 0.7617,
+      "eval_samples_per_second": 1407.381,
+      "eval_steps_per_second": 87.961,
       "step": 34000
     },
     {
-      "epoch": 18.93242177228504,
-      "grad_norm": 2.226025104522705,
-      "learning_rate": 8.9975e-05,
-      "loss": 4.2529,
+      "epoch": 38.64734299516908,
+      "grad_norm": 2.921318769454956,
+      "learning_rate": 8.997000000000001e-05,
+      "loss": 2.9533,
       "step": 36000
     },
     {
-      "epoch": 18.93242177228504,
-      "eval_loss": 4.3944478034973145,
-      "eval_runtime": 2.294,
-      "eval_samples_per_second": 1389.273,
-      "eval_steps_per_second": 87.184,
+      "epoch": 38.64734299516908,
+      "eval_loss": 3.5649592876434326,
+      "eval_runtime": 0.7607,
+      "eval_samples_per_second": 1409.149,
+      "eval_steps_per_second": 88.072,
       "step": 36000
     }
   ],
   "logging_steps": 4000,
   "max_steps": 100000,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 53,
+  "num_train_epochs": 108,
   "save_steps": 4000,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -233,7 +233,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 9306197347307520.0,
+  "total_flos": 9305928933310464.0,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
checkpoint-36000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab68434ec7156d3a63e4783f824871a48d62c7c2fdcc831fdd5471c5f4aec7fe
+oid sha256:26102d0ac750ce531ebda9d7577ccea6c8ecbc93b04bdff23226e376dd5609a2
 size 5368
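
Taken together, the files above are what the transformers Trainer needs to resume: the updated trainer_state.json records eval_loss falling from 7.14 at step 2,000 to 3.56 at step 36,000 (the new best_metric), with max_steps set to 100,000. Below is a minimal resume sketch, assuming `model` and `train_dataset` are built elsewhere (neither is part of this commit) and using a hypothetical local output_dir that mirrors best_model_checkpoint.

from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="models_trained/fr_clm/wikipedia_30",  # hypothetical local path
    per_device_train_batch_size=16,  # matches "train_batch_size": 16
    max_steps=100_000,               # matches "max_steps": 100000
    logging_steps=4_000,             # matches "logging_steps": 4000
    save_steps=4_000,                # matches "save_steps": 4000
)

trainer = Trainer(model=model, args=args, train_dataset=train_dataset)

# Restores model.safetensors, optimizer.pt, scheduler.pt, rng_state.pth and
# trainer_state.json from step 36000, then continues training toward max_steps.
trainer.train(
    resume_from_checkpoint="models_trained/fr_clm/wikipedia_30/checkpoint-36000"
)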