moogician committed
Commit d841436 · verified · 1 Parent(s): dacf0da

Upload trainer_state.json with huggingface_hub
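For reference, an upload like this is typically done with `huggingface_hub`'s `HfApi.upload_file`. A minimal sketch, assuming a hypothetical target repo id (the destination repo is not shown on this page):

```python
from huggingface_hub import HfApi

api = HfApi()  # uses the token saved by `huggingface-cli login` by default
api.upload_file(
    path_or_fileobj="trainer_state.json",   # local file written by the Trainer
    path_in_repo="trainer_state.json",      # destination path inside the repo
    repo_id="moogician/some-model",         # hypothetical repo id, not taken from this page
    commit_message="Upload trainer_state.json with huggingface_hub",
)
```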

Files changed (1)
trainer_state.json +425 -0
trainer_state.json ADDED
@@ -0,0 +1,425 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.072398190045249,
+   "eval_steps": 500,
+   "global_step": 56,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03619909502262444,
+       "grad_norm": 2.867363214492798,
+       "learning_rate": 3.7037037037037036e-07,
+       "loss": 0.7721,
+       "step": 1
+     },
+     {
+       "epoch": 0.07239819004524888,
+       "grad_norm": 2.795196771621704,
+       "learning_rate": 7.407407407407407e-07,
+       "loss": 0.7534,
+       "step": 2
+     },
+     {
+       "epoch": 0.1085972850678733,
+       "grad_norm": 2.8518760204315186,
+       "learning_rate": 1.111111111111111e-06,
+       "loss": 0.7531,
+       "step": 3
+     },
+     {
+       "epoch": 0.14479638009049775,
+       "grad_norm": 2.7276031970977783,
+       "learning_rate": 1.4814814814814815e-06,
+       "loss": 0.7604,
+       "step": 4
+     },
+     {
+       "epoch": 0.18099547511312217,
+       "grad_norm": 2.6922106742858887,
+       "learning_rate": 1.8518518518518519e-06,
+       "loss": 0.7775,
+       "step": 5
+     },
+     {
+       "epoch": 0.2171945701357466,
+       "grad_norm": 2.8505051136016846,
+       "learning_rate": 2.222222222222222e-06,
+       "loss": 0.7689,
+       "step": 6
+     },
+     {
+       "epoch": 0.25339366515837103,
+       "grad_norm": 2.711665153503418,
+       "learning_rate": 2.5925925925925925e-06,
+       "loss": 0.7687,
+       "step": 7
+     },
+     {
+       "epoch": 0.2895927601809955,
+       "grad_norm": 2.7716190814971924,
+       "learning_rate": 2.962962962962963e-06,
+       "loss": 0.769,
+       "step": 8
+     },
+     {
+       "epoch": 0.3257918552036199,
+       "grad_norm": 2.607780933380127,
+       "learning_rate": 3.3333333333333333e-06,
+       "loss": 0.7674,
+       "step": 9
+     },
+     {
+       "epoch": 0.36199095022624433,
+       "grad_norm": 2.305697202682495,
+       "learning_rate": 3.7037037037037037e-06,
+       "loss": 0.7529,
+       "step": 10
+     },
+     {
+       "epoch": 0.39819004524886875,
+       "grad_norm": 1.5863313674926758,
+       "learning_rate": 4.074074074074074e-06,
+       "loss": 0.7259,
+       "step": 11
+     },
+     {
+       "epoch": 0.4343891402714932,
+       "grad_norm": 1.2682443857192993,
+       "learning_rate": 4.444444444444444e-06,
+       "loss": 0.7206,
+       "step": 12
+     },
+     {
+       "epoch": 0.47058823529411764,
+       "grad_norm": 1.010533332824707,
+       "learning_rate": 4.814814814814815e-06,
+       "loss": 0.6921,
+       "step": 13
+     },
+     {
+       "epoch": 0.5067873303167421,
+       "grad_norm": 0.9136636257171631,
+       "learning_rate": 5.185185185185185e-06,
+       "loss": 0.6987,
+       "step": 14
+     },
+     {
+       "epoch": 0.5429864253393665,
+       "grad_norm": 1.275439739227295,
+       "learning_rate": 5.555555555555557e-06,
+       "loss": 0.6997,
+       "step": 15
+     },
+     {
+       "epoch": 0.579185520361991,
+       "grad_norm": 1.4839826822280884,
+       "learning_rate": 5.925925925925926e-06,
+       "loss": 0.6773,
+       "step": 16
+     },
+     {
+       "epoch": 0.6153846153846154,
+       "grad_norm": 1.5695866346359253,
+       "learning_rate": 6.296296296296297e-06,
+       "loss": 0.6582,
+       "step": 17
+     },
+     {
+       "epoch": 0.6515837104072398,
+       "grad_norm": 1.6517796516418457,
+       "learning_rate": 6.666666666666667e-06,
+       "loss": 0.6912,
+       "step": 18
+     },
+     {
+       "epoch": 0.6877828054298643,
+       "grad_norm": 1.3464380502700806,
+       "learning_rate": 7.0370370370370375e-06,
+       "loss": 0.6636,
+       "step": 19
+     },
+     {
+       "epoch": 0.7239819004524887,
+       "grad_norm": 1.0865730047225952,
+       "learning_rate": 7.4074074074074075e-06,
+       "loss": 0.6571,
+       "step": 20
+     },
+     {
+       "epoch": 0.7601809954751131,
+       "grad_norm": 0.8862206339836121,
+       "learning_rate": 7.77777777777778e-06,
+       "loss": 0.6583,
+       "step": 21
+     },
+     {
+       "epoch": 0.7963800904977375,
+       "grad_norm": 0.9783180952072144,
+       "learning_rate": 8.148148148148148e-06,
+       "loss": 0.6688,
+       "step": 22
+     },
+     {
+       "epoch": 0.832579185520362,
+       "grad_norm": 0.7160141468048096,
+       "learning_rate": 8.518518518518519e-06,
+       "loss": 0.6408,
+       "step": 23
+     },
+     {
+       "epoch": 0.8687782805429864,
+       "grad_norm": 0.6991405487060547,
+       "learning_rate": 8.888888888888888e-06,
+       "loss": 0.6447,
+       "step": 24
+     },
+     {
+       "epoch": 0.9049773755656109,
+       "grad_norm": 0.7763178944587708,
+       "learning_rate": 9.25925925925926e-06,
+       "loss": 0.6512,
+       "step": 25
+     },
+     {
+       "epoch": 0.9411764705882353,
+       "grad_norm": 0.5905259847640991,
+       "learning_rate": 9.62962962962963e-06,
+       "loss": 0.6337,
+       "step": 26
+     },
+     {
+       "epoch": 0.9773755656108597,
+       "grad_norm": 0.7515297532081604,
+       "learning_rate": 1e-05,
+       "loss": 0.655,
+       "step": 27
+     },
+     {
+       "epoch": 1.0361990950226245,
+       "grad_norm": 1.260597825050354,
+       "learning_rate": 9.999582149277188e-06,
+       "loss": 1.2675,
+       "step": 28
+     },
+     {
+       "epoch": 1.0723981900452488,
+       "grad_norm": 0.5624723434448242,
+       "learning_rate": 9.998328666948437e-06,
+       "loss": 0.5869,
+       "step": 29
+     },
+     {
+       "epoch": 1.1085972850678734,
+       "grad_norm": 0.5378224849700928,
+       "learning_rate": 9.996239762521152e-06,
+       "loss": 0.5919,
+       "step": 30
+     },
+     {
+       "epoch": 1.1447963800904977,
+       "grad_norm": 0.48451751470565796,
+       "learning_rate": 9.993315785135417e-06,
+       "loss": 0.5952,
+       "step": 31
+     },
+     {
+       "epoch": 1.1809954751131222,
+       "grad_norm": 0.4453476071357727,
+       "learning_rate": 9.989557223505661e-06,
+       "loss": 0.5941,
+       "step": 32
+     },
+     {
+       "epoch": 1.2171945701357467,
+       "grad_norm": 0.5747421979904175,
+       "learning_rate": 9.98496470583896e-06,
+       "loss": 0.6023,
+       "step": 33
+     },
+     {
+       "epoch": 1.253393665158371,
+       "grad_norm": 0.4679132401943207,
+       "learning_rate": 9.979538999730047e-06,
+       "loss": 0.5768,
+       "step": 34
+     },
+     {
+       "epoch": 1.2895927601809956,
+       "grad_norm": 0.4853627383708954,
+       "learning_rate": 9.973281012033009e-06,
+       "loss": 0.5566,
+       "step": 35
+     },
+     {
+       "epoch": 1.3257918552036199,
+       "grad_norm": 0.44682788848876953,
+       "learning_rate": 9.966191788709716e-06,
+       "loss": 0.5981,
+       "step": 36
+     },
+     {
+       "epoch": 1.3619909502262444,
+       "grad_norm": 0.4072805941104889,
+       "learning_rate": 9.958272514655006e-06,
+       "loss": 0.5664,
+       "step": 37
+     },
+     {
+       "epoch": 1.3981900452488687,
+       "grad_norm": 0.35199496150016785,
+       "learning_rate": 9.949524513498636e-06,
+       "loss": 0.5864,
+       "step": 38
+     },
+     {
+       "epoch": 1.4343891402714932,
+       "grad_norm": 0.420901894569397,
+       "learning_rate": 9.939949247384046e-06,
+       "loss": 0.5558,
+       "step": 39
+     },
+     {
+       "epoch": 1.4705882352941178,
+       "grad_norm": 0.39620932936668396,
+       "learning_rate": 9.929548316723983e-06,
+       "loss": 0.5713,
+       "step": 40
+     },
+     {
+       "epoch": 1.506787330316742,
+       "grad_norm": 0.36159488558769226,
+       "learning_rate": 9.918323459933006e-06,
+       "loss": 0.5712,
+       "step": 41
+     },
+     {
+       "epoch": 1.5429864253393664,
+       "grad_norm": 0.3682953119277954,
+       "learning_rate": 9.906276553136924e-06,
+       "loss": 0.5543,
+       "step": 42
+     },
+     {
+       "epoch": 1.5791855203619911,
+       "grad_norm": 0.35126638412475586,
+       "learning_rate": 9.893409609859221e-06,
+       "loss": 0.5615,
+       "step": 43
+     },
+     {
+       "epoch": 1.6153846153846154,
+       "grad_norm": 0.38464972376823425,
+       "learning_rate": 9.879724780684518e-06,
+       "loss": 0.5635,
+       "step": 44
+     },
+     {
+       "epoch": 1.6515837104072397,
+       "grad_norm": 0.37480273842811584,
+       "learning_rate": 9.86522435289912e-06,
+       "loss": 0.5618,
+       "step": 45
+     },
+     {
+       "epoch": 1.6877828054298643,
+       "grad_norm": 0.388458788394928,
+       "learning_rate": 9.849910750108718e-06,
+       "loss": 0.5635,
+       "step": 46
+     },
+     {
+       "epoch": 1.7239819004524888,
+       "grad_norm": 0.4044710099697113,
+       "learning_rate": 9.833786531833311e-06,
+       "loss": 0.5665,
+       "step": 47
+     },
+     {
+       "epoch": 1.760180995475113,
+       "grad_norm": 0.3484383821487427,
+       "learning_rate": 9.816854393079402e-06,
+       "loss": 0.5522,
+       "step": 48
+     },
+     {
+       "epoch": 1.7963800904977374,
+       "grad_norm": 0.33232375979423523,
+       "learning_rate": 9.79911716388956e-06,
+       "loss": 0.5712,
+       "step": 49
+     },
+     {
+       "epoch": 1.8325791855203621,
+       "grad_norm": 0.4234112501144409,
+       "learning_rate": 9.7805778088694e-06,
+       "loss": 0.5496,
+       "step": 50
+     },
+     {
+       "epoch": 1.8687782805429864,
+       "grad_norm": 0.3586530387401581,
+       "learning_rate": 9.761239426692077e-06,
+       "loss": 0.5488,
+       "step": 51
+     },
+     {
+       "epoch": 1.9049773755656108,
+       "grad_norm": 0.34319135546684265,
+       "learning_rate": 9.741105249580383e-06,
+       "loss": 0.5534,
+       "step": 52
+     },
+     {
+       "epoch": 1.9411764705882353,
+       "grad_norm": 0.406502366065979,
+       "learning_rate": 9.7201786427665e-06,
+       "loss": 0.5765,
+       "step": 53
+     },
+     {
+       "epoch": 1.9773755656108598,
+       "grad_norm": 0.3931889533996582,
+       "learning_rate": 9.698463103929542e-06,
+       "loss": 0.561,
+       "step": 54
+     },
+     {
+       "epoch": 2.0361990950226243,
+       "grad_norm": 0.7969871759414673,
+       "learning_rate": 9.67596226261095e-06,
+       "loss": 0.9762,
+       "step": 55
+     },
+     {
+       "epoch": 2.072398190045249,
+       "grad_norm": 0.34094685316085815,
+       "learning_rate": 9.652679879607843e-06,
+       "loss": 0.5055,
+       "step": 56
+     }
+   ],
+   "logging_steps": 1,
+   "max_steps": 270,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 28,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 71041648754688.0,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
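The uploaded file is the `transformers` Trainer state: `log_history` holds one record per logged step (here `logging_steps` is 1), so the training curve can be read back directly. A minimal sketch for inspecting it, assuming the file has been downloaded locally:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry carries epoch, grad_norm, learning_rate, loss and step.
losses = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(f"{len(losses)} logged steps; last loss {losses[-1][1]} at step {losses[-1][0]}")
```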