aseratus1 committed · Commit c154f9f · verified · 1 Parent(s): 3d22055

Training in progress, step 450, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:227708832b49ac7061c8c8cedfec3f38b21c76d72436fca62de0d18c0032ab53
+oid sha256:54920ba191c3897d93abfae0a14a21bd7a86a9692f46148ebd173663fb27535f
 size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7d9fb08196de88b0882b7cc0b21ec7bbcdafdbe26d855d24029b88bdd529d41c
+oid sha256:9701a6fcd0bec7a7a691c63dc3f672f22d9831364b8c114a2763218f599fac02
 size 341314644
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4430b9a08c075060ef6b7ad7a7977beb00c91a14854ffdc791c60cb3093cb1e9
+oid sha256:94ab18e1d59252106afdf4da18e343f22032a2767a4c2723009e6558803b0e96
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3c5a8c7855b3cc55dff44b95db370c984be8d56bad23c2aea8770dee5814ed88
+oid sha256:f915568f86cbbb4ec27d1f37808bebd339a27f0f49c65b6033fc00a9a70a87ef
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.5615507960319519,
-  "best_model_checkpoint": "miner_id_24/checkpoint-300",
-  "epoch": 0.11374407582938388,
+  "best_metric": 0.53005450963974,
+  "best_model_checkpoint": "miner_id_24/checkpoint-450",
+  "epoch": 0.17061611374407584,
   "eval_steps": 150,
-  "global_step": 300,
+  "global_step": 450,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -241,6 +241,119 @@
       "eval_samples_per_second": 21.057,
       "eval_steps_per_second": 5.267,
       "step": 300
+    },
+    {
+      "epoch": 0.11753554502369669,
+      "grad_norm": 0.8847883343696594,
+      "learning_rate": 9.753026828739756e-05,
+      "loss": 0.9371,
+      "step": 310
+    },
+    {
+      "epoch": 0.12132701421800948,
+      "grad_norm": 0.840414822101593,
+      "learning_rate": 9.733837129832993e-05,
+      "loss": 0.5609,
+      "step": 320
+    },
+    {
+      "epoch": 0.12511848341232226,
+      "grad_norm": 0.8552011251449585,
+      "learning_rate": 9.713949874763296e-05,
+      "loss": 0.4904,
+      "step": 330
+    },
+    {
+      "epoch": 0.12890995260663507,
+      "grad_norm": 0.7504507899284363,
+      "learning_rate": 9.693367994023828e-05,
+      "loss": 0.4276,
+      "step": 340
+    },
+    {
+      "epoch": 0.13270142180094788,
+      "grad_norm": 0.7728025317192078,
+      "learning_rate": 9.672094520464552e-05,
+      "loss": 0.3153,
+      "step": 350
+    },
+    {
+      "epoch": 0.13649289099526066,
+      "grad_norm": 0.8927388191223145,
+      "learning_rate": 9.650132588845318e-05,
+      "loss": 0.8539,
+      "step": 360
+    },
+    {
+      "epoch": 0.14028436018957346,
+      "grad_norm": 0.9280526638031006,
+      "learning_rate": 9.627485435373948e-05,
+      "loss": 0.5319,
+      "step": 370
+    },
+    {
+      "epoch": 0.14407582938388624,
+      "grad_norm": 0.8443691730499268,
+      "learning_rate": 9.604156397229367e-05,
+      "loss": 0.4799,
+      "step": 380
+    },
+    {
+      "epoch": 0.14786729857819905,
+      "grad_norm": 0.8244546055793762,
+      "learning_rate": 9.580148912069836e-05,
+      "loss": 0.4255,
+      "step": 390
+    },
+    {
+      "epoch": 0.15165876777251186,
+      "grad_norm": 0.587851881980896,
+      "learning_rate": 9.555466517526405e-05,
+      "loss": 0.3149,
+      "step": 400
+    },
+    {
+      "epoch": 0.15545023696682464,
+      "grad_norm": 0.9294399619102478,
+      "learning_rate": 9.53011285068163e-05,
+      "loss": 0.8398,
+      "step": 410
+    },
+    {
+      "epoch": 0.15924170616113745,
+      "grad_norm": 0.8756105303764343,
+      "learning_rate": 9.50409164753362e-05,
+      "loss": 0.5178,
+      "step": 420
+    },
+    {
+      "epoch": 0.16303317535545023,
+      "grad_norm": 0.7490562796592712,
+      "learning_rate": 9.477406742445516e-05,
+      "loss": 0.4677,
+      "step": 430
+    },
+    {
+      "epoch": 0.16682464454976303,
+      "grad_norm": 0.8731195330619812,
+      "learning_rate": 9.450062067580488e-05,
+      "loss": 0.4073,
+      "step": 440
+    },
+    {
+      "epoch": 0.17061611374407584,
+      "grad_norm": 0.7838053107261658,
+      "learning_rate": 9.422061652322298e-05,
+      "loss": 0.2986,
+      "step": 450
+    },
+    {
+      "epoch": 0.17061611374407584,
+      "eval_loss": 0.53005450963974,
+      "eval_runtime": 211.0482,
+      "eval_samples_per_second": 21.047,
+      "eval_steps_per_second": 5.264,
+      "step": 450
     }
   ],
   "logging_steps": 10,
@@ -269,7 +382,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.6549838985927066e+17,
+  "total_flos": 3.9862471318359245e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null