Nexspear committed (verified)
Commit: dd3196a · Parent: ca1a6ab

Training in progress, step 39, checkpoint

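For context, a commit like this is what a 🤗 Transformers `Trainer` run uploads when it checkpoints mid-training and pushes the checkpoint folder to the Hub. The sketch below is illustrative only: the numeric values mirror what `last-checkpoint/trainer_state.json` records further down (batch size 2, logging every 5 steps, evaluation every 13 steps, checkpoints at steps 26 and 39), while the output directory, hub/push settings, and anything else not visible in this commit are assumptions.

from transformers import TrainingArguments

# Minimal sketch, not the actual run configuration. Values with a comment are
# taken from trainer_state.json in this checkpoint; the rest are assumptions.
args = TrainingArguments(
    output_dir="last-checkpoint",      # placeholder; matches the folder name in this repo
    per_device_train_batch_size=2,     # "train_batch_size": 2
    logging_steps=5,                   # "logging_steps": 5
    eval_strategy="steps",             # called "evaluation_strategy" in older versions
    eval_steps=13,                     # "eval_steps": 13
    save_strategy="steps",
    save_steps=13,                     # checkpoints appear at steps 26 and 39
    push_to_hub=True,                  # assumption: checkpoints are uploaded to the Hub
)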
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9785a89b17e27295010a8ef9b0217f071037900eef5896dd10aa8250850b7e1b
+oid sha256:5f09c826079afacdd860b6719c5ae3d54ca1fbf4a806e4243eda14cdbcb05a9b
 size 80013120
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e0fcc40c5e63c6d85606d86e068c38396ac3163419ef15c465602185d8a01ddb
+oid sha256:664c6c8194863ed9f12f3596a555874d90ea6426b1665e11e47d0df6c1582f88
 size 41119636
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b632b832bdc839a1f87f2b54a86c7465bc00e10f4d1d0eef845b6f6921a2856e
+oid sha256:d5fa5a15dacc246c77ef90ba7e7896379953025d6077daf340f31bc19decacb1
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:37841e69eda911caeb33edeefa0b2f140e72dcce247aeb757b2fe89c00d7887b
+oid sha256:0c50dbaa792cda4a28fbbc2acb2a3e03c59530712bbc5107212d33064d193da4
 size 1064
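The four files above are stored with Git LFS, so each diff only swaps the pointer's `oid sha256:` line; the oid is simply the SHA-256 digest of the file's contents, and the `size` is unchanged because the same tensors are overwritten with new values. A minimal sketch for verifying a downloaded checkpoint file against its pointer (the path and expected digest are copied from the adapter diff above; everything else is illustrative):

import hashlib

def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
    """SHA-256 hex digest of a file, i.e. the oid stored in its Git LFS pointer."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the adapter_model.safetensors pointer in this commit.
expected = "5f09c826079afacdd860b6719c5ae3d54ca1fbf4a806e4243eda14cdbcb05a9b"
print(lfs_oid("last-checkpoint/adapter_model.safetensors") == expected)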
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.029099048684946838,
+  "epoch": 0.04364857302742026,
   "eval_steps": 13,
-  "global_step": 26,
+  "global_step": 39,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -66,6 +66,28 @@
       "eval_samples_per_second": 22.197,
       "eval_steps_per_second": 11.128,
       "step": 26
+    },
+    {
+      "epoch": 0.03357582540570789,
+      "grad_norm": 0.6512241959571838,
+      "learning_rate": 5e-05,
+      "loss": 1.4273,
+      "step": 30
+    },
+    {
+      "epoch": 0.03917179630665921,
+      "grad_norm": 0.6176732778549194,
+      "learning_rate": 3.086582838174551e-05,
+      "loss": 1.2656,
+      "step": 35
+    },
+    {
+      "epoch": 0.04364857302742026,
+      "eval_loss": 1.2559878826141357,
+      "eval_runtime": 17.0992,
+      "eval_samples_per_second": 22.048,
+      "eval_steps_per_second": 11.053,
+      "step": 39
     }
   ],
   "logging_steps": 5,
@@ -85,7 +107,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4560452780556288.0,
+  "total_flos": 6840679170834432.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null