leixa committed
Commit 26c9e3e · verified · 1 Parent(s): 71c7616

Training in progress, step 500, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:edaff7825cdbe6b0fdb39696cf2b7cd66d5f90ad6d25445e43f5ef7ee3340029
+oid sha256:a3713ff8b77c83ba29cfaeb8d16937c25c823b3c939bc36a5731c62c7bc0bd91
 size 645975704
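The adapter file is a Git LFS pointer: only the sha256 oid changes between checkpoints, while the payload stays 645975704 bytes. A minimal sketch of checking a locally downloaded copy against the new pointer (the local path is an assumption; the expected oid is the one recorded above):

import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file in 1 MiB chunks so the ~646 MB adapter never has to fit in memory.
    digest = hashlib.sha256()
    with path.open("rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Assumed local path after `git lfs pull`; the oid is copied from the pointer above.
adapter = Path("last-checkpoint/adapter_model.safetensors")
expected = "a3713ff8b77c83ba29cfaeb8d16937c25c823b3c939bc36a5731c62c7bc0bd91"

if adapter.exists():
    ok = sha256_of(adapter) == expected
    print("pointer matches blob" if ok else "hash mismatch: blob is stale or corrupt")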
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:12d42cdf0549644c2e41d727772a5c59e41e49be61fd4c13178c5d354ef644fc
+oid sha256:5a7c0e64527c54ea3d64ae948893f26b9f78304066fe2bfee9f7a5bc61323260
 size 328468852
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dfcf362697f9cf020341e74d8b51e99e167f40c11f2909e2bf8b9ea96351aec8
+oid sha256:bfbf6021e3e81f4c9b4a471ebfa9809e3f0b65c69ac87b4db7d3e0b2bd4cb7e1
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7b58b44a2d5024ddc12e64ead45d5d25c7fc985d9aaeb44c7bc3de9b8cf56f23
+oid sha256:fe1d153de177b356f9e3a70d6e4ec979560b0c300994e71ca4cb89afc74c5b3a
 size 1064
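optimizer.pt, scheduler.pt and rng_state.pth are the extra state a Hugging Face Trainer checkpoint carries so that a run can resume exactly where it stopped rather than restart. A minimal inspection sketch, assuming the LFS blobs have been pulled into a local last-checkpoint/ directory (on recent torch versions the RNG file may additionally need weights_only=False):

import torch

ckpt_dir = "last-checkpoint"  # assumed local checkpoint directory

# torch.load is enough to peek at what each file holds before handing the
# directory back to a Trainer via trainer.train(resume_from_checkpoint=ckpt_dir).
optimizer_state = torch.load(f"{ckpt_dir}/optimizer.pt", map_location="cpu")
scheduler_state = torch.load(f"{ckpt_dir}/scheduler.pt", map_location="cpu")
rng_state = torch.load(f"{ckpt_dir}/rng_state.pth", map_location="cpu")

print("optimizer keys:", list(optimizer_state.keys()))  # e.g. 'state', 'param_groups'
print("scheduler keys:", list(scheduler_state.keys()))  # LR scheduler state_dict
print("rng entries:   ", list(rng_state.keys()))        # e.g. python/numpy/cpu/cuda RNG states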
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.10875386199794027,
+  "epoch": 0.1176989848462557,
   "eval_steps": 42,
-  "global_step": 462,
+  "global_step": 500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1181,6 +1181,90 @@
       "eval_samples_per_second": 26.622,
       "eval_steps_per_second": 6.657,
       "step": 462
+    },
+    {
+      "epoch": 0.10946005590701781,
+      "grad_norm": 0.890392005443573,
+      "learning_rate": 1.2536043909088191e-06,
+      "loss": 1.0834,
+      "step": 465
+    },
+    {
+      "epoch": 0.11016624981609534,
+      "grad_norm": 0.8814263939857483,
+      "learning_rate": 1.0486332583853563e-06,
+      "loss": 1.0303,
+      "step": 468
+    },
+    {
+      "epoch": 0.11087244372517287,
+      "grad_norm": 0.8813495635986328,
+      "learning_rate": 8.617714201998084e-07,
+      "loss": 1.0092,
+      "step": 471
+    },
+    {
+      "epoch": 0.11157863763425041,
+      "grad_norm": 0.8011179566383362,
+      "learning_rate": 6.93088004882253e-07,
+      "loss": 1.0072,
+      "step": 474
+    },
+    {
+      "epoch": 0.11228483154332794,
+      "grad_norm": 0.9191976189613342,
+      "learning_rate": 5.426454159531913e-07,
+      "loss": 1.0898,
+      "step": 477
+    },
+    {
+      "epoch": 0.11299102545240547,
+      "grad_norm": 0.9359084963798523,
+      "learning_rate": 4.104993088376974e-07,
+      "loss": 1.0413,
+      "step": 480
+    },
+    {
+      "epoch": 0.11369721936148301,
+      "grad_norm": 0.8636139035224915,
+      "learning_rate": 2.966985702759828e-07,
+      "loss": 1.0287,
+      "step": 483
+    },
+    {
+      "epoch": 0.11440341327056054,
+      "grad_norm": 0.9202425479888916,
+      "learning_rate": 2.012853002380466e-07,
+      "loss": 1.1024,
+      "step": 486
+    },
+    {
+      "epoch": 0.11510960717963807,
+      "grad_norm": 0.928870677947998,
+      "learning_rate": 1.2429479634897267e-07,
+      "loss": 1.0219,
+      "step": 489
+    },
+    {
+      "epoch": 0.11581580108871561,
+      "grad_norm": 0.8760671019554138,
+      "learning_rate": 6.575554083078084e-08,
+      "loss": 1.034,
+      "step": 492
+    },
+    {
+      "epoch": 0.11652199499779314,
+      "grad_norm": 0.8538545966148376,
+      "learning_rate": 2.568918996560532e-08,
+      "loss": 1.0029,
+      "step": 495
+    },
+    {
+      "epoch": 0.11722818890687067,
+      "grad_norm": 0.8800035119056702,
+      "learning_rate": 4.110566084036816e-09,
+      "loss": 1.0125,
+      "step": 498
     }
   ],
   "logging_steps": 3,
@@ -1195,12 +1279,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 3.284562863141683e+17,
+  "total_flos": 3.5547217133568e+17,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null