fpadovani committed
Commit ec6502f · verified · 1 parent: f0a25ef

Training in progress, step 12000, checkpoint

checkpoint-12000/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c4ae09d47b5295ce46ddd3b48cc736ad9e206efb7505dbecd7ba877c6f6db31c
+oid sha256:59ce8f38601fbd6eee49a901e382da7d78ebce507e4e265c62d6aafebca365e2
 size 51007160
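
Each binary file in this checkpoint is stored as a Git LFS pointer: the "version", "oid sha256:...", and "size" lines describe the actual blob, and the oid is the SHA-256 digest of the file's contents. A minimal Python sketch for checking a downloaded file against its pointer; the local path is an assumption about where the repository was cloned:

    import hashlib

    def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
        """Stream the file in chunks and return its hex SHA-256 digest."""
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    # Expected digest copied from the new LFS pointer above.
    expected = "59ce8f38601fbd6eee49a901e382da7d78ebce507e4e265c62d6aafebca365e2"
    actual = sha256_of("checkpoint-12000/model.safetensors")  # assumed local path
    print("ok" if actual == expected else f"mismatch: {actual}")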
checkpoint-12000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:74eb6edf5a00642fc4a2785cff160a15179ba255a6a749f599217d8908e2b9d6
+oid sha256:03d99881f84e0354e580b4d33490efe5733785e6f803651a550a5c9e53c91454
 size 102078202
checkpoint-12000/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:56f44917d6c9223960f0046add7487286a53bb7d7d42fe253c3377466ea3e315
+oid sha256:e8987f95f1cceab22386ab9bede80f931023ad982dc5dee03b613fe9d1cd18e2
 size 14308
checkpoint-12000/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a300d405a31ed9a6a3aacb8615e61e340e96b3c687b201bcf6ed4ae9f299ec86
+oid sha256:5463b724c2cd6909cd79c85b289d00a369e4fb6bb042482b75620c59845c7360
 size 1000
checkpoint-12000/tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
checkpoint-12000/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 5.460005760192871,
-  "best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained/en_clm/wikipedia_30/checkpoint-12000",
-  "epoch": 6.310807257428346,
+  "best_metric": 4.517208576202393,
+  "best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained/fr_clm/wikipedia_30/checkpoint-12000",
+  "epoch": 12.882447665056361,
   "eval_steps": 2000,
   "global_step": 12000,
   "is_hyper_param_search": false,
@@ -9,79 +9,79 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 1.0518012095713911,
-      "eval_loss": 7.518920421600342,
-      "eval_runtime": 2.2399,
-      "eval_samples_per_second": 1422.808,
-      "eval_steps_per_second": 89.288,
+      "epoch": 2.1470746108427265,
+      "eval_loss": 7.138043403625488,
+      "eval_runtime": 0.8415,
+      "eval_samples_per_second": 1273.93,
+      "eval_steps_per_second": 79.621,
       "step": 2000
     },
     {
-      "epoch": 2.1036024191427822,
-      "grad_norm": 1.2947496175765991,
+      "epoch": 4.294149221685453,
+      "grad_norm": 1.455039381980896,
       "learning_rate": 1e-05,
-      "loss": 7.6141,
+      "loss": 7.213,
       "step": 4000
     },
     {
-      "epoch": 2.1036024191427822,
-      "eval_loss": 6.568221092224121,
-      "eval_runtime": 2.1236,
-      "eval_samples_per_second": 1500.783,
-      "eval_steps_per_second": 94.182,
+      "epoch": 4.294149221685453,
+      "eval_loss": 5.8541717529296875,
+      "eval_runtime": 0.7753,
+      "eval_samples_per_second": 1382.676,
+      "eval_steps_per_second": 86.417,
       "step": 4000
     },
     {
-      "epoch": 3.155403628714173,
-      "eval_loss": 6.16925573348999,
-      "eval_runtime": 2.1624,
-      "eval_samples_per_second": 1473.825,
-      "eval_steps_per_second": 92.49,
+      "epoch": 6.4412238325281805,
+      "eval_loss": 5.403579235076904,
+      "eval_runtime": 0.7542,
+      "eval_samples_per_second": 1421.437,
+      "eval_steps_per_second": 88.84,
       "step": 6000
     },
     {
-      "epoch": 4.2072048382855645,
-      "grad_norm": 2.330080270767212,
-      "learning_rate": 2e-05,
-      "loss": 6.2396,
+      "epoch": 8.588298443370906,
+      "grad_norm": 3.117489814758301,
+      "learning_rate": 1.9997500000000003e-05,
+      "loss": 5.4304,
       "step": 8000
     },
     {
-      "epoch": 4.2072048382855645,
-      "eval_loss": 5.901575088500977,
-      "eval_runtime": 2.1326,
-      "eval_samples_per_second": 1494.388,
-      "eval_steps_per_second": 93.78,
+      "epoch": 8.588298443370906,
+      "eval_loss": 5.049880504608154,
+      "eval_runtime": 0.7597,
+      "eval_samples_per_second": 1411.127,
+      "eval_steps_per_second": 88.195,
       "step": 8000
     },
     {
-      "epoch": 5.259006047856955,
-      "eval_loss": 5.67369270324707,
-      "eval_runtime": 2.1364,
-      "eval_samples_per_second": 1491.788,
-      "eval_steps_per_second": 93.617,
+      "epoch": 10.735373054213634,
+      "eval_loss": 4.760603427886963,
+      "eval_runtime": 0.7624,
+      "eval_samples_per_second": 1406.053,
+      "eval_steps_per_second": 87.878,
       "step": 10000
     },
     {
-      "epoch": 6.310807257428346,
-      "grad_norm": 2.565220594406128,
-      "learning_rate": 2.99925e-05,
-      "loss": 5.7217,
+      "epoch": 12.882447665056361,
+      "grad_norm": 2.9511141777038574,
+      "learning_rate": 2.9995e-05,
+      "loss": 4.771,
       "step": 12000
     },
     {
-      "epoch": 6.310807257428346,
-      "eval_loss": 5.460005760192871,
-      "eval_runtime": 2.1835,
-      "eval_samples_per_second": 1459.563,
-      "eval_steps_per_second": 91.595,
+      "epoch": 12.882447665056361,
+      "eval_loss": 4.517208576202393,
+      "eval_runtime": 0.764,
+      "eval_samples_per_second": 1403.097,
+      "eval_steps_per_second": 87.694,
       "step": 12000
     }
   ],
   "logging_steps": 4000,
   "max_steps": 100000,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 53,
+  "num_train_epochs": 108,
   "save_steps": 4000,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -95,7 +95,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3102067886653440.0,
+  "total_flos": 3102019405479936.0,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
checkpoint-12000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab68434ec7156d3a63e4783f824871a48d62c7c2fdcc831fdd5471c5f4aec7fe
+oid sha256:26102d0ac750ce531ebda9d7577ccea6c8ecbc93b04bdff23226e376dd5609a2
 size 5368
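
training_args.bin is the serialized TrainingArguments object that the Trainer writes next to every checkpoint, while optimizer.pt, scheduler.pt and rng_state.pth hold the optimizer, learning-rate scheduler and RNG states needed to resume training from step 12000 (typically via trainer.train(resume_from_checkpoint="checkpoint-12000")). A hedged sketch for inspecting the saved arguments; recent PyTorch releases need weights_only=False to unpickle this kind of object, and the path is again an assumption:

    import torch

    # Assumed local path; unpickling needs a compatible transformers install.
    args = torch.load("checkpoint-12000/training_args.bin", weights_only=False)
    print(type(args).__name__)  # typically TrainingArguments
    print(args.per_device_train_batch_size, args.save_steps, args.max_steps)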