bradmin committed on
Commit 53f6455 · 1 Parent(s): d62d8fa

Training in progress, step 100, checkpoint

checkpoint-100/config.json CHANGED
@@ -1,35 +1,36 @@
  {
- "_name_or_path": "klue/roberta-large",
+ "_name_or_path": "EleutherAI/polyglot-ko-1.3b",
  "architectures": [
- "RobertaForSequenceClassification"
+ "GPTNeoXForSequenceClassification"
  ],
- "attention_probs_dropout_prob": 0.1,
+ "attention_dropout": 0.0,
  "bos_token_id": 0,
- "classifier_dropout": null,
+ "classifier_dropout": 0.1,
  "eos_token_id": 2,
- "gradient_checkpointing": false,
  "hidden_act": "gelu",
- "hidden_dropout_prob": 0.1,
- "hidden_size": 1024,
+ "hidden_dropout": 0.0,
+ "hidden_size": 2048,
  "id2label": {
  "0": "LABEL_0"
  },
  "initializer_range": 0.02,
- "intermediate_size": 4096,
+ "intermediate_size": 8192,
  "label2id": {
  "LABEL_0": 0
  },
  "layer_norm_eps": 1e-05,
- "max_position_embeddings": 514,
- "model_type": "roberta",
+ "max_position_embeddings": 2048,
+ "model_type": "gpt_neox",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "pad_token_id": 2,
- "position_embedding_type": "absolute",
- "tokenizer_class": "BertTokenizer",
+ "rope_scaling": null,
+ "rotary_emb_base": 10000,
+ "rotary_pct": 0.5,
+ "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.34.1",
- "type_vocab_size": 1,
  "use_cache": true,
- "vocab_size": 32000
+ "use_parallel_residual": true,
+ "vocab_size": 30080
  }
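The config change swaps the backbone from klue/roberta-large (a RoBERTa encoder, hidden size 1024, absolute positions) to EleutherAI/polyglot-ko-1.3b (a GPT-NeoX decoder, hidden size 2048, rotary embeddings) while keeping a single-label sequence-classification head. A minimal loading sketch in Python, assuming only a local copy of this checkpoint-100 directory (the hosting repo id is not part of this diff):

from transformers import AutoConfig, AutoModelForSequenceClassification

# Load the classification config saved in this checkpoint.
config = AutoConfig.from_pretrained("checkpoint-100")
print(config.model_type, config.hidden_size)  # gpt_neox 2048

# Instantiates GPTNeoXForSequenceClassification with the single LABEL_0 output.
model = AutoModelForSequenceClassification.from_pretrained("checkpoint-100")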
checkpoint-100/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c40d7d6ac7c9f651b2166fa7ccf825f008a4b0f6cc15c384dd83ced12e5db793
- size 2693490234
+ oid sha256:ade30226eb4e297d8decaec73c9550740f04a0911264dc394137a7a57b068dde
+ size 10161845574
checkpoint-100/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:59587ddb0c8585b1b8154e3d30ecf54a9e18dae1ce009bcdaee1aac965f85089
- size 1346765678
+ oid sha256:d5a038be930d7e4b9c06380cca5c12b961950b685498eb1df7b7188f4af6c5ba
+ size 5080945162
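Only the Git LFS pointers change for the binary files: the fp32 weights grow from roughly 1.35 GB (RoBERTa-large) to roughly 5.08 GB (polyglot-ko-1.3b), and the optimizer state to roughly 10.16 GB, about twice the model size, consistent with an Adam-style optimizer keeping two fp32 moment buffers per parameter. A small sketch for checking a downloaded file against the oid recorded in its pointer (the local path is an assumption):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so multi-gigabyte checkpoints never need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# Should match the "oid sha256:..." line in the LFS pointer above.
print(sha256_of("checkpoint-100/pytorch_model.bin"))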
checkpoint-100/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e06bbdec16b98761b8f1c3f2be37280e4c9a8c49585a0b40267fbbf7cd255b2
+ oid sha256:2c8c1979a903366056f816dfa903e1707fbe02269ee67fd5bd4bb542c4208efb
  size 14308
checkpoint-100/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1eb9b965954fc6facf61811f017e0585a50011ee22f071a2fb1e6e116234d3d7
+ oid sha256:d25eaeb8d0bf8a025a9171ec8459913c5e37aeef9ab9f3203b3512815e80a717
  size 1064
checkpoint-100/special_tokens_map.json CHANGED
@@ -1,9 +1,11 @@
  {
- "bos_token": "[CLS]",
- "cls_token": "[CLS]",
- "eos_token": "[SEP]",
- "mask_token": "[MASK]",
- "pad_token": "[PAD]",
- "sep_token": "[SEP]",
- "unk_token": "[UNK]"
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "<|sep|>",
+ "<|acc|>",
+ "<|tel|>",
+ "<|rrn|>"
+ ],
+ "eos_token": "<|endoftext|>",
+ "pad_token": "<|endoftext|>"
  }
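The BERT-style special tokens ([CLS], [SEP], [MASK], ...) are replaced by the polyglot/GPT-NeoX set, with <|endoftext|> doing double duty as both eos and pad token. A quick inspection sketch, again assuming a local checkpoint-100 directory:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-100")
print(tokenizer.eos_token, tokenizer.pad_token)  # <|endoftext|> <|endoftext|>
print(tokenizer.additional_special_tokens)       # ['<|endoftext|>', '<|sep|>', '<|acc|>', '<|tel|>', '<|rrn|>']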
checkpoint-100/tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
checkpoint-100/tokenizer_config.json CHANGED
@@ -1,7 +1,7 @@
  {
  "added_tokens_decoder": {
  "0": {
- "content": "[CLS]",
+ "content": "<|unused0|>",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -9,7 +9,7 @@
  "special": true
  },
  "1": {
- "content": "[PAD]",
+ "content": "<|unused1|>",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -17,7 +17,7 @@
  "special": true
  },
  "2": {
- "content": "[SEP]",
+ "content": "<|endoftext|>",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -25,15 +25,31 @@
  "special": true
  },
  "3": {
- "content": "[UNK]",
+ "content": "<|sep|>",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
  "single_word": false,
  "special": true
  },
- "4": {
- "content": "[MASK]",
+ "30000": {
+ "content": "<|acc|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "30001": {
+ "content": "<|tel|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "30002": {
+ "content": "<|rrn|>",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -41,19 +57,16 @@
  "special": true
  }
  },
- "bos_token": "[CLS]",
+ "additional_special_tokens": [
+ "<|endoftext|>",
+ "<|sep|>",
+ "<|acc|>",
+ "<|tel|>",
+ "<|rrn|>"
+ ],
  "clean_up_tokenization_spaces": true,
- "cls_token": "[CLS]",
- "do_basic_tokenize": true,
- "do_lower_case": false,
- "eos_token": "[SEP]",
- "mask_token": "[MASK]",
- "model_max_length": 512,
- "never_split": null,
- "pad_token": "[PAD]",
- "sep_token": "[SEP]",
- "strip_accents": null,
- "tokenize_chinese_chars": true,
- "tokenizer_class": "BertTokenizer",
- "unk_token": "[UNK]"
+ "eos_token": "<|endoftext|>",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<|endoftext|>",
+ "tokenizer_class": "PreTrainedTokenizerFast"
  }
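Two details worth noting in the new tokenizer config: the added <|acc|>, <|tel|> and <|rrn|> tokens sit at ids 30000-30002, below the model's vocab_size of 30080, so no embedding resize is required; and model_max_length is the library's "no limit recorded" sentinel (about 1e30), so sequences should be capped to max_position_embeddings (2048) explicitly when encoding. A hedged sketch of both checks, assuming a local checkpoint-100 directory:

from transformers import AutoConfig, AutoTokenizer

config = AutoConfig.from_pretrained("checkpoint-100")
tokenizer = AutoTokenizer.from_pretrained("checkpoint-100")

# The highest added-token id (30002 for <|rrn|>) fits inside the embedding matrix.
assert max(tokenizer.added_tokens_decoder) < config.vocab_size

# model_max_length carries no real limit here, so pass the positional limit explicitly.
encoded = tokenizer("분류할 문장입니다.", truncation=True,
                    max_length=config.max_position_embeddings)
print(len(encoded["input_ids"]))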
checkpoint-100/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.15241579027587257,
+ "epoch": 0.251414204902577,
  "eval_steps": 100,
  "global_step": 100,
  "is_hyper_param_search": false,
@@ -9,77 +9,77 @@
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 0.02,
- "learning_rate": 8.99484069051768e-06,
- "loss": 0.672,
+ "epoch": 0.03,
+ "learning_rate": 8.985917667125256e-06,
+ "loss": 0.4081,
  "step": 10
  },
  {
- "epoch": 0.03,
- "learning_rate": 8.979374592503753e-06,
- "loss": 0.393,
+ "epoch": 0.05,
+ "learning_rate": 8.943758807211778e-06,
+ "loss": 0.2059,
  "step": 20
  },
  {
- "epoch": 0.05,
- "learning_rate": 8.953637170129838e-06,
- "loss": 0.1594,
+ "epoch": 0.08,
+ "learning_rate": 8.873787284747973e-06,
+ "loss": 0.1613,
  "step": 30
  },
  {
- "epoch": 0.06,
- "learning_rate": 8.917687439985847e-06,
- "loss": 0.122,
+ "epoch": 0.1,
+ "learning_rate": 8.776441038520995e-06,
+ "loss": 0.1359,
  "step": 40
  },
  {
- "epoch": 0.08,
- "learning_rate": 8.87160783575339e-06,
- "loss": 0.0956,
+ "epoch": 0.13,
+ "learning_rate": 8.652329340639053e-06,
+ "loss": 0.1794,
  "step": 50
  },
  {
- "epoch": 0.09,
- "learning_rate": 8.815504019183158e-06,
- "loss": 0.102,
+ "epoch": 0.15,
+ "learning_rate": 8.502228983210244e-06,
+ "loss": 0.1471,
  "step": 60
  },
  {
- "epoch": 0.11,
- "learning_rate": 8.749504637809742e-06,
- "loss": 0.0888,
+ "epoch": 0.18,
+ "learning_rate": 8.327079416544763e-06,
+ "loss": 0.1173,
  "step": 70
  },
  {
- "epoch": 0.12,
- "learning_rate": 8.673761029959426e-06,
- "loss": 0.097,
+ "epoch": 0.2,
+ "learning_rate": 8.127976869309567e-06,
+ "loss": 0.1016,
  "step": 80
  },
  {
- "epoch": 0.14,
- "learning_rate": 8.588446877727417e-06,
- "loss": 0.0968,
+ "epoch": 0.23,
+ "learning_rate": 7.906167487436374e-06,
+ "loss": 0.1486,
  "step": 90
  },
  {
- "epoch": 0.15,
- "learning_rate": 8.493757808720196e-06,
- "loss": 0.0808,
+ "epoch": 0.25,
+ "learning_rate": 7.663039534725309e-06,
+ "loss": 0.1029,
  "step": 100
  },
  {
- "epoch": 0.15,
- "eval_accuracy": 0.0,
- "eval_loss": 3.0685205274494365e-05,
- "eval_runtime": 30.877,
- "eval_samples_per_second": 141.659,
- "eval_steps_per_second": 23.61,
+ "epoch": 0.25,
+ "eval_accuracy": 0.5,
+ "eval_loss": 1.1548028089336526e-09,
+ "eval_runtime": 31.6494,
+ "eval_samples_per_second": 89.354,
+ "eval_steps_per_second": 11.185,
  "step": 100
  }
  ],
  "logging_steps": 10,
- "max_steps": 656,
+ "max_steps": 397,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
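The new run covers a larger fraction of an epoch per step (epoch 0.25 versus 0.15 at step 100) and targets 397 rather than 656 total steps. With only LABEL_0 in id2label the head is effectively a single-output regression, which likely explains the near-zero eval_loss and the uninformative eval_accuracy values. The loss trace can be read straight from the state file, for example:

import json

# Print the training-loss trace recorded at each logging step in this checkpoint.
with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # skip the eval entry, which logs eval_* keys instead
        print(f"step {entry['step']:>3}  lr {entry['learning_rate']:.3e}  loss {entry['loss']:.4f}")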
checkpoint-100/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e1b0be5f447301b577e44061c504ec9398fb05913a538169784e7cfc7134c7d3
+ oid sha256:19f57ba66f27fb038e41f4b640f06321151aa6bdef95a3c3a615bf73330d3d10
  size 4600
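training_args.bin is a pickled TrainingArguments object rather than a tensor file, which is why its size stays at 4600 bytes even though the oid changes. One way to inspect it, assuming a local copy and that transformers is importable so the pickle can resolve (weights_only=False is needed on recent torch versions):

import torch

# Load the pickled TrainingArguments saved alongside this checkpoint.
args = torch.load("checkpoint-100/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.save_steps)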