First aBERT version - modified
- aBERT_albert/checkpoint-2002/config.json +32 -0
- aBERT_albert/checkpoint-2002/optimizer.pt +3 -0
- aBERT_albert/checkpoint-2002/pytorch_model.bin +3 -0
- aBERT_albert/checkpoint-2002/rng_state.pth +3 -0
- aBERT_albert/checkpoint-2002/scheduler.pt +3 -0
- aBERT_albert/checkpoint-2002/trainer_state.json +40 -0
- aBERT_albert/checkpoint-2002/training_args.bin +3 -0
- aBERT_albert/checkpoint-3003/config.json +32 -0
- aBERT_albert/checkpoint-3003/optimizer.pt +3 -0
- aBERT_albert/checkpoint-3003/pytorch_model.bin +3 -0
- aBERT_albert/checkpoint-3003/rng_state.pth +3 -0
- aBERT_albert/checkpoint-3003/scheduler.pt +3 -0
- aBERT_albert/checkpoint-3003/trainer_state.json +52 -0
- aBERT_albert/checkpoint-3003/training_args.bin +3 -0
- aBERT_albert/runs/Nov12_15-08-21_588f645600fb/1636729706.827277/events.out.tfevents.1636729706.588f645600fb.72.1 +3 -0
- aBERT_albert/runs/Nov12_15-08-21_588f645600fb/events.out.tfevents.1636729706.588f645600fb.72.0 +3 -0
- aBERT_albert/runs/Nov12_15-25-01_588f645600fb/1636730705.4966695/events.out.tfevents.1636730705.588f645600fb.72.3 +3 -0
- aBERT_albert/runs/Nov12_15-25-01_588f645600fb/events.out.tfevents.1636730705.588f645600fb.72.2 +3 -0
- aBERT_albert/runs/Nov12_15-31-11_588f645600fb/1636731074.3929214/events.out.tfevents.1636731074.588f645600fb.72.5 +3 -0
- aBERT_albert/runs/Nov12_15-31-11_588f645600fb/events.out.tfevents.1636731074.588f645600fb.72.4 +3 -0
- aBERT_albert/runs/Nov12_15-42-23_588f645600fb/1636731746.5490618/events.out.tfevents.1636731746.588f645600fb.72.7 +3 -0
- aBERT_albert/runs/Nov12_15-42-23_588f645600fb/events.out.tfevents.1636731746.588f645600fb.72.6 +3 -0
- aBERT_albert/runs/Nov12_16-01-20_588f645600fb/1636732883.3480775/events.out.tfevents.1636732883.588f645600fb.72.9 +3 -0
- aBERT_albert/runs/Nov12_16-01-20_588f645600fb/events.out.tfevents.1636732883.588f645600fb.72.8 +3 -0
- added_tokens.json +1 -0
- config.json +31 -0
- optimizer.pt +3 -0
- pytorch_model.bin +3 -0
- rng_state.pth +3 -0
- scheduler.pt +3 -0
- special_tokens_map.json +1 -0
- spiece.model +3 -0
- spiece.vocab +0 -0
- tokenizer_config.json +1 -0
- trainer_state.json +52 -0
- training_args.bin +3 -0
aBERT_albert/checkpoint-2002/config.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "architectures": [
+    "AlbertForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0,
+  "bos_token_id": 2,
+  "classifier_dropout_prob": 0.1,
+  "down_scale_factor": 1,
+  "embedding_size": 128,
+  "eos_token_id": 3,
+  "gap_size": 0,
+  "hidden_act": "gelu_new",
+  "hidden_dropout_prob": 0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "inner_group_num": 1,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "albert",
+  "net_structure_type": 0,
+  "num_attention_heads": 12,
+  "num_hidden_groups": 1,
+  "num_hidden_layers": 12,
+  "num_memory_blocks": 0,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.12.3",
+  "type_vocab_size": 2,
+  "vocab_size": 30000
+}
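For reference, a minimal sketch (not part of the commit) of loading a checkpoint with this configuration through the transformers library; the local path is an assumption:

from transformers import AlbertConfig, AlbertForMaskedLM

# Assumed local path to the checkpoint directory added in this commit.
ckpt = "aBERT_albert/checkpoint-2002"

config = AlbertConfig.from_pretrained(ckpt)      # reads the config.json above
model = AlbertForMaskedLM.from_pretrained(ckpt)  # loads pytorch_model.bin

print(config.hidden_size, config.num_hidden_layers, config.embedding_size)  # 768 12 128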
aBERT_albert/checkpoint-2002/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79e2d380ba8e96076e087ba1aee6965498998f2b8043d35a755f9c324b05e6da
+size 89789181
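The binary artifacts in this commit (optimizer states, model weights, scheduler and RNG states, training_args.bin, spiece.model and the TensorBoard event files) are stored as Git LFS pointers like the one above, carrying only an oid and a size. A small sketch, assuming the real file has been fetched locally, for checking it against the pointer's sha256:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so large binaries (this one is ~90 MB) are not read at once.
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        while True:
            block = handle.read(chunk_size)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()

# Assumed local copy of the checkpoint's optimizer state.
oid = sha256_of("aBERT_albert/checkpoint-2002/optimizer.pt")
print(oid == "79e2d380ba8e96076e087ba1aee6965498998f2b8043d35a755f9c324b05e6da")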
aBERT_albert/checkpoint-2002/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8049999b409fa863d1905105613923c7095eb1ddb902e7981890c876978461a
+size 44902222
aBERT_albert/checkpoint-2002/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41149e8ed8789369b53281381ad196879e1d80020b7363cdb1871cb81a42568b
+size 14503
aBERT_albert/checkpoint-2002/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f3fa053573253b741c54d691d1ef44f7695d202cf55166013e5a53512b137b8
+size 623
aBERT_albert/checkpoint-2002/trainer_state.json
ADDED
@@ -0,0 +1,40 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.0,
+  "global_step": 2002,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.5,
+      "learning_rate": 4.167499167499168e-05,
+      "loss": 8.825,
+      "step": 500
+    },
+    {
+      "epoch": 1.0,
+      "learning_rate": 3.334998334998335e-05,
+      "loss": 7.1815,
+      "step": 1000
+    },
+    {
+      "epoch": 1.5,
+      "learning_rate": 2.5024975024975027e-05,
+      "loss": 6.9532,
+      "step": 1500
+    },
+    {
+      "epoch": 2.0,
+      "learning_rate": 1.6699966699966702e-05,
+      "loss": 6.7979,
+      "step": 2000
+    }
+  ],
+  "max_steps": 3003,
+  "num_train_epochs": 3,
+  "total_flos": 20211252614976.0,
+  "trial_name": null,
+  "trial_params": null
+}
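The trainer state above logs the learning rate and loss every 500 steps; over the first two epochs the MLM loss drops from 8.825 to 6.7979. A minimal sketch, assuming the checkpoint directory is available locally, for reading that log back:

import json

with open("aBERT_albert/checkpoint-2002/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(f"step {entry['step']:>4}  epoch {entry['epoch']:.1f}  "
          f"lr {entry['learning_rate']:.2e}  loss {entry['loss']:.4f}")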
aBERT_albert/checkpoint-2002/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e74d494bb0d03f53b17fe3db4bb1a504ab726e3733a21b31e815b480f66ae8d2
+size 2799
aBERT_albert/checkpoint-3003/config.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "architectures": [
+    "AlbertForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0,
+  "bos_token_id": 2,
+  "classifier_dropout_prob": 0.1,
+  "down_scale_factor": 1,
+  "embedding_size": 128,
+  "eos_token_id": 3,
+  "gap_size": 0,
+  "hidden_act": "gelu_new",
+  "hidden_dropout_prob": 0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "inner_group_num": 1,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "albert",
+  "net_structure_type": 0,
+  "num_attention_heads": 12,
+  "num_hidden_groups": 1,
+  "num_hidden_layers": 12,
+  "num_memory_blocks": 0,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.12.3",
+  "type_vocab_size": 2,
+  "vocab_size": 30000
+}
aBERT_albert/checkpoint-3003/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef44f509b6612b43998c7c56afbd987ddef5c066e278397d3db447a88ba99c1d
+size 89789181
aBERT_albert/checkpoint-3003/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56b28f5b09d5d5819711d737fce6e7f9cd86d3688f11dd4a1553ee72816f7614
+size 44902222
aBERT_albert/checkpoint-3003/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61f0dadcac6bef092147dddb64ae3c904671c7e45ecba39a8da55d26432f3219
+size 14503
aBERT_albert/checkpoint-3003/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d2316cb2dc93aa51287fd95e99a983fbde7c85d8f1969003818f823c51a9a21
+size 623
aBERT_albert/checkpoint-3003/trainer_state.json
ADDED
@@ -0,0 +1,52 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 3.0,
+  "global_step": 3003,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.5,
+      "learning_rate": 4.167499167499168e-05,
+      "loss": 8.825,
+      "step": 500
+    },
+    {
+      "epoch": 1.0,
+      "learning_rate": 3.334998334998335e-05,
+      "loss": 7.1815,
+      "step": 1000
+    },
+    {
+      "epoch": 1.5,
+      "learning_rate": 2.5024975024975027e-05,
+      "loss": 6.9532,
+      "step": 1500
+    },
+    {
+      "epoch": 2.0,
+      "learning_rate": 1.6699966699966702e-05,
+      "loss": 6.7979,
+      "step": 2000
+    },
+    {
+      "epoch": 2.5,
+      "learning_rate": 8.374958374958376e-06,
+      "loss": 6.7027,
+      "step": 2500
+    },
+    {
+      "epoch": 3.0,
+      "learning_rate": 4.9950049950049954e-08,
+      "loss": 6.7203,
+      "step": 3000
+    }
+  ],
+  "max_steps": 3003,
+  "num_train_epochs": 3,
+  "total_flos": 30316878922464.0,
+  "trial_name": null,
+  "trial_params": null
+}
aBERT_albert/checkpoint-3003/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e74d494bb0d03f53b17fe3db4bb1a504ab726e3733a21b31e815b480f66ae8d2
+size 2799
aBERT_albert/runs/Nov12_15-08-21_588f645600fb/1636729706.827277/events.out.tfevents.1636729706.588f645600fb.72.1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe330f3c20feff50675dca1e0208c15709e44dd32a00a8d30807b9f3670f38a7
+size 4495
aBERT_albert/runs/Nov12_15-08-21_588f645600fb/events.out.tfevents.1636729706.588f645600fb.72.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0efcee4ce4d9a58f15c8f15b6615bbe2b0c429d19e0ebd672dbbadb34dcd7d2c
+size 4546
aBERT_albert/runs/Nov12_15-25-01_588f645600fb/1636730705.4966695/events.out.tfevents.1636730705.588f645600fb.72.3
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3680d78befc2712d771495cac91c77e792571c4aa4e3ec4ed5c70a104afbf8d
+size 4495
aBERT_albert/runs/Nov12_15-25-01_588f645600fb/events.out.tfevents.1636730705.588f645600fb.72.2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b063c70dca7c3dd712298373c0d1d9d1ab28a0e7ea2b8a1ef6575a9e7bcff96
+size 3788
aBERT_albert/runs/Nov12_15-31-11_588f645600fb/1636731074.3929214/events.out.tfevents.1636731074.588f645600fb.72.5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bc9121c142d2d8dd24e19a76f1deb854774e8fceeb5bee224494df9254fa34b
+size 4495
aBERT_albert/runs/Nov12_15-31-11_588f645600fb/events.out.tfevents.1636731074.588f645600fb.72.4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d4472b3780a5516dcdfd41427b46a613ef03278cfa80e2585c80835babb5cb3
+size 3789
aBERT_albert/runs/Nov12_15-42-23_588f645600fb/1636731746.5490618/events.out.tfevents.1636731746.588f645600fb.72.7
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0965005fe283862b5bc53e123c41b643ca9de43170d768aa0ec98b1b94addf1
+size 4495
aBERT_albert/runs/Nov12_15-42-23_588f645600fb/events.out.tfevents.1636731746.588f645600fb.72.6
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8650a9d8901c58d5e56e5c15ef94598e3650fd8eef817dde8d241bca49202011
+size 3790
aBERT_albert/runs/Nov12_16-01-20_588f645600fb/1636732883.3480775/events.out.tfevents.1636732883.588f645600fb.72.9
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81c738d46ca8afae1553629a74f5ece37e7b2e39672fe30b715ea76c8c280621
+size 4495
aBERT_albert/runs/Nov12_16-01-20_588f645600fb/events.out.tfevents.1636732883.588f645600fb.72.8
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe35d59e72da755dab3a3322df0472cc73e2ae88e6c9961476c1b625854cf82b
+size 3279
added_tokens.json
ADDED
@@ -0,0 +1 @@
+{"[CLS]": 12811, "[SEP]": 12812, "<pad>": 12813, "[MASK]": 12814}
config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "architectures": [
+    "AlbertForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0,
+  "bos_token_id": 2,
+  "classifier_dropout_prob": 0.1,
+  "down_scale_factor": 1,
+  "embedding_size": 128,
+  "eos_token_id": 3,
+  "gap_size": 0,
+  "hidden_act": "gelu_new",
+  "hidden_dropout_prob": 0,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "inner_group_num": 1,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "albert",
+  "net_structure_type": 0,
+  "num_attention_heads": 12,
+  "num_hidden_groups": 1,
+  "num_hidden_layers": 12,
+  "num_memory_blocks": 0,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "transformers_version": "4.12.3",
+  "type_vocab_size": 2,
+  "vocab_size": 30000
+}
optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef44f509b6612b43998c7c56afbd987ddef5c066e278397d3db447a88ba99c1d
+size 89789181
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56b28f5b09d5d5819711d737fce6e7f9cd86d3688f11dd4a1553ee72816f7614
+size 44902222
rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61f0dadcac6bef092147dddb64ae3c904671c7e45ecba39a8da55d26432f3219
+size 14503
scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d2316cb2dc93aa51287fd95e99a983fbde7c85d8f1969003818f823c51a9a21
+size 623
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
spiece.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82f70bed802282fef4dd7e47fd90db5d24e844c84b73ca6358a0257578905351
+size 447557
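spiece.model is the SentencePiece model that backs the ALBERT tokenizer. A sketch, assuming the LFS object has been fetched locally, of inspecting it directly with the sentencepiece package:

import sentencepiece as spm

# Assumed local copy of the spiece.model file from this commit.
sp = spm.SentencePieceProcessor(model_file="spiece.model")

print(sp.get_piece_size())                                   # size of the SentencePiece vocabulary
print(sp.encode("a short example sentence", out_type=str))   # subword pieces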
spiece.vocab
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"do_lower_case": true, "remove_space": true, "keep_accents": false, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "aBERT", "tokenizer_class": "AlbertTokenizer"}
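With spiece.model, tokenizer_config.json, special_tokens_map.json and added_tokens.json in place, the tokenizer loads through transformers as an AlbertTokenizer. A sketch, assuming the repository is cloned locally under the name "aBERT" recorded in name_or_path above:

from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("aBERT")  # assumed local clone of this repo

encoded = tokenizer("An example sentence for the masked language model.")
print(encoded["input_ids"])
print(tokenizer.mask_token, tokenizer.mask_token_id)  # expected: [MASK] 12814, per added_tokens.json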
trainer_state.json
ADDED
@@ -0,0 +1,52 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 3.0,
+  "global_step": 3003,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.5,
+      "learning_rate": 4.167499167499168e-05,
+      "loss": 8.825,
+      "step": 500
+    },
+    {
+      "epoch": 1.0,
+      "learning_rate": 3.334998334998335e-05,
+      "loss": 7.1815,
+      "step": 1000
+    },
+    {
+      "epoch": 1.5,
+      "learning_rate": 2.5024975024975027e-05,
+      "loss": 6.9532,
+      "step": 1500
+    },
+    {
+      "epoch": 2.0,
+      "learning_rate": 1.6699966699966702e-05,
+      "loss": 6.7979,
+      "step": 2000
+    },
+    {
+      "epoch": 2.5,
+      "learning_rate": 8.374958374958376e-06,
+      "loss": 6.7027,
+      "step": 2500
+    },
+    {
+      "epoch": 3.0,
+      "learning_rate": 4.9950049950049954e-08,
+      "loss": 6.7203,
+      "step": 3000
+    }
+  ],
+  "max_steps": 3003,
+  "num_train_epochs": 3,
+  "total_flos": 30316878922464.0,
+  "trial_name": null,
+  "trial_params": null
+}
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e74d494bb0d03f53b17fe3db4bb1a504ab726e3733a21b31e815b480f66ae8d2
+size 2799
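Taken together, the root-level files form a complete AlbertForMaskedLM checkpoint plus its tokenizer, so the repository can be exercised end to end. A closing sketch, again assuming a local clone named "aBERT"; since pre-training stopped at an MLM loss of roughly 6.7, the predictions are not expected to be strong yet:

from transformers import pipeline

# Assumed local clone containing the model weights and tokenizer files above.
fill = pipeline("fill-mask", model="aBERT", tokenizer="aBERT")

# The tokenizer lower-cases input (do_lower_case: true) and uses [MASK] as its mask token.
print(fill(f"the weather today is {fill.tokenizer.mask_token} ."))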