Yannis98 committed on
Commit
7e06f96
verified
1 Parent(s): 2d94fe5

End of training

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ eval_nbest_predictions.json filter=lfs diff=lfs merge=lfs -text
37
+ predict_nbest_predictions.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -4,6 +4,8 @@ license: apache-2.0
4
  base_model: albert/albert-xxlarge-v2
5
  tags:
6
  - generated_from_trainer
 
 
7
  model-index:
8
  - name: trivia_albert_XXL_finetuned
9
  results: []
@@ -14,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
14
 
15
  # trivia_albert_XXL_finetuned
16
 
17
- This model is a fine-tuned version of [albert/albert-xxlarge-v2](https://huggingface.co/albert/albert-xxlarge-v2) on an unknown dataset.
18
 
19
  ## Model description
20
 
 
4
  base_model: albert/albert-xxlarge-v2
5
  tags:
6
  - generated_from_trainer
7
+ datasets:
8
+ - TimoImhof/TriviaQA-in-SQuAD-format
9
  model-index:
10
  - name: trivia_albert_XXL_finetuned
11
  results: []
 
16
 
17
  # trivia_albert_XXL_finetuned
18
 
19
+ This model is a fine-tuned version of [albert/albert-xxlarge-v2](https://huggingface.co/albert/albert-xxlarge-v2) on the TimoImhof/TriviaQA-in-SQuAD-format dataset.
20
 
21
  ## Model description
22
 
all_results.json ADDED
The diff for this file is too large to render. See raw diff
 
eval_nbest_predictions.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e22cedffb1049eabfb5166a61c9a01d2d85c2b5790530a3c2401ebc138a834a3
3
+ size 18829488
eval_predictions.json ADDED
The diff for this file is too large to render. See raw diff
 
eval_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.0,
3
+ "eval_exact_match": 80.0910865322056,
4
+ "eval_f1": 84.56093533580793,
5
+ "eval_runtime": 179.6918,
6
+ "eval_samples": 3398,
7
+ "eval_samples_per_second": 18.91,
8
+ "eval_steps_per_second": 2.365
9
+ }
full_predict_tr_results.json ADDED
The diff for this file is too large to render. See raw diff
 
full_predict_val_results.json ADDED
The diff for this file is too large to render. See raw diff
 
predict_nbest_predictions.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e22cedffb1049eabfb5166a61c9a01d2d85c2b5790530a3c2401ebc138a834a3
3
+ size 18829488
predict_predictions.json ADDED
The diff for this file is too large to render. See raw diff
 
predict_tr_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "predict_samples_tr": 13545,
3
+ "test_exact_match": 94.1516186757768,
4
+ "test_f1": 96.27167174803232,
5
+ "test_runtime": 725.0484,
6
+ "test_samples_per_second": 18.682,
7
+ "test_steps_per_second": 2.336
8
+ }
predict_val_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "predict_samples_val": 3398,
3
+ "test_exact_match": 80.0910865322056,
4
+ "test_f1": 84.56093533580793,
5
+ "test_runtime": 180.7077,
6
+ "test_samples_per_second": 18.804,
7
+ "test_steps_per_second": 2.352
8
+ }
runs/Feb09_18-46-57_xgpi2/events.out.tfevents.1739102543.xgpi2.7405.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b0f1e9ac1cab182b7226494a403d195c00f8a4b721dda787c5173aebb4a46f2
3
+ size 412
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 2.0,
3
+ "total_flos": 1.260270189771264e+16,
4
+ "train_loss": 1.4897090983840655,
5
+ "train_runtime": 4249.2251,
6
+ "train_samples": 13545,
7
+ "train_samples_per_second": 6.375,
8
+ "train_steps_per_second": 0.2
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.0,
5
+ "eval_steps": 500,
6
+ "global_step": 848,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 1.179245283018868,
13
+ "grad_norm": 16.47541046142578,
14
+ "learning_rate": 2.0518867924528304e-05,
15
+ "loss": 1.7593,
16
+ "step": 500
17
+ },
18
+ {
19
+ "epoch": 2.0,
20
+ "step": 848,
21
+ "total_flos": 1.260270189771264e+16,
22
+ "train_loss": 1.4897090983840655,
23
+ "train_runtime": 4249.2251,
24
+ "train_samples_per_second": 6.375,
25
+ "train_steps_per_second": 0.2
26
+ }
27
+ ],
28
+ "logging_steps": 500,
29
+ "max_steps": 848,
30
+ "num_input_tokens_seen": 0,
31
+ "num_train_epochs": 2,
32
+ "save_steps": 500,
33
+ "stateful_callbacks": {
34
+ "TrainerControl": {
35
+ "args": {
36
+ "should_epoch_stop": false,
37
+ "should_evaluate": false,
38
+ "should_log": false,
39
+ "should_save": true,
40
+ "should_training_stop": true
41
+ },
42
+ "attributes": {}
43
+ }
44
+ },
45
+ "total_flos": 1.260270189771264e+16,
46
+ "train_batch_size": 32,
47
+ "trial_name": null,
48
+ "trial_params": null
49
+ }