anonimak committed
Commit b9f89fc · verified · 1 Parent(s): 7247d88

Upload folder using huggingface_hub

Files changed (48)
  1. all_results.json +13 -0
  2. checkpoint-1/config.json +54 -0
  3. checkpoint-1/model.safetensors +3 -0
  4. checkpoint-1/optimizer.pt +3 -0
  5. checkpoint-1/preprocessor_config.json +17 -0
  6. checkpoint-1/rng_state.pth +3 -0
  7. checkpoint-1/scheduler.pt +3 -0
  8. checkpoint-1/trainer_state.json +43 -0
  9. checkpoint-1/training_args.bin +3 -0
  10. checkpoint-2/config.json +54 -0
  11. checkpoint-2/model.safetensors +3 -0
  12. checkpoint-2/optimizer.pt +3 -0
  13. checkpoint-2/preprocessor_config.json +17 -0
  14. checkpoint-2/rng_state.pth +3 -0
  15. checkpoint-2/scheduler.pt +3 -0
  16. checkpoint-2/trainer_state.json +52 -0
  17. checkpoint-2/training_args.bin +3 -0
  18. checkpoint-3/config.json +54 -0
  19. checkpoint-3/model.safetensors +3 -0
  20. checkpoint-3/optimizer.pt +3 -0
  21. checkpoint-3/preprocessor_config.json +17 -0
  22. checkpoint-3/rng_state.pth +3 -0
  23. checkpoint-3/scheduler.pt +3 -0
  24. checkpoint-3/trainer_state.json +61 -0
  25. checkpoint-3/training_args.bin +3 -0
  26. checkpoint-4/config.json +54 -0
  27. checkpoint-4/model.safetensors +3 -0
  28. checkpoint-4/optimizer.pt +3 -0
  29. checkpoint-4/preprocessor_config.json +17 -0
  30. checkpoint-4/rng_state.pth +3 -0
  31. checkpoint-4/scheduler.pt +3 -0
  32. checkpoint-4/trainer_state.json +70 -0
  33. checkpoint-4/training_args.bin +3 -0
  34. checkpoint-5/config.json +54 -0
  35. checkpoint-5/model.safetensors +3 -0
  36. checkpoint-5/optimizer.pt +3 -0
  37. checkpoint-5/preprocessor_config.json +17 -0
  38. checkpoint-5/rng_state.pth +3 -0
  39. checkpoint-5/scheduler.pt +3 -0
  40. checkpoint-5/trainer_state.json +79 -0
  41. checkpoint-5/training_args.bin +3 -0
  42. config.json +54 -0
  43. eval_results.json +8 -0
  44. model.safetensors +3 -0
  45. preprocessor_config.json +17 -0
  46. train_results.json +8 -0
  47. trainer_state.json +88 -0
  48. training_args.bin +3 -0
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.3333333333333333,
+ "eval_loss": 0.6925444602966309,
+ "eval_runtime": 0.4804,
+ "eval_samples_per_second": 6.245,
+ "eval_steps_per_second": 2.082,
+ "total_flos": 349570801336320.0,
+ "train_loss": 0.668668270111084,
+ "train_runtime": 49.9671,
+ "train_samples_per_second": 1.201,
+ "train_steps_per_second": 0.1
+ }
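
The aggregate numbers above are internally consistent: 5 optimizer steps over roughly 50 seconds of training, and the reported rates follow directly from the runtimes. Below is a small sketch of that cross-check in Python; the sample counts are inferred from the reported rates (about 60 training samples seen over 5 epochs, 3 evaluation samples), not stated explicitly in the file.

```python
# Rough cross-check of the throughput figures reported in all_results.json.
# Sample counts below are inferred from the reported rates, not read from the file.
train_runtime = 49.9671    # seconds, from all_results.json
eval_runtime = 0.4804      # seconds, from all_results.json
max_steps = 5              # from trainer_state.json
train_samples_seen = 60    # inferred: 1.201 samples/s * 49.9671 s
eval_samples = 3           # inferred: eval_accuracy of 1/3 and 6.245 samples/s * 0.4804 s

print(round(max_steps / train_runtime, 1))           # 0.1   -> train_steps_per_second
print(round(train_samples_seen / train_runtime, 3))  # 1.201 -> train_samples_per_second
print(round(eval_samples / eval_runtime, 3))         # 6.245 -> eval_samples_per_second
```
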
checkpoint-1/config.json ADDED
@@ -0,0 +1,54 @@
+ {
+ "architectures": [
+ "MobileViTForImageClassification"
+ ],
+ "aspp_dropout_prob": 0.1,
+ "aspp_out_channels": 256,
+ "atrous_rates": [
+ 6,
+ 12,
+ 18
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "classifier_dropout_prob": 0.1,
+ "conv_kernel_size": 3,
+ "expand_ratio": 4.0,
+ "hidden_act": "silu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_sizes": [
+ 144,
+ 192,
+ 240
+ ],
+ "id2label": {
+ "0": "Anoa",
+ "1": "Kerbau"
+ },
+ "image_size": 256,
+ "initializer_range": 0.02,
+ "label2id": {
+ "Anoa": 0,
+ "Kerbau": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "mlp_ratio": 2.0,
+ "model_type": "mobilevit",
+ "neck_hidden_sizes": [
+ 16,
+ 32,
+ 64,
+ 96,
+ 128,
+ 160,
+ 640
+ ],
+ "num_attention_heads": 4,
+ "num_channels": 3,
+ "output_stride": 32,
+ "patch_size": 2,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "semantic_loss_ignore_index": 255,
+ "torch_dtype": "float32",
+ "transformers_version": "4.53.3"
+ }
checkpoint-1/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af63baef92f78f5c0ab9713b1d04bdabb0297fb74fa31dbbabd0d3359b071e02
+ size 19851560
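
The three lines above are a Git LFS pointer, not the weights themselves: they record the LFS spec version, the SHA-256 of the stored blob, and its size in bytes (about 19.9 MB). A minimal sketch of fetching the actual file with huggingface_hub follows, assuming a hypothetical repo id since the repository name is not part of this diff.

```python
# Minimal sketch: resolve the LFS pointer above into the real safetensors file.
# The repo_id below is a hypothetical placeholder, not taken from this commit.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(
    repo_id="anonimak/mobilevit-small-custom-classifier",  # hypothetical
    filename="checkpoint-1/model.safetensors",
)
print(weights_path)  # local cache path of the ~19.9 MB weights file
```
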
checkpoint-1/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c0535de8faf9bc6c7a4e8b9b5c4109d8f3fe501877dca17650594bbfa97e81a
+ size 39713530
checkpoint-1/preprocessor_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "crop_size": {
+ "height": 256,
+ "width": 256
+ },
+ "do_center_crop": true,
+ "do_flip_channel_order": true,
+ "do_flip_channels": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_processor_type": "MobileViTImageProcessor",
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "shortest_edge": 288
+ }
+ }
checkpoint-1/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30bf26c2c7590b2e63c6c2907406745c658919a91b180d36b7329198dffe3f3c
+ size 13990
checkpoint-1/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6e3bf5f5b48923fff34716541354d4e7a796b08fd31ef67518ffbadf93e0a7a
+ size 1064
checkpoint-1/trainer_state.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "best_global_step": 1,
+ "best_metric": 0.25,
+ "best_model_checkpoint": "mobilevit-small-custom-classifier/checkpoint-1",
+ "epoch": 1.0,
+ "eval_steps": 500,
+ "global_step": 1,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.25,
+ "eval_loss": 0.703833281993866,
+ "eval_runtime": 0.61,
+ "eval_samples_per_second": 6.557,
+ "eval_steps_per_second": 1.639,
+ "step": 1
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 5,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 5,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 69914160267264.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-1/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2c4c8b91e93df31166eeab2284d193936d8fc951d549d79ae4264174d4b0972
+ size 5304
checkpoint-2/config.json ADDED
@@ -0,0 +1,54 @@
+ {
+ "architectures": [
+ "MobileViTForImageClassification"
+ ],
+ "aspp_dropout_prob": 0.1,
+ "aspp_out_channels": 256,
+ "atrous_rates": [
+ 6,
+ 12,
+ 18
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "classifier_dropout_prob": 0.1,
+ "conv_kernel_size": 3,
+ "expand_ratio": 4.0,
+ "hidden_act": "silu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_sizes": [
+ 144,
+ 192,
+ 240
+ ],
+ "id2label": {
+ "0": "Anoa",
+ "1": "Kerbau"
+ },
+ "image_size": 256,
+ "initializer_range": 0.02,
+ "label2id": {
+ "Anoa": 0,
+ "Kerbau": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "mlp_ratio": 2.0,
+ "model_type": "mobilevit",
+ "neck_hidden_sizes": [
+ 16,
+ 32,
+ 64,
+ 96,
+ 128,
+ 160,
+ 640
+ ],
+ "num_attention_heads": 4,
+ "num_channels": 3,
+ "output_stride": 32,
+ "patch_size": 2,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "semantic_loss_ignore_index": 255,
+ "torch_dtype": "float32",
+ "transformers_version": "4.53.3"
+ }
checkpoint-2/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fc791843e7147b98c35e161f4920f0364a23499c03759fbee479b462e662b8c
+ size 19851560
checkpoint-2/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:839e08aba2811cd5dd88f55acb0c9519ec14d4fe5983d3a4f8b517af0ed87133
+ size 39713530
checkpoint-2/preprocessor_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "crop_size": {
+ "height": 256,
+ "width": 256
+ },
+ "do_center_crop": true,
+ "do_flip_channel_order": true,
+ "do_flip_channels": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_processor_type": "MobileViTImageProcessor",
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "shortest_edge": 288
+ }
+ }
checkpoint-2/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b6d8058bd13e850e48a0ca4df78973a507fff3502b12e5a684df109f166e171
+ size 13990
checkpoint-2/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b651bc4149658a6da631b87808cf9b99990105ee57a4afb648cdae24c114d817
+ size 1064
checkpoint-2/trainer_state.json ADDED
@@ -0,0 +1,52 @@
+ {
+ "best_global_step": 1,
+ "best_metric": 0.25,
+ "best_model_checkpoint": "mobilevit-small-custom-classifier/checkpoint-1",
+ "epoch": 2.0,
+ "eval_steps": 500,
+ "global_step": 2,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.25,
+ "eval_loss": 0.703833281993866,
+ "eval_runtime": 0.61,
+ "eval_samples_per_second": 6.557,
+ "eval_steps_per_second": 1.639,
+ "step": 1
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.25,
+ "eval_loss": 0.7048182487487793,
+ "eval_runtime": 0.5506,
+ "eval_samples_per_second": 7.265,
+ "eval_steps_per_second": 1.816,
+ "step": 2
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 5,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 5,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 139828320534528.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-2/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2c4c8b91e93df31166eeab2284d193936d8fc951d549d79ae4264174d4b0972
+ size 5304
checkpoint-3/config.json ADDED
@@ -0,0 +1,54 @@
+ {
+ "architectures": [
+ "MobileViTForImageClassification"
+ ],
+ "aspp_dropout_prob": 0.1,
+ "aspp_out_channels": 256,
+ "atrous_rates": [
+ 6,
+ 12,
+ 18
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "classifier_dropout_prob": 0.1,
+ "conv_kernel_size": 3,
+ "expand_ratio": 4.0,
+ "hidden_act": "silu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_sizes": [
+ 144,
+ 192,
+ 240
+ ],
+ "id2label": {
+ "0": "Anoa",
+ "1": "Kerbau"
+ },
+ "image_size": 256,
+ "initializer_range": 0.02,
+ "label2id": {
+ "Anoa": 0,
+ "Kerbau": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "mlp_ratio": 2.0,
+ "model_type": "mobilevit",
+ "neck_hidden_sizes": [
+ 16,
+ 32,
+ 64,
+ 96,
+ 128,
+ 160,
+ 640
+ ],
+ "num_attention_heads": 4,
+ "num_channels": 3,
+ "output_stride": 32,
+ "patch_size": 2,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "semantic_loss_ignore_index": 255,
+ "torch_dtype": "float32",
+ "transformers_version": "4.53.3"
+ }
checkpoint-3/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a92c78883131906aaef4bbdc772eef0783419199345d541ee7ef4bef0301fbd
+ size 19851560
checkpoint-3/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da0a4f5bc5849b38f9620b1f4fb10c25b07089d1f79a7a830040a5562480ae41
+ size 39713530
checkpoint-3/preprocessor_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "crop_size": {
+ "height": 256,
+ "width": 256
+ },
+ "do_center_crop": true,
+ "do_flip_channel_order": true,
+ "do_flip_channels": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_processor_type": "MobileViTImageProcessor",
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "shortest_edge": 288
+ }
+ }
checkpoint-3/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d31d6afad5e3e8a020d10830e7192e96c1cefecc9d434406fa55604bfdc9b81a
+ size 13990
checkpoint-3/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a22a8c335a9de0ca2a4c0e701e17a73b4d4af7bec6085d00edb0674e4213d34a
+ size 1064
checkpoint-3/trainer_state.json ADDED
@@ -0,0 +1,61 @@
+ {
+ "best_global_step": 3,
+ "best_metric": 0.5,
+ "best_model_checkpoint": "mobilevit-small-custom-classifier/checkpoint-3",
+ "epoch": 3.0,
+ "eval_steps": 500,
+ "global_step": 3,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.25,
+ "eval_loss": 0.703833281993866,
+ "eval_runtime": 0.61,
+ "eval_samples_per_second": 6.557,
+ "eval_steps_per_second": 1.639,
+ "step": 1
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.25,
+ "eval_loss": 0.7048182487487793,
+ "eval_runtime": 0.5506,
+ "eval_samples_per_second": 7.265,
+ "eval_steps_per_second": 1.816,
+ "step": 2
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.5,
+ "eval_loss": 0.7065370082855225,
+ "eval_runtime": 0.8353,
+ "eval_samples_per_second": 4.789,
+ "eval_steps_per_second": 1.197,
+ "step": 3
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 5,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 5,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 209742480801792.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-3/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2c4c8b91e93df31166eeab2284d193936d8fc951d549d79ae4264174d4b0972
+ size 5304
checkpoint-4/config.json ADDED
@@ -0,0 +1,54 @@
+ {
+ "architectures": [
+ "MobileViTForImageClassification"
+ ],
+ "aspp_dropout_prob": 0.1,
+ "aspp_out_channels": 256,
+ "atrous_rates": [
+ 6,
+ 12,
+ 18
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "classifier_dropout_prob": 0.1,
+ "conv_kernel_size": 3,
+ "expand_ratio": 4.0,
+ "hidden_act": "silu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_sizes": [
+ 144,
+ 192,
+ 240
+ ],
+ "id2label": {
+ "0": "Anoa",
+ "1": "Kerbau"
+ },
+ "image_size": 256,
+ "initializer_range": 0.02,
+ "label2id": {
+ "Anoa": 0,
+ "Kerbau": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "mlp_ratio": 2.0,
+ "model_type": "mobilevit",
+ "neck_hidden_sizes": [
+ 16,
+ 32,
+ 64,
+ 96,
+ 128,
+ 160,
+ 640
+ ],
+ "num_attention_heads": 4,
+ "num_channels": 3,
+ "output_stride": 32,
+ "patch_size": 2,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "semantic_loss_ignore_index": 255,
+ "torch_dtype": "float32",
+ "transformers_version": "4.53.3"
+ }
checkpoint-4/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c94bb888314b3489473b4a4e364f83fb979bf0fb437622b6064e8be0eff7b400
+ size 19851560
checkpoint-4/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41bd923e92e9187269720618e76be9073ace4e778fe4bc5ef90f1ff95770fc09
+ size 39713530
checkpoint-4/preprocessor_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "crop_size": {
+ "height": 256,
+ "width": 256
+ },
+ "do_center_crop": true,
+ "do_flip_channel_order": true,
+ "do_flip_channels": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_processor_type": "MobileViTImageProcessor",
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "shortest_edge": 288
+ }
+ }
checkpoint-4/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ef88e9e82c3db55d48c417ecdd583933a3ed3c6d77f2902882cf634aedf0f39
+ size 13990
checkpoint-4/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:097d7390e2ea6e2dc7b13ed52d618b4b09bb1445c1d3cd28d2ba5b19709a290a
+ size 1064
checkpoint-4/trainer_state.json ADDED
@@ -0,0 +1,70 @@
+ {
+ "best_global_step": 3,
+ "best_metric": 0.5,
+ "best_model_checkpoint": "mobilevit-small-custom-classifier/checkpoint-3",
+ "epoch": 4.0,
+ "eval_steps": 500,
+ "global_step": 4,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.25,
+ "eval_loss": 0.703833281993866,
+ "eval_runtime": 0.61,
+ "eval_samples_per_second": 6.557,
+ "eval_steps_per_second": 1.639,
+ "step": 1
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.25,
+ "eval_loss": 0.7048182487487793,
+ "eval_runtime": 0.5506,
+ "eval_samples_per_second": 7.265,
+ "eval_steps_per_second": 1.816,
+ "step": 2
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.5,
+ "eval_loss": 0.7065370082855225,
+ "eval_runtime": 0.8353,
+ "eval_samples_per_second": 4.789,
+ "eval_steps_per_second": 1.197,
+ "step": 3
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.5,
+ "eval_loss": 0.7078632116317749,
+ "eval_runtime": 0.5684,
+ "eval_samples_per_second": 7.037,
+ "eval_steps_per_second": 1.759,
+ "step": 4
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 5,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 5,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 279656641069056.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-4/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2c4c8b91e93df31166eeab2284d193936d8fc951d549d79ae4264174d4b0972
+ size 5304
checkpoint-5/config.json ADDED
@@ -0,0 +1,54 @@
+ {
+ "architectures": [
+ "MobileViTForImageClassification"
+ ],
+ "aspp_dropout_prob": 0.1,
+ "aspp_out_channels": 256,
+ "atrous_rates": [
+ 6,
+ 12,
+ 18
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "classifier_dropout_prob": 0.1,
+ "conv_kernel_size": 3,
+ "expand_ratio": 4.0,
+ "hidden_act": "silu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_sizes": [
+ 144,
+ 192,
+ 240
+ ],
+ "id2label": {
+ "0": "Anoa",
+ "1": "Kerbau"
+ },
+ "image_size": 256,
+ "initializer_range": 0.02,
+ "label2id": {
+ "Anoa": 0,
+ "Kerbau": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "mlp_ratio": 2.0,
+ "model_type": "mobilevit",
+ "neck_hidden_sizes": [
+ 16,
+ 32,
+ 64,
+ 96,
+ 128,
+ 160,
+ 640
+ ],
+ "num_attention_heads": 4,
+ "num_channels": 3,
+ "output_stride": 32,
+ "patch_size": 2,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "semantic_loss_ignore_index": 255,
+ "torch_dtype": "float32",
+ "transformers_version": "4.53.3"
+ }
checkpoint-5/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0cb013cf2b05e501551d542157e88caac296c63ce497a7882c232c14ff39433
+ size 19851560
checkpoint-5/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:733b9461bc75cda02c507c96df53aa69de4f6a44c44d6de4ae457cb91499a29b
+ size 39713530
checkpoint-5/preprocessor_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "crop_size": {
+ "height": 256,
+ "width": 256
+ },
+ "do_center_crop": true,
+ "do_flip_channel_order": true,
+ "do_flip_channels": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_processor_type": "MobileViTImageProcessor",
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "shortest_edge": 288
+ }
+ }
checkpoint-5/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64e9ed59d65719ca75221e245800cec6eb2565ef03680c2d408e6efad799991a
+ size 13990
checkpoint-5/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31c7cc88724873ebe7646569a370a20622e5bc45bf31d6c9df8c11adca8e41e8
+ size 1064
checkpoint-5/trainer_state.json ADDED
@@ -0,0 +1,79 @@
+ {
+ "best_global_step": 3,
+ "best_metric": 0.5,
+ "best_model_checkpoint": "mobilevit-small-custom-classifier/checkpoint-3",
+ "epoch": 5.0,
+ "eval_steps": 500,
+ "global_step": 5,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.25,
+ "eval_loss": 0.703833281993866,
+ "eval_runtime": 0.61,
+ "eval_samples_per_second": 6.557,
+ "eval_steps_per_second": 1.639,
+ "step": 1
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.25,
+ "eval_loss": 0.7048182487487793,
+ "eval_runtime": 0.5506,
+ "eval_samples_per_second": 7.265,
+ "eval_steps_per_second": 1.816,
+ "step": 2
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.5,
+ "eval_loss": 0.7065370082855225,
+ "eval_runtime": 0.8353,
+ "eval_samples_per_second": 4.789,
+ "eval_steps_per_second": 1.197,
+ "step": 3
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.5,
+ "eval_loss": 0.7078632116317749,
+ "eval_runtime": 0.5684,
+ "eval_samples_per_second": 7.037,
+ "eval_steps_per_second": 1.759,
+ "step": 4
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.5,
+ "eval_loss": 0.708530604839325,
+ "eval_runtime": 0.5344,
+ "eval_samples_per_second": 7.485,
+ "eval_steps_per_second": 1.871,
+ "step": 5
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 5,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 5,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 349570801336320.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-5/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2c4c8b91e93df31166eeab2284d193936d8fc951d549d79ae4264174d4b0972
+ size 5304
config.json ADDED
@@ -0,0 +1,54 @@
+ {
+ "architectures": [
+ "MobileViTForImageClassification"
+ ],
+ "aspp_dropout_prob": 0.1,
+ "aspp_out_channels": 256,
+ "atrous_rates": [
+ 6,
+ 12,
+ 18
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "classifier_dropout_prob": 0.1,
+ "conv_kernel_size": 3,
+ "expand_ratio": 4.0,
+ "hidden_act": "silu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_sizes": [
+ 144,
+ 192,
+ 240
+ ],
+ "id2label": {
+ "0": "Anoa",
+ "1": "Kerbau"
+ },
+ "image_size": 256,
+ "initializer_range": 0.02,
+ "label2id": {
+ "Anoa": 0,
+ "Kerbau": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "mlp_ratio": 2.0,
+ "model_type": "mobilevit",
+ "neck_hidden_sizes": [
+ 16,
+ 32,
+ 64,
+ 96,
+ 128,
+ 160,
+ 640
+ ],
+ "num_attention_heads": 4,
+ "num_channels": 3,
+ "output_stride": 32,
+ "patch_size": 2,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "semantic_loss_ignore_index": 255,
+ "torch_dtype": "float32",
+ "transformers_version": "4.53.3"
+ }
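
This root config.json matches the per-checkpoint copies: a MobileViTForImageClassification head over two classes (Anoa, Kerbau), saved with transformers 4.53.3. A minimal sketch of loading it, assuming the repository has been downloaded to a hypothetical local folder:

```python
# Minimal sketch: load the exported classifier from a local copy of this repo.
# The directory name is a hypothetical placeholder.
from transformers import AutoImageProcessor, AutoModelForImageClassification

model_dir = "./mobilevit-small-custom-classifier"  # hypothetical local path

processor = AutoImageProcessor.from_pretrained(model_dir)
model = AutoModelForImageClassification.from_pretrained(model_dir)

print(model.config.id2label)  # {0: 'Anoa', 1: 'Kerbau'} per config.json
```
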
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.3333333333333333,
+ "eval_loss": 0.6925444602966309,
+ "eval_runtime": 0.4804,
+ "eval_samples_per_second": 6.245,
+ "eval_steps_per_second": 2.082
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a92c78883131906aaef4bbdc772eef0783419199345d541ee7ef4bef0301fbd
+ size 19851560
preprocessor_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "crop_size": {
+ "height": 256,
+ "width": 256
+ },
+ "do_center_crop": true,
+ "do_flip_channel_order": true,
+ "do_flip_channels": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_processor_type": "MobileViTImageProcessor",
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "shortest_edge": 288
+ }
+ }
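
The preprocessor config above describes the inference-time pipeline: resize the shortest edge to 288, center-crop to 256×256, rescale pixel values by 1/255, and flip the channel order. A minimal sketch of applying it, assuming the same hypothetical local folder and an arbitrary input image:

```python
# Minimal sketch: run one image through the MobileViT preprocessing above.
# Both paths below are hypothetical placeholders.
from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("./mobilevit-small-custom-classifier")
image = Image.open("example.jpg").convert("RGB")

inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 256, 256]) after the center crop
```
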
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 5.0,
+ "total_flos": 349570801336320.0,
+ "train_loss": 0.668668270111084,
+ "train_runtime": 49.9671,
+ "train_samples_per_second": 1.201,
+ "train_steps_per_second": 0.1
+ }
trainer_state.json ADDED
@@ -0,0 +1,88 @@
+ {
+ "best_global_step": 3,
+ "best_metric": 0.5,
+ "best_model_checkpoint": "mobilevit-small-custom-classifier/checkpoint-3",
+ "epoch": 5.0,
+ "eval_steps": 500,
+ "global_step": 5,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.25,
+ "eval_loss": 0.703833281993866,
+ "eval_runtime": 0.61,
+ "eval_samples_per_second": 6.557,
+ "eval_steps_per_second": 1.639,
+ "step": 1
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.25,
+ "eval_loss": 0.7048182487487793,
+ "eval_runtime": 0.5506,
+ "eval_samples_per_second": 7.265,
+ "eval_steps_per_second": 1.816,
+ "step": 2
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.5,
+ "eval_loss": 0.7065370082855225,
+ "eval_runtime": 0.8353,
+ "eval_samples_per_second": 4.789,
+ "eval_steps_per_second": 1.197,
+ "step": 3
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.5,
+ "eval_loss": 0.7078632116317749,
+ "eval_runtime": 0.5684,
+ "eval_samples_per_second": 7.037,
+ "eval_steps_per_second": 1.759,
+ "step": 4
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.5,
+ "eval_loss": 0.708530604839325,
+ "eval_runtime": 0.5344,
+ "eval_samples_per_second": 7.485,
+ "eval_steps_per_second": 1.871,
+ "step": 5
+ },
+ {
+ "epoch": 5.0,
+ "step": 5,
+ "total_flos": 349570801336320.0,
+ "train_loss": 0.668668270111084,
+ "train_runtime": 49.9671,
+ "train_samples_per_second": 1.201,
+ "train_steps_per_second": 0.1
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 5,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 5,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 349570801336320.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2c4c8b91e93df31166eeab2284d193936d8fc951d549d79ae4264174d4b0972
+ size 5304