diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_motion_xset/config.yaml b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_motion_xset/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..04a2ef15e53ece97e2c4fd18fa9068a23a74ccf8 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_motion_xset/config.yaml @@ -0,0 +1,56 @@ +Experiment_name: ntu120_bone_motion_xset +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xset/train_bone_motion.yaml +device: +- 2 +- 3 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_bone_motion_xset +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_bone_motion.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_bone_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_bone_motion_xset diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_motion_xset/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_motion_xset/eval_results/best_acc.pkl new file mode 100644 index 
0000000000000000000000000000000000000000..bf594ef589e8ebc4b8595c1f8be8d2ac9dafb765 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_motion_xset/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc835811033c5f5255dff5b60d8f19d5c13216e0c476205e7c28746fb214f412 +size 34946665 diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_motion_xset/log.txt b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_motion_xset/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..90cb8eee55c8f5dfb64fde3c0f9ff456457cfa30 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_motion_xset/log.txt @@ -0,0 +1,929 @@ +[ Thu Sep 15 20:53:13 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_bone_motion_xset', 'model_saved_name': './save_models/ntu120_bone_motion_xset', 'Experiment_name': 'ntu120_bone_motion_xset', 'config': './config/ntu120_xset/train_bone_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [2, 3], 
'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Thu Sep 15 20:53:13 2022 ] Training epoch: 1 +[ Thu Sep 15 20:54:31 2022 ] Batch(99/162) done. Loss: 3.1825 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 20:55:16 2022 ] Eval epoch: 1 +[ Thu Sep 15 20:57:05 2022 ] Mean test loss of 930 batches: 4.908753395080566. +[ Thu Sep 15 20:57:05 2022 ] Top1: 9.45% +[ Thu Sep 15 20:57:06 2022 ] Top5: 27.60% +[ Thu Sep 15 20:57:06 2022 ] Training epoch: 2 +[ Thu Sep 15 20:57:37 2022 ] Batch(37/162) done. Loss: 2.2182 lr:0.100000 network_time: 0.0279 +[ Thu Sep 15 20:58:50 2022 ] Batch(137/162) done. Loss: 2.2917 lr:0.100000 network_time: 0.0297 +[ Thu Sep 15 20:59:07 2022 ] Eval epoch: 2 +[ Thu Sep 15 21:00:56 2022 ] Mean test loss of 930 batches: 4.822448253631592. +[ Thu Sep 15 21:00:57 2022 ] Top1: 14.80% +[ Thu Sep 15 21:00:57 2022 ] Top5: 37.31% +[ Thu Sep 15 21:00:57 2022 ] Training epoch: 3 +[ Thu Sep 15 21:01:56 2022 ] Batch(75/162) done. Loss: 2.0696 lr:0.100000 network_time: 0.0258 +[ Thu Sep 15 21:02:59 2022 ] Eval epoch: 3 +[ Thu Sep 15 21:04:48 2022 ] Mean test loss of 930 batches: 3.4721670150756836. +[ Thu Sep 15 21:04:48 2022 ] Top1: 24.95% +[ Thu Sep 15 21:04:49 2022 ] Top5: 50.87% +[ Thu Sep 15 21:04:49 2022 ] Training epoch: 4 +[ Thu Sep 15 21:05:03 2022 ] Batch(13/162) done. Loss: 1.4831 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 21:06:15 2022 ] Batch(113/162) done. Loss: 1.6804 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 21:06:50 2022 ] Eval epoch: 4 +[ Thu Sep 15 21:08:39 2022 ] Mean test loss of 930 batches: 3.2496213912963867. +[ Thu Sep 15 21:08:39 2022 ] Top1: 28.67% +[ Thu Sep 15 21:08:40 2022 ] Top5: 55.53% +[ Thu Sep 15 21:08:40 2022 ] Training epoch: 5 +[ Thu Sep 15 21:09:21 2022 ] Batch(51/162) done. 
Loss: 1.3771 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 21:10:34 2022 ] Batch(151/162) done. Loss: 1.7848 lr:0.100000 network_time: 0.0262 +[ Thu Sep 15 21:10:41 2022 ] Eval epoch: 5 +[ Thu Sep 15 21:12:30 2022 ] Mean test loss of 930 batches: 3.401992082595825. +[ Thu Sep 15 21:12:31 2022 ] Top1: 28.73% +[ Thu Sep 15 21:12:31 2022 ] Top5: 57.81% +[ Thu Sep 15 21:12:31 2022 ] Training epoch: 6 +[ Thu Sep 15 21:13:40 2022 ] Batch(89/162) done. Loss: 1.3649 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 21:14:32 2022 ] Eval epoch: 6 +[ Thu Sep 15 21:16:21 2022 ] Mean test loss of 930 batches: 2.9739701747894287. +[ Thu Sep 15 21:16:22 2022 ] Top1: 29.80% +[ Thu Sep 15 21:16:22 2022 ] Top5: 62.30% +[ Thu Sep 15 21:16:23 2022 ] Training epoch: 7 +[ Thu Sep 15 21:16:46 2022 ] Batch(27/162) done. Loss: 1.4052 lr:0.100000 network_time: 0.0294 +[ Thu Sep 15 21:17:59 2022 ] Batch(127/162) done. Loss: 0.7479 lr:0.100000 network_time: 0.0262 +[ Thu Sep 15 21:18:24 2022 ] Eval epoch: 7 +[ Thu Sep 15 21:20:13 2022 ] Mean test loss of 930 batches: 2.601893901824951. +[ Thu Sep 15 21:20:14 2022 ] Top1: 37.29% +[ Thu Sep 15 21:20:14 2022 ] Top5: 69.45% +[ Thu Sep 15 21:20:14 2022 ] Training epoch: 8 +[ Thu Sep 15 21:21:06 2022 ] Batch(65/162) done. Loss: 1.2406 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 21:22:16 2022 ] Eval epoch: 8 +[ Thu Sep 15 21:24:05 2022 ] Mean test loss of 930 batches: 2.694429397583008. +[ Thu Sep 15 21:24:05 2022 ] Top1: 38.20% +[ Thu Sep 15 21:24:06 2022 ] Top5: 69.74% +[ Thu Sep 15 21:24:06 2022 ] Training epoch: 9 +[ Thu Sep 15 21:24:12 2022 ] Batch(3/162) done. Loss: 0.8463 lr:0.100000 network_time: 0.0294 +[ Thu Sep 15 21:25:25 2022 ] Batch(103/162) done. Loss: 0.9506 lr:0.100000 network_time: 0.0303 +[ Thu Sep 15 21:26:07 2022 ] Eval epoch: 9 +[ Thu Sep 15 21:27:56 2022 ] Mean test loss of 930 batches: 3.4346163272857666. 
+[ Thu Sep 15 21:27:56 2022 ] Top1: 32.62% +[ Thu Sep 15 21:27:57 2022 ] Top5: 64.30% +[ Thu Sep 15 21:27:57 2022 ] Training epoch: 10 +[ Thu Sep 15 21:28:31 2022 ] Batch(41/162) done. Loss: 0.6424 lr:0.100000 network_time: 0.0260 +[ Thu Sep 15 21:29:44 2022 ] Batch(141/162) done. Loss: 0.9037 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 21:29:58 2022 ] Eval epoch: 10 +[ Thu Sep 15 21:31:47 2022 ] Mean test loss of 930 batches: 3.4100637435913086. +[ Thu Sep 15 21:31:47 2022 ] Top1: 35.12% +[ Thu Sep 15 21:31:48 2022 ] Top5: 65.64% +[ Thu Sep 15 21:31:48 2022 ] Training epoch: 11 +[ Thu Sep 15 21:32:49 2022 ] Batch(79/162) done. Loss: 0.8555 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 21:33:49 2022 ] Eval epoch: 11 +[ Thu Sep 15 21:35:38 2022 ] Mean test loss of 930 batches: 2.7729108333587646. +[ Thu Sep 15 21:35:38 2022 ] Top1: 38.37% +[ Thu Sep 15 21:35:39 2022 ] Top5: 71.00% +[ Thu Sep 15 21:35:39 2022 ] Training epoch: 12 +[ Thu Sep 15 21:35:55 2022 ] Batch(17/162) done. Loss: 0.9231 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 21:37:08 2022 ] Batch(117/162) done. Loss: 1.0566 lr:0.100000 network_time: 0.0222 +[ Thu Sep 15 21:37:40 2022 ] Eval epoch: 12 +[ Thu Sep 15 21:39:29 2022 ] Mean test loss of 930 batches: 3.1429965496063232. +[ Thu Sep 15 21:39:30 2022 ] Top1: 38.27% +[ Thu Sep 15 21:39:30 2022 ] Top5: 70.08% +[ Thu Sep 15 21:39:30 2022 ] Training epoch: 13 +[ Thu Sep 15 21:40:14 2022 ] Batch(55/162) done. Loss: 0.6888 lr:0.100000 network_time: 0.0275 +[ Thu Sep 15 21:41:27 2022 ] Batch(155/162) done. Loss: 0.8608 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 21:41:31 2022 ] Eval epoch: 13 +[ Thu Sep 15 21:43:20 2022 ] Mean test loss of 930 batches: 2.652634620666504. +[ Thu Sep 15 21:43:21 2022 ] Top1: 39.41% +[ Thu Sep 15 21:43:21 2022 ] Top5: 72.22% +[ Thu Sep 15 21:43:22 2022 ] Training epoch: 14 +[ Thu Sep 15 21:44:33 2022 ] Batch(93/162) done. 
Loss: 0.8529 lr:0.100000 network_time: 0.0314 +[ Thu Sep 15 21:45:23 2022 ] Eval epoch: 14 +[ Thu Sep 15 21:47:12 2022 ] Mean test loss of 930 batches: 2.584991693496704. +[ Thu Sep 15 21:47:12 2022 ] Top1: 39.55% +[ Thu Sep 15 21:47:13 2022 ] Top5: 72.15% +[ Thu Sep 15 21:47:13 2022 ] Training epoch: 15 +[ Thu Sep 15 21:47:39 2022 ] Batch(31/162) done. Loss: 0.6209 lr:0.100000 network_time: 0.0335 +[ Thu Sep 15 21:48:52 2022 ] Batch(131/162) done. Loss: 0.6853 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 21:49:14 2022 ] Eval epoch: 15 +[ Thu Sep 15 21:51:02 2022 ] Mean test loss of 930 batches: 3.4786317348480225. +[ Thu Sep 15 21:51:03 2022 ] Top1: 38.24% +[ Thu Sep 15 21:51:03 2022 ] Top5: 69.38% +[ Thu Sep 15 21:51:04 2022 ] Training epoch: 16 +[ Thu Sep 15 21:51:58 2022 ] Batch(69/162) done. Loss: 0.7813 lr:0.100000 network_time: 0.0280 +[ Thu Sep 15 21:53:05 2022 ] Eval epoch: 16 +[ Thu Sep 15 21:54:54 2022 ] Mean test loss of 930 batches: 3.6086277961730957. +[ Thu Sep 15 21:54:54 2022 ] Top1: 35.35% +[ Thu Sep 15 21:54:55 2022 ] Top5: 64.75% +[ Thu Sep 15 21:54:55 2022 ] Training epoch: 17 +[ Thu Sep 15 21:55:05 2022 ] Batch(7/162) done. Loss: 0.7263 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 21:56:17 2022 ] Batch(107/162) done. Loss: 0.6502 lr:0.100000 network_time: 0.0306 +[ Thu Sep 15 21:56:57 2022 ] Eval epoch: 17 +[ Thu Sep 15 21:58:45 2022 ] Mean test loss of 930 batches: 3.275714635848999. +[ Thu Sep 15 21:58:46 2022 ] Top1: 40.58% +[ Thu Sep 15 21:58:46 2022 ] Top5: 72.06% +[ Thu Sep 15 21:58:46 2022 ] Training epoch: 18 +[ Thu Sep 15 21:59:23 2022 ] Batch(45/162) done. Loss: 0.6945 lr:0.100000 network_time: 0.0292 +[ Thu Sep 15 22:00:36 2022 ] Batch(145/162) done. Loss: 0.4933 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 22:00:48 2022 ] Eval epoch: 18 +[ Thu Sep 15 22:02:37 2022 ] Mean test loss of 930 batches: 2.9244513511657715. 
+[ Thu Sep 15 22:02:37 2022 ] Top1: 40.86% +[ Thu Sep 15 22:02:38 2022 ] Top5: 73.57% +[ Thu Sep 15 22:02:38 2022 ] Training epoch: 19 +[ Thu Sep 15 22:03:42 2022 ] Batch(83/162) done. Loss: 0.3187 lr:0.100000 network_time: 0.0279 +[ Thu Sep 15 22:04:39 2022 ] Eval epoch: 19 +[ Thu Sep 15 22:06:28 2022 ] Mean test loss of 930 batches: 3.0369040966033936. +[ Thu Sep 15 22:06:29 2022 ] Top1: 40.36% +[ Thu Sep 15 22:06:29 2022 ] Top5: 72.71% +[ Thu Sep 15 22:06:29 2022 ] Training epoch: 20 +[ Thu Sep 15 22:06:49 2022 ] Batch(21/162) done. Loss: 0.6907 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 22:08:01 2022 ] Batch(121/162) done. Loss: 0.6564 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 22:08:31 2022 ] Eval epoch: 20 +[ Thu Sep 15 22:10:19 2022 ] Mean test loss of 930 batches: 2.658047914505005. +[ Thu Sep 15 22:10:20 2022 ] Top1: 41.92% +[ Thu Sep 15 22:10:20 2022 ] Top5: 73.82% +[ Thu Sep 15 22:10:20 2022 ] Training epoch: 21 +[ Thu Sep 15 22:11:07 2022 ] Batch(59/162) done. Loss: 0.5150 lr:0.100000 network_time: 0.0284 +[ Thu Sep 15 22:12:20 2022 ] Batch(159/162) done. Loss: 0.6909 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 22:12:22 2022 ] Eval epoch: 21 +[ Thu Sep 15 22:14:11 2022 ] Mean test loss of 930 batches: 2.6667940616607666. +[ Thu Sep 15 22:14:11 2022 ] Top1: 44.15% +[ Thu Sep 15 22:14:12 2022 ] Top5: 75.83% +[ Thu Sep 15 22:14:12 2022 ] Training epoch: 22 +[ Thu Sep 15 22:15:26 2022 ] Batch(97/162) done. Loss: 0.5496 lr:0.100000 network_time: 0.0443 +[ Thu Sep 15 22:16:13 2022 ] Eval epoch: 22 +[ Thu Sep 15 22:18:02 2022 ] Mean test loss of 930 batches: 3.3101744651794434. +[ Thu Sep 15 22:18:03 2022 ] Top1: 38.63% +[ Thu Sep 15 22:18:03 2022 ] Top5: 70.21% +[ Thu Sep 15 22:18:03 2022 ] Training epoch: 23 +[ Thu Sep 15 22:18:33 2022 ] Batch(35/162) done. Loss: 0.4830 lr:0.100000 network_time: 0.0309 +[ Thu Sep 15 22:19:45 2022 ] Batch(135/162) done. 
Loss: 0.2860 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 22:20:05 2022 ] Eval epoch: 23 +[ Thu Sep 15 22:21:53 2022 ] Mean test loss of 930 batches: 2.9660871028900146. +[ Thu Sep 15 22:21:54 2022 ] Top1: 43.51% +[ Thu Sep 15 22:21:54 2022 ] Top5: 74.93% +[ Thu Sep 15 22:21:54 2022 ] Training epoch: 24 +[ Thu Sep 15 22:22:51 2022 ] Batch(73/162) done. Loss: 0.4363 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 22:23:56 2022 ] Eval epoch: 24 +[ Thu Sep 15 22:25:44 2022 ] Mean test loss of 930 batches: 3.1512691974639893. +[ Thu Sep 15 22:25:45 2022 ] Top1: 42.29% +[ Thu Sep 15 22:25:45 2022 ] Top5: 71.11% +[ Thu Sep 15 22:25:46 2022 ] Training epoch: 25 +[ Thu Sep 15 22:25:58 2022 ] Batch(11/162) done. Loss: 0.2536 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 22:27:10 2022 ] Batch(111/162) done. Loss: 0.6440 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 22:27:47 2022 ] Eval epoch: 25 +[ Thu Sep 15 22:29:35 2022 ] Mean test loss of 930 batches: 3.2045881748199463. +[ Thu Sep 15 22:29:36 2022 ] Top1: 41.34% +[ Thu Sep 15 22:29:36 2022 ] Top5: 72.56% +[ Thu Sep 15 22:29:36 2022 ] Training epoch: 26 +[ Thu Sep 15 22:30:16 2022 ] Batch(49/162) done. Loss: 0.3471 lr:0.100000 network_time: 0.0248 +[ Thu Sep 15 22:31:28 2022 ] Batch(149/162) done. Loss: 0.6179 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 22:31:37 2022 ] Eval epoch: 26 +[ Thu Sep 15 22:33:26 2022 ] Mean test loss of 930 batches: 2.7953853607177734. +[ Thu Sep 15 22:33:27 2022 ] Top1: 45.61% +[ Thu Sep 15 22:33:27 2022 ] Top5: 75.82% +[ Thu Sep 15 22:33:28 2022 ] Training epoch: 27 +[ Thu Sep 15 22:34:35 2022 ] Batch(87/162) done. Loss: 0.2963 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 22:35:29 2022 ] Eval epoch: 27 +[ Thu Sep 15 22:37:18 2022 ] Mean test loss of 930 batches: 3.8976290225982666. +[ Thu Sep 15 22:37:18 2022 ] Top1: 37.02% +[ Thu Sep 15 22:37:19 2022 ] Top5: 68.66% +[ Thu Sep 15 22:37:19 2022 ] Training epoch: 28 +[ Thu Sep 15 22:37:41 2022 ] Batch(25/162) done. 
Loss: 0.2610 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 22:38:54 2022 ] Batch(125/162) done. Loss: 0.3643 lr:0.100000 network_time: 0.0256 +[ Thu Sep 15 22:39:20 2022 ] Eval epoch: 28 +[ Thu Sep 15 22:41:09 2022 ] Mean test loss of 930 batches: 3.0510149002075195. +[ Thu Sep 15 22:41:09 2022 ] Top1: 42.95% +[ Thu Sep 15 22:41:10 2022 ] Top5: 71.93% +[ Thu Sep 15 22:41:10 2022 ] Training epoch: 29 +[ Thu Sep 15 22:42:00 2022 ] Batch(63/162) done. Loss: 0.4196 lr:0.100000 network_time: 0.0287 +[ Thu Sep 15 22:43:11 2022 ] Eval epoch: 29 +[ Thu Sep 15 22:45:01 2022 ] Mean test loss of 930 batches: 3.3990092277526855. +[ Thu Sep 15 22:45:01 2022 ] Top1: 42.01% +[ Thu Sep 15 22:45:01 2022 ] Top5: 72.58% +[ Thu Sep 15 22:45:02 2022 ] Training epoch: 30 +[ Thu Sep 15 22:45:07 2022 ] Batch(1/162) done. Loss: 0.4026 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 22:46:19 2022 ] Batch(101/162) done. Loss: 0.2621 lr:0.100000 network_time: 0.0259 +[ Thu Sep 15 22:47:03 2022 ] Eval epoch: 30 +[ Thu Sep 15 22:48:51 2022 ] Mean test loss of 930 batches: 3.6437642574310303. +[ Thu Sep 15 22:48:52 2022 ] Top1: 36.19% +[ Thu Sep 15 22:48:52 2022 ] Top5: 69.46% +[ Thu Sep 15 22:48:53 2022 ] Training epoch: 31 +[ Thu Sep 15 22:49:25 2022 ] Batch(39/162) done. Loss: 0.3941 lr:0.100000 network_time: 0.0312 +[ Thu Sep 15 22:50:38 2022 ] Batch(139/162) done. Loss: 0.3324 lr:0.100000 network_time: 0.0342 +[ Thu Sep 15 22:50:54 2022 ] Eval epoch: 31 +[ Thu Sep 15 22:52:43 2022 ] Mean test loss of 930 batches: 2.982487440109253. +[ Thu Sep 15 22:52:43 2022 ] Top1: 43.66% +[ Thu Sep 15 22:52:44 2022 ] Top5: 75.18% +[ Thu Sep 15 22:52:44 2022 ] Training epoch: 32 +[ Thu Sep 15 22:53:44 2022 ] Batch(77/162) done. Loss: 0.3935 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 22:54:45 2022 ] Eval epoch: 32 +[ Thu Sep 15 22:56:33 2022 ] Mean test loss of 930 batches: 3.3070483207702637. 
+[ Thu Sep 15 22:56:34 2022 ] Top1: 44.26% +[ Thu Sep 15 22:56:34 2022 ] Top5: 73.42% +[ Thu Sep 15 22:56:35 2022 ] Training epoch: 33 +[ Thu Sep 15 22:56:49 2022 ] Batch(15/162) done. Loss: 0.2932 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 22:58:02 2022 ] Batch(115/162) done. Loss: 0.7155 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 22:58:36 2022 ] Eval epoch: 33 +[ Thu Sep 15 23:00:24 2022 ] Mean test loss of 930 batches: 3.00728440284729. +[ Thu Sep 15 23:00:25 2022 ] Top1: 46.30% +[ Thu Sep 15 23:00:25 2022 ] Top5: 76.53% +[ Thu Sep 15 23:00:26 2022 ] Training epoch: 34 +[ Thu Sep 15 23:01:08 2022 ] Batch(53/162) done. Loss: 0.4351 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 23:02:21 2022 ] Batch(153/162) done. Loss: 0.2856 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 23:02:27 2022 ] Eval epoch: 34 +[ Thu Sep 15 23:04:15 2022 ] Mean test loss of 930 batches: 3.851074695587158. +[ Thu Sep 15 23:04:16 2022 ] Top1: 39.96% +[ Thu Sep 15 23:04:16 2022 ] Top5: 68.37% +[ Thu Sep 15 23:04:16 2022 ] Training epoch: 35 +[ Thu Sep 15 23:05:27 2022 ] Batch(91/162) done. Loss: 0.3399 lr:0.100000 network_time: 0.0322 +[ Thu Sep 15 23:06:18 2022 ] Eval epoch: 35 +[ Thu Sep 15 23:08:07 2022 ] Mean test loss of 930 batches: 3.4936296939849854. +[ Thu Sep 15 23:08:07 2022 ] Top1: 39.69% +[ Thu Sep 15 23:08:07 2022 ] Top5: 69.64% +[ Thu Sep 15 23:08:08 2022 ] Training epoch: 36 +[ Thu Sep 15 23:08:33 2022 ] Batch(29/162) done. Loss: 0.2299 lr:0.100000 network_time: 0.0319 +[ Thu Sep 15 23:09:46 2022 ] Batch(129/162) done. Loss: 0.3337 lr:0.100000 network_time: 0.0254 +[ Thu Sep 15 23:10:09 2022 ] Eval epoch: 36 +[ Thu Sep 15 23:11:58 2022 ] Mean test loss of 930 batches: 3.194895029067993. +[ Thu Sep 15 23:11:58 2022 ] Top1: 43.84% +[ Thu Sep 15 23:11:59 2022 ] Top5: 73.81% +[ Thu Sep 15 23:11:59 2022 ] Training epoch: 37 +[ Thu Sep 15 23:12:51 2022 ] Batch(67/162) done. 
Loss: 0.2560 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 23:14:00 2022 ] Eval epoch: 37 +[ Thu Sep 15 23:15:49 2022 ] Mean test loss of 930 batches: 3.691544771194458. +[ Thu Sep 15 23:15:49 2022 ] Top1: 40.55% +[ Thu Sep 15 23:15:50 2022 ] Top5: 71.71% +[ Thu Sep 15 23:15:50 2022 ] Training epoch: 38 +[ Thu Sep 15 23:15:57 2022 ] Batch(5/162) done. Loss: 0.3489 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 23:17:10 2022 ] Batch(105/162) done. Loss: 0.2590 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 23:17:51 2022 ] Eval epoch: 38 +[ Thu Sep 15 23:19:40 2022 ] Mean test loss of 930 batches: 3.0611417293548584. +[ Thu Sep 15 23:19:40 2022 ] Top1: 41.27% +[ Thu Sep 15 23:19:41 2022 ] Top5: 72.73% +[ Thu Sep 15 23:19:41 2022 ] Training epoch: 39 +[ Thu Sep 15 23:20:17 2022 ] Batch(43/162) done. Loss: 0.1300 lr:0.100000 network_time: 0.0293 +[ Thu Sep 15 23:21:29 2022 ] Batch(143/162) done. Loss: 0.2533 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 23:21:42 2022 ] Eval epoch: 39 +[ Thu Sep 15 23:23:32 2022 ] Mean test loss of 930 batches: 2.9703688621520996. +[ Thu Sep 15 23:23:32 2022 ] Top1: 44.27% +[ Thu Sep 15 23:23:32 2022 ] Top5: 74.68% +[ Thu Sep 15 23:23:33 2022 ] Training epoch: 40 +[ Thu Sep 15 23:24:35 2022 ] Batch(81/162) done. Loss: 0.3066 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 23:25:34 2022 ] Eval epoch: 40 +[ Thu Sep 15 23:27:23 2022 ] Mean test loss of 930 batches: 3.1744678020477295. +[ Thu Sep 15 23:27:24 2022 ] Top1: 47.07% +[ Thu Sep 15 23:27:24 2022 ] Top5: 75.50% +[ Thu Sep 15 23:27:25 2022 ] Training epoch: 41 +[ Thu Sep 15 23:27:42 2022 ] Batch(19/162) done. Loss: 0.2931 lr:0.100000 network_time: 0.0363 +[ Thu Sep 15 23:28:55 2022 ] Batch(119/162) done. Loss: 0.5136 lr:0.100000 network_time: 0.0293 +[ Thu Sep 15 23:29:26 2022 ] Eval epoch: 41 +[ Thu Sep 15 23:31:15 2022 ] Mean test loss of 930 batches: 3.718257427215576. 
+[ Thu Sep 15 23:31:15 2022 ] Top1: 37.00% +[ Thu Sep 15 23:31:15 2022 ] Top5: 69.65% +[ Thu Sep 15 23:31:16 2022 ] Training epoch: 42 +[ Thu Sep 15 23:32:01 2022 ] Batch(57/162) done. Loss: 0.2064 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 23:33:13 2022 ] Batch(157/162) done. Loss: 0.2419 lr:0.100000 network_time: 0.0330 +[ Thu Sep 15 23:33:17 2022 ] Eval epoch: 42 +[ Thu Sep 15 23:35:06 2022 ] Mean test loss of 930 batches: 2.983041524887085. +[ Thu Sep 15 23:35:06 2022 ] Top1: 46.51% +[ Thu Sep 15 23:35:06 2022 ] Top5: 75.47% +[ Thu Sep 15 23:35:07 2022 ] Training epoch: 43 +[ Thu Sep 15 23:36:20 2022 ] Batch(95/162) done. Loss: 0.3565 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 23:37:08 2022 ] Eval epoch: 43 +[ Thu Sep 15 23:38:57 2022 ] Mean test loss of 930 batches: 3.6973397731781006. +[ Thu Sep 15 23:38:57 2022 ] Top1: 38.11% +[ Thu Sep 15 23:38:57 2022 ] Top5: 70.05% +[ Thu Sep 15 23:38:58 2022 ] Training epoch: 44 +[ Thu Sep 15 23:39:26 2022 ] Batch(33/162) done. Loss: 0.2877 lr:0.100000 network_time: 0.0264 +[ Thu Sep 15 23:40:38 2022 ] Batch(133/162) done. Loss: 0.3982 lr:0.100000 network_time: 0.0310 +[ Thu Sep 15 23:40:59 2022 ] Eval epoch: 44 +[ Thu Sep 15 23:42:48 2022 ] Mean test loss of 930 batches: 3.175381660461426. +[ Thu Sep 15 23:42:48 2022 ] Top1: 40.97% +[ Thu Sep 15 23:42:48 2022 ] Top5: 73.30% +[ Thu Sep 15 23:42:49 2022 ] Training epoch: 45 +[ Thu Sep 15 23:43:44 2022 ] Batch(71/162) done. Loss: 0.1432 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 23:44:50 2022 ] Eval epoch: 45 +[ Thu Sep 15 23:46:38 2022 ] Mean test loss of 930 batches: 3.595310688018799. +[ Thu Sep 15 23:46:39 2022 ] Top1: 41.20% +[ Thu Sep 15 23:46:39 2022 ] Top5: 71.01% +[ Thu Sep 15 23:46:40 2022 ] Training epoch: 46 +[ Thu Sep 15 23:46:50 2022 ] Batch(9/162) done. Loss: 0.1890 lr:0.100000 network_time: 0.0291 +[ Thu Sep 15 23:48:03 2022 ] Batch(109/162) done. 
Loss: 0.2266 lr:0.100000 network_time: 0.0320 +[ Thu Sep 15 23:48:41 2022 ] Eval epoch: 46 +[ Thu Sep 15 23:50:30 2022 ] Mean test loss of 930 batches: 2.9090216159820557. +[ Thu Sep 15 23:50:30 2022 ] Top1: 46.62% +[ Thu Sep 15 23:50:31 2022 ] Top5: 76.53% +[ Thu Sep 15 23:50:31 2022 ] Training epoch: 47 +[ Thu Sep 15 23:51:09 2022 ] Batch(47/162) done. Loss: 0.0817 lr:0.100000 network_time: 0.0278 +[ Thu Sep 15 23:52:22 2022 ] Batch(147/162) done. Loss: 0.2669 lr:0.100000 network_time: 0.0342 +[ Thu Sep 15 23:52:32 2022 ] Eval epoch: 47 +[ Thu Sep 15 23:54:20 2022 ] Mean test loss of 930 batches: 3.4526474475860596. +[ Thu Sep 15 23:54:21 2022 ] Top1: 42.55% +[ Thu Sep 15 23:54:21 2022 ] Top5: 72.60% +[ Thu Sep 15 23:54:22 2022 ] Training epoch: 48 +[ Thu Sep 15 23:55:27 2022 ] Batch(85/162) done. Loss: 0.2879 lr:0.100000 network_time: 0.0278 +[ Thu Sep 15 23:56:23 2022 ] Eval epoch: 48 +[ Thu Sep 15 23:58:11 2022 ] Mean test loss of 930 batches: 3.220905065536499. +[ Thu Sep 15 23:58:12 2022 ] Top1: 45.22% +[ Thu Sep 15 23:58:12 2022 ] Top5: 73.68% +[ Thu Sep 15 23:58:12 2022 ] Training epoch: 49 +[ Thu Sep 15 23:58:33 2022 ] Batch(23/162) done. Loss: 0.1667 lr:0.100000 network_time: 0.0278 +[ Thu Sep 15 23:59:46 2022 ] Batch(123/162) done. Loss: 0.1133 lr:0.100000 network_time: 0.0319 +[ Fri Sep 16 00:00:13 2022 ] Eval epoch: 49 +[ Fri Sep 16 00:02:02 2022 ] Mean test loss of 930 batches: 3.258099317550659. +[ Fri Sep 16 00:02:02 2022 ] Top1: 44.87% +[ Fri Sep 16 00:02:03 2022 ] Top5: 74.91% +[ Fri Sep 16 00:02:03 2022 ] Training epoch: 50 +[ Fri Sep 16 00:02:51 2022 ] Batch(61/162) done. Loss: 0.1385 lr:0.100000 network_time: 0.0285 +[ Fri Sep 16 00:04:04 2022 ] Batch(161/162) done. Loss: 0.2105 lr:0.100000 network_time: 0.0310 +[ Fri Sep 16 00:04:04 2022 ] Eval epoch: 50 +[ Fri Sep 16 00:05:53 2022 ] Mean test loss of 930 batches: 3.1035125255584717. 
+[ Fri Sep 16 00:05:54 2022 ] Top1: 46.88% +[ Fri Sep 16 00:05:54 2022 ] Top5: 77.57% +[ Fri Sep 16 00:05:54 2022 ] Training epoch: 51 +[ Fri Sep 16 00:07:10 2022 ] Batch(99/162) done. Loss: 0.2091 lr:0.100000 network_time: 0.0273 +[ Fri Sep 16 00:07:55 2022 ] Eval epoch: 51 +[ Fri Sep 16 00:09:44 2022 ] Mean test loss of 930 batches: 2.9262797832489014. +[ Fri Sep 16 00:09:45 2022 ] Top1: 47.21% +[ Fri Sep 16 00:09:45 2022 ] Top5: 76.63% +[ Fri Sep 16 00:09:45 2022 ] Training epoch: 52 +[ Fri Sep 16 00:10:16 2022 ] Batch(37/162) done. Loss: 0.2059 lr:0.100000 network_time: 0.0318 +[ Fri Sep 16 00:11:29 2022 ] Batch(137/162) done. Loss: 0.2774 lr:0.100000 network_time: 0.0283 +[ Fri Sep 16 00:11:46 2022 ] Eval epoch: 52 +[ Fri Sep 16 00:13:35 2022 ] Mean test loss of 930 batches: 3.9441447257995605. +[ Fri Sep 16 00:13:36 2022 ] Top1: 39.44% +[ Fri Sep 16 00:13:36 2022 ] Top5: 69.58% +[ Fri Sep 16 00:13:36 2022 ] Training epoch: 53 +[ Fri Sep 16 00:14:35 2022 ] Batch(75/162) done. Loss: 0.2692 lr:0.100000 network_time: 0.0272 +[ Fri Sep 16 00:15:37 2022 ] Eval epoch: 53 +[ Fri Sep 16 00:17:26 2022 ] Mean test loss of 930 batches: 3.2309017181396484. +[ Fri Sep 16 00:17:26 2022 ] Top1: 46.12% +[ Fri Sep 16 00:17:27 2022 ] Top5: 75.79% +[ Fri Sep 16 00:17:27 2022 ] Training epoch: 54 +[ Fri Sep 16 00:17:40 2022 ] Batch(13/162) done. Loss: 0.1146 lr:0.100000 network_time: 0.0309 +[ Fri Sep 16 00:18:53 2022 ] Batch(113/162) done. Loss: 0.2228 lr:0.100000 network_time: 0.0282 +[ Fri Sep 16 00:19:28 2022 ] Eval epoch: 54 +[ Fri Sep 16 00:21:17 2022 ] Mean test loss of 930 batches: 2.87821364402771. +[ Fri Sep 16 00:21:17 2022 ] Top1: 47.83% +[ Fri Sep 16 00:21:18 2022 ] Top5: 76.74% +[ Fri Sep 16 00:21:18 2022 ] Training epoch: 55 +[ Fri Sep 16 00:21:59 2022 ] Batch(51/162) done. Loss: 0.2163 lr:0.100000 network_time: 0.0272 +[ Fri Sep 16 00:23:12 2022 ] Batch(151/162) done. 
Loss: 0.1489 lr:0.100000 network_time: 0.0260 +[ Fri Sep 16 00:23:19 2022 ] Eval epoch: 55 +[ Fri Sep 16 00:25:08 2022 ] Mean test loss of 930 batches: 3.5154900550842285. +[ Fri Sep 16 00:25:08 2022 ] Top1: 43.40% +[ Fri Sep 16 00:25:08 2022 ] Top5: 71.89% +[ Fri Sep 16 00:25:09 2022 ] Training epoch: 56 +[ Fri Sep 16 00:26:17 2022 ] Batch(89/162) done. Loss: 0.0506 lr:0.100000 network_time: 0.0277 +[ Fri Sep 16 00:27:10 2022 ] Eval epoch: 56 +[ Fri Sep 16 00:28:59 2022 ] Mean test loss of 930 batches: 2.9366331100463867. +[ Fri Sep 16 00:29:00 2022 ] Top1: 48.54% +[ Fri Sep 16 00:29:00 2022 ] Top5: 77.41% +[ Fri Sep 16 00:29:00 2022 ] Training epoch: 57 +[ Fri Sep 16 00:29:24 2022 ] Batch(27/162) done. Loss: 0.1010 lr:0.100000 network_time: 0.0288 +[ Fri Sep 16 00:30:37 2022 ] Batch(127/162) done. Loss: 0.1355 lr:0.100000 network_time: 0.0316 +[ Fri Sep 16 00:31:01 2022 ] Eval epoch: 57 +[ Fri Sep 16 00:32:50 2022 ] Mean test loss of 930 batches: 2.7375779151916504. +[ Fri Sep 16 00:32:51 2022 ] Top1: 49.76% +[ Fri Sep 16 00:32:51 2022 ] Top5: 77.95% +[ Fri Sep 16 00:32:51 2022 ] Training epoch: 58 +[ Fri Sep 16 00:33:43 2022 ] Batch(65/162) done. Loss: 0.1278 lr:0.100000 network_time: 0.0278 +[ Fri Sep 16 00:34:53 2022 ] Eval epoch: 58 +[ Fri Sep 16 00:36:42 2022 ] Mean test loss of 930 batches: 3.172297239303589. +[ Fri Sep 16 00:36:42 2022 ] Top1: 47.46% +[ Fri Sep 16 00:36:42 2022 ] Top5: 75.36% +[ Fri Sep 16 00:36:43 2022 ] Training epoch: 59 +[ Fri Sep 16 00:36:49 2022 ] Batch(3/162) done. Loss: 0.0779 lr:0.100000 network_time: 0.0329 +[ Fri Sep 16 00:38:01 2022 ] Batch(103/162) done. Loss: 0.1418 lr:0.100000 network_time: 0.0267 +[ Fri Sep 16 00:38:44 2022 ] Eval epoch: 59 +[ Fri Sep 16 00:40:32 2022 ] Mean test loss of 930 batches: 2.990083694458008. +[ Fri Sep 16 00:40:33 2022 ] Top1: 47.78% +[ Fri Sep 16 00:40:33 2022 ] Top5: 76.48% +[ Fri Sep 16 00:40:33 2022 ] Training epoch: 60 +[ Fri Sep 16 00:41:07 2022 ] Batch(41/162) done. 
Loss: 0.2073 lr:0.100000 network_time: 0.0301 +[ Fri Sep 16 00:42:20 2022 ] Batch(141/162) done. Loss: 0.0576 lr:0.100000 network_time: 0.0277 +[ Fri Sep 16 00:42:34 2022 ] Eval epoch: 60 +[ Fri Sep 16 00:44:23 2022 ] Mean test loss of 930 batches: 2.775873899459839. +[ Fri Sep 16 00:44:24 2022 ] Top1: 50.88% +[ Fri Sep 16 00:44:24 2022 ] Top5: 78.18% +[ Fri Sep 16 00:44:25 2022 ] Training epoch: 61 +[ Fri Sep 16 00:45:26 2022 ] Batch(79/162) done. Loss: 0.0945 lr:0.010000 network_time: 0.0278 +[ Fri Sep 16 00:46:26 2022 ] Eval epoch: 61 +[ Fri Sep 16 00:48:14 2022 ] Mean test loss of 930 batches: 2.546633243560791. +[ Fri Sep 16 00:48:15 2022 ] Top1: 53.51% +[ Fri Sep 16 00:48:15 2022 ] Top5: 81.01% +[ Fri Sep 16 00:48:16 2022 ] Training epoch: 62 +[ Fri Sep 16 00:48:32 2022 ] Batch(17/162) done. Loss: 0.0171 lr:0.010000 network_time: 0.0321 +[ Fri Sep 16 00:49:44 2022 ] Batch(117/162) done. Loss: 0.0262 lr:0.010000 network_time: 0.0321 +[ Fri Sep 16 00:50:16 2022 ] Eval epoch: 62 +[ Fri Sep 16 00:52:06 2022 ] Mean test loss of 930 batches: 2.4446537494659424. +[ Fri Sep 16 00:52:06 2022 ] Top1: 55.29% +[ Fri Sep 16 00:52:07 2022 ] Top5: 81.98% +[ Fri Sep 16 00:52:07 2022 ] Training epoch: 63 +[ Fri Sep 16 00:52:51 2022 ] Batch(55/162) done. Loss: 0.0085 lr:0.010000 network_time: 0.0275 +[ Fri Sep 16 00:54:03 2022 ] Batch(155/162) done. Loss: 0.0210 lr:0.010000 network_time: 0.0270 +[ Fri Sep 16 00:54:08 2022 ] Eval epoch: 63 +[ Fri Sep 16 00:55:57 2022 ] Mean test loss of 930 batches: 2.4653677940368652. +[ Fri Sep 16 00:55:57 2022 ] Top1: 55.56% +[ Fri Sep 16 00:55:58 2022 ] Top5: 82.16% +[ Fri Sep 16 00:55:58 2022 ] Training epoch: 64 +[ Fri Sep 16 00:57:10 2022 ] Batch(93/162) done. Loss: 0.0419 lr:0.010000 network_time: 0.0277 +[ Fri Sep 16 00:57:59 2022 ] Eval epoch: 64 +[ Fri Sep 16 00:59:48 2022 ] Mean test loss of 930 batches: 2.5171265602111816. 
+[ Fri Sep 16 00:59:48 2022 ] Top1: 54.31% +[ Fri Sep 16 00:59:49 2022 ] Top5: 81.78% +[ Fri Sep 16 00:59:49 2022 ] Training epoch: 65 +[ Fri Sep 16 01:00:15 2022 ] Batch(31/162) done. Loss: 0.0102 lr:0.010000 network_time: 0.0266 +[ Fri Sep 16 01:01:28 2022 ] Batch(131/162) done. Loss: 0.0297 lr:0.010000 network_time: 0.0288 +[ Fri Sep 16 01:01:50 2022 ] Eval epoch: 65 +[ Fri Sep 16 01:03:39 2022 ] Mean test loss of 930 batches: 2.46934175491333. +[ Fri Sep 16 01:03:39 2022 ] Top1: 55.12% +[ Fri Sep 16 01:03:40 2022 ] Top5: 81.94% +[ Fri Sep 16 01:03:40 2022 ] Training epoch: 66 +[ Fri Sep 16 01:04:34 2022 ] Batch(69/162) done. Loss: 0.0395 lr:0.010000 network_time: 0.0301 +[ Fri Sep 16 01:05:41 2022 ] Eval epoch: 66 +[ Fri Sep 16 01:07:31 2022 ] Mean test loss of 930 batches: 2.5224382877349854. +[ Fri Sep 16 01:07:31 2022 ] Top1: 55.47% +[ Fri Sep 16 01:07:32 2022 ] Top5: 81.80% +[ Fri Sep 16 01:07:32 2022 ] Training epoch: 67 +[ Fri Sep 16 01:07:41 2022 ] Batch(7/162) done. Loss: 0.0076 lr:0.010000 network_time: 0.0296 +[ Fri Sep 16 01:08:54 2022 ] Batch(107/162) done. Loss: 0.0097 lr:0.010000 network_time: 0.0279 +[ Fri Sep 16 01:09:33 2022 ] Eval epoch: 67 +[ Fri Sep 16 01:11:22 2022 ] Mean test loss of 930 batches: 2.4902591705322266. +[ Fri Sep 16 01:11:22 2022 ] Top1: 55.35% +[ Fri Sep 16 01:11:23 2022 ] Top5: 81.95% +[ Fri Sep 16 01:11:23 2022 ] Training epoch: 68 +[ Fri Sep 16 01:12:00 2022 ] Batch(45/162) done. Loss: 0.0064 lr:0.010000 network_time: 0.0280 +[ Fri Sep 16 01:13:12 2022 ] Batch(145/162) done. Loss: 0.0057 lr:0.010000 network_time: 0.0269 +[ Fri Sep 16 01:13:24 2022 ] Eval epoch: 68 +[ Fri Sep 16 01:15:13 2022 ] Mean test loss of 930 batches: 2.47910737991333. +[ Fri Sep 16 01:15:13 2022 ] Top1: 55.58% +[ Fri Sep 16 01:15:14 2022 ] Top5: 82.24% +[ Fri Sep 16 01:15:14 2022 ] Training epoch: 69 +[ Fri Sep 16 01:16:18 2022 ] Batch(83/162) done. 
Loss: 0.0091 lr:0.010000 network_time: 0.0289 +[ Fri Sep 16 01:17:15 2022 ] Eval epoch: 69 +[ Fri Sep 16 01:19:04 2022 ] Mean test loss of 930 batches: 2.5265135765075684. +[ Fri Sep 16 01:19:04 2022 ] Top1: 54.84% +[ Fri Sep 16 01:19:05 2022 ] Top5: 81.86% +[ Fri Sep 16 01:19:05 2022 ] Training epoch: 70 +[ Fri Sep 16 01:19:25 2022 ] Batch(21/162) done. Loss: 0.0230 lr:0.010000 network_time: 0.0311 +[ Fri Sep 16 01:20:37 2022 ] Batch(121/162) done. Loss: 0.0078 lr:0.010000 network_time: 0.0326 +[ Fri Sep 16 01:21:06 2022 ] Eval epoch: 70 +[ Fri Sep 16 01:22:55 2022 ] Mean test loss of 930 batches: 2.474184036254883. +[ Fri Sep 16 01:22:55 2022 ] Top1: 55.45% +[ Fri Sep 16 01:22:56 2022 ] Top5: 82.10% +[ Fri Sep 16 01:22:56 2022 ] Training epoch: 71 +[ Fri Sep 16 01:23:43 2022 ] Batch(59/162) done. Loss: 0.0104 lr:0.010000 network_time: 0.0270 +[ Fri Sep 16 01:24:55 2022 ] Batch(159/162) done. Loss: 0.0061 lr:0.010000 network_time: 0.0309 +[ Fri Sep 16 01:24:57 2022 ] Eval epoch: 71 +[ Fri Sep 16 01:26:46 2022 ] Mean test loss of 930 batches: 2.546384572982788. +[ Fri Sep 16 01:26:47 2022 ] Top1: 54.93% +[ Fri Sep 16 01:26:47 2022 ] Top5: 81.87% +[ Fri Sep 16 01:26:47 2022 ] Training epoch: 72 +[ Fri Sep 16 01:28:02 2022 ] Batch(97/162) done. Loss: 0.0070 lr:0.010000 network_time: 0.0311 +[ Fri Sep 16 01:28:48 2022 ] Eval epoch: 72 +[ Fri Sep 16 01:30:38 2022 ] Mean test loss of 930 batches: 2.5100717544555664. +[ Fri Sep 16 01:30:38 2022 ] Top1: 55.77% +[ Fri Sep 16 01:30:38 2022 ] Top5: 82.31% +[ Fri Sep 16 01:30:39 2022 ] Training epoch: 73 +[ Fri Sep 16 01:31:08 2022 ] Batch(35/162) done. Loss: 0.0055 lr:0.010000 network_time: 0.0316 +[ Fri Sep 16 01:32:21 2022 ] Batch(135/162) done. Loss: 0.0062 lr:0.010000 network_time: 0.0302 +[ Fri Sep 16 01:32:40 2022 ] Eval epoch: 73 +[ Fri Sep 16 01:34:28 2022 ] Mean test loss of 930 batches: 2.4956727027893066. 
+[ Fri Sep 16 01:34:29 2022 ] Top1: 56.07% +[ Fri Sep 16 01:34:29 2022 ] Top5: 82.33% +[ Fri Sep 16 01:34:29 2022 ] Training epoch: 74 +[ Fri Sep 16 01:35:26 2022 ] Batch(73/162) done. Loss: 0.0058 lr:0.010000 network_time: 0.0304 +[ Fri Sep 16 01:36:30 2022 ] Eval epoch: 74 +[ Fri Sep 16 01:38:19 2022 ] Mean test loss of 930 batches: 2.4992711544036865. +[ Fri Sep 16 01:38:19 2022 ] Top1: 55.49% +[ Fri Sep 16 01:38:19 2022 ] Top5: 82.19% +[ Fri Sep 16 01:38:20 2022 ] Training epoch: 75 +[ Fri Sep 16 01:38:32 2022 ] Batch(11/162) done. Loss: 0.0068 lr:0.010000 network_time: 0.0274 +[ Fri Sep 16 01:39:44 2022 ] Batch(111/162) done. Loss: 0.0088 lr:0.010000 network_time: 0.0283 +[ Fri Sep 16 01:40:21 2022 ] Eval epoch: 75 +[ Fri Sep 16 01:42:09 2022 ] Mean test loss of 930 batches: 2.5417563915252686. +[ Fri Sep 16 01:42:10 2022 ] Top1: 54.24% +[ Fri Sep 16 01:42:10 2022 ] Top5: 81.63% +[ Fri Sep 16 01:42:10 2022 ] Training epoch: 76 +[ Fri Sep 16 01:42:50 2022 ] Batch(49/162) done. Loss: 0.0113 lr:0.010000 network_time: 0.0317 +[ Fri Sep 16 01:44:02 2022 ] Batch(149/162) done. Loss: 0.0099 lr:0.010000 network_time: 0.0267 +[ Fri Sep 16 01:44:11 2022 ] Eval epoch: 76 +[ Fri Sep 16 01:46:00 2022 ] Mean test loss of 930 batches: 2.5105996131896973. +[ Fri Sep 16 01:46:00 2022 ] Top1: 55.93% +[ Fri Sep 16 01:46:01 2022 ] Top5: 82.12% +[ Fri Sep 16 01:46:01 2022 ] Training epoch: 77 +[ Fri Sep 16 01:47:08 2022 ] Batch(87/162) done. Loss: 0.0104 lr:0.010000 network_time: 0.0266 +[ Fri Sep 16 01:48:02 2022 ] Eval epoch: 77 +[ Fri Sep 16 01:49:50 2022 ] Mean test loss of 930 batches: 2.4512035846710205. +[ Fri Sep 16 01:49:51 2022 ] Top1: 55.85% +[ Fri Sep 16 01:49:51 2022 ] Top5: 82.47% +[ Fri Sep 16 01:49:51 2022 ] Training epoch: 78 +[ Fri Sep 16 01:50:14 2022 ] Batch(25/162) done. Loss: 0.0062 lr:0.010000 network_time: 0.0299 +[ Fri Sep 16 01:51:26 2022 ] Batch(125/162) done. 
Loss: 0.0035 lr:0.010000 network_time: 0.0276 +[ Fri Sep 16 01:51:53 2022 ] Eval epoch: 78 +[ Fri Sep 16 01:53:42 2022 ] Mean test loss of 930 batches: 2.4763002395629883. +[ Fri Sep 16 01:53:42 2022 ] Top1: 55.33% +[ Fri Sep 16 01:53:43 2022 ] Top5: 82.01% +[ Fri Sep 16 01:53:43 2022 ] Training epoch: 79 +[ Fri Sep 16 01:54:33 2022 ] Batch(63/162) done. Loss: 0.0081 lr:0.010000 network_time: 0.0294 +[ Fri Sep 16 01:55:44 2022 ] Eval epoch: 79 +[ Fri Sep 16 01:57:33 2022 ] Mean test loss of 930 batches: 2.486677646636963. +[ Fri Sep 16 01:57:33 2022 ] Top1: 55.50% +[ Fri Sep 16 01:57:34 2022 ] Top5: 82.13% +[ Fri Sep 16 01:57:34 2022 ] Training epoch: 80 +[ Fri Sep 16 01:57:39 2022 ] Batch(1/162) done. Loss: 0.0030 lr:0.010000 network_time: 0.0301 +[ Fri Sep 16 01:58:51 2022 ] Batch(101/162) done. Loss: 0.0107 lr:0.010000 network_time: 0.0270 +[ Fri Sep 16 01:59:35 2022 ] Eval epoch: 80 +[ Fri Sep 16 02:01:24 2022 ] Mean test loss of 930 batches: 2.5012757778167725. +[ Fri Sep 16 02:01:24 2022 ] Top1: 55.39% +[ Fri Sep 16 02:01:25 2022 ] Top5: 82.13% +[ Fri Sep 16 02:01:25 2022 ] Training epoch: 81 +[ Fri Sep 16 02:01:57 2022 ] Batch(39/162) done. Loss: 0.0049 lr:0.001000 network_time: 0.0252 +[ Fri Sep 16 02:03:10 2022 ] Batch(139/162) done. Loss: 0.0029 lr:0.001000 network_time: 0.0298 +[ Fri Sep 16 02:03:26 2022 ] Eval epoch: 81 +[ Fri Sep 16 02:05:14 2022 ] Mean test loss of 930 batches: 2.506415843963623. +[ Fri Sep 16 02:05:14 2022 ] Top1: 55.65% +[ Fri Sep 16 02:05:15 2022 ] Top5: 82.03% +[ Fri Sep 16 02:05:15 2022 ] Training epoch: 82 +[ Fri Sep 16 02:06:15 2022 ] Batch(77/162) done. Loss: 0.0062 lr:0.001000 network_time: 0.0330 +[ Fri Sep 16 02:07:16 2022 ] Eval epoch: 82 +[ Fri Sep 16 02:09:05 2022 ] Mean test loss of 930 batches: 2.457895278930664. +[ Fri Sep 16 02:09:05 2022 ] Top1: 56.04% +[ Fri Sep 16 02:09:06 2022 ] Top5: 82.38% +[ Fri Sep 16 02:09:06 2022 ] Training epoch: 83 +[ Fri Sep 16 02:09:21 2022 ] Batch(15/162) done. 
Loss: 0.0077 lr:0.001000 network_time: 0.0274 +[ Fri Sep 16 02:10:33 2022 ] Batch(115/162) done. Loss: 0.0077 lr:0.001000 network_time: 0.0297 +[ Fri Sep 16 02:11:07 2022 ] Eval epoch: 83 +[ Fri Sep 16 02:12:55 2022 ] Mean test loss of 930 batches: 2.492645025253296. +[ Fri Sep 16 02:12:56 2022 ] Top1: 55.22% +[ Fri Sep 16 02:12:56 2022 ] Top5: 82.08% +[ Fri Sep 16 02:12:57 2022 ] Training epoch: 84 +[ Fri Sep 16 02:13:39 2022 ] Batch(53/162) done. Loss: 0.0060 lr:0.001000 network_time: 0.0295 +[ Fri Sep 16 02:14:52 2022 ] Batch(153/162) done. Loss: 0.0130 lr:0.001000 network_time: 0.0274 +[ Fri Sep 16 02:14:58 2022 ] Eval epoch: 84 +[ Fri Sep 16 02:16:47 2022 ] Mean test loss of 930 batches: 2.51953125. +[ Fri Sep 16 02:16:47 2022 ] Top1: 56.11% +[ Fri Sep 16 02:16:48 2022 ] Top5: 82.06% +[ Fri Sep 16 02:16:48 2022 ] Training epoch: 85 +[ Fri Sep 16 02:17:58 2022 ] Batch(91/162) done. Loss: 0.0052 lr:0.001000 network_time: 0.0278 +[ Fri Sep 16 02:18:49 2022 ] Eval epoch: 85 +[ Fri Sep 16 02:20:38 2022 ] Mean test loss of 930 batches: 2.5209572315216064. +[ Fri Sep 16 02:20:39 2022 ] Top1: 55.48% +[ Fri Sep 16 02:20:39 2022 ] Top5: 82.19% +[ Fri Sep 16 02:20:40 2022 ] Training epoch: 86 +[ Fri Sep 16 02:21:05 2022 ] Batch(29/162) done. Loss: 0.0048 lr:0.001000 network_time: 0.0270 +[ Fri Sep 16 02:22:17 2022 ] Batch(129/162) done. Loss: 0.0119 lr:0.001000 network_time: 0.0269 +[ Fri Sep 16 02:22:41 2022 ] Eval epoch: 86 +[ Fri Sep 16 02:24:30 2022 ] Mean test loss of 930 batches: 2.516221523284912. +[ Fri Sep 16 02:24:30 2022 ] Top1: 54.71% +[ Fri Sep 16 02:24:31 2022 ] Top5: 81.78% +[ Fri Sep 16 02:24:31 2022 ] Training epoch: 87 +[ Fri Sep 16 02:25:24 2022 ] Batch(67/162) done. Loss: 0.0088 lr:0.001000 network_time: 0.0544 +[ Fri Sep 16 02:26:32 2022 ] Eval epoch: 87 +[ Fri Sep 16 02:28:21 2022 ] Mean test loss of 930 batches: 2.474106550216675. 
+[ Fri Sep 16 02:28:21 2022 ] Top1: 56.02% +[ Fri Sep 16 02:28:22 2022 ] Top5: 82.42% +[ Fri Sep 16 02:28:22 2022 ] Training epoch: 88 +[ Fri Sep 16 02:28:29 2022 ] Batch(5/162) done. Loss: 0.0110 lr:0.001000 network_time: 0.0265 +[ Fri Sep 16 02:29:42 2022 ] Batch(105/162) done. Loss: 0.0054 lr:0.001000 network_time: 0.0281 +[ Fri Sep 16 02:30:23 2022 ] Eval epoch: 88 +[ Fri Sep 16 02:32:11 2022 ] Mean test loss of 930 batches: 2.4749226570129395. +[ Fri Sep 16 02:32:12 2022 ] Top1: 55.66% +[ Fri Sep 16 02:32:12 2022 ] Top5: 82.22% +[ Fri Sep 16 02:32:13 2022 ] Training epoch: 89 +[ Fri Sep 16 02:32:48 2022 ] Batch(43/162) done. Loss: 0.0048 lr:0.001000 network_time: 0.0293 +[ Fri Sep 16 02:34:00 2022 ] Batch(143/162) done. Loss: 0.0074 lr:0.001000 network_time: 0.0271 +[ Fri Sep 16 02:34:14 2022 ] Eval epoch: 89 +[ Fri Sep 16 02:36:02 2022 ] Mean test loss of 930 batches: 2.5148983001708984. +[ Fri Sep 16 02:36:02 2022 ] Top1: 54.74% +[ Fri Sep 16 02:36:03 2022 ] Top5: 82.05% +[ Fri Sep 16 02:36:03 2022 ] Training epoch: 90 +[ Fri Sep 16 02:37:06 2022 ] Batch(81/162) done. Loss: 0.0063 lr:0.001000 network_time: 0.0316 +[ Fri Sep 16 02:38:04 2022 ] Eval epoch: 90 +[ Fri Sep 16 02:39:53 2022 ] Mean test loss of 930 batches: 2.497434616088867. +[ Fri Sep 16 02:39:53 2022 ] Top1: 55.47% +[ Fri Sep 16 02:39:53 2022 ] Top5: 82.13% +[ Fri Sep 16 02:39:54 2022 ] Training epoch: 91 +[ Fri Sep 16 02:40:12 2022 ] Batch(19/162) done. Loss: 0.0053 lr:0.001000 network_time: 0.0273 +[ Fri Sep 16 02:41:24 2022 ] Batch(119/162) done. Loss: 0.0271 lr:0.001000 network_time: 0.0275 +[ Fri Sep 16 02:41:55 2022 ] Eval epoch: 91 +[ Fri Sep 16 02:43:43 2022 ] Mean test loss of 930 batches: 2.519994020462036. +[ Fri Sep 16 02:43:44 2022 ] Top1: 55.82% +[ Fri Sep 16 02:43:44 2022 ] Top5: 82.46% +[ Fri Sep 16 02:43:44 2022 ] Training epoch: 92 +[ Fri Sep 16 02:44:30 2022 ] Batch(57/162) done. Loss: 0.0090 lr:0.001000 network_time: 0.0321 +[ Fri Sep 16 02:45:42 2022 ] Batch(157/162) done. 
Loss: 0.0116 lr:0.001000 network_time: 0.0258 +[ Fri Sep 16 02:45:45 2022 ] Eval epoch: 92 +[ Fri Sep 16 02:47:34 2022 ] Mean test loss of 930 batches: 2.5442798137664795. +[ Fri Sep 16 02:47:34 2022 ] Top1: 54.29% +[ Fri Sep 16 02:47:35 2022 ] Top5: 81.77% +[ Fri Sep 16 02:47:35 2022 ] Training epoch: 93 +[ Fri Sep 16 02:48:48 2022 ] Batch(95/162) done. Loss: 0.0067 lr:0.001000 network_time: 0.0278 +[ Fri Sep 16 02:49:36 2022 ] Eval epoch: 93 +[ Fri Sep 16 02:51:24 2022 ] Mean test loss of 930 batches: 2.500120162963867. +[ Fri Sep 16 02:51:25 2022 ] Top1: 55.30% +[ Fri Sep 16 02:51:25 2022 ] Top5: 82.14% +[ Fri Sep 16 02:51:25 2022 ] Training epoch: 94 +[ Fri Sep 16 02:51:54 2022 ] Batch(33/162) done. Loss: 0.0062 lr:0.001000 network_time: 0.0301 +[ Fri Sep 16 02:53:06 2022 ] Batch(133/162) done. Loss: 0.0076 lr:0.001000 network_time: 0.0275 +[ Fri Sep 16 02:53:27 2022 ] Eval epoch: 94 +[ Fri Sep 16 02:55:16 2022 ] Mean test loss of 930 batches: 2.46699595451355. +[ Fri Sep 16 02:55:16 2022 ] Top1: 56.12% +[ Fri Sep 16 02:55:17 2022 ] Top5: 82.52% +[ Fri Sep 16 02:55:17 2022 ] Training epoch: 95 +[ Fri Sep 16 02:56:12 2022 ] Batch(71/162) done. Loss: 0.0181 lr:0.001000 network_time: 0.0295 +[ Fri Sep 16 02:57:18 2022 ] Eval epoch: 95 +[ Fri Sep 16 02:59:07 2022 ] Mean test loss of 930 batches: 2.488225221633911. +[ Fri Sep 16 02:59:07 2022 ] Top1: 55.72% +[ Fri Sep 16 02:59:07 2022 ] Top5: 82.19% +[ Fri Sep 16 02:59:08 2022 ] Training epoch: 96 +[ Fri Sep 16 02:59:18 2022 ] Batch(9/162) done. Loss: 0.0066 lr:0.001000 network_time: 0.0255 +[ Fri Sep 16 03:00:31 2022 ] Batch(109/162) done. Loss: 0.0051 lr:0.001000 network_time: 0.0260 +[ Fri Sep 16 03:01:09 2022 ] Eval epoch: 96 +[ Fri Sep 16 03:02:57 2022 ] Mean test loss of 930 batches: 2.567333936691284. +[ Fri Sep 16 03:02:58 2022 ] Top1: 53.87% +[ Fri Sep 16 03:02:58 2022 ] Top5: 81.54% +[ Fri Sep 16 03:02:59 2022 ] Training epoch: 97 +[ Fri Sep 16 03:03:36 2022 ] Batch(47/162) done. 
Loss: 0.0046 lr:0.001000 network_time: 0.0237 +[ Fri Sep 16 03:04:49 2022 ] Batch(147/162) done. Loss: 0.0059 lr:0.001000 network_time: 0.0283 +[ Fri Sep 16 03:04:59 2022 ] Eval epoch: 97 +[ Fri Sep 16 03:06:48 2022 ] Mean test loss of 930 batches: 2.4648122787475586. +[ Fri Sep 16 03:06:48 2022 ] Top1: 55.67% +[ Fri Sep 16 03:06:49 2022 ] Top5: 82.32% +[ Fri Sep 16 03:06:49 2022 ] Training epoch: 98 +[ Fri Sep 16 03:07:55 2022 ] Batch(85/162) done. Loss: 0.0076 lr:0.001000 network_time: 0.0262 +[ Fri Sep 16 03:08:50 2022 ] Eval epoch: 98 +[ Fri Sep 16 03:10:38 2022 ] Mean test loss of 930 batches: 2.4949991703033447. +[ Fri Sep 16 03:10:39 2022 ] Top1: 55.13% +[ Fri Sep 16 03:10:39 2022 ] Top5: 82.00% +[ Fri Sep 16 03:10:40 2022 ] Training epoch: 99 +[ Fri Sep 16 03:11:00 2022 ] Batch(23/162) done. Loss: 0.0024 lr:0.001000 network_time: 0.0289 +[ Fri Sep 16 03:12:13 2022 ] Batch(123/162) done. Loss: 0.0083 lr:0.001000 network_time: 0.0261 +[ Fri Sep 16 03:12:41 2022 ] Eval epoch: 99 +[ Fri Sep 16 03:14:29 2022 ] Mean test loss of 930 batches: 2.5010464191436768. +[ Fri Sep 16 03:14:29 2022 ] Top1: 54.66% +[ Fri Sep 16 03:14:30 2022 ] Top5: 81.95% +[ Fri Sep 16 03:14:30 2022 ] Training epoch: 100 +[ Fri Sep 16 03:15:18 2022 ] Batch(61/162) done. Loss: 0.0057 lr:0.001000 network_time: 0.0331 +[ Fri Sep 16 03:16:31 2022 ] Batch(161/162) done. Loss: 0.0055 lr:0.001000 network_time: 0.0280 +[ Fri Sep 16 03:16:31 2022 ] Eval epoch: 100 +[ Fri Sep 16 03:18:20 2022 ] Mean test loss of 930 batches: 2.5131969451904297. +[ Fri Sep 16 03:18:20 2022 ] Top1: 55.20% +[ Fri Sep 16 03:18:21 2022 ] Top5: 81.94% +[ Fri Sep 16 03:18:21 2022 ] Training epoch: 101 +[ Fri Sep 16 03:19:37 2022 ] Batch(99/162) done. Loss: 0.0046 lr:0.000100 network_time: 0.0303 +[ Fri Sep 16 03:20:22 2022 ] Eval epoch: 101 +[ Fri Sep 16 03:22:11 2022 ] Mean test loss of 930 batches: 2.4665334224700928. 
+[ Fri Sep 16 03:22:11 2022 ] Top1: 55.54% +[ Fri Sep 16 03:22:12 2022 ] Top5: 82.38% +[ Fri Sep 16 03:22:12 2022 ] Training epoch: 102 +[ Fri Sep 16 03:22:43 2022 ] Batch(37/162) done. Loss: 0.0060 lr:0.000100 network_time: 0.0228 +[ Fri Sep 16 03:23:55 2022 ] Batch(137/162) done. Loss: 0.0044 lr:0.000100 network_time: 0.0312 +[ Fri Sep 16 03:24:13 2022 ] Eval epoch: 102 +[ Fri Sep 16 03:26:01 2022 ] Mean test loss of 930 batches: 2.4502739906311035. +[ Fri Sep 16 03:26:02 2022 ] Top1: 55.90% +[ Fri Sep 16 03:26:02 2022 ] Top5: 82.50% +[ Fri Sep 16 03:26:02 2022 ] Training epoch: 103 +[ Fri Sep 16 03:27:01 2022 ] Batch(75/162) done. Loss: 0.0103 lr:0.000100 network_time: 0.0273 +[ Fri Sep 16 03:28:04 2022 ] Eval epoch: 103 +[ Fri Sep 16 03:29:53 2022 ] Mean test loss of 930 batches: 2.5235326290130615. +[ Fri Sep 16 03:29:53 2022 ] Top1: 55.81% +[ Fri Sep 16 03:29:54 2022 ] Top5: 82.14% +[ Fri Sep 16 03:29:54 2022 ] Training epoch: 104 +[ Fri Sep 16 03:30:07 2022 ] Batch(13/162) done. Loss: 0.0055 lr:0.000100 network_time: 0.0256 +[ Fri Sep 16 03:31:20 2022 ] Batch(113/162) done. Loss: 0.0029 lr:0.000100 network_time: 0.0274 +[ Fri Sep 16 03:31:55 2022 ] Eval epoch: 104 +[ Fri Sep 16 03:33:43 2022 ] Mean test loss of 930 batches: 2.4924840927124023. +[ Fri Sep 16 03:33:44 2022 ] Top1: 55.46% +[ Fri Sep 16 03:33:44 2022 ] Top5: 82.16% +[ Fri Sep 16 03:33:45 2022 ] Training epoch: 105 +[ Fri Sep 16 03:34:26 2022 ] Batch(51/162) done. Loss: 0.0061 lr:0.000100 network_time: 0.0315 +[ Fri Sep 16 03:35:38 2022 ] Batch(151/162) done. Loss: 0.0072 lr:0.000100 network_time: 0.0268 +[ Fri Sep 16 03:35:46 2022 ] Eval epoch: 105 +[ Fri Sep 16 03:37:34 2022 ] Mean test loss of 930 batches: 2.515953302383423. +[ Fri Sep 16 03:37:35 2022 ] Top1: 55.07% +[ Fri Sep 16 03:37:35 2022 ] Top5: 82.06% +[ Fri Sep 16 03:37:35 2022 ] Training epoch: 106 +[ Fri Sep 16 03:38:44 2022 ] Batch(89/162) done. 
Loss: 0.0054 lr:0.000100 network_time: 0.0260 +[ Fri Sep 16 03:39:36 2022 ] Eval epoch: 106 +[ Fri Sep 16 03:41:25 2022 ] Mean test loss of 930 batches: 2.479599714279175. +[ Fri Sep 16 03:41:25 2022 ] Top1: 56.04% +[ Fri Sep 16 03:41:26 2022 ] Top5: 82.38% +[ Fri Sep 16 03:41:26 2022 ] Training epoch: 107 +[ Fri Sep 16 03:41:49 2022 ] Batch(27/162) done. Loss: 0.0047 lr:0.000100 network_time: 0.0290 +[ Fri Sep 16 03:43:02 2022 ] Batch(127/162) done. Loss: 0.0053 lr:0.000100 network_time: 0.0265 +[ Fri Sep 16 03:43:27 2022 ] Eval epoch: 107 +[ Fri Sep 16 03:45:15 2022 ] Mean test loss of 930 batches: 2.5122196674346924. +[ Fri Sep 16 03:45:15 2022 ] Top1: 55.54% +[ Fri Sep 16 03:45:16 2022 ] Top5: 82.23% +[ Fri Sep 16 03:45:16 2022 ] Training epoch: 108 +[ Fri Sep 16 03:46:07 2022 ] Batch(65/162) done. Loss: 0.0042 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 03:47:17 2022 ] Eval epoch: 108 +[ Fri Sep 16 03:49:06 2022 ] Mean test loss of 930 batches: 2.4967191219329834. +[ Fri Sep 16 03:49:06 2022 ] Top1: 55.90% +[ Fri Sep 16 03:49:07 2022 ] Top5: 82.29% +[ Fri Sep 16 03:49:07 2022 ] Training epoch: 109 +[ Fri Sep 16 03:49:13 2022 ] Batch(3/162) done. Loss: 0.0055 lr:0.000100 network_time: 0.0331 +[ Fri Sep 16 03:50:26 2022 ] Batch(103/162) done. Loss: 0.0072 lr:0.000100 network_time: 0.0273 +[ Fri Sep 16 03:51:08 2022 ] Eval epoch: 109 +[ Fri Sep 16 03:52:56 2022 ] Mean test loss of 930 batches: 2.4798450469970703. +[ Fri Sep 16 03:52:57 2022 ] Top1: 55.22% +[ Fri Sep 16 03:52:57 2022 ] Top5: 82.04% +[ Fri Sep 16 03:52:58 2022 ] Training epoch: 110 +[ Fri Sep 16 03:53:31 2022 ] Batch(41/162) done. Loss: 0.0102 lr:0.000100 network_time: 0.0264 +[ Fri Sep 16 03:54:44 2022 ] Batch(141/162) done. Loss: 0.0033 lr:0.000100 network_time: 0.0272 +[ Fri Sep 16 03:54:58 2022 ] Eval epoch: 110 +[ Fri Sep 16 03:56:47 2022 ] Mean test loss of 930 batches: 2.487183094024658. 
+[ Fri Sep 16 03:56:47 2022 ] Top1: 55.46% +[ Fri Sep 16 03:56:48 2022 ] Top5: 82.16% +[ Fri Sep 16 03:56:48 2022 ] Training epoch: 111 +[ Fri Sep 16 03:57:49 2022 ] Batch(79/162) done. Loss: 0.0044 lr:0.000100 network_time: 0.0267 +[ Fri Sep 16 03:58:49 2022 ] Eval epoch: 111 +[ Fri Sep 16 04:00:37 2022 ] Mean test loss of 930 batches: 2.580383539199829. +[ Fri Sep 16 04:00:38 2022 ] Top1: 54.04% +[ Fri Sep 16 04:00:38 2022 ] Top5: 81.62% +[ Fri Sep 16 04:00:39 2022 ] Training epoch: 112 +[ Fri Sep 16 04:00:55 2022 ] Batch(17/162) done. Loss: 0.0042 lr:0.000100 network_time: 0.0292 +[ Fri Sep 16 04:02:08 2022 ] Batch(117/162) done. Loss: 0.0080 lr:0.000100 network_time: 0.0281 +[ Fri Sep 16 04:02:40 2022 ] Eval epoch: 112 +[ Fri Sep 16 04:04:28 2022 ] Mean test loss of 930 batches: 2.472482442855835. +[ Fri Sep 16 04:04:29 2022 ] Top1: 56.17% +[ Fri Sep 16 04:04:29 2022 ] Top5: 82.30% +[ Fri Sep 16 04:04:29 2022 ] Training epoch: 113 +[ Fri Sep 16 04:05:13 2022 ] Batch(55/162) done. Loss: 0.0036 lr:0.000100 network_time: 0.0282 +[ Fri Sep 16 04:06:26 2022 ] Batch(155/162) done. Loss: 0.0104 lr:0.000100 network_time: 0.0279 +[ Fri Sep 16 04:06:31 2022 ] Eval epoch: 113 +[ Fri Sep 16 04:08:19 2022 ] Mean test loss of 930 batches: 2.5517749786376953. +[ Fri Sep 16 04:08:20 2022 ] Top1: 54.01% +[ Fri Sep 16 04:08:20 2022 ] Top5: 81.42% +[ Fri Sep 16 04:08:21 2022 ] Training epoch: 114 +[ Fri Sep 16 04:09:32 2022 ] Batch(93/162) done. Loss: 0.0054 lr:0.000100 network_time: 0.0269 +[ Fri Sep 16 04:10:22 2022 ] Eval epoch: 114 +[ Fri Sep 16 04:12:10 2022 ] Mean test loss of 930 batches: 2.4713258743286133. +[ Fri Sep 16 04:12:10 2022 ] Top1: 55.90% +[ Fri Sep 16 04:12:11 2022 ] Top5: 82.37% +[ Fri Sep 16 04:12:11 2022 ] Training epoch: 115 +[ Fri Sep 16 04:12:38 2022 ] Batch(31/162) done. Loss: 0.0062 lr:0.000100 network_time: 0.0263 +[ Fri Sep 16 04:13:50 2022 ] Batch(131/162) done. 
Loss: 0.0058 lr:0.000100 network_time: 0.0277 +[ Fri Sep 16 04:14:12 2022 ] Eval epoch: 115 +[ Fri Sep 16 04:16:01 2022 ] Mean test loss of 930 batches: 2.5101470947265625. +[ Fri Sep 16 04:16:02 2022 ] Top1: 54.96% +[ Fri Sep 16 04:16:02 2022 ] Top5: 81.76% +[ Fri Sep 16 04:16:02 2022 ] Training epoch: 116 +[ Fri Sep 16 04:16:57 2022 ] Batch(69/162) done. Loss: 0.0025 lr:0.000100 network_time: 0.0270 +[ Fri Sep 16 04:18:04 2022 ] Eval epoch: 116 +[ Fri Sep 16 04:19:52 2022 ] Mean test loss of 930 batches: 2.466994047164917. +[ Fri Sep 16 04:19:52 2022 ] Top1: 55.87% +[ Fri Sep 16 04:19:53 2022 ] Top5: 82.43% +[ Fri Sep 16 04:19:53 2022 ] Training epoch: 117 +[ Fri Sep 16 04:20:02 2022 ] Batch(7/162) done. Loss: 0.0102 lr:0.000100 network_time: 0.0335 +[ Fri Sep 16 04:21:14 2022 ] Batch(107/162) done. Loss: 0.0040 lr:0.000100 network_time: 0.0267 +[ Fri Sep 16 04:21:54 2022 ] Eval epoch: 117 +[ Fri Sep 16 04:23:42 2022 ] Mean test loss of 930 batches: 2.4645636081695557. +[ Fri Sep 16 04:23:43 2022 ] Top1: 56.02% +[ Fri Sep 16 04:23:43 2022 ] Top5: 82.30% +[ Fri Sep 16 04:23:43 2022 ] Training epoch: 118 +[ Fri Sep 16 04:24:20 2022 ] Batch(45/162) done. Loss: 0.0073 lr:0.000100 network_time: 0.0287 +[ Fri Sep 16 04:25:33 2022 ] Batch(145/162) done. Loss: 0.0072 lr:0.000100 network_time: 0.0273 +[ Fri Sep 16 04:25:44 2022 ] Eval epoch: 118 +[ Fri Sep 16 04:27:33 2022 ] Mean test loss of 930 batches: 2.5144882202148438. +[ Fri Sep 16 04:27:34 2022 ] Top1: 55.68% +[ Fri Sep 16 04:27:34 2022 ] Top5: 82.14% +[ Fri Sep 16 04:27:34 2022 ] Training epoch: 119 +[ Fri Sep 16 04:28:38 2022 ] Batch(83/162) done. Loss: 0.0052 lr:0.000100 network_time: 0.0334 +[ Fri Sep 16 04:29:35 2022 ] Eval epoch: 119 +[ Fri Sep 16 04:31:24 2022 ] Mean test loss of 930 batches: 2.436511993408203. +[ Fri Sep 16 04:31:25 2022 ] Top1: 56.25% +[ Fri Sep 16 04:31:25 2022 ] Top5: 82.43% +[ Fri Sep 16 04:31:25 2022 ] Training epoch: 120 +[ Fri Sep 16 04:31:45 2022 ] Batch(21/162) done. 
Loss: 0.0137 lr:0.000100 network_time: 0.0301 +[ Fri Sep 16 04:32:57 2022 ] Batch(121/162) done. Loss: 0.0038 lr:0.000100 network_time: 0.0281 +[ Fri Sep 16 04:33:26 2022 ] Eval epoch: 120 +[ Fri Sep 16 04:35:15 2022 ] Mean test loss of 930 batches: 2.4610888957977295. +[ Fri Sep 16 04:35:15 2022 ] Top1: 55.96% +[ Fri Sep 16 04:35:16 2022 ] Top5: 82.46% +[ Fri Sep 16 04:35:16 2022 ] Training epoch: 121 +[ Fri Sep 16 04:36:03 2022 ] Batch(59/162) done. Loss: 0.0081 lr:0.000100 network_time: 0.0287 +[ Fri Sep 16 04:37:16 2022 ] Batch(159/162) done. Loss: 0.0052 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 04:37:17 2022 ] Eval epoch: 121 +[ Fri Sep 16 04:39:06 2022 ] Mean test loss of 930 batches: 2.50304913520813. +[ Fri Sep 16 04:39:06 2022 ] Top1: 55.96% +[ Fri Sep 16 04:39:07 2022 ] Top5: 82.19% +[ Fri Sep 16 04:39:07 2022 ] Training epoch: 122 +[ Fri Sep 16 04:40:21 2022 ] Batch(97/162) done. Loss: 0.0044 lr:0.000100 network_time: 0.0331 +[ Fri Sep 16 04:41:08 2022 ] Eval epoch: 122 +[ Fri Sep 16 04:42:57 2022 ] Mean test loss of 930 batches: 2.4955897331237793. +[ Fri Sep 16 04:42:57 2022 ] Top1: 55.60% +[ Fri Sep 16 04:42:58 2022 ] Top5: 82.18% +[ Fri Sep 16 04:42:58 2022 ] Training epoch: 123 +[ Fri Sep 16 04:43:27 2022 ] Batch(35/162) done. Loss: 0.0118 lr:0.000100 network_time: 0.0309 +[ Fri Sep 16 04:44:40 2022 ] Batch(135/162) done. Loss: 0.0049 lr:0.000100 network_time: 0.0266 +[ Fri Sep 16 04:44:59 2022 ] Eval epoch: 123 +[ Fri Sep 16 04:46:47 2022 ] Mean test loss of 930 batches: 2.523346185684204. +[ Fri Sep 16 04:46:48 2022 ] Top1: 54.91% +[ Fri Sep 16 04:46:48 2022 ] Top5: 81.91% +[ Fri Sep 16 04:46:48 2022 ] Training epoch: 124 +[ Fri Sep 16 04:47:45 2022 ] Batch(73/162) done. Loss: 0.0091 lr:0.000100 network_time: 0.0302 +[ Fri Sep 16 04:48:50 2022 ] Eval epoch: 124 +[ Fri Sep 16 04:50:39 2022 ] Mean test loss of 930 batches: 2.455005168914795. 
+[ Fri Sep 16 04:50:40 2022 ] Top1: 56.26% +[ Fri Sep 16 04:50:40 2022 ] Top5: 82.41% +[ Fri Sep 16 04:50:40 2022 ] Training epoch: 125 +[ Fri Sep 16 04:50:52 2022 ] Batch(11/162) done. Loss: 0.0039 lr:0.000100 network_time: 0.0295 +[ Fri Sep 16 04:52:05 2022 ] Batch(111/162) done. Loss: 0.0035 lr:0.000100 network_time: 0.0289 +[ Fri Sep 16 04:52:42 2022 ] Eval epoch: 125 +[ Fri Sep 16 04:54:30 2022 ] Mean test loss of 930 batches: 2.5138392448425293. +[ Fri Sep 16 04:54:30 2022 ] Top1: 55.17% +[ Fri Sep 16 04:54:31 2022 ] Top5: 82.06% +[ Fri Sep 16 04:54:31 2022 ] Training epoch: 126 +[ Fri Sep 16 04:55:11 2022 ] Batch(49/162) done. Loss: 0.0060 lr:0.000100 network_time: 0.0278 +[ Fri Sep 16 04:56:23 2022 ] Batch(149/162) done. Loss: 0.0061 lr:0.000100 network_time: 0.0257 +[ Fri Sep 16 04:56:32 2022 ] Eval epoch: 126 +[ Fri Sep 16 04:58:20 2022 ] Mean test loss of 930 batches: 2.4895901679992676. +[ Fri Sep 16 04:58:21 2022 ] Top1: 55.15% +[ Fri Sep 16 04:58:21 2022 ] Top5: 82.06% +[ Fri Sep 16 04:58:22 2022 ] Training epoch: 127 +[ Fri Sep 16 04:59:29 2022 ] Batch(87/162) done. Loss: 0.0051 lr:0.000100 network_time: 0.0358 +[ Fri Sep 16 05:00:22 2022 ] Eval epoch: 127 +[ Fri Sep 16 05:02:11 2022 ] Mean test loss of 930 batches: 2.492002010345459. +[ Fri Sep 16 05:02:11 2022 ] Top1: 55.78% +[ Fri Sep 16 05:02:12 2022 ] Top5: 82.30% +[ Fri Sep 16 05:02:12 2022 ] Training epoch: 128 +[ Fri Sep 16 05:02:34 2022 ] Batch(25/162) done. Loss: 0.0043 lr:0.000100 network_time: 0.0336 +[ Fri Sep 16 05:03:47 2022 ] Batch(125/162) done. Loss: 0.0035 lr:0.000100 network_time: 0.0265 +[ Fri Sep 16 05:04:13 2022 ] Eval epoch: 128 +[ Fri Sep 16 05:06:02 2022 ] Mean test loss of 930 batches: 2.4913203716278076. +[ Fri Sep 16 05:06:02 2022 ] Top1: 55.61% +[ Fri Sep 16 05:06:02 2022 ] Top5: 82.05% +[ Fri Sep 16 05:06:03 2022 ] Training epoch: 129 +[ Fri Sep 16 05:06:53 2022 ] Batch(63/162) done. 
Loss: 0.0023 lr:0.000100 network_time: 0.0286 +[ Fri Sep 16 05:08:04 2022 ] Eval epoch: 129 +[ Fri Sep 16 05:09:52 2022 ] Mean test loss of 930 batches: 2.4635448455810547. +[ Fri Sep 16 05:09:53 2022 ] Top1: 55.69% +[ Fri Sep 16 05:09:53 2022 ] Top5: 82.30% +[ Fri Sep 16 05:09:53 2022 ] Training epoch: 130 +[ Fri Sep 16 05:09:58 2022 ] Batch(1/162) done. Loss: 0.0034 lr:0.000100 network_time: 0.0330 +[ Fri Sep 16 05:11:11 2022 ] Batch(101/162) done. Loss: 0.0111 lr:0.000100 network_time: 0.0464 +[ Fri Sep 16 05:11:55 2022 ] Eval epoch: 130 +[ Fri Sep 16 05:13:43 2022 ] Mean test loss of 930 batches: 2.4768619537353516. +[ Fri Sep 16 05:13:44 2022 ] Top1: 56.12% +[ Fri Sep 16 05:13:44 2022 ] Top5: 82.42% +[ Fri Sep 16 05:13:44 2022 ] Training epoch: 131 +[ Fri Sep 16 05:14:17 2022 ] Batch(39/162) done. Loss: 0.0049 lr:0.000100 network_time: 0.0280 +[ Fri Sep 16 05:15:29 2022 ] Batch(139/162) done. Loss: 0.0040 lr:0.000100 network_time: 0.0272 +[ Fri Sep 16 05:15:46 2022 ] Eval epoch: 131 +[ Fri Sep 16 05:17:34 2022 ] Mean test loss of 930 batches: 2.473442554473877. +[ Fri Sep 16 05:17:34 2022 ] Top1: 56.14% +[ Fri Sep 16 05:17:35 2022 ] Top5: 82.52% +[ Fri Sep 16 05:17:35 2022 ] Training epoch: 132 +[ Fri Sep 16 05:18:35 2022 ] Batch(77/162) done. Loss: 0.0044 lr:0.000100 network_time: 0.0265 +[ Fri Sep 16 05:19:36 2022 ] Eval epoch: 132 +[ Fri Sep 16 05:21:24 2022 ] Mean test loss of 930 batches: 2.4647676944732666. +[ Fri Sep 16 05:21:25 2022 ] Top1: 55.30% +[ Fri Sep 16 05:21:25 2022 ] Top5: 82.25% +[ Fri Sep 16 05:21:26 2022 ] Training epoch: 133 +[ Fri Sep 16 05:21:40 2022 ] Batch(15/162) done. Loss: 0.0071 lr:0.000100 network_time: 0.0346 +[ Fri Sep 16 05:22:53 2022 ] Batch(115/162) done. Loss: 0.0053 lr:0.000100 network_time: 0.0266 +[ Fri Sep 16 05:23:27 2022 ] Eval epoch: 133 +[ Fri Sep 16 05:25:15 2022 ] Mean test loss of 930 batches: 2.603180408477783. 
+[ Fri Sep 16 05:25:16 2022 ] Top1: 53.21% +[ Fri Sep 16 05:25:16 2022 ] Top5: 81.03% +[ Fri Sep 16 05:25:16 2022 ] Training epoch: 134 +[ Fri Sep 16 05:25:59 2022 ] Batch(53/162) done. Loss: 0.0043 lr:0.000100 network_time: 0.0274 +[ Fri Sep 16 05:27:12 2022 ] Batch(153/162) done. Loss: 0.0059 lr:0.000100 network_time: 0.0275 +[ Fri Sep 16 05:27:18 2022 ] Eval epoch: 134 +[ Fri Sep 16 05:29:06 2022 ] Mean test loss of 930 batches: 2.610170364379883. +[ Fri Sep 16 05:29:06 2022 ] Top1: 53.38% +[ Fri Sep 16 05:29:07 2022 ] Top5: 81.18% +[ Fri Sep 16 05:29:07 2022 ] Training epoch: 135 +[ Fri Sep 16 05:30:17 2022 ] Batch(91/162) done. Loss: 0.0038 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 05:31:08 2022 ] Eval epoch: 135 +[ Fri Sep 16 05:32:57 2022 ] Mean test loss of 930 batches: 2.456367254257202. +[ Fri Sep 16 05:32:57 2022 ] Top1: 55.63% +[ Fri Sep 16 05:32:58 2022 ] Top5: 82.31% +[ Fri Sep 16 05:32:58 2022 ] Training epoch: 136 +[ Fri Sep 16 05:33:23 2022 ] Batch(29/162) done. Loss: 0.0082 lr:0.000100 network_time: 0.0315 +[ Fri Sep 16 05:34:36 2022 ] Batch(129/162) done. Loss: 0.0039 lr:0.000100 network_time: 0.0327 +[ Fri Sep 16 05:34:59 2022 ] Eval epoch: 136 +[ Fri Sep 16 05:36:48 2022 ] Mean test loss of 930 batches: 2.4485137462615967. +[ Fri Sep 16 05:36:48 2022 ] Top1: 56.15% +[ Fri Sep 16 05:36:49 2022 ] Top5: 82.54% +[ Fri Sep 16 05:36:49 2022 ] Training epoch: 137 +[ Fri Sep 16 05:37:42 2022 ] Batch(67/162) done. Loss: 0.0027 lr:0.000100 network_time: 0.0256 +[ Fri Sep 16 05:38:50 2022 ] Eval epoch: 137 +[ Fri Sep 16 05:40:39 2022 ] Mean test loss of 930 batches: 2.4866716861724854. +[ Fri Sep 16 05:40:40 2022 ] Top1: 55.65% +[ Fri Sep 16 05:40:40 2022 ] Top5: 82.24% +[ Fri Sep 16 05:40:41 2022 ] Training epoch: 138 +[ Fri Sep 16 05:40:48 2022 ] Batch(5/162) done. Loss: 0.0039 lr:0.000100 network_time: 0.0279 +[ Fri Sep 16 05:42:01 2022 ] Batch(105/162) done. 
Loss: 0.0069 lr:0.000100 network_time: 0.0314 +[ Fri Sep 16 05:42:42 2022 ] Eval epoch: 138 +[ Fri Sep 16 05:44:30 2022 ] Mean test loss of 930 batches: 2.444272518157959. +[ Fri Sep 16 05:44:31 2022 ] Top1: 56.21% +[ Fri Sep 16 05:44:31 2022 ] Top5: 82.68% +[ Fri Sep 16 05:44:31 2022 ] Training epoch: 139 +[ Fri Sep 16 05:45:07 2022 ] Batch(43/162) done. Loss: 0.0072 lr:0.000100 network_time: 0.0277 +[ Fri Sep 16 05:46:19 2022 ] Batch(143/162) done. Loss: 0.0030 lr:0.000100 network_time: 0.0275 +[ Fri Sep 16 05:46:33 2022 ] Eval epoch: 139 +[ Fri Sep 16 05:48:21 2022 ] Mean test loss of 930 batches: 2.5009186267852783. +[ Fri Sep 16 05:48:22 2022 ] Top1: 55.14% +[ Fri Sep 16 05:48:22 2022 ] Top5: 81.98% +[ Fri Sep 16 05:48:22 2022 ] Training epoch: 140 +[ Fri Sep 16 05:49:25 2022 ] Batch(81/162) done. Loss: 0.0030 lr:0.000100 network_time: 0.0236 +[ Fri Sep 16 05:50:24 2022 ] Eval epoch: 140 +[ Fri Sep 16 05:52:12 2022 ] Mean test loss of 930 batches: 2.492652177810669. +[ Fri Sep 16 05:52:13 2022 ] Top1: 55.05% +[ Fri Sep 16 05:52:13 2022 ] Top5: 81.94% diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_motion_xset/shift_gcn.py b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_motion_xset/shift_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..0731e82fdb8bd0ae2e4c4ef4f29f7aab0c458bf4 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_motion_xset/shift_gcn.py @@ -0,0 +1,216 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math + +import sys +sys.path.append("./model/Temporal_shift/") + +from cuda.shift import Shift + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + 
nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class Shift_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(Shift_tcn, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.bn = nn.BatchNorm2d(in_channels) + self.bn2 = nn.BatchNorm2d(in_channels) + bn_init(self.bn2, 1) + self.relu = nn.ReLU(inplace=True) + self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1) + self.shift_out = Shift(channel=out_channels, stride=stride, init_scale=1) + + self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1) + nn.init.kaiming_normal(self.temporal_linear.weight, mode='fan_out') + + def forward(self, x): + x = self.bn(x) + # shift1 + x = self.shift_in(x) + x = self.temporal_linear(x) + x = self.relu(x) + # shift2 + x = self.shift_out(x) + x = self.bn2(x) + return x + + +class Shift_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3): + super(Shift_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0,math.sqrt(1.0/out_channels)) + + self.Linear_bias = 
nn.Parameter(torch.zeros(1,1,out_channels,requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Linear_bias, 0) + + self.Feature_Mask = nn.Parameter(torch.ones(1,25,in_channels, requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Feature_Mask, 0) + + self.bn = nn.BatchNorm1d(25*out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + + index_array = np.empty(25*in_channels).astype(np.int) + for i in range(25): + for j in range(in_channels): + index_array[i*in_channels + j] = (i*in_channels + j + j*in_channels)%(in_channels*25) + self.shift_in = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + index_array = np.empty(25*out_channels).astype(np.int) + for i in range(25): + for j in range(out_channels): + index_array[i*out_channels + j] = (i*out_channels + j - j*out_channels)%(out_channels*25) + self.shift_out = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + + def forward(self, x0): + n, c, t, v = x0.size() + x = x0.permute(0,2,3,1).contiguous() + + # shift1 + x = x.view(n*t,v*c) + x = torch.index_select(x, 1, self.shift_in) + x = x.view(n*t,v,c) + x = x * (torch.tanh(self.Feature_Mask)+1) + + x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous() # nt,v,c + x = x + self.Linear_bias + + # shift2 + x = x.view(n*t,-1) + x = torch.index_select(x, 1, self.shift_out) + x = self.bn(x) + x = x.view(n,t,v,self.out_channels).permute(0,3,1,2) # n,c,t,v + + x = x + self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = Shift_gcn(in_channels, out_channels, A) + self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride) + self.relu = nn.ReLU() + + if not residual: + self.residual = lambda x: 0 + + elif 
(in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + else: + self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride) + + def forward(self, x): + x = self.tcn1(self.gcn1(x)) + self.residual(x) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A) + self.l3 = TCN_GCN_unit(64, 64, A) + self.l4 = TCN_GCN_unit(64, 64, A) + self.l5 = TCN_GCN_unit(64, 128, A, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A) + self.l7 = TCN_GCN_unit(128, 128, A) + self.l8 = TCN_GCN_unit(128, 256, A, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A) + self.l10 = TCN_GCN_unit(256, 256, A) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x) + x = self.l2(x) + x = self.l3(x) + x = self.l4(x) + x = self.l5(x) + x = self.l6(x) + x = self.l7(x) + x = self.l8(x) + x = self.l9(x) + x = self.l10(x) + + # N*M,C,T,V + c_new = x.size(1) + x = x.view(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_xset/config.yaml b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_xset/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..207f750ba88d6204c6506e9e17fd4974f26fb7f1 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_xset/config.yaml @@ -0,0 +1,56 @@ +Experiment_name: ntu120_bone_xset +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xset/train_bone.yaml +device: +- 0 +- 1 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_bone_xset +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_bone.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_bone.npy + debug: false + label_path: 
/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_bone_xset diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_xset/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_xset/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..a7d41735433135d4fd161422b313f3441c4bacb6 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_xset/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f590d1de630391236ee05c1526ce6353bd0452854b5fdfb3a58563be9687d31 +size 34946665 diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_xset/log.txt b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_xset/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..6bc67d8b6d8e74d6970fbaccfaf49252c9901a1e --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_xset/log.txt @@ -0,0 +1,929 @@ +[ Thu Sep 15 20:53:09 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_bone_xset', 'model_saved_name': './save_models/ntu120_bone_xset', 'Experiment_name': 'ntu120_bone_xset', 'config': './config/ntu120_xset/train_bone.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': 
'/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [0, 1], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Thu Sep 15 20:53:09 2022 ] Training epoch: 1 +[ Thu Sep 15 20:54:27 2022 ] Batch(99/162) done. Loss: 3.1745 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 20:55:12 2022 ] Eval epoch: 1 +[ Thu Sep 15 20:57:02 2022 ] Mean test loss of 930 batches: 5.054730415344238. +[ Thu Sep 15 20:57:03 2022 ] Top1: 7.17% +[ Thu Sep 15 20:57:03 2022 ] Top5: 25.35% +[ Thu Sep 15 20:57:03 2022 ] Training epoch: 2 +[ Thu Sep 15 20:57:34 2022 ] Batch(37/162) done. Loss: 2.2364 lr:0.100000 network_time: 0.0296 +[ Thu Sep 15 20:58:47 2022 ] Batch(137/162) done. Loss: 2.4230 lr:0.100000 network_time: 0.0264 +[ Thu Sep 15 20:59:04 2022 ] Eval epoch: 2 +[ Thu Sep 15 21:00:55 2022 ] Mean test loss of 930 batches: 4.307687282562256. +[ Thu Sep 15 21:00:55 2022 ] Top1: 13.88% +[ Thu Sep 15 21:00:56 2022 ] Top5: 35.46% +[ Thu Sep 15 21:00:56 2022 ] Training epoch: 3 +[ Thu Sep 15 21:01:54 2022 ] Batch(75/162) done. Loss: 2.3808 lr:0.100000 network_time: 0.0324 +[ Thu Sep 15 21:02:57 2022 ] Eval epoch: 3 +[ Thu Sep 15 21:04:47 2022 ] Mean test loss of 930 batches: 3.7918362617492676. +[ Thu Sep 15 21:04:47 2022 ] Top1: 19.66% +[ Thu Sep 15 21:04:48 2022 ] Top5: 44.34% +[ Thu Sep 15 21:04:48 2022 ] Training epoch: 4 +[ Thu Sep 15 21:05:01 2022 ] Batch(13/162) done. 
Loss: 1.9126 lr:0.100000 network_time: 0.0397 +[ Thu Sep 15 21:06:14 2022 ] Batch(113/162) done. Loss: 1.7151 lr:0.100000 network_time: 0.0259 +[ Thu Sep 15 21:06:49 2022 ] Eval epoch: 4 +[ Thu Sep 15 21:08:38 2022 ] Mean test loss of 930 batches: 3.3427066802978516. +[ Thu Sep 15 21:08:39 2022 ] Top1: 23.91% +[ Thu Sep 15 21:08:39 2022 ] Top5: 49.61% +[ Thu Sep 15 21:08:40 2022 ] Training epoch: 5 +[ Thu Sep 15 21:09:20 2022 ] Batch(51/162) done. Loss: 1.5963 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 21:10:33 2022 ] Batch(151/162) done. Loss: 2.0971 lr:0.100000 network_time: 0.0262 +[ Thu Sep 15 21:10:40 2022 ] Eval epoch: 5 +[ Thu Sep 15 21:12:30 2022 ] Mean test loss of 930 batches: 3.069018602371216. +[ Thu Sep 15 21:12:31 2022 ] Top1: 28.82% +[ Thu Sep 15 21:12:31 2022 ] Top5: 57.04% +[ Thu Sep 15 21:12:31 2022 ] Training epoch: 6 +[ Thu Sep 15 21:13:40 2022 ] Batch(89/162) done. Loss: 1.6986 lr:0.100000 network_time: 0.0322 +[ Thu Sep 15 21:14:32 2022 ] Eval epoch: 6 +[ Thu Sep 15 21:16:23 2022 ] Mean test loss of 930 batches: 2.9965245723724365. +[ Thu Sep 15 21:16:23 2022 ] Top1: 29.52% +[ Thu Sep 15 21:16:24 2022 ] Top5: 59.43% +[ Thu Sep 15 21:16:24 2022 ] Training epoch: 7 +[ Thu Sep 15 21:16:47 2022 ] Batch(27/162) done. Loss: 1.3754 lr:0.100000 network_time: 0.0283 +[ Thu Sep 15 21:18:00 2022 ] Batch(127/162) done. Loss: 1.2434 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 21:18:25 2022 ] Eval epoch: 7 +[ Thu Sep 15 21:20:14 2022 ] Mean test loss of 930 batches: 2.6982879638671875. +[ Thu Sep 15 21:20:15 2022 ] Top1: 35.94% +[ Thu Sep 15 21:20:15 2022 ] Top5: 66.52% +[ Thu Sep 15 21:20:15 2022 ] Training epoch: 8 +[ Thu Sep 15 21:21:06 2022 ] Batch(65/162) done. Loss: 1.2703 lr:0.100000 network_time: 0.0313 +[ Thu Sep 15 21:22:16 2022 ] Eval epoch: 8 +[ Thu Sep 15 21:24:05 2022 ] Mean test loss of 930 batches: 2.8924400806427. 
+[ Thu Sep 15 21:24:06 2022 ] Top1: 34.11% +[ Thu Sep 15 21:24:06 2022 ] Top5: 63.32% +[ Thu Sep 15 21:24:06 2022 ] Training epoch: 9 +[ Thu Sep 15 21:24:12 2022 ] Batch(3/162) done. Loss: 0.9692 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 21:25:25 2022 ] Batch(103/162) done. Loss: 1.3851 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 21:26:07 2022 ] Eval epoch: 9 +[ Thu Sep 15 21:27:58 2022 ] Mean test loss of 930 batches: 3.032758951187134. +[ Thu Sep 15 21:27:58 2022 ] Top1: 35.23% +[ Thu Sep 15 21:27:59 2022 ] Top5: 64.33% +[ Thu Sep 15 21:27:59 2022 ] Training epoch: 10 +[ Thu Sep 15 21:28:32 2022 ] Batch(41/162) done. Loss: 0.9587 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 21:29:45 2022 ] Batch(141/162) done. Loss: 0.9386 lr:0.100000 network_time: 0.0262 +[ Thu Sep 15 21:29:59 2022 ] Eval epoch: 10 +[ Thu Sep 15 21:31:49 2022 ] Mean test loss of 930 batches: 2.911442995071411. +[ Thu Sep 15 21:31:50 2022 ] Top1: 37.85% +[ Thu Sep 15 21:31:50 2022 ] Top5: 66.94% +[ Thu Sep 15 21:31:51 2022 ] Training epoch: 11 +[ Thu Sep 15 21:32:52 2022 ] Batch(79/162) done. Loss: 1.0747 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 21:33:51 2022 ] Eval epoch: 11 +[ Thu Sep 15 21:35:41 2022 ] Mean test loss of 930 batches: 2.7861416339874268. +[ Thu Sep 15 21:35:41 2022 ] Top1: 37.24% +[ Thu Sep 15 21:35:42 2022 ] Top5: 68.24% +[ Thu Sep 15 21:35:42 2022 ] Training epoch: 12 +[ Thu Sep 15 21:35:58 2022 ] Batch(17/162) done. Loss: 0.8764 lr:0.100000 network_time: 0.0337 +[ Thu Sep 15 21:37:10 2022 ] Batch(117/162) done. Loss: 1.0292 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 21:37:43 2022 ] Eval epoch: 12 +[ Thu Sep 15 21:39:32 2022 ] Mean test loss of 930 batches: 2.67501163482666. +[ Thu Sep 15 21:39:33 2022 ] Top1: 38.34% +[ Thu Sep 15 21:39:33 2022 ] Top5: 71.56% +[ Thu Sep 15 21:39:33 2022 ] Training epoch: 13 +[ Thu Sep 15 21:40:16 2022 ] Batch(55/162) done. Loss: 0.7406 lr:0.100000 network_time: 0.0259 +[ Thu Sep 15 21:41:29 2022 ] Batch(155/162) done. 
Loss: 1.0139 lr:0.100000 network_time: 0.0253 +[ Thu Sep 15 21:41:33 2022 ] Eval epoch: 13 +[ Thu Sep 15 21:43:24 2022 ] Mean test loss of 930 batches: 2.403536558151245. +[ Thu Sep 15 21:43:24 2022 ] Top1: 40.37% +[ Thu Sep 15 21:43:25 2022 ] Top5: 72.13% +[ Thu Sep 15 21:43:25 2022 ] Training epoch: 14 +[ Thu Sep 15 21:44:36 2022 ] Batch(93/162) done. Loss: 1.0867 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 21:45:26 2022 ] Eval epoch: 14 +[ Thu Sep 15 21:47:15 2022 ] Mean test loss of 930 batches: 2.493147373199463. +[ Thu Sep 15 21:47:15 2022 ] Top1: 39.78% +[ Thu Sep 15 21:47:15 2022 ] Top5: 72.48% +[ Thu Sep 15 21:47:16 2022 ] Training epoch: 15 +[ Thu Sep 15 21:47:42 2022 ] Batch(31/162) done. Loss: 0.8762 lr:0.100000 network_time: 0.0296 +[ Thu Sep 15 21:48:54 2022 ] Batch(131/162) done. Loss: 0.5847 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 21:49:16 2022 ] Eval epoch: 15 +[ Thu Sep 15 21:51:06 2022 ] Mean test loss of 930 batches: 2.6337034702301025. +[ Thu Sep 15 21:51:06 2022 ] Top1: 40.75% +[ Thu Sep 15 21:51:07 2022 ] Top5: 72.22% +[ Thu Sep 15 21:51:07 2022 ] Training epoch: 16 +[ Thu Sep 15 21:52:01 2022 ] Batch(69/162) done. Loss: 0.6017 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 21:53:08 2022 ] Eval epoch: 16 +[ Thu Sep 15 21:54:58 2022 ] Mean test loss of 930 batches: 2.993208885192871. +[ Thu Sep 15 21:54:58 2022 ] Top1: 39.27% +[ Thu Sep 15 21:54:58 2022 ] Top5: 71.64% +[ Thu Sep 15 21:54:59 2022 ] Training epoch: 17 +[ Thu Sep 15 21:55:07 2022 ] Batch(7/162) done. Loss: 0.6349 lr:0.100000 network_time: 0.0286 +[ Thu Sep 15 21:56:20 2022 ] Batch(107/162) done. Loss: 0.8137 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 21:56:59 2022 ] Eval epoch: 17 +[ Thu Sep 15 21:58:50 2022 ] Mean test loss of 930 batches: 2.7564895153045654. +[ Thu Sep 15 21:58:50 2022 ] Top1: 38.92% +[ Thu Sep 15 21:58:51 2022 ] Top5: 71.24% +[ Thu Sep 15 21:58:51 2022 ] Training epoch: 18 +[ Thu Sep 15 21:59:27 2022 ] Batch(45/162) done. 
Loss: 0.7027 lr:0.100000 network_time: 0.0281 +[ Thu Sep 15 22:00:39 2022 ] Batch(145/162) done. Loss: 0.6394 lr:0.100000 network_time: 0.0250 +[ Thu Sep 15 22:00:51 2022 ] Eval epoch: 18 +[ Thu Sep 15 22:02:41 2022 ] Mean test loss of 930 batches: 2.665600299835205. +[ Thu Sep 15 22:02:41 2022 ] Top1: 41.95% +[ Thu Sep 15 22:02:42 2022 ] Top5: 72.42% +[ Thu Sep 15 22:02:42 2022 ] Training epoch: 19 +[ Thu Sep 15 22:03:46 2022 ] Batch(83/162) done. Loss: 0.4636 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 22:04:43 2022 ] Eval epoch: 19 +[ Thu Sep 15 22:06:32 2022 ] Mean test loss of 930 batches: 2.7386324405670166. +[ Thu Sep 15 22:06:33 2022 ] Top1: 41.85% +[ Thu Sep 15 22:06:33 2022 ] Top5: 72.58% +[ Thu Sep 15 22:06:33 2022 ] Training epoch: 20 +[ Thu Sep 15 22:06:53 2022 ] Batch(21/162) done. Loss: 0.6220 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 22:08:06 2022 ] Batch(121/162) done. Loss: 0.8370 lr:0.100000 network_time: 0.0304 +[ Thu Sep 15 22:08:35 2022 ] Eval epoch: 20 +[ Thu Sep 15 22:10:24 2022 ] Mean test loss of 930 batches: 3.217198610305786. +[ Thu Sep 15 22:10:25 2022 ] Top1: 39.58% +[ Thu Sep 15 22:10:25 2022 ] Top5: 71.58% +[ Thu Sep 15 22:10:25 2022 ] Training epoch: 21 +[ Thu Sep 15 22:11:12 2022 ] Batch(59/162) done. Loss: 0.5685 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 22:12:26 2022 ] Batch(159/162) done. Loss: 0.4968 lr:0.100000 network_time: 0.0279 +[ Thu Sep 15 22:12:28 2022 ] Eval epoch: 21 +[ Thu Sep 15 22:14:18 2022 ] Mean test loss of 930 batches: 2.607473850250244. +[ Thu Sep 15 22:14:18 2022 ] Top1: 45.13% +[ Thu Sep 15 22:14:19 2022 ] Top5: 74.89% +[ Thu Sep 15 22:14:19 2022 ] Training epoch: 22 +[ Thu Sep 15 22:15:34 2022 ] Batch(97/162) done. Loss: 0.6277 lr:0.100000 network_time: 0.0301 +[ Thu Sep 15 22:16:21 2022 ] Eval epoch: 22 +[ Thu Sep 15 22:18:10 2022 ] Mean test loss of 930 batches: 3.202122688293457. 
+[ Thu Sep 15 22:18:11 2022 ] Top1: 41.16% +[ Thu Sep 15 22:18:11 2022 ] Top5: 70.16% +[ Thu Sep 15 22:18:12 2022 ] Training epoch: 23 +[ Thu Sep 15 22:18:41 2022 ] Batch(35/162) done. Loss: 0.4671 lr:0.100000 network_time: 0.0306 +[ Thu Sep 15 22:19:53 2022 ] Batch(135/162) done. Loss: 0.2605 lr:0.100000 network_time: 0.0295 +[ Thu Sep 15 22:20:12 2022 ] Eval epoch: 23 +[ Thu Sep 15 22:22:01 2022 ] Mean test loss of 930 batches: 2.9939019680023193. +[ Thu Sep 15 22:22:02 2022 ] Top1: 39.67% +[ Thu Sep 15 22:22:02 2022 ] Top5: 71.05% +[ Thu Sep 15 22:22:03 2022 ] Training epoch: 24 +[ Thu Sep 15 22:23:00 2022 ] Batch(73/162) done. Loss: 0.3403 lr:0.100000 network_time: 0.0303 +[ Thu Sep 15 22:24:04 2022 ] Eval epoch: 24 +[ Thu Sep 15 22:25:54 2022 ] Mean test loss of 930 batches: 2.6512110233306885. +[ Thu Sep 15 22:25:55 2022 ] Top1: 43.70% +[ Thu Sep 15 22:25:55 2022 ] Top5: 74.94% +[ Thu Sep 15 22:25:56 2022 ] Training epoch: 25 +[ Thu Sep 15 22:26:08 2022 ] Batch(11/162) done. Loss: 0.3482 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 22:27:20 2022 ] Batch(111/162) done. Loss: 0.5646 lr:0.100000 network_time: 0.0248 +[ Thu Sep 15 22:27:57 2022 ] Eval epoch: 25 +[ Thu Sep 15 22:29:47 2022 ] Mean test loss of 930 batches: 2.80203914642334. +[ Thu Sep 15 22:29:48 2022 ] Top1: 43.78% +[ Thu Sep 15 22:29:48 2022 ] Top5: 75.42% +[ Thu Sep 15 22:29:48 2022 ] Training epoch: 26 +[ Thu Sep 15 22:30:28 2022 ] Batch(49/162) done. Loss: 0.4110 lr:0.100000 network_time: 0.0359 +[ Thu Sep 15 22:31:41 2022 ] Batch(149/162) done. Loss: 0.6011 lr:0.100000 network_time: 0.0454 +[ Thu Sep 15 22:31:50 2022 ] Eval epoch: 26 +[ Thu Sep 15 22:33:40 2022 ] Mean test loss of 930 batches: 3.0699634552001953. +[ Thu Sep 15 22:33:40 2022 ] Top1: 42.35% +[ Thu Sep 15 22:33:41 2022 ] Top5: 72.71% +[ Thu Sep 15 22:33:41 2022 ] Training epoch: 27 +[ Thu Sep 15 22:34:49 2022 ] Batch(87/162) done. 
Loss: 0.3504 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 22:35:43 2022 ] Eval epoch: 27 +[ Thu Sep 15 22:37:33 2022 ] Mean test loss of 930 batches: 3.0952885150909424. +[ Thu Sep 15 22:37:34 2022 ] Top1: 42.13% +[ Thu Sep 15 22:37:34 2022 ] Top5: 71.48% +[ Thu Sep 15 22:37:35 2022 ] Training epoch: 28 +[ Thu Sep 15 22:37:57 2022 ] Batch(25/162) done. Loss: 0.2971 lr:0.100000 network_time: 0.0287 +[ Thu Sep 15 22:39:10 2022 ] Batch(125/162) done. Loss: 0.4361 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 22:39:36 2022 ] Eval epoch: 28 +[ Thu Sep 15 22:41:27 2022 ] Mean test loss of 930 batches: 2.9900245666503906. +[ Thu Sep 15 22:41:27 2022 ] Top1: 43.13% +[ Thu Sep 15 22:41:28 2022 ] Top5: 73.28% +[ Thu Sep 15 22:41:28 2022 ] Training epoch: 29 +[ Thu Sep 15 22:42:18 2022 ] Batch(63/162) done. Loss: 0.3050 lr:0.100000 network_time: 0.0258 +[ Thu Sep 15 22:43:30 2022 ] Eval epoch: 29 +[ Thu Sep 15 22:45:21 2022 ] Mean test loss of 930 batches: 2.6688146591186523. +[ Thu Sep 15 22:45:21 2022 ] Top1: 47.25% +[ Thu Sep 15 22:45:22 2022 ] Top5: 76.30% +[ Thu Sep 15 22:45:22 2022 ] Training epoch: 30 +[ Thu Sep 15 22:45:26 2022 ] Batch(1/162) done. Loss: 0.2491 lr:0.100000 network_time: 0.0287 +[ Thu Sep 15 22:46:39 2022 ] Batch(101/162) done. Loss: 0.2432 lr:0.100000 network_time: 0.0323 +[ Thu Sep 15 22:47:23 2022 ] Eval epoch: 30 +[ Thu Sep 15 22:49:12 2022 ] Mean test loss of 930 batches: 2.776120901107788. +[ Thu Sep 15 22:49:12 2022 ] Top1: 44.79% +[ Thu Sep 15 22:49:13 2022 ] Top5: 74.90% +[ Thu Sep 15 22:49:13 2022 ] Training epoch: 31 +[ Thu Sep 15 22:49:45 2022 ] Batch(39/162) done. Loss: 0.3644 lr:0.100000 network_time: 0.0311 +[ Thu Sep 15 22:50:57 2022 ] Batch(139/162) done. Loss: 0.6405 lr:0.100000 network_time: 0.0285 +[ Thu Sep 15 22:51:14 2022 ] Eval epoch: 31 +[ Thu Sep 15 22:53:03 2022 ] Mean test loss of 930 batches: 2.716003179550171. 
+[ Thu Sep 15 22:53:04 2022 ] Top1: 44.46% +[ Thu Sep 15 22:53:04 2022 ] Top5: 74.83% +[ Thu Sep 15 22:53:05 2022 ] Training epoch: 32 +[ Thu Sep 15 22:54:04 2022 ] Batch(77/162) done. Loss: 0.2649 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 22:55:05 2022 ] Eval epoch: 32 +[ Thu Sep 15 22:56:56 2022 ] Mean test loss of 930 batches: 2.863656997680664. +[ Thu Sep 15 22:56:56 2022 ] Top1: 45.39% +[ Thu Sep 15 22:56:57 2022 ] Top5: 74.88% +[ Thu Sep 15 22:56:57 2022 ] Training epoch: 33 +[ Thu Sep 15 22:57:12 2022 ] Batch(15/162) done. Loss: 0.3665 lr:0.100000 network_time: 0.0302 +[ Thu Sep 15 22:58:24 2022 ] Batch(115/162) done. Loss: 0.7653 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 22:58:58 2022 ] Eval epoch: 33 +[ Thu Sep 15 23:00:47 2022 ] Mean test loss of 930 batches: 2.677206039428711. +[ Thu Sep 15 23:00:48 2022 ] Top1: 45.82% +[ Thu Sep 15 23:00:48 2022 ] Top5: 75.90% +[ Thu Sep 15 23:00:48 2022 ] Training epoch: 34 +[ Thu Sep 15 23:01:30 2022 ] Batch(53/162) done. Loss: 0.3284 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 23:02:43 2022 ] Batch(153/162) done. Loss: 0.4470 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 23:02:49 2022 ] Eval epoch: 34 +[ Thu Sep 15 23:04:38 2022 ] Mean test loss of 930 batches: 2.884477138519287. +[ Thu Sep 15 23:04:38 2022 ] Top1: 43.68% +[ Thu Sep 15 23:04:39 2022 ] Top5: 74.68% +[ Thu Sep 15 23:04:39 2022 ] Training epoch: 35 +[ Thu Sep 15 23:05:48 2022 ] Batch(91/162) done. Loss: 0.4457 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 23:06:39 2022 ] Eval epoch: 35 +[ Thu Sep 15 23:08:29 2022 ] Mean test loss of 930 batches: 3.4506053924560547. +[ Thu Sep 15 23:08:29 2022 ] Top1: 42.19% +[ Thu Sep 15 23:08:30 2022 ] Top5: 71.98% +[ Thu Sep 15 23:08:30 2022 ] Training epoch: 36 +[ Thu Sep 15 23:08:54 2022 ] Batch(29/162) done. Loss: 0.2119 lr:0.100000 network_time: 0.0262 +[ Thu Sep 15 23:10:07 2022 ] Batch(129/162) done. 
Loss: 0.4515 lr:0.100000 network_time: 0.0281 +[ Thu Sep 15 23:10:31 2022 ] Eval epoch: 36 +[ Thu Sep 15 23:12:20 2022 ] Mean test loss of 930 batches: 2.7693991661071777. +[ Thu Sep 15 23:12:20 2022 ] Top1: 45.62% +[ Thu Sep 15 23:12:20 2022 ] Top5: 76.00% +[ Thu Sep 15 23:12:21 2022 ] Training epoch: 37 +[ Thu Sep 15 23:13:13 2022 ] Batch(67/162) done. Loss: 0.2028 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 23:14:22 2022 ] Eval epoch: 37 +[ Thu Sep 15 23:16:11 2022 ] Mean test loss of 930 batches: 3.095299005508423. +[ Thu Sep 15 23:16:12 2022 ] Top1: 42.58% +[ Thu Sep 15 23:16:12 2022 ] Top5: 73.84% +[ Thu Sep 15 23:16:12 2022 ] Training epoch: 38 +[ Thu Sep 15 23:16:20 2022 ] Batch(5/162) done. Loss: 0.4349 lr:0.100000 network_time: 0.0282 +[ Thu Sep 15 23:17:32 2022 ] Batch(105/162) done. Loss: 0.2566 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 23:18:13 2022 ] Eval epoch: 38 +[ Thu Sep 15 23:20:03 2022 ] Mean test loss of 930 batches: 3.1413280963897705. +[ Thu Sep 15 23:20:04 2022 ] Top1: 43.90% +[ Thu Sep 15 23:20:04 2022 ] Top5: 74.12% +[ Thu Sep 15 23:20:04 2022 ] Training epoch: 39 +[ Thu Sep 15 23:20:39 2022 ] Batch(43/162) done. Loss: 0.2047 lr:0.100000 network_time: 0.0348 +[ Thu Sep 15 23:21:52 2022 ] Batch(143/162) done. Loss: 0.3025 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 23:22:05 2022 ] Eval epoch: 39 +[ Thu Sep 15 23:23:54 2022 ] Mean test loss of 930 batches: 3.481278419494629. +[ Thu Sep 15 23:23:54 2022 ] Top1: 41.59% +[ Thu Sep 15 23:23:55 2022 ] Top5: 72.09% +[ Thu Sep 15 23:23:55 2022 ] Training epoch: 40 +[ Thu Sep 15 23:24:57 2022 ] Batch(81/162) done. Loss: 0.3147 lr:0.100000 network_time: 0.0350 +[ Thu Sep 15 23:25:56 2022 ] Eval epoch: 40 +[ Thu Sep 15 23:27:45 2022 ] Mean test loss of 930 batches: 2.9946095943450928. +[ Thu Sep 15 23:27:46 2022 ] Top1: 44.16% +[ Thu Sep 15 23:27:46 2022 ] Top5: 74.71% +[ Thu Sep 15 23:27:46 2022 ] Training epoch: 41 +[ Thu Sep 15 23:28:03 2022 ] Batch(19/162) done. 
Loss: 0.3987 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 23:29:16 2022 ] Batch(119/162) done. Loss: 0.4457 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 23:29:47 2022 ] Eval epoch: 41 +[ Thu Sep 15 23:31:36 2022 ] Mean test loss of 930 batches: 3.4053616523742676. +[ Thu Sep 15 23:31:37 2022 ] Top1: 43.50% +[ Thu Sep 15 23:31:37 2022 ] Top5: 73.65% +[ Thu Sep 15 23:31:38 2022 ] Training epoch: 42 +[ Thu Sep 15 23:32:22 2022 ] Batch(57/162) done. Loss: 0.2492 lr:0.100000 network_time: 0.0278 +[ Thu Sep 15 23:33:35 2022 ] Batch(157/162) done. Loss: 0.4862 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 23:33:38 2022 ] Eval epoch: 42 +[ Thu Sep 15 23:35:28 2022 ] Mean test loss of 930 batches: 2.9815728664398193. +[ Thu Sep 15 23:35:28 2022 ] Top1: 46.10% +[ Thu Sep 15 23:35:28 2022 ] Top5: 74.05% +[ Thu Sep 15 23:35:29 2022 ] Training epoch: 43 +[ Thu Sep 15 23:36:41 2022 ] Batch(95/162) done. Loss: 0.4019 lr:0.100000 network_time: 0.0485 +[ Thu Sep 15 23:37:29 2022 ] Eval epoch: 43 +[ Thu Sep 15 23:39:19 2022 ] Mean test loss of 930 batches: 2.9869003295898438. +[ Thu Sep 15 23:39:20 2022 ] Top1: 46.54% +[ Thu Sep 15 23:39:20 2022 ] Top5: 77.24% +[ Thu Sep 15 23:39:20 2022 ] Training epoch: 44 +[ Thu Sep 15 23:39:48 2022 ] Batch(33/162) done. Loss: 0.2617 lr:0.100000 network_time: 0.0308 +[ Thu Sep 15 23:41:00 2022 ] Batch(133/162) done. Loss: 0.3671 lr:0.100000 network_time: 0.0278 +[ Thu Sep 15 23:41:21 2022 ] Eval epoch: 44 +[ Thu Sep 15 23:43:10 2022 ] Mean test loss of 930 batches: 3.2402682304382324. +[ Thu Sep 15 23:43:11 2022 ] Top1: 41.60% +[ Thu Sep 15 23:43:11 2022 ] Top5: 72.04% +[ Thu Sep 15 23:43:11 2022 ] Training epoch: 45 +[ Thu Sep 15 23:44:06 2022 ] Batch(71/162) done. Loss: 0.3470 lr:0.100000 network_time: 0.0251 +[ Thu Sep 15 23:45:12 2022 ] Eval epoch: 45 +[ Thu Sep 15 23:47:01 2022 ] Mean test loss of 930 batches: 2.804935932159424. 
+[ Thu Sep 15 23:47:01 2022 ] Top1: 46.84% +[ Thu Sep 15 23:47:02 2022 ] Top5: 76.32% +[ Thu Sep 15 23:47:02 2022 ] Training epoch: 46 +[ Thu Sep 15 23:47:12 2022 ] Batch(9/162) done. Loss: 0.1822 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 23:48:24 2022 ] Batch(109/162) done. Loss: 0.1606 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 23:49:02 2022 ] Eval epoch: 46 +[ Thu Sep 15 23:50:53 2022 ] Mean test loss of 930 batches: 2.79233980178833. +[ Thu Sep 15 23:50:53 2022 ] Top1: 46.51% +[ Thu Sep 15 23:50:54 2022 ] Top5: 77.12% +[ Thu Sep 15 23:50:54 2022 ] Training epoch: 47 +[ Thu Sep 15 23:51:32 2022 ] Batch(47/162) done. Loss: 0.2231 lr:0.100000 network_time: 0.0254 +[ Thu Sep 15 23:52:45 2022 ] Batch(147/162) done. Loss: 0.2199 lr:0.100000 network_time: 0.0330 +[ Thu Sep 15 23:52:55 2022 ] Eval epoch: 47 +[ Thu Sep 15 23:54:45 2022 ] Mean test loss of 930 batches: 2.7845840454101562. +[ Thu Sep 15 23:54:45 2022 ] Top1: 46.73% +[ Thu Sep 15 23:54:46 2022 ] Top5: 76.58% +[ Thu Sep 15 23:54:46 2022 ] Training epoch: 48 +[ Thu Sep 15 23:55:51 2022 ] Batch(85/162) done. Loss: 0.1455 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 23:56:47 2022 ] Eval epoch: 48 +[ Thu Sep 15 23:58:36 2022 ] Mean test loss of 930 batches: 2.9818427562713623. +[ Thu Sep 15 23:58:37 2022 ] Top1: 44.06% +[ Thu Sep 15 23:58:37 2022 ] Top5: 74.52% +[ Thu Sep 15 23:58:37 2022 ] Training epoch: 49 +[ Thu Sep 15 23:58:58 2022 ] Batch(23/162) done. Loss: 0.1679 lr:0.100000 network_time: 0.0315 +[ Fri Sep 16 00:00:10 2022 ] Batch(123/162) done. Loss: 0.2418 lr:0.100000 network_time: 0.0252 +[ Fri Sep 16 00:00:38 2022 ] Eval epoch: 49 +[ Fri Sep 16 00:02:28 2022 ] Mean test loss of 930 batches: 3.2953529357910156. +[ Fri Sep 16 00:02:28 2022 ] Top1: 44.71% +[ Fri Sep 16 00:02:28 2022 ] Top5: 73.97% +[ Fri Sep 16 00:02:29 2022 ] Training epoch: 50 +[ Fri Sep 16 00:03:17 2022 ] Batch(61/162) done. Loss: 0.2752 lr:0.100000 network_time: 0.0311 +[ Fri Sep 16 00:04:29 2022 ] Batch(161/162) done. 
Loss: 0.2480 lr:0.100000 network_time: 0.0275 +[ Fri Sep 16 00:04:29 2022 ] Eval epoch: 50 +[ Fri Sep 16 00:06:19 2022 ] Mean test loss of 930 batches: 3.178257942199707. +[ Fri Sep 16 00:06:20 2022 ] Top1: 46.33% +[ Fri Sep 16 00:06:20 2022 ] Top5: 75.49% +[ Fri Sep 16 00:06:21 2022 ] Training epoch: 51 +[ Fri Sep 16 00:07:36 2022 ] Batch(99/162) done. Loss: 0.2603 lr:0.100000 network_time: 0.0318 +[ Fri Sep 16 00:08:21 2022 ] Eval epoch: 51 +[ Fri Sep 16 00:10:11 2022 ] Mean test loss of 930 batches: 2.8996267318725586. +[ Fri Sep 16 00:10:11 2022 ] Top1: 45.21% +[ Fri Sep 16 00:10:12 2022 ] Top5: 75.58% +[ Fri Sep 16 00:10:12 2022 ] Training epoch: 52 +[ Fri Sep 16 00:10:43 2022 ] Batch(37/162) done. Loss: 0.2263 lr:0.100000 network_time: 0.0314 +[ Fri Sep 16 00:11:55 2022 ] Batch(137/162) done. Loss: 0.2389 lr:0.100000 network_time: 0.0305 +[ Fri Sep 16 00:12:13 2022 ] Eval epoch: 52 +[ Fri Sep 16 00:14:03 2022 ] Mean test loss of 930 batches: 3.1332056522369385. +[ Fri Sep 16 00:14:04 2022 ] Top1: 43.39% +[ Fri Sep 16 00:14:04 2022 ] Top5: 74.20% +[ Fri Sep 16 00:14:05 2022 ] Training epoch: 53 +[ Fri Sep 16 00:15:02 2022 ] Batch(75/162) done. Loss: 0.2281 lr:0.100000 network_time: 0.0278 +[ Fri Sep 16 00:16:05 2022 ] Eval epoch: 53 +[ Fri Sep 16 00:17:55 2022 ] Mean test loss of 930 batches: 3.1676747798919678. +[ Fri Sep 16 00:17:55 2022 ] Top1: 45.65% +[ Fri Sep 16 00:17:56 2022 ] Top5: 74.51% +[ Fri Sep 16 00:17:56 2022 ] Training epoch: 54 +[ Fri Sep 16 00:18:09 2022 ] Batch(13/162) done. Loss: 0.2242 lr:0.100000 network_time: 0.0284 +[ Fri Sep 16 00:19:21 2022 ] Batch(113/162) done. Loss: 0.3021 lr:0.100000 network_time: 0.0271 +[ Fri Sep 16 00:19:56 2022 ] Eval epoch: 54 +[ Fri Sep 16 00:21:46 2022 ] Mean test loss of 930 batches: 2.7405104637145996. +[ Fri Sep 16 00:21:47 2022 ] Top1: 47.49% +[ Fri Sep 16 00:21:47 2022 ] Top5: 77.36% +[ Fri Sep 16 00:21:47 2022 ] Training epoch: 55 +[ Fri Sep 16 00:22:28 2022 ] Batch(51/162) done. 
Loss: 0.0692 lr:0.100000 network_time: 0.0263 +[ Fri Sep 16 00:23:40 2022 ] Batch(151/162) done. Loss: 0.1702 lr:0.100000 network_time: 0.0274 +[ Fri Sep 16 00:23:47 2022 ] Eval epoch: 55 +[ Fri Sep 16 00:25:37 2022 ] Mean test loss of 930 batches: 2.920945167541504. +[ Fri Sep 16 00:25:38 2022 ] Top1: 45.22% +[ Fri Sep 16 00:25:38 2022 ] Top5: 74.90% +[ Fri Sep 16 00:25:38 2022 ] Training epoch: 56 +[ Fri Sep 16 00:26:46 2022 ] Batch(89/162) done. Loss: 0.2224 lr:0.100000 network_time: 0.0254 +[ Fri Sep 16 00:27:39 2022 ] Eval epoch: 56 +[ Fri Sep 16 00:29:29 2022 ] Mean test loss of 930 batches: 2.671355724334717. +[ Fri Sep 16 00:29:29 2022 ] Top1: 47.52% +[ Fri Sep 16 00:29:30 2022 ] Top5: 77.24% +[ Fri Sep 16 00:29:30 2022 ] Training epoch: 57 +[ Fri Sep 16 00:29:53 2022 ] Batch(27/162) done. Loss: 0.2738 lr:0.100000 network_time: 0.0271 +[ Fri Sep 16 00:31:05 2022 ] Batch(127/162) done. Loss: 0.2604 lr:0.100000 network_time: 0.0356 +[ Fri Sep 16 00:31:30 2022 ] Eval epoch: 57 +[ Fri Sep 16 00:33:20 2022 ] Mean test loss of 930 batches: 2.9581689834594727. +[ Fri Sep 16 00:33:20 2022 ] Top1: 46.12% +[ Fri Sep 16 00:33:21 2022 ] Top5: 75.99% +[ Fri Sep 16 00:33:21 2022 ] Training epoch: 58 +[ Fri Sep 16 00:34:11 2022 ] Batch(65/162) done. Loss: 0.1503 lr:0.100000 network_time: 0.0274 +[ Fri Sep 16 00:35:21 2022 ] Eval epoch: 58 +[ Fri Sep 16 00:37:11 2022 ] Mean test loss of 930 batches: 2.9120888710021973. +[ Fri Sep 16 00:37:11 2022 ] Top1: 47.65% +[ Fri Sep 16 00:37:12 2022 ] Top5: 77.31% +[ Fri Sep 16 00:37:12 2022 ] Training epoch: 59 +[ Fri Sep 16 00:37:17 2022 ] Batch(3/162) done. Loss: 0.2663 lr:0.100000 network_time: 0.0256 +[ Fri Sep 16 00:38:30 2022 ] Batch(103/162) done. Loss: 0.1600 lr:0.100000 network_time: 0.0275 +[ Fri Sep 16 00:39:12 2022 ] Eval epoch: 59 +[ Fri Sep 16 00:41:02 2022 ] Mean test loss of 930 batches: 2.8816802501678467. 
+[ Fri Sep 16 00:41:02 2022 ] Top1: 46.62% +[ Fri Sep 16 00:41:03 2022 ] Top5: 75.71% +[ Fri Sep 16 00:41:03 2022 ] Training epoch: 60 +[ Fri Sep 16 00:41:36 2022 ] Batch(41/162) done. Loss: 0.3349 lr:0.100000 network_time: 0.0259 +[ Fri Sep 16 00:42:49 2022 ] Batch(141/162) done. Loss: 0.2002 lr:0.100000 network_time: 0.0256 +[ Fri Sep 16 00:43:04 2022 ] Eval epoch: 60 +[ Fri Sep 16 00:44:53 2022 ] Mean test loss of 930 batches: 3.1312150955200195. +[ Fri Sep 16 00:44:54 2022 ] Top1: 46.17% +[ Fri Sep 16 00:44:54 2022 ] Top5: 74.38% +[ Fri Sep 16 00:44:54 2022 ] Training epoch: 61 +[ Fri Sep 16 00:45:55 2022 ] Batch(79/162) done. Loss: 0.1466 lr:0.010000 network_time: 0.0264 +[ Fri Sep 16 00:46:55 2022 ] Eval epoch: 61 +[ Fri Sep 16 00:48:45 2022 ] Mean test loss of 930 batches: 2.4748167991638184. +[ Fri Sep 16 00:48:45 2022 ] Top1: 53.49% +[ Fri Sep 16 00:48:46 2022 ] Top5: 80.49% +[ Fri Sep 16 00:48:46 2022 ] Training epoch: 62 +[ Fri Sep 16 00:49:01 2022 ] Batch(17/162) done. Loss: 0.0275 lr:0.010000 network_time: 0.0258 +[ Fri Sep 16 00:50:14 2022 ] Batch(117/162) done. Loss: 0.0287 lr:0.010000 network_time: 0.0290 +[ Fri Sep 16 00:50:46 2022 ] Eval epoch: 62 +[ Fri Sep 16 00:52:36 2022 ] Mean test loss of 930 batches: 2.490679979324341. +[ Fri Sep 16 00:52:36 2022 ] Top1: 53.89% +[ Fri Sep 16 00:52:37 2022 ] Top5: 80.59% +[ Fri Sep 16 00:52:37 2022 ] Training epoch: 63 +[ Fri Sep 16 00:53:20 2022 ] Batch(55/162) done. Loss: 0.0312 lr:0.010000 network_time: 0.0266 +[ Fri Sep 16 00:54:33 2022 ] Batch(155/162) done. Loss: 0.0125 lr:0.010000 network_time: 0.0260 +[ Fri Sep 16 00:54:37 2022 ] Eval epoch: 63 +[ Fri Sep 16 00:56:27 2022 ] Mean test loss of 930 batches: 2.4980976581573486. +[ Fri Sep 16 00:56:27 2022 ] Top1: 54.03% +[ Fri Sep 16 00:56:28 2022 ] Top5: 80.86% +[ Fri Sep 16 00:56:28 2022 ] Training epoch: 64 +[ Fri Sep 16 00:57:39 2022 ] Batch(93/162) done. 
Loss: 0.0411 lr:0.010000 network_time: 0.0260 +[ Fri Sep 16 00:58:29 2022 ] Eval epoch: 64 +[ Fri Sep 16 01:00:18 2022 ] Mean test loss of 930 batches: 2.519536018371582. +[ Fri Sep 16 01:00:18 2022 ] Top1: 54.18% +[ Fri Sep 16 01:00:19 2022 ] Top5: 80.82% +[ Fri Sep 16 01:00:19 2022 ] Training epoch: 65 +[ Fri Sep 16 01:00:45 2022 ] Batch(31/162) done. Loss: 0.0299 lr:0.010000 network_time: 0.0276 +[ Fri Sep 16 01:01:58 2022 ] Batch(131/162) done. Loss: 0.0315 lr:0.010000 network_time: 0.0299 +[ Fri Sep 16 01:02:20 2022 ] Eval epoch: 65 +[ Fri Sep 16 01:04:09 2022 ] Mean test loss of 930 batches: 2.5533487796783447. +[ Fri Sep 16 01:04:09 2022 ] Top1: 53.81% +[ Fri Sep 16 01:04:10 2022 ] Top5: 80.72% +[ Fri Sep 16 01:04:10 2022 ] Training epoch: 66 +[ Fri Sep 16 01:05:04 2022 ] Batch(69/162) done. Loss: 0.0256 lr:0.010000 network_time: 0.0266 +[ Fri Sep 16 01:06:11 2022 ] Eval epoch: 66 +[ Fri Sep 16 01:08:01 2022 ] Mean test loss of 930 batches: 2.5415453910827637. +[ Fri Sep 16 01:08:01 2022 ] Top1: 54.21% +[ Fri Sep 16 01:08:01 2022 ] Top5: 80.87% +[ Fri Sep 16 01:08:02 2022 ] Training epoch: 67 +[ Fri Sep 16 01:08:11 2022 ] Batch(7/162) done. Loss: 0.0065 lr:0.010000 network_time: 0.0315 +[ Fri Sep 16 01:09:23 2022 ] Batch(107/162) done. Loss: 0.0125 lr:0.010000 network_time: 0.0663 +[ Fri Sep 16 01:10:03 2022 ] Eval epoch: 67 +[ Fri Sep 16 01:11:52 2022 ] Mean test loss of 930 batches: 2.536372661590576. +[ Fri Sep 16 01:11:53 2022 ] Top1: 54.25% +[ Fri Sep 16 01:11:53 2022 ] Top5: 80.92% +[ Fri Sep 16 01:11:53 2022 ] Training epoch: 68 +[ Fri Sep 16 01:12:29 2022 ] Batch(45/162) done. Loss: 0.0395 lr:0.010000 network_time: 0.0299 +[ Fri Sep 16 01:13:42 2022 ] Batch(145/162) done. Loss: 0.0242 lr:0.010000 network_time: 0.0269 +[ Fri Sep 16 01:13:54 2022 ] Eval epoch: 68 +[ Fri Sep 16 01:15:43 2022 ] Mean test loss of 930 batches: 2.539608955383301. 
+[ Fri Sep 16 01:15:43 2022 ] Top1: 54.20% +[ Fri Sep 16 01:15:44 2022 ] Top5: 81.04% +[ Fri Sep 16 01:15:44 2022 ] Training epoch: 69 +[ Fri Sep 16 01:16:48 2022 ] Batch(83/162) done. Loss: 0.0367 lr:0.010000 network_time: 0.0266 +[ Fri Sep 16 01:17:44 2022 ] Eval epoch: 69 +[ Fri Sep 16 01:19:34 2022 ] Mean test loss of 930 batches: 2.5502073764801025. +[ Fri Sep 16 01:19:35 2022 ] Top1: 54.33% +[ Fri Sep 16 01:19:35 2022 ] Top5: 80.96% +[ Fri Sep 16 01:19:35 2022 ] Training epoch: 70 +[ Fri Sep 16 01:19:55 2022 ] Batch(21/162) done. Loss: 0.0162 lr:0.010000 network_time: 0.0308 +[ Fri Sep 16 01:21:07 2022 ] Batch(121/162) done. Loss: 0.0184 lr:0.010000 network_time: 0.0257 +[ Fri Sep 16 01:21:36 2022 ] Eval epoch: 70 +[ Fri Sep 16 01:23:26 2022 ] Mean test loss of 930 batches: 2.5347869396209717. +[ Fri Sep 16 01:23:27 2022 ] Top1: 54.52% +[ Fri Sep 16 01:23:27 2022 ] Top5: 81.08% +[ Fri Sep 16 01:23:27 2022 ] Training epoch: 71 +[ Fri Sep 16 01:24:14 2022 ] Batch(59/162) done. Loss: 0.0151 lr:0.010000 network_time: 0.0277 +[ Fri Sep 16 01:25:26 2022 ] Batch(159/162) done. Loss: 0.0078 lr:0.010000 network_time: 0.0268 +[ Fri Sep 16 01:25:28 2022 ] Eval epoch: 71 +[ Fri Sep 16 01:27:18 2022 ] Mean test loss of 930 batches: 2.5435900688171387. +[ Fri Sep 16 01:27:18 2022 ] Top1: 54.45% +[ Fri Sep 16 01:27:18 2022 ] Top5: 81.15% +[ Fri Sep 16 01:27:19 2022 ] Training epoch: 72 +[ Fri Sep 16 01:28:33 2022 ] Batch(97/162) done. Loss: 0.0244 lr:0.010000 network_time: 0.0289 +[ Fri Sep 16 01:29:19 2022 ] Eval epoch: 72 +[ Fri Sep 16 01:31:09 2022 ] Mean test loss of 930 batches: 2.6118814945220947. +[ Fri Sep 16 01:31:09 2022 ] Top1: 54.16% +[ Fri Sep 16 01:31:10 2022 ] Top5: 80.86% +[ Fri Sep 16 01:31:10 2022 ] Training epoch: 73 +[ Fri Sep 16 01:31:39 2022 ] Batch(35/162) done. Loss: 0.0033 lr:0.010000 network_time: 0.0262 +[ Fri Sep 16 01:32:52 2022 ] Batch(135/162) done. 
Loss: 0.0129 lr:0.010000 network_time: 0.0261 +[ Fri Sep 16 01:33:11 2022 ] Eval epoch: 73 +[ Fri Sep 16 01:35:00 2022 ] Mean test loss of 930 batches: 2.569013833999634. +[ Fri Sep 16 01:35:01 2022 ] Top1: 54.44% +[ Fri Sep 16 01:35:01 2022 ] Top5: 81.23% +[ Fri Sep 16 01:35:01 2022 ] Training epoch: 74 +[ Fri Sep 16 01:35:58 2022 ] Batch(73/162) done. Loss: 0.0043 lr:0.010000 network_time: 0.0440 +[ Fri Sep 16 01:37:02 2022 ] Eval epoch: 74 +[ Fri Sep 16 01:38:51 2022 ] Mean test loss of 930 batches: 2.572516679763794. +[ Fri Sep 16 01:38:52 2022 ] Top1: 54.38% +[ Fri Sep 16 01:38:52 2022 ] Top5: 81.14% +[ Fri Sep 16 01:38:52 2022 ] Training epoch: 75 +[ Fri Sep 16 01:39:04 2022 ] Batch(11/162) done. Loss: 0.0047 lr:0.010000 network_time: 0.0298 +[ Fri Sep 16 01:40:16 2022 ] Batch(111/162) done. Loss: 0.0327 lr:0.010000 network_time: 0.0266 +[ Fri Sep 16 01:40:53 2022 ] Eval epoch: 75 +[ Fri Sep 16 01:42:43 2022 ] Mean test loss of 930 batches: 2.5794084072113037. +[ Fri Sep 16 01:42:43 2022 ] Top1: 54.24% +[ Fri Sep 16 01:42:43 2022 ] Top5: 80.90% +[ Fri Sep 16 01:42:44 2022 ] Training epoch: 76 +[ Fri Sep 16 01:43:23 2022 ] Batch(49/162) done. Loss: 0.0127 lr:0.010000 network_time: 0.0267 +[ Fri Sep 16 01:44:35 2022 ] Batch(149/162) done. Loss: 0.0227 lr:0.010000 network_time: 0.0269 +[ Fri Sep 16 01:44:44 2022 ] Eval epoch: 76 +[ Fri Sep 16 01:46:34 2022 ] Mean test loss of 930 batches: 2.6028902530670166. +[ Fri Sep 16 01:46:34 2022 ] Top1: 54.26% +[ Fri Sep 16 01:46:35 2022 ] Top5: 80.83% +[ Fri Sep 16 01:46:35 2022 ] Training epoch: 77 +[ Fri Sep 16 01:47:42 2022 ] Batch(87/162) done. Loss: 0.0140 lr:0.010000 network_time: 0.0266 +[ Fri Sep 16 01:48:36 2022 ] Eval epoch: 77 +[ Fri Sep 16 01:50:25 2022 ] Mean test loss of 930 batches: 2.587498903274536. +[ Fri Sep 16 01:50:25 2022 ] Top1: 54.37% +[ Fri Sep 16 01:50:26 2022 ] Top5: 80.98% +[ Fri Sep 16 01:50:26 2022 ] Training epoch: 78 +[ Fri Sep 16 01:50:48 2022 ] Batch(25/162) done. 
Loss: 0.0059 lr:0.010000 network_time: 0.0265 +[ Fri Sep 16 01:52:00 2022 ] Batch(125/162) done. Loss: 0.0070 lr:0.010000 network_time: 0.0260 +[ Fri Sep 16 01:52:27 2022 ] Eval epoch: 78 +[ Fri Sep 16 01:54:17 2022 ] Mean test loss of 930 batches: 2.579465866088867. +[ Fri Sep 16 01:54:17 2022 ] Top1: 54.72% +[ Fri Sep 16 01:54:17 2022 ] Top5: 81.03% +[ Fri Sep 16 01:54:18 2022 ] Training epoch: 79 +[ Fri Sep 16 01:55:07 2022 ] Batch(63/162) done. Loss: 0.0047 lr:0.010000 network_time: 0.0279 +[ Fri Sep 16 01:56:18 2022 ] Eval epoch: 79 +[ Fri Sep 16 01:58:08 2022 ] Mean test loss of 930 batches: 2.603970527648926. +[ Fri Sep 16 01:58:08 2022 ] Top1: 54.30% +[ Fri Sep 16 01:58:09 2022 ] Top5: 80.87% +[ Fri Sep 16 01:58:09 2022 ] Training epoch: 80 +[ Fri Sep 16 01:58:13 2022 ] Batch(1/162) done. Loss: 0.0055 lr:0.010000 network_time: 0.0293 +[ Fri Sep 16 01:59:26 2022 ] Batch(101/162) done. Loss: 0.0043 lr:0.010000 network_time: 0.0263 +[ Fri Sep 16 02:00:10 2022 ] Eval epoch: 80 +[ Fri Sep 16 02:01:59 2022 ] Mean test loss of 930 batches: 2.5931570529937744. +[ Fri Sep 16 02:01:59 2022 ] Top1: 54.43% +[ Fri Sep 16 02:02:00 2022 ] Top5: 81.05% +[ Fri Sep 16 02:02:00 2022 ] Training epoch: 81 +[ Fri Sep 16 02:02:32 2022 ] Batch(39/162) done. Loss: 0.0105 lr:0.001000 network_time: 0.0305 +[ Fri Sep 16 02:03:44 2022 ] Batch(139/162) done. Loss: 0.0017 lr:0.001000 network_time: 0.0307 +[ Fri Sep 16 02:04:00 2022 ] Eval epoch: 81 +[ Fri Sep 16 02:05:50 2022 ] Mean test loss of 930 batches: 2.6085758209228516. +[ Fri Sep 16 02:05:51 2022 ] Top1: 54.33% +[ Fri Sep 16 02:05:51 2022 ] Top5: 80.87% +[ Fri Sep 16 02:05:51 2022 ] Training epoch: 82 +[ Fri Sep 16 02:06:51 2022 ] Batch(77/162) done. Loss: 0.0111 lr:0.001000 network_time: 0.0309 +[ Fri Sep 16 02:07:52 2022 ] Eval epoch: 82 +[ Fri Sep 16 02:09:42 2022 ] Mean test loss of 930 batches: 2.5661566257476807. 
+[ Fri Sep 16 02:09:42 2022 ] Top1: 54.70% +[ Fri Sep 16 02:09:43 2022 ] Top5: 81.25% +[ Fri Sep 16 02:09:43 2022 ] Training epoch: 83 +[ Fri Sep 16 02:09:57 2022 ] Batch(15/162) done. Loss: 0.0092 lr:0.001000 network_time: 0.0292 +[ Fri Sep 16 02:11:10 2022 ] Batch(115/162) done. Loss: 0.0125 lr:0.001000 network_time: 0.0315 +[ Fri Sep 16 02:11:44 2022 ] Eval epoch: 83 +[ Fri Sep 16 02:13:33 2022 ] Mean test loss of 930 batches: 2.590534210205078. +[ Fri Sep 16 02:13:33 2022 ] Top1: 54.68% +[ Fri Sep 16 02:13:34 2022 ] Top5: 81.02% +[ Fri Sep 16 02:13:34 2022 ] Training epoch: 84 +[ Fri Sep 16 02:14:16 2022 ] Batch(53/162) done. Loss: 0.0171 lr:0.001000 network_time: 0.0291 +[ Fri Sep 16 02:15:29 2022 ] Batch(153/162) done. Loss: 0.0115 lr:0.001000 network_time: 0.0257 +[ Fri Sep 16 02:15:35 2022 ] Eval epoch: 84 +[ Fri Sep 16 02:17:25 2022 ] Mean test loss of 930 batches: 2.5949807167053223. +[ Fri Sep 16 02:17:25 2022 ] Top1: 54.61% +[ Fri Sep 16 02:17:25 2022 ] Top5: 80.92% +[ Fri Sep 16 02:17:26 2022 ] Training epoch: 85 +[ Fri Sep 16 02:18:36 2022 ] Batch(91/162) done. Loss: 0.0127 lr:0.001000 network_time: 0.0315 +[ Fri Sep 16 02:19:27 2022 ] Eval epoch: 85 +[ Fri Sep 16 02:21:17 2022 ] Mean test loss of 930 batches: 2.608656406402588. +[ Fri Sep 16 02:21:17 2022 ] Top1: 54.46% +[ Fri Sep 16 02:21:18 2022 ] Top5: 81.11% +[ Fri Sep 16 02:21:18 2022 ] Training epoch: 86 +[ Fri Sep 16 02:21:42 2022 ] Batch(29/162) done. Loss: 0.0120 lr:0.001000 network_time: 0.0271 +[ Fri Sep 16 02:22:55 2022 ] Batch(129/162) done. Loss: 0.0073 lr:0.001000 network_time: 0.0270 +[ Fri Sep 16 02:23:18 2022 ] Eval epoch: 86 +[ Fri Sep 16 02:25:08 2022 ] Mean test loss of 930 batches: 2.586932420730591. +[ Fri Sep 16 02:25:08 2022 ] Top1: 54.47% +[ Fri Sep 16 02:25:09 2022 ] Top5: 81.06% +[ Fri Sep 16 02:25:09 2022 ] Training epoch: 87 +[ Fri Sep 16 02:26:01 2022 ] Batch(67/162) done. 
Loss: 0.0127 lr:0.001000 network_time: 0.0279 +[ Fri Sep 16 02:27:09 2022 ] Eval epoch: 87 +[ Fri Sep 16 02:28:58 2022 ] Mean test loss of 930 batches: 2.5606770515441895. +[ Fri Sep 16 02:28:59 2022 ] Top1: 54.67% +[ Fri Sep 16 02:28:59 2022 ] Top5: 81.27% +[ Fri Sep 16 02:28:59 2022 ] Training epoch: 88 +[ Fri Sep 16 02:29:07 2022 ] Batch(5/162) done. Loss: 0.0105 lr:0.001000 network_time: 0.0260 +[ Fri Sep 16 02:30:19 2022 ] Batch(105/162) done. Loss: 0.0090 lr:0.001000 network_time: 0.0265 +[ Fri Sep 16 02:31:00 2022 ] Eval epoch: 88 +[ Fri Sep 16 02:32:49 2022 ] Mean test loss of 930 batches: 2.5782501697540283. +[ Fri Sep 16 02:32:49 2022 ] Top1: 54.59% +[ Fri Sep 16 02:32:50 2022 ] Top5: 81.03% +[ Fri Sep 16 02:32:50 2022 ] Training epoch: 89 +[ Fri Sep 16 02:33:25 2022 ] Batch(43/162) done. Loss: 0.0029 lr:0.001000 network_time: 0.0306 +[ Fri Sep 16 02:34:38 2022 ] Batch(143/162) done. Loss: 0.0068 lr:0.001000 network_time: 0.0255 +[ Fri Sep 16 02:34:51 2022 ] Eval epoch: 89 +[ Fri Sep 16 02:36:40 2022 ] Mean test loss of 930 batches: 2.583160161972046. +[ Fri Sep 16 02:36:41 2022 ] Top1: 54.79% +[ Fri Sep 16 02:36:41 2022 ] Top5: 81.18% +[ Fri Sep 16 02:36:41 2022 ] Training epoch: 90 +[ Fri Sep 16 02:37:43 2022 ] Batch(81/162) done. Loss: 0.0046 lr:0.001000 network_time: 0.0283 +[ Fri Sep 16 02:38:42 2022 ] Eval epoch: 90 +[ Fri Sep 16 02:40:31 2022 ] Mean test loss of 930 batches: 2.5848512649536133. +[ Fri Sep 16 02:40:31 2022 ] Top1: 54.56% +[ Fri Sep 16 02:40:32 2022 ] Top5: 81.17% +[ Fri Sep 16 02:40:32 2022 ] Training epoch: 91 +[ Fri Sep 16 02:40:49 2022 ] Batch(19/162) done. Loss: 0.0071 lr:0.001000 network_time: 0.0335 +[ Fri Sep 16 02:42:01 2022 ] Batch(119/162) done. Loss: 0.0055 lr:0.001000 network_time: 0.0258 +[ Fri Sep 16 02:42:32 2022 ] Eval epoch: 91 +[ Fri Sep 16 02:44:21 2022 ] Mean test loss of 930 batches: 2.625767707824707. 
+[ Fri Sep 16 02:44:22 2022 ] Top1: 54.39% +[ Fri Sep 16 02:44:22 2022 ] Top5: 81.06% +[ Fri Sep 16 02:44:22 2022 ] Training epoch: 92 +[ Fri Sep 16 02:45:07 2022 ] Batch(57/162) done. Loss: 0.0181 lr:0.001000 network_time: 0.0260 +[ Fri Sep 16 02:46:20 2022 ] Batch(157/162) done. Loss: 0.0126 lr:0.001000 network_time: 0.0266 +[ Fri Sep 16 02:46:23 2022 ] Eval epoch: 92 +[ Fri Sep 16 02:48:12 2022 ] Mean test loss of 930 batches: 2.5934081077575684. +[ Fri Sep 16 02:48:13 2022 ] Top1: 54.54% +[ Fri Sep 16 02:48:13 2022 ] Top5: 81.03% +[ Fri Sep 16 02:48:13 2022 ] Training epoch: 93 +[ Fri Sep 16 02:49:26 2022 ] Batch(95/162) done. Loss: 0.0061 lr:0.001000 network_time: 0.0678 +[ Fri Sep 16 02:50:14 2022 ] Eval epoch: 93 +[ Fri Sep 16 02:52:03 2022 ] Mean test loss of 930 batches: 2.5692086219787598. +[ Fri Sep 16 02:52:04 2022 ] Top1: 55.00% +[ Fri Sep 16 02:52:04 2022 ] Top5: 81.34% +[ Fri Sep 16 02:52:04 2022 ] Training epoch: 94 +[ Fri Sep 16 02:52:32 2022 ] Batch(33/162) done. Loss: 0.0225 lr:0.001000 network_time: 0.0273 +[ Fri Sep 16 02:53:45 2022 ] Batch(133/162) done. Loss: 0.0196 lr:0.001000 network_time: 0.0302 +[ Fri Sep 16 02:54:05 2022 ] Eval epoch: 94 +[ Fri Sep 16 02:55:55 2022 ] Mean test loss of 930 batches: 2.561540126800537. +[ Fri Sep 16 02:55:55 2022 ] Top1: 54.92% +[ Fri Sep 16 02:55:56 2022 ] Top5: 81.30% +[ Fri Sep 16 02:55:56 2022 ] Training epoch: 95 +[ Fri Sep 16 02:56:51 2022 ] Batch(71/162) done. Loss: 0.0138 lr:0.001000 network_time: 0.0272 +[ Fri Sep 16 02:57:56 2022 ] Eval epoch: 95 +[ Fri Sep 16 02:59:46 2022 ] Mean test loss of 930 batches: 2.600646495819092. +[ Fri Sep 16 02:59:47 2022 ] Top1: 54.43% +[ Fri Sep 16 02:59:47 2022 ] Top5: 80.92% +[ Fri Sep 16 02:59:47 2022 ] Training epoch: 96 +[ Fri Sep 16 02:59:58 2022 ] Batch(9/162) done. Loss: 0.0091 lr:0.001000 network_time: 0.0254 +[ Fri Sep 16 03:01:10 2022 ] Batch(109/162) done. 
Loss: 0.0059 lr:0.001000 network_time: 0.0275 +[ Fri Sep 16 03:01:48 2022 ] Eval epoch: 96 +[ Fri Sep 16 03:03:38 2022 ] Mean test loss of 930 batches: 2.594008207321167. +[ Fri Sep 16 03:03:38 2022 ] Top1: 54.50% +[ Fri Sep 16 03:03:39 2022 ] Top5: 81.09% +[ Fri Sep 16 03:03:39 2022 ] Training epoch: 97 +[ Fri Sep 16 03:04:17 2022 ] Batch(47/162) done. Loss: 0.0162 lr:0.001000 network_time: 0.0313 +[ Fri Sep 16 03:05:29 2022 ] Batch(147/162) done. Loss: 0.0074 lr:0.001000 network_time: 0.0307 +[ Fri Sep 16 03:05:40 2022 ] Eval epoch: 97 +[ Fri Sep 16 03:07:29 2022 ] Mean test loss of 930 batches: 2.56474232673645. +[ Fri Sep 16 03:07:30 2022 ] Top1: 54.66% +[ Fri Sep 16 03:07:30 2022 ] Top5: 81.15% +[ Fri Sep 16 03:07:30 2022 ] Training epoch: 98 +[ Fri Sep 16 03:08:36 2022 ] Batch(85/162) done. Loss: 0.0135 lr:0.001000 network_time: 0.0295 +[ Fri Sep 16 03:09:31 2022 ] Eval epoch: 98 +[ Fri Sep 16 03:11:21 2022 ] Mean test loss of 930 batches: 2.5852057933807373. +[ Fri Sep 16 03:11:21 2022 ] Top1: 54.71% +[ Fri Sep 16 03:11:21 2022 ] Top5: 81.15% +[ Fri Sep 16 03:11:22 2022 ] Training epoch: 99 +[ Fri Sep 16 03:11:42 2022 ] Batch(23/162) done. Loss: 0.0056 lr:0.001000 network_time: 0.0263 +[ Fri Sep 16 03:12:55 2022 ] Batch(123/162) done. Loss: 0.0053 lr:0.001000 network_time: 0.0262 +[ Fri Sep 16 03:13:22 2022 ] Eval epoch: 99 +[ Fri Sep 16 03:15:11 2022 ] Mean test loss of 930 batches: 2.5815749168395996. +[ Fri Sep 16 03:15:12 2022 ] Top1: 54.67% +[ Fri Sep 16 03:15:12 2022 ] Top5: 81.28% +[ Fri Sep 16 03:15:13 2022 ] Training epoch: 100 +[ Fri Sep 16 03:16:00 2022 ] Batch(61/162) done. Loss: 0.0195 lr:0.001000 network_time: 0.0220 +[ Fri Sep 16 03:17:13 2022 ] Batch(161/162) done. Loss: 0.0064 lr:0.001000 network_time: 0.0269 +[ Fri Sep 16 03:17:13 2022 ] Eval epoch: 100 +[ Fri Sep 16 03:19:02 2022 ] Mean test loss of 930 batches: 2.606626510620117. 
+[ Fri Sep 16 03:19:03 2022 ] Top1: 54.28% +[ Fri Sep 16 03:19:03 2022 ] Top5: 80.92% +[ Fri Sep 16 03:19:04 2022 ] Training epoch: 101 +[ Fri Sep 16 03:20:19 2022 ] Batch(99/162) done. Loss: 0.0070 lr:0.000100 network_time: 0.0313 +[ Fri Sep 16 03:21:04 2022 ] Eval epoch: 101 +[ Fri Sep 16 03:22:53 2022 ] Mean test loss of 930 batches: 2.575133800506592. +[ Fri Sep 16 03:22:54 2022 ] Top1: 54.92% +[ Fri Sep 16 03:22:54 2022 ] Top5: 81.40% +[ Fri Sep 16 03:22:54 2022 ] Training epoch: 102 +[ Fri Sep 16 03:23:25 2022 ] Batch(37/162) done. Loss: 0.0085 lr:0.000100 network_time: 0.0315 +[ Fri Sep 16 03:24:37 2022 ] Batch(137/162) done. Loss: 0.0057 lr:0.000100 network_time: 0.0299 +[ Fri Sep 16 03:24:55 2022 ] Eval epoch: 102 +[ Fri Sep 16 03:26:45 2022 ] Mean test loss of 930 batches: 2.5923445224761963. +[ Fri Sep 16 03:26:45 2022 ] Top1: 54.70% +[ Fri Sep 16 03:26:46 2022 ] Top5: 81.13% +[ Fri Sep 16 03:26:46 2022 ] Training epoch: 103 +[ Fri Sep 16 03:27:44 2022 ] Batch(75/162) done. Loss: 0.0136 lr:0.000100 network_time: 0.0268 +[ Fri Sep 16 03:28:47 2022 ] Eval epoch: 103 +[ Fri Sep 16 03:30:36 2022 ] Mean test loss of 930 batches: 2.6140177249908447. +[ Fri Sep 16 03:30:37 2022 ] Top1: 54.28% +[ Fri Sep 16 03:30:37 2022 ] Top5: 81.05% +[ Fri Sep 16 03:30:38 2022 ] Training epoch: 104 +[ Fri Sep 16 03:30:50 2022 ] Batch(13/162) done. Loss: 0.0077 lr:0.000100 network_time: 0.0275 +[ Fri Sep 16 03:32:03 2022 ] Batch(113/162) done. Loss: 0.0058 lr:0.000100 network_time: 0.0257 +[ Fri Sep 16 03:32:38 2022 ] Eval epoch: 104 +[ Fri Sep 16 03:34:27 2022 ] Mean test loss of 930 batches: 2.602022647857666. +[ Fri Sep 16 03:34:27 2022 ] Top1: 54.47% +[ Fri Sep 16 03:34:28 2022 ] Top5: 81.10% +[ Fri Sep 16 03:34:28 2022 ] Training epoch: 105 +[ Fri Sep 16 03:35:09 2022 ] Batch(51/162) done. Loss: 0.0099 lr:0.000100 network_time: 0.0324 +[ Fri Sep 16 03:36:21 2022 ] Batch(151/162) done. 
Loss: 0.0118 lr:0.000100 network_time: 0.0259 +[ Fri Sep 16 03:36:29 2022 ] Eval epoch: 105 +[ Fri Sep 16 03:38:18 2022 ] Mean test loss of 930 batches: 2.5884015560150146. +[ Fri Sep 16 03:38:19 2022 ] Top1: 54.51% +[ Fri Sep 16 03:38:19 2022 ] Top5: 81.17% +[ Fri Sep 16 03:38:19 2022 ] Training epoch: 106 +[ Fri Sep 16 03:39:27 2022 ] Batch(89/162) done. Loss: 0.0040 lr:0.000100 network_time: 0.0231 +[ Fri Sep 16 03:40:20 2022 ] Eval epoch: 106 +[ Fri Sep 16 03:42:09 2022 ] Mean test loss of 930 batches: 2.61933970451355. +[ Fri Sep 16 03:42:09 2022 ] Top1: 54.38% +[ Fri Sep 16 03:42:10 2022 ] Top5: 80.87% +[ Fri Sep 16 03:42:10 2022 ] Training epoch: 107 +[ Fri Sep 16 03:42:33 2022 ] Batch(27/162) done. Loss: 0.0046 lr:0.000100 network_time: 0.0302 +[ Fri Sep 16 03:43:45 2022 ] Batch(127/162) done. Loss: 0.0163 lr:0.000100 network_time: 0.0320 +[ Fri Sep 16 03:44:10 2022 ] Eval epoch: 107 +[ Fri Sep 16 03:45:59 2022 ] Mean test loss of 930 batches: 2.5805156230926514. +[ Fri Sep 16 03:46:00 2022 ] Top1: 54.50% +[ Fri Sep 16 03:46:00 2022 ] Top5: 81.09% +[ Fri Sep 16 03:46:01 2022 ] Training epoch: 108 +[ Fri Sep 16 03:46:51 2022 ] Batch(65/162) done. Loss: 0.0039 lr:0.000100 network_time: 0.0311 +[ Fri Sep 16 03:48:01 2022 ] Eval epoch: 108 +[ Fri Sep 16 03:49:50 2022 ] Mean test loss of 930 batches: 2.622124433517456. +[ Fri Sep 16 03:49:51 2022 ] Top1: 54.37% +[ Fri Sep 16 03:49:51 2022 ] Top5: 81.01% +[ Fri Sep 16 03:49:51 2022 ] Training epoch: 109 +[ Fri Sep 16 03:49:57 2022 ] Batch(3/162) done. Loss: 0.0086 lr:0.000100 network_time: 0.0315 +[ Fri Sep 16 03:51:10 2022 ] Batch(103/162) done. Loss: 0.0456 lr:0.000100 network_time: 0.0265 +[ Fri Sep 16 03:51:52 2022 ] Eval epoch: 109 +[ Fri Sep 16 03:53:41 2022 ] Mean test loss of 930 batches: 2.5847461223602295. +[ Fri Sep 16 03:53:42 2022 ] Top1: 54.65% +[ Fri Sep 16 03:53:42 2022 ] Top5: 81.22% +[ Fri Sep 16 03:53:42 2022 ] Training epoch: 110 +[ Fri Sep 16 03:54:16 2022 ] Batch(41/162) done. 
Loss: 0.0170 lr:0.000100 network_time: 0.0301 +[ Fri Sep 16 03:55:28 2022 ] Batch(141/162) done. Loss: 0.0042 lr:0.000100 network_time: 0.0270 +[ Fri Sep 16 03:55:43 2022 ] Eval epoch: 110 +[ Fri Sep 16 03:57:32 2022 ] Mean test loss of 930 batches: 2.6004409790039062. +[ Fri Sep 16 03:57:33 2022 ] Top1: 54.47% +[ Fri Sep 16 03:57:33 2022 ] Top5: 80.99% +[ Fri Sep 16 03:57:33 2022 ] Training epoch: 111 +[ Fri Sep 16 03:58:34 2022 ] Batch(79/162) done. Loss: 0.0052 lr:0.000100 network_time: 0.0327 +[ Fri Sep 16 03:59:34 2022 ] Eval epoch: 111 +[ Fri Sep 16 04:01:24 2022 ] Mean test loss of 930 batches: 2.621633291244507. +[ Fri Sep 16 04:01:24 2022 ] Top1: 54.11% +[ Fri Sep 16 04:01:25 2022 ] Top5: 80.90% +[ Fri Sep 16 04:01:25 2022 ] Training epoch: 112 +[ Fri Sep 16 04:01:41 2022 ] Batch(17/162) done. Loss: 0.0093 lr:0.000100 network_time: 0.0266 +[ Fri Sep 16 04:02:54 2022 ] Batch(117/162) done. Loss: 0.0104 lr:0.000100 network_time: 0.0268 +[ Fri Sep 16 04:03:26 2022 ] Eval epoch: 112 +[ Fri Sep 16 04:05:15 2022 ] Mean test loss of 930 batches: 2.5725698471069336. +[ Fri Sep 16 04:05:15 2022 ] Top1: 54.85% +[ Fri Sep 16 04:05:16 2022 ] Top5: 81.15% +[ Fri Sep 16 04:05:16 2022 ] Training epoch: 113 +[ Fri Sep 16 04:05:59 2022 ] Batch(55/162) done. Loss: 0.0106 lr:0.000100 network_time: 0.0316 +[ Fri Sep 16 04:07:12 2022 ] Batch(155/162) done. Loss: 0.0043 lr:0.000100 network_time: 0.0264 +[ Fri Sep 16 04:07:16 2022 ] Eval epoch: 113 +[ Fri Sep 16 04:09:06 2022 ] Mean test loss of 930 batches: 2.610018730163574. +[ Fri Sep 16 04:09:06 2022 ] Top1: 54.21% +[ Fri Sep 16 04:09:06 2022 ] Top5: 80.90% +[ Fri Sep 16 04:09:07 2022 ] Training epoch: 114 +[ Fri Sep 16 04:10:18 2022 ] Batch(93/162) done. Loss: 0.0059 lr:0.000100 network_time: 0.0265 +[ Fri Sep 16 04:11:07 2022 ] Eval epoch: 114 +[ Fri Sep 16 04:12:57 2022 ] Mean test loss of 930 batches: 2.5909993648529053. 
+[ Fri Sep 16 04:12:57 2022 ] Top1: 54.70% +[ Fri Sep 16 04:12:58 2022 ] Top5: 81.17% +[ Fri Sep 16 04:12:58 2022 ] Training epoch: 115 +[ Fri Sep 16 04:13:24 2022 ] Batch(31/162) done. Loss: 0.0038 lr:0.000100 network_time: 0.0312 +[ Fri Sep 16 04:14:37 2022 ] Batch(131/162) done. Loss: 0.0082 lr:0.000100 network_time: 0.0268 +[ Fri Sep 16 04:14:59 2022 ] Eval epoch: 115 +[ Fri Sep 16 04:16:48 2022 ] Mean test loss of 930 batches: 2.6113758087158203. +[ Fri Sep 16 04:16:48 2022 ] Top1: 54.65% +[ Fri Sep 16 04:16:49 2022 ] Top5: 80.93% +[ Fri Sep 16 04:16:49 2022 ] Training epoch: 116 +[ Fri Sep 16 04:17:42 2022 ] Batch(69/162) done. Loss: 0.0166 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 04:18:49 2022 ] Eval epoch: 116 +[ Fri Sep 16 04:20:38 2022 ] Mean test loss of 930 batches: 2.5807549953460693. +[ Fri Sep 16 04:20:39 2022 ] Top1: 54.80% +[ Fri Sep 16 04:20:39 2022 ] Top5: 81.31% +[ Fri Sep 16 04:20:39 2022 ] Training epoch: 117 +[ Fri Sep 16 04:20:48 2022 ] Batch(7/162) done. Loss: 0.0119 lr:0.000100 network_time: 0.0555 +[ Fri Sep 16 04:22:01 2022 ] Batch(107/162) done. Loss: 0.0034 lr:0.000100 network_time: 0.0301 +[ Fri Sep 16 04:22:40 2022 ] Eval epoch: 117 +[ Fri Sep 16 04:24:29 2022 ] Mean test loss of 930 batches: 2.5913569927215576. +[ Fri Sep 16 04:24:30 2022 ] Top1: 54.78% +[ Fri Sep 16 04:24:30 2022 ] Top5: 81.19% +[ Fri Sep 16 04:24:30 2022 ] Training epoch: 118 +[ Fri Sep 16 04:25:07 2022 ] Batch(45/162) done. Loss: 0.0136 lr:0.000100 network_time: 0.0284 +[ Fri Sep 16 04:26:19 2022 ] Batch(145/162) done. Loss: 0.0099 lr:0.000100 network_time: 0.0270 +[ Fri Sep 16 04:26:31 2022 ] Eval epoch: 118 +[ Fri Sep 16 04:28:20 2022 ] Mean test loss of 930 batches: 2.5927035808563232. +[ Fri Sep 16 04:28:21 2022 ] Top1: 54.50% +[ Fri Sep 16 04:28:21 2022 ] Top5: 81.17% +[ Fri Sep 16 04:28:21 2022 ] Training epoch: 119 +[ Fri Sep 16 04:29:25 2022 ] Batch(83/162) done. 
Loss: 0.0105 lr:0.000100 network_time: 0.0313 +[ Fri Sep 16 04:30:22 2022 ] Eval epoch: 119 +[ Fri Sep 16 04:32:12 2022 ] Mean test loss of 930 batches: 2.582989454269409. +[ Fri Sep 16 04:32:12 2022 ] Top1: 54.89% +[ Fri Sep 16 04:32:12 2022 ] Top5: 81.16% +[ Fri Sep 16 04:32:13 2022 ] Training epoch: 120 +[ Fri Sep 16 04:32:32 2022 ] Batch(21/162) done. Loss: 0.0411 lr:0.000100 network_time: 0.0279 +[ Fri Sep 16 04:33:44 2022 ] Batch(121/162) done. Loss: 0.0056 lr:0.000100 network_time: 0.0327 +[ Fri Sep 16 04:34:13 2022 ] Eval epoch: 120 +[ Fri Sep 16 04:36:03 2022 ] Mean test loss of 930 batches: 2.5880775451660156. +[ Fri Sep 16 04:36:03 2022 ] Top1: 54.77% +[ Fri Sep 16 04:36:04 2022 ] Top5: 81.30% +[ Fri Sep 16 04:36:04 2022 ] Training epoch: 121 +[ Fri Sep 16 04:36:50 2022 ] Batch(59/162) done. Loss: 0.0113 lr:0.000100 network_time: 0.0280 +[ Fri Sep 16 04:38:03 2022 ] Batch(159/162) done. Loss: 0.0105 lr:0.000100 network_time: 0.0266 +[ Fri Sep 16 04:38:04 2022 ] Eval epoch: 121 +[ Fri Sep 16 04:39:53 2022 ] Mean test loss of 930 batches: 2.565481185913086. +[ Fri Sep 16 04:39:54 2022 ] Top1: 54.65% +[ Fri Sep 16 04:39:54 2022 ] Top5: 81.13% +[ Fri Sep 16 04:39:55 2022 ] Training epoch: 122 +[ Fri Sep 16 04:41:08 2022 ] Batch(97/162) done. Loss: 0.0066 lr:0.000100 network_time: 0.0375 +[ Fri Sep 16 04:41:55 2022 ] Eval epoch: 122 +[ Fri Sep 16 04:43:44 2022 ] Mean test loss of 930 batches: 2.5854990482330322. +[ Fri Sep 16 04:43:44 2022 ] Top1: 54.53% +[ Fri Sep 16 04:43:45 2022 ] Top5: 81.14% +[ Fri Sep 16 04:43:45 2022 ] Training epoch: 123 +[ Fri Sep 16 04:44:14 2022 ] Batch(35/162) done. Loss: 0.0107 lr:0.000100 network_time: 0.0266 +[ Fri Sep 16 04:45:27 2022 ] Batch(135/162) done. Loss: 0.0059 lr:0.000100 network_time: 0.0329 +[ Fri Sep 16 04:45:46 2022 ] Eval epoch: 123 +[ Fri Sep 16 04:47:35 2022 ] Mean test loss of 930 batches: 2.5903828144073486. 
+[ Fri Sep 16 04:47:35 2022 ] Top1: 54.24% +[ Fri Sep 16 04:47:36 2022 ] Top5: 80.99% +[ Fri Sep 16 04:47:36 2022 ] Training epoch: 124 +[ Fri Sep 16 04:48:32 2022 ] Batch(73/162) done. Loss: 0.0074 lr:0.000100 network_time: 0.0268 +[ Fri Sep 16 04:49:36 2022 ] Eval epoch: 124 +[ Fri Sep 16 04:51:26 2022 ] Mean test loss of 930 batches: 2.5589847564697266. +[ Fri Sep 16 04:51:26 2022 ] Top1: 55.00% +[ Fri Sep 16 04:51:26 2022 ] Top5: 81.28% +[ Fri Sep 16 04:51:27 2022 ] Training epoch: 125 +[ Fri Sep 16 04:51:38 2022 ] Batch(11/162) done. Loss: 0.0028 lr:0.000100 network_time: 0.0296 +[ Fri Sep 16 04:52:51 2022 ] Batch(111/162) done. Loss: 0.0088 lr:0.000100 network_time: 0.0270 +[ Fri Sep 16 04:53:27 2022 ] Eval epoch: 125 +[ Fri Sep 16 04:55:16 2022 ] Mean test loss of 930 batches: 2.583209991455078. +[ Fri Sep 16 04:55:16 2022 ] Top1: 54.80% +[ Fri Sep 16 04:55:17 2022 ] Top5: 81.28% +[ Fri Sep 16 04:55:17 2022 ] Training epoch: 126 +[ Fri Sep 16 04:55:56 2022 ] Batch(49/162) done. Loss: 0.0033 lr:0.000100 network_time: 0.0266 +[ Fri Sep 16 04:57:08 2022 ] Batch(149/162) done. Loss: 0.0096 lr:0.000100 network_time: 0.0266 +[ Fri Sep 16 04:57:17 2022 ] Eval epoch: 126 +[ Fri Sep 16 04:59:07 2022 ] Mean test loss of 930 batches: 2.584449052810669. +[ Fri Sep 16 04:59:08 2022 ] Top1: 54.66% +[ Fri Sep 16 04:59:08 2022 ] Top5: 81.07% +[ Fri Sep 16 04:59:08 2022 ] Training epoch: 127 +[ Fri Sep 16 05:00:15 2022 ] Batch(87/162) done. Loss: 0.0107 lr:0.000100 network_time: 0.0302 +[ Fri Sep 16 05:01:08 2022 ] Eval epoch: 127 +[ Fri Sep 16 05:02:58 2022 ] Mean test loss of 930 batches: 2.5924336910247803. +[ Fri Sep 16 05:02:58 2022 ] Top1: 54.68% +[ Fri Sep 16 05:02:58 2022 ] Top5: 81.16% +[ Fri Sep 16 05:02:59 2022 ] Training epoch: 128 +[ Fri Sep 16 05:03:21 2022 ] Batch(25/162) done. Loss: 0.0049 lr:0.000100 network_time: 0.0275 +[ Fri Sep 16 05:04:33 2022 ] Batch(125/162) done. 
Loss: 0.0092 lr:0.000100 network_time: 0.0316 +[ Fri Sep 16 05:04:59 2022 ] Eval epoch: 128 +[ Fri Sep 16 05:06:48 2022 ] Mean test loss of 930 batches: 2.6165969371795654. +[ Fri Sep 16 05:06:49 2022 ] Top1: 54.43% +[ Fri Sep 16 05:06:49 2022 ] Top5: 80.89% +[ Fri Sep 16 05:06:49 2022 ] Training epoch: 129 +[ Fri Sep 16 05:07:39 2022 ] Batch(63/162) done. Loss: 0.0040 lr:0.000100 network_time: 0.0267 +[ Fri Sep 16 05:08:50 2022 ] Eval epoch: 129 +[ Fri Sep 16 05:10:39 2022 ] Mean test loss of 930 batches: 2.5882484912872314. +[ Fri Sep 16 05:10:39 2022 ] Top1: 54.59% +[ Fri Sep 16 05:10:39 2022 ] Top5: 80.97% +[ Fri Sep 16 05:10:40 2022 ] Training epoch: 130 +[ Fri Sep 16 05:10:44 2022 ] Batch(1/162) done. Loss: 0.0038 lr:0.000100 network_time: 0.0279 +[ Fri Sep 16 05:11:56 2022 ] Batch(101/162) done. Loss: 0.0132 lr:0.000100 network_time: 0.0285 +[ Fri Sep 16 05:12:40 2022 ] Eval epoch: 130 +[ Fri Sep 16 05:14:29 2022 ] Mean test loss of 930 batches: 2.577380657196045. +[ Fri Sep 16 05:14:30 2022 ] Top1: 54.80% +[ Fri Sep 16 05:14:30 2022 ] Top5: 81.19% +[ Fri Sep 16 05:14:30 2022 ] Training epoch: 131 +[ Fri Sep 16 05:15:02 2022 ] Batch(39/162) done. Loss: 0.0056 lr:0.000100 network_time: 0.0264 +[ Fri Sep 16 05:16:15 2022 ] Batch(139/162) done. Loss: 0.0055 lr:0.000100 network_time: 0.0304 +[ Fri Sep 16 05:16:31 2022 ] Eval epoch: 131 +[ Fri Sep 16 05:18:19 2022 ] Mean test loss of 930 batches: 2.5679023265838623. +[ Fri Sep 16 05:18:20 2022 ] Top1: 54.80% +[ Fri Sep 16 05:18:20 2022 ] Top5: 81.27% +[ Fri Sep 16 05:18:21 2022 ] Training epoch: 132 +[ Fri Sep 16 05:19:20 2022 ] Batch(77/162) done. Loss: 0.0100 lr:0.000100 network_time: 0.0267 +[ Fri Sep 16 05:20:21 2022 ] Eval epoch: 132 +[ Fri Sep 16 05:22:10 2022 ] Mean test loss of 930 batches: 2.5951454639434814. +[ Fri Sep 16 05:22:10 2022 ] Top1: 54.70% +[ Fri Sep 16 05:22:11 2022 ] Top5: 81.24% +[ Fri Sep 16 05:22:11 2022 ] Training epoch: 133 +[ Fri Sep 16 05:22:26 2022 ] Batch(15/162) done. 
Loss: 0.0056 lr:0.000100 network_time: 0.0269 +[ Fri Sep 16 05:23:38 2022 ] Batch(115/162) done. Loss: 0.0043 lr:0.000100 network_time: 0.0427 +[ Fri Sep 16 05:24:12 2022 ] Eval epoch: 133 +[ Fri Sep 16 05:26:00 2022 ] Mean test loss of 930 batches: 2.583064556121826. +[ Fri Sep 16 05:26:01 2022 ] Top1: 54.66% +[ Fri Sep 16 05:26:01 2022 ] Top5: 81.21% +[ Fri Sep 16 05:26:02 2022 ] Training epoch: 134 +[ Fri Sep 16 05:26:43 2022 ] Batch(53/162) done. Loss: 0.0039 lr:0.000100 network_time: 0.0267 +[ Fri Sep 16 05:27:56 2022 ] Batch(153/162) done. Loss: 0.0109 lr:0.000100 network_time: 0.0470 +[ Fri Sep 16 05:28:02 2022 ] Eval epoch: 134 +[ Fri Sep 16 05:29:51 2022 ] Mean test loss of 930 batches: 2.6534018516540527. +[ Fri Sep 16 05:29:52 2022 ] Top1: 53.97% +[ Fri Sep 16 05:29:52 2022 ] Top5: 80.83% +[ Fri Sep 16 05:29:52 2022 ] Training epoch: 135 +[ Fri Sep 16 05:31:02 2022 ] Batch(91/162) done. Loss: 0.0041 lr:0.000100 network_time: 0.0265 +[ Fri Sep 16 05:31:53 2022 ] Eval epoch: 135 +[ Fri Sep 16 05:33:42 2022 ] Mean test loss of 930 batches: 2.5870189666748047. +[ Fri Sep 16 05:33:42 2022 ] Top1: 54.72% +[ Fri Sep 16 05:33:43 2022 ] Top5: 81.20% +[ Fri Sep 16 05:33:43 2022 ] Training epoch: 136 +[ Fri Sep 16 05:34:08 2022 ] Batch(29/162) done. Loss: 0.0045 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 05:35:20 2022 ] Batch(129/162) done. Loss: 0.0047 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 05:35:44 2022 ] Eval epoch: 136 +[ Fri Sep 16 05:37:33 2022 ] Mean test loss of 930 batches: 2.5728559494018555. +[ Fri Sep 16 05:37:33 2022 ] Top1: 54.68% +[ Fri Sep 16 05:37:33 2022 ] Top5: 81.26% +[ Fri Sep 16 05:37:34 2022 ] Training epoch: 137 +[ Fri Sep 16 05:38:26 2022 ] Batch(67/162) done. Loss: 0.0056 lr:0.000100 network_time: 0.0306 +[ Fri Sep 16 05:39:35 2022 ] Eval epoch: 137 +[ Fri Sep 16 05:41:24 2022 ] Mean test loss of 930 batches: 2.589679002761841. 
+[ Fri Sep 16 05:41:24 2022 ] Top1: 54.66% +[ Fri Sep 16 05:41:25 2022 ] Top5: 81.09% +[ Fri Sep 16 05:41:25 2022 ] Training epoch: 138 +[ Fri Sep 16 05:41:33 2022 ] Batch(5/162) done. Loss: 0.0089 lr:0.000100 network_time: 0.0311 +[ Fri Sep 16 05:42:45 2022 ] Batch(105/162) done. Loss: 0.0056 lr:0.000100 network_time: 0.0315 +[ Fri Sep 16 05:43:26 2022 ] Eval epoch: 138 +[ Fri Sep 16 05:45:15 2022 ] Mean test loss of 930 batches: 2.5958192348480225. +[ Fri Sep 16 05:45:16 2022 ] Top1: 54.56% +[ Fri Sep 16 05:45:16 2022 ] Top5: 81.23% +[ Fri Sep 16 05:45:16 2022 ] Training epoch: 139 +[ Fri Sep 16 05:45:52 2022 ] Batch(43/162) done. Loss: 0.0162 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 05:47:04 2022 ] Batch(143/162) done. Loss: 0.0039 lr:0.000100 network_time: 0.0267 +[ Fri Sep 16 05:47:18 2022 ] Eval epoch: 139 +[ Fri Sep 16 05:49:07 2022 ] Mean test loss of 930 batches: 2.6090312004089355. +[ Fri Sep 16 05:49:07 2022 ] Top1: 54.48% +[ Fri Sep 16 05:49:08 2022 ] Top5: 81.00% +[ Fri Sep 16 05:49:08 2022 ] Training epoch: 140 +[ Fri Sep 16 05:50:11 2022 ] Batch(81/162) done. Loss: 0.0042 lr:0.000100 network_time: 0.0277 +[ Fri Sep 16 05:51:09 2022 ] Eval epoch: 140 +[ Fri Sep 16 05:52:58 2022 ] Mean test loss of 930 batches: 2.596174716949463. 
+[ Fri Sep 16 05:52:59 2022 ] Top1: 54.44% +[ Fri Sep 16 05:52:59 2022 ] Top5: 81.09% diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_xset/shift_gcn.py b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_xset/shift_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..0731e82fdb8bd0ae2e4c4ef4f29f7aab0c458bf4 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_bone_xset/shift_gcn.py @@ -0,0 +1,216 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math + +import sys +sys.path.append("./model/Temporal_shift/") + +from cuda.shift import Shift + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class Shift_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(Shift_tcn, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.bn = nn.BatchNorm2d(in_channels) + self.bn2 = nn.BatchNorm2d(in_channels) + bn_init(self.bn2, 1) + self.relu = nn.ReLU(inplace=True) + self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1) + self.shift_out = Shift(channel=out_channels, stride=stride, 
init_scale=1) + + self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1) + nn.init.kaiming_normal(self.temporal_linear.weight, mode='fan_out') + + def forward(self, x): + x = self.bn(x) + # shift1 + x = self.shift_in(x) + x = self.temporal_linear(x) + x = self.relu(x) + # shift2 + x = self.shift_out(x) + x = self.bn2(x) + return x + + +class Shift_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3): + super(Shift_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0,math.sqrt(1.0/out_channels)) + + self.Linear_bias = nn.Parameter(torch.zeros(1,1,out_channels,requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Linear_bias, 0) + + self.Feature_Mask = nn.Parameter(torch.ones(1,25,in_channels, requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Feature_Mask, 0) + + self.bn = nn.BatchNorm1d(25*out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + + index_array = np.empty(25*in_channels).astype(np.int) + for i in range(25): + for j in range(in_channels): + index_array[i*in_channels + j] = (i*in_channels + j + j*in_channels)%(in_channels*25) + self.shift_in = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + index_array = np.empty(25*out_channels).astype(np.int) + for i in range(25): + for j in range(out_channels): + index_array[i*out_channels + j] = (i*out_channels + j - j*out_channels)%(out_channels*25) + self.shift_out = 
nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + + def forward(self, x0): + n, c, t, v = x0.size() + x = x0.permute(0,2,3,1).contiguous() + + # shift1 + x = x.view(n*t,v*c) + x = torch.index_select(x, 1, self.shift_in) + x = x.view(n*t,v,c) + x = x * (torch.tanh(self.Feature_Mask)+1) + + x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous() # nt,v,c + x = x + self.Linear_bias + + # shift2 + x = x.view(n*t,-1) + x = torch.index_select(x, 1, self.shift_out) + x = self.bn(x) + x = x.view(n,t,v,self.out_channels).permute(0,3,1,2) # n,c,t,v + + x = x + self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = Shift_gcn(in_channels, out_channels, A) + self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride) + self.relu = nn.ReLU() + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + else: + self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride) + + def forward(self, x): + x = self.tcn1(self.gcn1(x)) + self.residual(x) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A) + self.l3 = TCN_GCN_unit(64, 64, A) + self.l4 = TCN_GCN_unit(64, 64, A) + self.l5 = TCN_GCN_unit(64, 128, A, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A) + self.l7 = TCN_GCN_unit(128, 128, A) + self.l8 = TCN_GCN_unit(128, 256, A, stride=2) + self.l9 = TCN_GCN_unit(256, 256, 
A) + self.l10 = TCN_GCN_unit(256, 256, A) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x) + x = self.l2(x) + x = self.l3(x) + x = self.l4(x) + x = self.l5(x) + x = self.l6(x) + x = self.l7(x) + x = self.l8(x) + x = self.l9(x) + x = self.l10(x) + + # N*M,C,T,V + c_new = x.size(1) + x = x.view(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_motion_xset/config.yaml b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_motion_xset/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c958ccdf355f35cb2b65cfe17ddc47d89a466eb9 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_motion_xset/config.yaml @@ -0,0 +1,56 @@ +Experiment_name: ntu120_joint_motion_xset +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xset/train_joint_motion.yaml +device: +- 6 +- 7 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_joint_motion_xset +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_joint_motion.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl 
+train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_joint_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_joint_motion_xset diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_motion_xset/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_motion_xset/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..b63f3743a5cfb40125948f72ad5e6e2c974d96fa --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_motion_xset/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58c173a511eb08702adf595bc87ee42b1d1116f33936bf7c6b79babd070abd96 +size 34946665 diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_motion_xset/log.txt b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_motion_xset/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..6db5d646b0be592af3444687f301e9377ab5a3c0 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_motion_xset/log.txt @@ -0,0 +1,929 @@ +[ Thu Sep 15 20:53:29 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_joint_motion_xset', 'model_saved_name': './save_models/ntu120_joint_motion_xset', 'Experiment_name': 'ntu120_joint_motion_xset', 'config': './config/ntu120_xset/train_joint_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_joint_motion.npy', 'label_path': 
'/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [6, 7], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Thu Sep 15 20:53:29 2022 ] Training epoch: 1 +[ Thu Sep 15 20:54:48 2022 ] Batch(99/162) done. Loss: 2.8867 lr:0.100000 network_time: 0.0295 +[ Thu Sep 15 20:55:33 2022 ] Eval epoch: 1 +[ Thu Sep 15 20:57:27 2022 ] Mean test loss of 930 batches: 5.0145583152771. +[ Thu Sep 15 20:57:27 2022 ] Top1: 12.30% +[ Thu Sep 15 20:57:27 2022 ] Top5: 33.00% +[ Thu Sep 15 20:57:28 2022 ] Training epoch: 2 +[ Thu Sep 15 20:57:58 2022 ] Batch(37/162) done. Loss: 1.8791 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 20:59:11 2022 ] Batch(137/162) done. Loss: 1.9913 lr:0.100000 network_time: 0.0547 +[ Thu Sep 15 20:59:29 2022 ] Eval epoch: 2 +[ Thu Sep 15 21:01:19 2022 ] Mean test loss of 930 batches: 5.094688892364502. +[ Thu Sep 15 21:01:20 2022 ] Top1: 15.49% +[ Thu Sep 15 21:01:20 2022 ] Top5: 39.50% +[ Thu Sep 15 21:01:20 2022 ] Training epoch: 3 +[ Thu Sep 15 21:02:19 2022 ] Batch(75/162) done. Loss: 1.8336 lr:0.100000 network_time: 0.0302 +[ Thu Sep 15 21:03:22 2022 ] Eval epoch: 3 +[ Thu Sep 15 21:05:11 2022 ] Mean test loss of 930 batches: 3.7378287315368652. 
+[ Thu Sep 15 21:05:12 2022 ] Top1: 26.10% +[ Thu Sep 15 21:05:12 2022 ] Top5: 53.90% +[ Thu Sep 15 21:05:12 2022 ] Training epoch: 4 +[ Thu Sep 15 21:05:25 2022 ] Batch(13/162) done. Loss: 1.5423 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 21:06:38 2022 ] Batch(113/162) done. Loss: 1.6592 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 21:07:13 2022 ] Eval epoch: 4 +[ Thu Sep 15 21:09:04 2022 ] Mean test loss of 930 batches: 3.334031581878662. +[ Thu Sep 15 21:09:04 2022 ] Top1: 28.04% +[ Thu Sep 15 21:09:05 2022 ] Top5: 55.90% +[ Thu Sep 15 21:09:05 2022 ] Training epoch: 5 +[ Thu Sep 15 21:09:46 2022 ] Batch(51/162) done. Loss: 1.5313 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 21:10:58 2022 ] Batch(151/162) done. Loss: 1.5609 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 21:11:06 2022 ] Eval epoch: 5 +[ Thu Sep 15 21:12:56 2022 ] Mean test loss of 930 batches: 3.3112664222717285. +[ Thu Sep 15 21:12:56 2022 ] Top1: 31.94% +[ Thu Sep 15 21:12:57 2022 ] Top5: 61.28% +[ Thu Sep 15 21:12:57 2022 ] Training epoch: 6 +[ Thu Sep 15 21:14:05 2022 ] Batch(89/162) done. Loss: 1.2561 lr:0.100000 network_time: 0.0255 +[ Thu Sep 15 21:14:58 2022 ] Eval epoch: 6 +[ Thu Sep 15 21:16:47 2022 ] Mean test loss of 930 batches: 3.7558095455169678. +[ Thu Sep 15 21:16:48 2022 ] Top1: 29.97% +[ Thu Sep 15 21:16:48 2022 ] Top5: 60.91% +[ Thu Sep 15 21:16:48 2022 ] Training epoch: 7 +[ Thu Sep 15 21:17:12 2022 ] Batch(27/162) done. Loss: 1.4390 lr:0.100000 network_time: 0.0517 +[ Thu Sep 15 21:18:24 2022 ] Batch(127/162) done. Loss: 0.6995 lr:0.100000 network_time: 0.0254 +[ Thu Sep 15 21:18:49 2022 ] Eval epoch: 7 +[ Thu Sep 15 21:20:39 2022 ] Mean test loss of 930 batches: 3.5645387172698975. +[ Thu Sep 15 21:20:39 2022 ] Top1: 34.81% +[ Thu Sep 15 21:20:40 2022 ] Top5: 65.19% +[ Thu Sep 15 21:20:40 2022 ] Training epoch: 8 +[ Thu Sep 15 21:21:31 2022 ] Batch(65/162) done. 
Loss: 1.2151 lr:0.100000 network_time: 0.0459 +[ Thu Sep 15 21:22:41 2022 ] Eval epoch: 8 +[ Thu Sep 15 21:24:32 2022 ] Mean test loss of 930 batches: 3.345634937286377. +[ Thu Sep 15 21:24:32 2022 ] Top1: 37.25% +[ Thu Sep 15 21:24:33 2022 ] Top5: 66.65% +[ Thu Sep 15 21:24:33 2022 ] Training epoch: 9 +[ Thu Sep 15 21:24:39 2022 ] Batch(3/162) done. Loss: 0.8029 lr:0.100000 network_time: 0.0305 +[ Thu Sep 15 21:25:51 2022 ] Batch(103/162) done. Loss: 1.1043 lr:0.100000 network_time: 0.0442 +[ Thu Sep 15 21:26:34 2022 ] Eval epoch: 9 +[ Thu Sep 15 21:28:23 2022 ] Mean test loss of 930 batches: 3.5088706016540527. +[ Thu Sep 15 21:28:24 2022 ] Top1: 30.93% +[ Thu Sep 15 21:28:24 2022 ] Top5: 61.77% +[ Thu Sep 15 21:28:25 2022 ] Training epoch: 10 +[ Thu Sep 15 21:28:58 2022 ] Batch(41/162) done. Loss: 0.9017 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 21:30:11 2022 ] Batch(141/162) done. Loss: 1.0810 lr:0.100000 network_time: 0.0301 +[ Thu Sep 15 21:30:26 2022 ] Eval epoch: 10 +[ Thu Sep 15 21:32:16 2022 ] Mean test loss of 930 batches: 3.7960636615753174. +[ Thu Sep 15 21:32:16 2022 ] Top1: 37.34% +[ Thu Sep 15 21:32:17 2022 ] Top5: 68.48% +[ Thu Sep 15 21:32:17 2022 ] Training epoch: 11 +[ Thu Sep 15 21:33:18 2022 ] Batch(79/162) done. Loss: 0.8720 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 21:34:18 2022 ] Eval epoch: 11 +[ Thu Sep 15 21:36:08 2022 ] Mean test loss of 930 batches: 3.196443796157837. +[ Thu Sep 15 21:36:09 2022 ] Top1: 37.54% +[ Thu Sep 15 21:36:09 2022 ] Top5: 66.85% +[ Thu Sep 15 21:36:09 2022 ] Training epoch: 12 +[ Thu Sep 15 21:36:26 2022 ] Batch(17/162) done. Loss: 0.8167 lr:0.100000 network_time: 0.0310 +[ Thu Sep 15 21:37:38 2022 ] Batch(117/162) done. Loss: 1.1508 lr:0.100000 network_time: 0.0308 +[ Thu Sep 15 21:38:10 2022 ] Eval epoch: 12 +[ Thu Sep 15 21:40:00 2022 ] Mean test loss of 930 batches: 3.6423709392547607. 
+[ Thu Sep 15 21:40:01 2022 ] Top1: 38.01% +[ Thu Sep 15 21:40:01 2022 ] Top5: 68.47% +[ Thu Sep 15 21:40:01 2022 ] Training epoch: 13 +[ Thu Sep 15 21:40:45 2022 ] Batch(55/162) done. Loss: 0.6677 lr:0.100000 network_time: 0.0280 +[ Thu Sep 15 21:41:58 2022 ] Batch(155/162) done. Loss: 0.8657 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 21:42:02 2022 ] Eval epoch: 13 +[ Thu Sep 15 21:43:52 2022 ] Mean test loss of 930 batches: 2.7829337120056152. +[ Thu Sep 15 21:43:53 2022 ] Top1: 43.47% +[ Thu Sep 15 21:43:53 2022 ] Top5: 75.32% +[ Thu Sep 15 21:43:53 2022 ] Training epoch: 14 +[ Thu Sep 15 21:45:05 2022 ] Batch(93/162) done. Loss: 0.9714 lr:0.100000 network_time: 0.0326 +[ Thu Sep 15 21:45:54 2022 ] Eval epoch: 14 +[ Thu Sep 15 21:47:44 2022 ] Mean test loss of 930 batches: 2.5059609413146973. +[ Thu Sep 15 21:47:44 2022 ] Top1: 45.03% +[ Thu Sep 15 21:47:45 2022 ] Top5: 74.04% +[ Thu Sep 15 21:47:45 2022 ] Training epoch: 15 +[ Thu Sep 15 21:48:11 2022 ] Batch(31/162) done. Loss: 0.5980 lr:0.100000 network_time: 0.0296 +[ Thu Sep 15 21:49:24 2022 ] Batch(131/162) done. Loss: 0.6171 lr:0.100000 network_time: 0.0279 +[ Thu Sep 15 21:49:46 2022 ] Eval epoch: 15 +[ Thu Sep 15 21:51:37 2022 ] Mean test loss of 930 batches: 3.646928310394287. +[ Thu Sep 15 21:51:37 2022 ] Top1: 37.05% +[ Thu Sep 15 21:51:38 2022 ] Top5: 67.63% +[ Thu Sep 15 21:51:38 2022 ] Training epoch: 16 +[ Thu Sep 15 21:52:32 2022 ] Batch(69/162) done. Loss: 0.7645 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 21:53:39 2022 ] Eval epoch: 16 +[ Thu Sep 15 21:55:29 2022 ] Mean test loss of 930 batches: 3.679865837097168. +[ Thu Sep 15 21:55:29 2022 ] Top1: 42.20% +[ Thu Sep 15 21:55:30 2022 ] Top5: 71.60% +[ Thu Sep 15 21:55:30 2022 ] Training epoch: 17 +[ Thu Sep 15 21:55:39 2022 ] Batch(7/162) done. Loss: 0.8176 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 21:56:52 2022 ] Batch(107/162) done. 
Loss: 0.8370 lr:0.100000 network_time: 0.0282 +[ Thu Sep 15 21:57:31 2022 ] Eval epoch: 17 +[ Thu Sep 15 21:59:21 2022 ] Mean test loss of 930 batches: 3.4497668743133545. +[ Thu Sep 15 21:59:21 2022 ] Top1: 42.82% +[ Thu Sep 15 21:59:22 2022 ] Top5: 72.71% +[ Thu Sep 15 21:59:22 2022 ] Training epoch: 18 +[ Thu Sep 15 21:59:58 2022 ] Batch(45/162) done. Loss: 0.6977 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 22:01:11 2022 ] Batch(145/162) done. Loss: 0.4468 lr:0.100000 network_time: 0.0284 +[ Thu Sep 15 22:01:23 2022 ] Eval epoch: 18 +[ Thu Sep 15 22:03:13 2022 ] Mean test loss of 930 batches: 3.5319061279296875. +[ Thu Sep 15 22:03:13 2022 ] Top1: 37.75% +[ Thu Sep 15 22:03:13 2022 ] Top5: 69.26% +[ Thu Sep 15 22:03:14 2022 ] Training epoch: 19 +[ Thu Sep 15 22:04:18 2022 ] Batch(83/162) done. Loss: 0.2785 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 22:05:15 2022 ] Eval epoch: 19 +[ Thu Sep 15 22:07:05 2022 ] Mean test loss of 930 batches: 2.7543692588806152. +[ Thu Sep 15 22:07:05 2022 ] Top1: 45.21% +[ Thu Sep 15 22:07:06 2022 ] Top5: 76.62% +[ Thu Sep 15 22:07:06 2022 ] Training epoch: 20 +[ Thu Sep 15 22:07:25 2022 ] Batch(21/162) done. Loss: 0.7290 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 22:08:38 2022 ] Batch(121/162) done. Loss: 0.6042 lr:0.100000 network_time: 0.0347 +[ Thu Sep 15 22:09:07 2022 ] Eval epoch: 20 +[ Thu Sep 15 22:10:57 2022 ] Mean test loss of 930 batches: 3.4957778453826904. +[ Thu Sep 15 22:10:57 2022 ] Top1: 38.53% +[ Thu Sep 15 22:10:58 2022 ] Top5: 70.23% +[ Thu Sep 15 22:10:58 2022 ] Training epoch: 21 +[ Thu Sep 15 22:11:45 2022 ] Batch(59/162) done. Loss: 0.5559 lr:0.100000 network_time: 0.0251 +[ Thu Sep 15 22:12:58 2022 ] Batch(159/162) done. Loss: 0.7466 lr:0.100000 network_time: 0.0319 +[ Thu Sep 15 22:12:59 2022 ] Eval epoch: 21 +[ Thu Sep 15 22:14:48 2022 ] Mean test loss of 930 batches: 2.8289051055908203. 
+[ Thu Sep 15 22:14:49 2022 ] Top1: 45.09% +[ Thu Sep 15 22:14:49 2022 ] Top5: 74.70% +[ Thu Sep 15 22:14:50 2022 ] Training epoch: 22 +[ Thu Sep 15 22:16:04 2022 ] Batch(97/162) done. Loss: 0.4766 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 22:16:51 2022 ] Eval epoch: 22 +[ Thu Sep 15 22:18:44 2022 ] Mean test loss of 930 batches: 3.316596746444702. +[ Thu Sep 15 22:18:45 2022 ] Top1: 43.31% +[ Thu Sep 15 22:18:45 2022 ] Top5: 72.96% +[ Thu Sep 15 22:18:46 2022 ] Training epoch: 23 +[ Thu Sep 15 22:19:19 2022 ] Batch(35/162) done. Loss: 0.3969 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 22:20:42 2022 ] Batch(135/162) done. Loss: 0.2837 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 22:21:04 2022 ] Eval epoch: 23 +[ Thu Sep 15 22:23:08 2022 ] Mean test loss of 930 batches: 3.197326421737671. +[ Thu Sep 15 22:23:08 2022 ] Top1: 42.93% +[ Thu Sep 15 22:23:09 2022 ] Top5: 71.72% +[ Thu Sep 15 22:23:09 2022 ] Training epoch: 24 +[ Thu Sep 15 22:24:14 2022 ] Batch(73/162) done. Loss: 0.2647 lr:0.100000 network_time: 0.0311 +[ Thu Sep 15 22:25:28 2022 ] Eval epoch: 24 +[ Thu Sep 15 22:27:31 2022 ] Mean test loss of 930 batches: 3.7576260566711426. +[ Thu Sep 15 22:27:32 2022 ] Top1: 33.71% +[ Thu Sep 15 22:27:32 2022 ] Top5: 64.81% +[ Thu Sep 15 22:27:32 2022 ] Training epoch: 25 +[ Thu Sep 15 22:27:46 2022 ] Batch(11/162) done. Loss: 0.2705 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 22:29:10 2022 ] Batch(111/162) done. Loss: 0.6018 lr:0.100000 network_time: 0.0302 +[ Thu Sep 15 22:29:52 2022 ] Eval epoch: 25 +[ Thu Sep 15 22:31:55 2022 ] Mean test loss of 930 batches: 3.1339001655578613. +[ Thu Sep 15 22:31:55 2022 ] Top1: 43.27% +[ Thu Sep 15 22:31:56 2022 ] Top5: 72.67% +[ Thu Sep 15 22:31:56 2022 ] Training epoch: 26 +[ Thu Sep 15 22:32:41 2022 ] Batch(49/162) done. Loss: 0.3780 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 22:34:05 2022 ] Batch(149/162) done. 
Loss: 0.4230 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 22:34:15 2022 ] Eval epoch: 26 +[ Thu Sep 15 22:36:19 2022 ] Mean test loss of 930 batches: 2.8259902000427246. +[ Thu Sep 15 22:36:20 2022 ] Top1: 47.37% +[ Thu Sep 15 22:36:20 2022 ] Top5: 77.19% +[ Thu Sep 15 22:36:20 2022 ] Training epoch: 27 +[ Thu Sep 15 22:37:37 2022 ] Batch(87/162) done. Loss: 0.1814 lr:0.100000 network_time: 0.0356 +[ Thu Sep 15 22:38:40 2022 ] Eval epoch: 27 +[ Thu Sep 15 22:40:43 2022 ] Mean test loss of 930 batches: 3.3106560707092285. +[ Thu Sep 15 22:40:43 2022 ] Top1: 41.29% +[ Thu Sep 15 22:40:44 2022 ] Top5: 71.07% +[ Thu Sep 15 22:40:44 2022 ] Training epoch: 28 +[ Thu Sep 15 22:41:09 2022 ] Batch(25/162) done. Loss: 0.3237 lr:0.100000 network_time: 0.0281 +[ Thu Sep 15 22:42:29 2022 ] Batch(125/162) done. Loss: 0.4479 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 22:42:56 2022 ] Eval epoch: 28 +[ Thu Sep 15 22:44:46 2022 ] Mean test loss of 930 batches: 3.0452048778533936. +[ Thu Sep 15 22:44:47 2022 ] Top1: 44.39% +[ Thu Sep 15 22:44:47 2022 ] Top5: 73.71% +[ Thu Sep 15 22:44:47 2022 ] Training epoch: 29 +[ Thu Sep 15 22:45:37 2022 ] Batch(63/162) done. Loss: 0.4210 lr:0.100000 network_time: 0.0295 +[ Thu Sep 15 22:46:49 2022 ] Eval epoch: 29 +[ Thu Sep 15 22:48:38 2022 ] Mean test loss of 930 batches: 3.6561460494995117. +[ Thu Sep 15 22:48:38 2022 ] Top1: 42.24% +[ Thu Sep 15 22:48:39 2022 ] Top5: 71.71% +[ Thu Sep 15 22:48:39 2022 ] Training epoch: 30 +[ Thu Sep 15 22:48:44 2022 ] Batch(1/162) done. Loss: 0.1475 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 22:49:56 2022 ] Batch(101/162) done. Loss: 0.3633 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 22:50:40 2022 ] Eval epoch: 30 +[ Thu Sep 15 22:52:30 2022 ] Mean test loss of 930 batches: 3.4392001628875732. +[ Thu Sep 15 22:52:30 2022 ] Top1: 38.44% +[ Thu Sep 15 22:52:31 2022 ] Top5: 68.96% +[ Thu Sep 15 22:52:31 2022 ] Training epoch: 31 +[ Thu Sep 15 22:53:03 2022 ] Batch(39/162) done. 
Loss: 0.2928 lr:0.100000 network_time: 0.0318 +[ Thu Sep 15 22:54:16 2022 ] Batch(139/162) done. Loss: 0.3484 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 22:54:32 2022 ] Eval epoch: 31 +[ Thu Sep 15 22:56:22 2022 ] Mean test loss of 930 batches: 3.405987024307251. +[ Thu Sep 15 22:56:23 2022 ] Top1: 44.97% +[ Thu Sep 15 22:56:23 2022 ] Top5: 74.00% +[ Thu Sep 15 22:56:24 2022 ] Training epoch: 32 +[ Thu Sep 15 22:57:23 2022 ] Batch(77/162) done. Loss: 0.3654 lr:0.100000 network_time: 0.0306 +[ Thu Sep 15 22:58:24 2022 ] Eval epoch: 32 +[ Thu Sep 15 23:00:14 2022 ] Mean test loss of 930 batches: 3.289426326751709. +[ Thu Sep 15 23:00:14 2022 ] Top1: 44.54% +[ Thu Sep 15 23:00:15 2022 ] Top5: 72.69% +[ Thu Sep 15 23:00:15 2022 ] Training epoch: 33 +[ Thu Sep 15 23:00:30 2022 ] Batch(15/162) done. Loss: 0.2446 lr:0.100000 network_time: 0.0309 +[ Thu Sep 15 23:01:43 2022 ] Batch(115/162) done. Loss: 0.6636 lr:0.100000 network_time: 0.0308 +[ Thu Sep 15 23:02:16 2022 ] Eval epoch: 33 +[ Thu Sep 15 23:04:06 2022 ] Mean test loss of 930 batches: 3.5604958534240723. +[ Thu Sep 15 23:04:06 2022 ] Top1: 43.77% +[ Thu Sep 15 23:04:07 2022 ] Top5: 73.33% +[ Thu Sep 15 23:04:07 2022 ] Training epoch: 34 +[ Thu Sep 15 23:04:49 2022 ] Batch(53/162) done. Loss: 0.2863 lr:0.100000 network_time: 0.0322 +[ Thu Sep 15 23:06:02 2022 ] Batch(153/162) done. Loss: 0.3649 lr:0.100000 network_time: 0.0314 +[ Thu Sep 15 23:06:08 2022 ] Eval epoch: 34 +[ Thu Sep 15 23:07:57 2022 ] Mean test loss of 930 batches: 3.1730799674987793. +[ Thu Sep 15 23:07:58 2022 ] Top1: 45.38% +[ Thu Sep 15 23:07:58 2022 ] Top5: 75.41% +[ Thu Sep 15 23:07:58 2022 ] Training epoch: 35 +[ Thu Sep 15 23:09:08 2022 ] Batch(91/162) done. Loss: 0.2954 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 23:09:59 2022 ] Eval epoch: 35 +[ Thu Sep 15 23:11:49 2022 ] Mean test loss of 930 batches: 4.07279634475708. 
+[ Thu Sep 15 23:11:49 2022 ] Top1: 35.21% +[ Thu Sep 15 23:11:50 2022 ] Top5: 66.58% +[ Thu Sep 15 23:11:50 2022 ] Training epoch: 36 +[ Thu Sep 15 23:12:15 2022 ] Batch(29/162) done. Loss: 0.2734 lr:0.100000 network_time: 0.0303 +[ Thu Sep 15 23:13:28 2022 ] Batch(129/162) done. Loss: 0.2994 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 23:13:51 2022 ] Eval epoch: 36 +[ Thu Sep 15 23:15:41 2022 ] Mean test loss of 930 batches: 3.6304514408111572. +[ Thu Sep 15 23:15:41 2022 ] Top1: 45.32% +[ Thu Sep 15 23:15:42 2022 ] Top5: 75.04% +[ Thu Sep 15 23:15:42 2022 ] Training epoch: 37 +[ Thu Sep 15 23:16:35 2022 ] Batch(67/162) done. Loss: 0.2511 lr:0.100000 network_time: 0.0298 +[ Thu Sep 15 23:17:43 2022 ] Eval epoch: 37 +[ Thu Sep 15 23:19:33 2022 ] Mean test loss of 930 batches: 3.6294195652008057. +[ Thu Sep 15 23:19:33 2022 ] Top1: 44.20% +[ Thu Sep 15 23:19:33 2022 ] Top5: 73.36% +[ Thu Sep 15 23:19:34 2022 ] Training epoch: 38 +[ Thu Sep 15 23:19:41 2022 ] Batch(5/162) done. Loss: 0.2162 lr:0.100000 network_time: 0.0284 +[ Thu Sep 15 23:20:54 2022 ] Batch(105/162) done. Loss: 0.1641 lr:0.100000 network_time: 0.0281 +[ Thu Sep 15 23:21:35 2022 ] Eval epoch: 38 +[ Thu Sep 15 23:23:24 2022 ] Mean test loss of 930 batches: 3.811417818069458. +[ Thu Sep 15 23:23:25 2022 ] Top1: 46.29% +[ Thu Sep 15 23:23:25 2022 ] Top5: 74.94% +[ Thu Sep 15 23:23:26 2022 ] Training epoch: 39 +[ Thu Sep 15 23:24:01 2022 ] Batch(43/162) done. Loss: 0.1922 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 23:25:13 2022 ] Batch(143/162) done. Loss: 0.4354 lr:0.100000 network_time: 0.0301 +[ Thu Sep 15 23:25:27 2022 ] Eval epoch: 39 +[ Thu Sep 15 23:27:16 2022 ] Mean test loss of 930 batches: 4.040771484375. +[ Thu Sep 15 23:27:17 2022 ] Top1: 39.84% +[ Thu Sep 15 23:27:17 2022 ] Top5: 69.56% +[ Thu Sep 15 23:27:18 2022 ] Training epoch: 40 +[ Thu Sep 15 23:28:20 2022 ] Batch(81/162) done. 
Loss: 0.4334 lr:0.100000 network_time: 0.0281 +[ Thu Sep 15 23:29:19 2022 ] Eval epoch: 40 +[ Thu Sep 15 23:31:10 2022 ] Mean test loss of 930 batches: 3.5953073501586914. +[ Thu Sep 15 23:31:10 2022 ] Top1: 41.93% +[ Thu Sep 15 23:31:11 2022 ] Top5: 72.07% +[ Thu Sep 15 23:31:11 2022 ] Training epoch: 41 +[ Thu Sep 15 23:31:29 2022 ] Batch(19/162) done. Loss: 0.1604 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 23:32:46 2022 ] Batch(119/162) done. Loss: 0.3937 lr:0.100000 network_time: 0.0284 +[ Thu Sep 15 23:33:17 2022 ] Eval epoch: 41 +[ Thu Sep 15 23:35:09 2022 ] Mean test loss of 930 batches: 3.520280122756958. +[ Thu Sep 15 23:35:09 2022 ] Top1: 47.59% +[ Thu Sep 15 23:35:10 2022 ] Top5: 75.64% +[ Thu Sep 15 23:35:10 2022 ] Training epoch: 42 +[ Thu Sep 15 23:35:57 2022 ] Batch(57/162) done. Loss: 0.1668 lr:0.100000 network_time: 0.0282 +[ Thu Sep 15 23:37:09 2022 ] Batch(157/162) done. Loss: 0.2388 lr:0.100000 network_time: 0.0286 +[ Thu Sep 15 23:37:12 2022 ] Eval epoch: 42 +[ Thu Sep 15 23:39:02 2022 ] Mean test loss of 930 batches: 4.183106899261475. +[ Thu Sep 15 23:39:03 2022 ] Top1: 44.28% +[ Thu Sep 15 23:39:03 2022 ] Top5: 72.06% +[ Thu Sep 15 23:39:03 2022 ] Training epoch: 43 +[ Thu Sep 15 23:40:17 2022 ] Batch(95/162) done. Loss: 0.2275 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 23:41:05 2022 ] Eval epoch: 43 +[ Thu Sep 15 23:42:55 2022 ] Mean test loss of 930 batches: 3.718573570251465. +[ Thu Sep 15 23:42:55 2022 ] Top1: 45.26% +[ Thu Sep 15 23:42:56 2022 ] Top5: 74.70% +[ Thu Sep 15 23:42:56 2022 ] Training epoch: 44 +[ Thu Sep 15 23:43:24 2022 ] Batch(33/162) done. Loss: 0.1599 lr:0.100000 network_time: 0.0259 +[ Thu Sep 15 23:44:36 2022 ] Batch(133/162) done. Loss: 0.3298 lr:0.100000 network_time: 0.0262 +[ Thu Sep 15 23:44:57 2022 ] Eval epoch: 44 +[ Thu Sep 15 23:46:47 2022 ] Mean test loss of 930 batches: 3.5274219512939453. 
+[ Thu Sep 15 23:46:47 2022 ] Top1: 45.58% +[ Thu Sep 15 23:46:48 2022 ] Top5: 74.79% +[ Thu Sep 15 23:46:48 2022 ] Training epoch: 45 +[ Thu Sep 15 23:47:43 2022 ] Batch(71/162) done. Loss: 0.1845 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 23:48:49 2022 ] Eval epoch: 45 +[ Thu Sep 15 23:50:39 2022 ] Mean test loss of 930 batches: 3.1531949043273926. +[ Thu Sep 15 23:50:39 2022 ] Top1: 46.84% +[ Thu Sep 15 23:50:39 2022 ] Top5: 76.80% +[ Thu Sep 15 23:50:40 2022 ] Training epoch: 46 +[ Thu Sep 15 23:50:50 2022 ] Batch(9/162) done. Loss: 0.1575 lr:0.100000 network_time: 0.0318 +[ Thu Sep 15 23:52:03 2022 ] Batch(109/162) done. Loss: 0.1927 lr:0.100000 network_time: 0.0316 +[ Thu Sep 15 23:52:41 2022 ] Eval epoch: 46 +[ Thu Sep 15 23:54:31 2022 ] Mean test loss of 930 batches: 3.124375104904175. +[ Thu Sep 15 23:54:32 2022 ] Top1: 48.06% +[ Thu Sep 15 23:54:32 2022 ] Top5: 75.82% +[ Thu Sep 15 23:54:32 2022 ] Training epoch: 47 +[ Thu Sep 15 23:55:10 2022 ] Batch(47/162) done. Loss: 0.1494 lr:0.100000 network_time: 0.0275 +[ Thu Sep 15 23:56:23 2022 ] Batch(147/162) done. Loss: 0.1077 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 23:56:33 2022 ] Eval epoch: 47 +[ Thu Sep 15 23:58:23 2022 ] Mean test loss of 930 batches: 3.3888652324676514. +[ Thu Sep 15 23:58:23 2022 ] Top1: 46.95% +[ Thu Sep 15 23:58:24 2022 ] Top5: 75.08% +[ Thu Sep 15 23:58:24 2022 ] Training epoch: 48 +[ Thu Sep 15 23:59:30 2022 ] Batch(85/162) done. Loss: 0.3500 lr:0.100000 network_time: 0.0327 +[ Fri Sep 16 00:00:25 2022 ] Eval epoch: 48 +[ Fri Sep 16 00:02:15 2022 ] Mean test loss of 930 batches: 3.2960240840911865. +[ Fri Sep 16 00:02:15 2022 ] Top1: 45.77% +[ Fri Sep 16 00:02:16 2022 ] Top5: 74.82% +[ Fri Sep 16 00:02:16 2022 ] Training epoch: 49 +[ Fri Sep 16 00:02:37 2022 ] Batch(23/162) done. Loss: 0.1806 lr:0.100000 network_time: 0.0323 +[ Fri Sep 16 00:03:49 2022 ] Batch(123/162) done. 
Loss: 0.2071 lr:0.100000 network_time: 0.0339 +[ Fri Sep 16 00:04:17 2022 ] Eval epoch: 49 +[ Fri Sep 16 00:06:07 2022 ] Mean test loss of 930 batches: 3.379523754119873. +[ Fri Sep 16 00:06:07 2022 ] Top1: 47.97% +[ Fri Sep 16 00:06:08 2022 ] Top5: 76.24% +[ Fri Sep 16 00:06:08 2022 ] Training epoch: 50 +[ Fri Sep 16 00:06:56 2022 ] Batch(61/162) done. Loss: 0.1720 lr:0.100000 network_time: 0.0305 +[ Fri Sep 16 00:08:09 2022 ] Batch(161/162) done. Loss: 0.1912 lr:0.100000 network_time: 0.0259 +[ Fri Sep 16 00:08:09 2022 ] Eval epoch: 50 +[ Fri Sep 16 00:09:58 2022 ] Mean test loss of 930 batches: 3.817385673522949. +[ Fri Sep 16 00:09:59 2022 ] Top1: 45.53% +[ Fri Sep 16 00:09:59 2022 ] Top5: 74.42% +[ Fri Sep 16 00:09:59 2022 ] Training epoch: 51 +[ Fri Sep 16 00:11:15 2022 ] Batch(99/162) done. Loss: 0.3319 lr:0.100000 network_time: 0.0281 +[ Fri Sep 16 00:12:00 2022 ] Eval epoch: 51 +[ Fri Sep 16 00:13:50 2022 ] Mean test loss of 930 batches: 3.0376579761505127. +[ Fri Sep 16 00:13:50 2022 ] Top1: 46.97% +[ Fri Sep 16 00:13:51 2022 ] Top5: 77.13% +[ Fri Sep 16 00:13:51 2022 ] Training epoch: 52 +[ Fri Sep 16 00:14:22 2022 ] Batch(37/162) done. Loss: 0.1760 lr:0.100000 network_time: 0.0277 +[ Fri Sep 16 00:15:34 2022 ] Batch(137/162) done. Loss: 0.2859 lr:0.100000 network_time: 0.0304 +[ Fri Sep 16 00:15:52 2022 ] Eval epoch: 52 +[ Fri Sep 16 00:17:42 2022 ] Mean test loss of 930 batches: 2.975449800491333. +[ Fri Sep 16 00:17:42 2022 ] Top1: 48.78% +[ Fri Sep 16 00:17:43 2022 ] Top5: 76.65% +[ Fri Sep 16 00:17:43 2022 ] Training epoch: 53 +[ Fri Sep 16 00:18:41 2022 ] Batch(75/162) done. Loss: 0.0967 lr:0.100000 network_time: 0.0260 +[ Fri Sep 16 00:19:44 2022 ] Eval epoch: 53 +[ Fri Sep 16 00:21:34 2022 ] Mean test loss of 930 batches: 3.7247111797332764. +[ Fri Sep 16 00:21:34 2022 ] Top1: 45.97% +[ Fri Sep 16 00:21:34 2022 ] Top5: 74.71% +[ Fri Sep 16 00:21:35 2022 ] Training epoch: 54 +[ Fri Sep 16 00:21:48 2022 ] Batch(13/162) done. 
Loss: 0.1032 lr:0.100000 network_time: 0.0273 +[ Fri Sep 16 00:23:01 2022 ] Batch(113/162) done. Loss: 0.2584 lr:0.100000 network_time: 0.0263 +[ Fri Sep 16 00:23:36 2022 ] Eval epoch: 54 +[ Fri Sep 16 00:25:26 2022 ] Mean test loss of 930 batches: 3.4403839111328125. +[ Fri Sep 16 00:25:26 2022 ] Top1: 46.47% +[ Fri Sep 16 00:25:26 2022 ] Top5: 74.89% +[ Fri Sep 16 00:25:27 2022 ] Training epoch: 55 +[ Fri Sep 16 00:26:07 2022 ] Batch(51/162) done. Loss: 0.1137 lr:0.100000 network_time: 0.0272 +[ Fri Sep 16 00:27:20 2022 ] Batch(151/162) done. Loss: 0.1669 lr:0.100000 network_time: 0.0255 +[ Fri Sep 16 00:27:28 2022 ] Eval epoch: 55 +[ Fri Sep 16 00:29:17 2022 ] Mean test loss of 930 batches: 3.3242578506469727. +[ Fri Sep 16 00:29:17 2022 ] Top1: 44.78% +[ Fri Sep 16 00:29:18 2022 ] Top5: 73.90% +[ Fri Sep 16 00:29:18 2022 ] Training epoch: 56 +[ Fri Sep 16 00:30:27 2022 ] Batch(89/162) done. Loss: 0.1448 lr:0.100000 network_time: 0.0310 +[ Fri Sep 16 00:31:19 2022 ] Eval epoch: 56 +[ Fri Sep 16 00:33:10 2022 ] Mean test loss of 930 batches: 3.144345283508301. +[ Fri Sep 16 00:33:10 2022 ] Top1: 47.82% +[ Fri Sep 16 00:33:11 2022 ] Top5: 76.10% +[ Fri Sep 16 00:33:11 2022 ] Training epoch: 57 +[ Fri Sep 16 00:33:35 2022 ] Batch(27/162) done. Loss: 0.2348 lr:0.100000 network_time: 0.0274 +[ Fri Sep 16 00:34:47 2022 ] Batch(127/162) done. Loss: 0.1120 lr:0.100000 network_time: 0.0275 +[ Fri Sep 16 00:35:12 2022 ] Eval epoch: 57 +[ Fri Sep 16 00:37:02 2022 ] Mean test loss of 930 batches: 2.920133352279663. +[ Fri Sep 16 00:37:03 2022 ] Top1: 48.81% +[ Fri Sep 16 00:37:03 2022 ] Top5: 77.31% +[ Fri Sep 16 00:37:03 2022 ] Training epoch: 58 +[ Fri Sep 16 00:37:54 2022 ] Batch(65/162) done. Loss: 0.3310 lr:0.100000 network_time: 0.0559 +[ Fri Sep 16 00:39:05 2022 ] Eval epoch: 58 +[ Fri Sep 16 00:40:55 2022 ] Mean test loss of 930 batches: 3.7737793922424316. 
+[ Fri Sep 16 00:40:55 2022 ] Top1: 43.11% +[ Fri Sep 16 00:40:56 2022 ] Top5: 70.58% +[ Fri Sep 16 00:40:56 2022 ] Training epoch: 59 +[ Fri Sep 16 00:41:02 2022 ] Batch(3/162) done. Loss: 0.0720 lr:0.100000 network_time: 0.0316 +[ Fri Sep 16 00:42:15 2022 ] Batch(103/162) done. Loss: 0.2959 lr:0.100000 network_time: 0.0284 +[ Fri Sep 16 00:42:57 2022 ] Eval epoch: 59 +[ Fri Sep 16 00:44:47 2022 ] Mean test loss of 930 batches: 3.2564449310302734. +[ Fri Sep 16 00:44:48 2022 ] Top1: 48.88% +[ Fri Sep 16 00:44:48 2022 ] Top5: 77.70% +[ Fri Sep 16 00:44:48 2022 ] Training epoch: 60 +[ Fri Sep 16 00:45:22 2022 ] Batch(41/162) done. Loss: 0.1657 lr:0.100000 network_time: 0.0279 +[ Fri Sep 16 00:46:35 2022 ] Batch(141/162) done. Loss: 0.1678 lr:0.100000 network_time: 0.0401 +[ Fri Sep 16 00:46:50 2022 ] Eval epoch: 60 +[ Fri Sep 16 00:48:40 2022 ] Mean test loss of 930 batches: 3.528254747390747. +[ Fri Sep 16 00:48:40 2022 ] Top1: 48.81% +[ Fri Sep 16 00:48:41 2022 ] Top5: 76.94% +[ Fri Sep 16 00:48:41 2022 ] Training epoch: 61 +[ Fri Sep 16 00:49:42 2022 ] Batch(79/162) done. Loss: 0.1274 lr:0.010000 network_time: 0.0277 +[ Fri Sep 16 00:50:42 2022 ] Eval epoch: 61 +[ Fri Sep 16 00:52:32 2022 ] Mean test loss of 930 batches: 3.0551934242248535. +[ Fri Sep 16 00:52:32 2022 ] Top1: 52.95% +[ Fri Sep 16 00:52:33 2022 ] Top5: 80.04% +[ Fri Sep 16 00:52:33 2022 ] Training epoch: 62 +[ Fri Sep 16 00:52:49 2022 ] Batch(17/162) done. Loss: 0.0102 lr:0.010000 network_time: 0.0322 +[ Fri Sep 16 00:54:02 2022 ] Batch(117/162) done. Loss: 0.0097 lr:0.010000 network_time: 0.0277 +[ Fri Sep 16 00:54:34 2022 ] Eval epoch: 62 +[ Fri Sep 16 00:56:24 2022 ] Mean test loss of 930 batches: 2.7636544704437256. +[ Fri Sep 16 00:56:24 2022 ] Top1: 54.60% +[ Fri Sep 16 00:56:25 2022 ] Top5: 80.99% +[ Fri Sep 16 00:56:25 2022 ] Training epoch: 63 +[ Fri Sep 16 00:57:09 2022 ] Batch(55/162) done. Loss: 0.0137 lr:0.010000 network_time: 0.0319 +[ Fri Sep 16 00:58:22 2022 ] Batch(155/162) done. 
Loss: 0.0272 lr:0.010000 network_time: 0.0269 +[ Fri Sep 16 00:58:26 2022 ] Eval epoch: 63 +[ Fri Sep 16 01:00:16 2022 ] Mean test loss of 930 batches: 2.9170336723327637. +[ Fri Sep 16 01:00:17 2022 ] Top1: 54.68% +[ Fri Sep 16 01:00:17 2022 ] Top5: 81.01% +[ Fri Sep 16 01:00:18 2022 ] Training epoch: 64 +[ Fri Sep 16 01:01:29 2022 ] Batch(93/162) done. Loss: 0.0298 lr:0.010000 network_time: 0.0277 +[ Fri Sep 16 01:02:19 2022 ] Eval epoch: 64 +[ Fri Sep 16 01:04:09 2022 ] Mean test loss of 930 batches: 2.8463385105133057. +[ Fri Sep 16 01:04:09 2022 ] Top1: 53.52% +[ Fri Sep 16 01:04:09 2022 ] Top5: 80.42% +[ Fri Sep 16 01:04:10 2022 ] Training epoch: 65 +[ Fri Sep 16 01:04:36 2022 ] Batch(31/162) done. Loss: 0.0082 lr:0.010000 network_time: 0.0256 +[ Fri Sep 16 01:05:49 2022 ] Batch(131/162) done. Loss: 0.0156 lr:0.010000 network_time: 0.0266 +[ Fri Sep 16 01:06:11 2022 ] Eval epoch: 65 +[ Fri Sep 16 01:08:01 2022 ] Mean test loss of 930 batches: 2.7530834674835205. +[ Fri Sep 16 01:08:01 2022 ] Top1: 54.70% +[ Fri Sep 16 01:08:02 2022 ] Top5: 81.44% +[ Fri Sep 16 01:08:02 2022 ] Training epoch: 66 +[ Fri Sep 16 01:08:56 2022 ] Batch(69/162) done. Loss: 0.0330 lr:0.010000 network_time: 0.0282 +[ Fri Sep 16 01:10:03 2022 ] Eval epoch: 66 +[ Fri Sep 16 01:11:52 2022 ] Mean test loss of 930 batches: 2.835975408554077. +[ Fri Sep 16 01:11:53 2022 ] Top1: 54.82% +[ Fri Sep 16 01:11:53 2022 ] Top5: 81.14% +[ Fri Sep 16 01:11:53 2022 ] Training epoch: 67 +[ Fri Sep 16 01:12:02 2022 ] Batch(7/162) done. Loss: 0.0208 lr:0.010000 network_time: 0.0344 +[ Fri Sep 16 01:13:15 2022 ] Batch(107/162) done. Loss: 0.0075 lr:0.010000 network_time: 0.0270 +[ Fri Sep 16 01:13:55 2022 ] Eval epoch: 67 +[ Fri Sep 16 01:15:44 2022 ] Mean test loss of 930 batches: 2.7904841899871826. +[ Fri Sep 16 01:15:45 2022 ] Top1: 54.78% +[ Fri Sep 16 01:15:45 2022 ] Top5: 81.29% +[ Fri Sep 16 01:15:45 2022 ] Training epoch: 68 +[ Fri Sep 16 01:16:22 2022 ] Batch(45/162) done. 
Loss: 0.0099 lr:0.010000 network_time: 0.0278 +[ Fri Sep 16 01:17:35 2022 ] Batch(145/162) done. Loss: 0.0066 lr:0.010000 network_time: 0.0277 +[ Fri Sep 16 01:17:47 2022 ] Eval epoch: 68 +[ Fri Sep 16 01:19:36 2022 ] Mean test loss of 930 batches: 2.7633280754089355. +[ Fri Sep 16 01:19:37 2022 ] Top1: 54.63% +[ Fri Sep 16 01:19:37 2022 ] Top5: 81.23% +[ Fri Sep 16 01:19:38 2022 ] Training epoch: 69 +[ Fri Sep 16 01:20:42 2022 ] Batch(83/162) done. Loss: 0.0115 lr:0.010000 network_time: 0.0280 +[ Fri Sep 16 01:21:39 2022 ] Eval epoch: 69 +[ Fri Sep 16 01:23:29 2022 ] Mean test loss of 930 batches: 2.717859983444214. +[ Fri Sep 16 01:23:29 2022 ] Top1: 54.13% +[ Fri Sep 16 01:23:30 2022 ] Top5: 80.91% +[ Fri Sep 16 01:23:30 2022 ] Training epoch: 70 +[ Fri Sep 16 01:23:49 2022 ] Batch(21/162) done. Loss: 0.0243 lr:0.010000 network_time: 0.0261 +[ Fri Sep 16 01:25:01 2022 ] Batch(121/162) done. Loss: 0.0246 lr:0.010000 network_time: 0.0312 +[ Fri Sep 16 01:25:31 2022 ] Eval epoch: 70 +[ Fri Sep 16 01:27:21 2022 ] Mean test loss of 930 batches: 2.8249881267547607. +[ Fri Sep 16 01:27:21 2022 ] Top1: 55.01% +[ Fri Sep 16 01:27:22 2022 ] Top5: 81.17% +[ Fri Sep 16 01:27:22 2022 ] Training epoch: 71 +[ Fri Sep 16 01:28:09 2022 ] Batch(59/162) done. Loss: 0.0115 lr:0.010000 network_time: 0.0270 +[ Fri Sep 16 01:29:21 2022 ] Batch(159/162) done. Loss: 0.0081 lr:0.010000 network_time: 0.0281 +[ Fri Sep 16 01:29:23 2022 ] Eval epoch: 71 +[ Fri Sep 16 01:31:13 2022 ] Mean test loss of 930 batches: 2.6873366832733154. +[ Fri Sep 16 01:31:14 2022 ] Top1: 54.78% +[ Fri Sep 16 01:31:14 2022 ] Top5: 81.29% +[ Fri Sep 16 01:31:14 2022 ] Training epoch: 72 +[ Fri Sep 16 01:32:28 2022 ] Batch(97/162) done. Loss: 0.0124 lr:0.010000 network_time: 0.0311 +[ Fri Sep 16 01:33:15 2022 ] Eval epoch: 72 +[ Fri Sep 16 01:35:05 2022 ] Mean test loss of 930 batches: 2.838772773742676. 
+[ Fri Sep 16 01:35:06 2022 ] Top1: 55.18% +[ Fri Sep 16 01:35:06 2022 ] Top5: 81.33% +[ Fri Sep 16 01:35:06 2022 ] Training epoch: 73 +[ Fri Sep 16 01:35:35 2022 ] Batch(35/162) done. Loss: 0.0055 lr:0.010000 network_time: 0.0290 +[ Fri Sep 16 01:36:48 2022 ] Batch(135/162) done. Loss: 0.0075 lr:0.010000 network_time: 0.0303 +[ Fri Sep 16 01:37:07 2022 ] Eval epoch: 73 +[ Fri Sep 16 01:38:57 2022 ] Mean test loss of 930 batches: 2.8878660202026367. +[ Fri Sep 16 01:38:58 2022 ] Top1: 55.20% +[ Fri Sep 16 01:38:58 2022 ] Top5: 81.34% +[ Fri Sep 16 01:38:58 2022 ] Training epoch: 74 +[ Fri Sep 16 01:39:55 2022 ] Batch(73/162) done. Loss: 0.0124 lr:0.010000 network_time: 0.0287 +[ Fri Sep 16 01:40:59 2022 ] Eval epoch: 74 +[ Fri Sep 16 01:42:49 2022 ] Mean test loss of 930 batches: 2.664907455444336. +[ Fri Sep 16 01:42:50 2022 ] Top1: 55.28% +[ Fri Sep 16 01:42:50 2022 ] Top5: 81.36% +[ Fri Sep 16 01:42:51 2022 ] Training epoch: 75 +[ Fri Sep 16 01:43:02 2022 ] Batch(11/162) done. Loss: 0.0043 lr:0.010000 network_time: 0.0273 +[ Fri Sep 16 01:44:15 2022 ] Batch(111/162) done. Loss: 0.0047 lr:0.010000 network_time: 0.0333 +[ Fri Sep 16 01:44:52 2022 ] Eval epoch: 75 +[ Fri Sep 16 01:46:41 2022 ] Mean test loss of 930 batches: 2.6683382987976074. +[ Fri Sep 16 01:46:42 2022 ] Top1: 54.45% +[ Fri Sep 16 01:46:42 2022 ] Top5: 80.98% +[ Fri Sep 16 01:46:42 2022 ] Training epoch: 76 +[ Fri Sep 16 01:47:22 2022 ] Batch(49/162) done. Loss: 0.0093 lr:0.010000 network_time: 0.0279 +[ Fri Sep 16 01:48:35 2022 ] Batch(149/162) done. Loss: 0.0145 lr:0.010000 network_time: 0.0253 +[ Fri Sep 16 01:48:44 2022 ] Eval epoch: 76 +[ Fri Sep 16 01:50:33 2022 ] Mean test loss of 930 batches: 2.8474338054656982. +[ Fri Sep 16 01:50:34 2022 ] Top1: 55.17% +[ Fri Sep 16 01:50:34 2022 ] Top5: 81.17% +[ Fri Sep 16 01:50:34 2022 ] Training epoch: 77 +[ Fri Sep 16 01:51:42 2022 ] Batch(87/162) done. 
Loss: 0.0102 lr:0.010000 network_time: 0.0264 +[ Fri Sep 16 01:52:36 2022 ] Eval epoch: 77 +[ Fri Sep 16 01:54:25 2022 ] Mean test loss of 930 batches: 2.8973159790039062. +[ Fri Sep 16 01:54:26 2022 ] Top1: 55.34% +[ Fri Sep 16 01:54:26 2022 ] Top5: 81.56% +[ Fri Sep 16 01:54:26 2022 ] Training epoch: 78 +[ Fri Sep 16 01:54:48 2022 ] Batch(25/162) done. Loss: 0.0102 lr:0.010000 network_time: 0.0296 +[ Fri Sep 16 01:56:01 2022 ] Batch(125/162) done. Loss: 0.0087 lr:0.010000 network_time: 0.0272 +[ Fri Sep 16 01:56:28 2022 ] Eval epoch: 78 +[ Fri Sep 16 01:58:17 2022 ] Mean test loss of 930 batches: 2.6789896488189697. +[ Fri Sep 16 01:58:18 2022 ] Top1: 54.80% +[ Fri Sep 16 01:58:18 2022 ] Top5: 81.20% +[ Fri Sep 16 01:58:18 2022 ] Training epoch: 79 +[ Fri Sep 16 01:59:08 2022 ] Batch(63/162) done. Loss: 0.0068 lr:0.010000 network_time: 0.0500 +[ Fri Sep 16 02:00:19 2022 ] Eval epoch: 79 +[ Fri Sep 16 02:02:09 2022 ] Mean test loss of 930 batches: 2.7858283519744873. +[ Fri Sep 16 02:02:09 2022 ] Top1: 54.91% +[ Fri Sep 16 02:02:10 2022 ] Top5: 81.21% +[ Fri Sep 16 02:02:10 2022 ] Training epoch: 80 +[ Fri Sep 16 02:02:15 2022 ] Batch(1/162) done. Loss: 0.0035 lr:0.010000 network_time: 0.0303 +[ Fri Sep 16 02:03:27 2022 ] Batch(101/162) done. Loss: 0.0141 lr:0.010000 network_time: 0.0399 +[ Fri Sep 16 02:04:11 2022 ] Eval epoch: 80 +[ Fri Sep 16 02:06:01 2022 ] Mean test loss of 930 batches: 2.8534252643585205. +[ Fri Sep 16 02:06:02 2022 ] Top1: 55.02% +[ Fri Sep 16 02:06:02 2022 ] Top5: 81.41% +[ Fri Sep 16 02:06:02 2022 ] Training epoch: 81 +[ Fri Sep 16 02:06:34 2022 ] Batch(39/162) done. Loss: 0.0035 lr:0.001000 network_time: 0.0254 +[ Fri Sep 16 02:07:47 2022 ] Batch(139/162) done. Loss: 0.0039 lr:0.001000 network_time: 0.0233 +[ Fri Sep 16 02:08:03 2022 ] Eval epoch: 81 +[ Fri Sep 16 02:09:52 2022 ] Mean test loss of 930 batches: 2.6803336143493652. 
+[ Fri Sep 16 02:09:53 2022 ] Top1: 55.20% +[ Fri Sep 16 02:09:53 2022 ] Top5: 81.48% +[ Fri Sep 16 02:09:53 2022 ] Training epoch: 82 +[ Fri Sep 16 02:10:53 2022 ] Batch(77/162) done. Loss: 0.0049 lr:0.001000 network_time: 0.0262 +[ Fri Sep 16 02:11:54 2022 ] Eval epoch: 82 +[ Fri Sep 16 02:13:44 2022 ] Mean test loss of 930 batches: 2.7156152725219727. +[ Fri Sep 16 02:13:44 2022 ] Top1: 55.67% +[ Fri Sep 16 02:13:45 2022 ] Top5: 81.66% +[ Fri Sep 16 02:13:45 2022 ] Training epoch: 83 +[ Fri Sep 16 02:14:00 2022 ] Batch(15/162) done. Loss: 0.0084 lr:0.001000 network_time: 0.0341 +[ Fri Sep 16 02:15:12 2022 ] Batch(115/162) done. Loss: 0.0417 lr:0.001000 network_time: 0.0321 +[ Fri Sep 16 02:15:46 2022 ] Eval epoch: 83 +[ Fri Sep 16 02:17:36 2022 ] Mean test loss of 930 batches: 2.685029983520508. +[ Fri Sep 16 02:17:36 2022 ] Top1: 54.72% +[ Fri Sep 16 02:17:37 2022 ] Top5: 81.19% +[ Fri Sep 16 02:17:37 2022 ] Training epoch: 84 +[ Fri Sep 16 02:18:19 2022 ] Batch(53/162) done. Loss: 0.0095 lr:0.001000 network_time: 0.0280 +[ Fri Sep 16 02:19:32 2022 ] Batch(153/162) done. Loss: 0.0193 lr:0.001000 network_time: 0.0274 +[ Fri Sep 16 02:19:38 2022 ] Eval epoch: 84 +[ Fri Sep 16 02:21:28 2022 ] Mean test loss of 930 batches: 2.905918598175049. +[ Fri Sep 16 02:21:28 2022 ] Top1: 55.23% +[ Fri Sep 16 02:21:29 2022 ] Top5: 81.10% +[ Fri Sep 16 02:21:29 2022 ] Training epoch: 85 +[ Fri Sep 16 02:22:39 2022 ] Batch(91/162) done. Loss: 0.0063 lr:0.001000 network_time: 0.0309 +[ Fri Sep 16 02:23:30 2022 ] Eval epoch: 85 +[ Fri Sep 16 02:25:20 2022 ] Mean test loss of 930 batches: 2.808967351913452. +[ Fri Sep 16 02:25:20 2022 ] Top1: 55.04% +[ Fri Sep 16 02:25:21 2022 ] Top5: 81.41% +[ Fri Sep 16 02:25:21 2022 ] Training epoch: 86 +[ Fri Sep 16 02:25:46 2022 ] Batch(29/162) done. Loss: 0.0053 lr:0.001000 network_time: 0.0268 +[ Fri Sep 16 02:26:59 2022 ] Batch(129/162) done. 
Loss: 0.0089 lr:0.001000 network_time: 0.0285 +[ Fri Sep 16 02:27:22 2022 ] Eval epoch: 86 +[ Fri Sep 16 02:29:12 2022 ] Mean test loss of 930 batches: 2.6798934936523438. +[ Fri Sep 16 02:29:12 2022 ] Top1: 54.68% +[ Fri Sep 16 02:29:13 2022 ] Top5: 81.19% +[ Fri Sep 16 02:29:13 2022 ] Training epoch: 87 +[ Fri Sep 16 02:30:05 2022 ] Batch(67/162) done. Loss: 0.0089 lr:0.001000 network_time: 0.0313 +[ Fri Sep 16 02:31:14 2022 ] Eval epoch: 87 +[ Fri Sep 16 02:33:04 2022 ] Mean test loss of 930 batches: 2.7120673656463623. +[ Fri Sep 16 02:33:04 2022 ] Top1: 55.48% +[ Fri Sep 16 02:33:04 2022 ] Top5: 81.55% +[ Fri Sep 16 02:33:05 2022 ] Training epoch: 88 +[ Fri Sep 16 02:33:12 2022 ] Batch(5/162) done. Loss: 0.0106 lr:0.001000 network_time: 0.0268 +[ Fri Sep 16 02:34:25 2022 ] Batch(105/162) done. Loss: 0.0082 lr:0.001000 network_time: 0.0269 +[ Fri Sep 16 02:35:06 2022 ] Eval epoch: 88 +[ Fri Sep 16 02:36:55 2022 ] Mean test loss of 930 batches: 2.6429519653320312. +[ Fri Sep 16 02:36:55 2022 ] Top1: 55.39% +[ Fri Sep 16 02:36:56 2022 ] Top5: 81.70% +[ Fri Sep 16 02:36:56 2022 ] Training epoch: 89 +[ Fri Sep 16 02:37:31 2022 ] Batch(43/162) done. Loss: 0.0084 lr:0.001000 network_time: 0.0294 +[ Fri Sep 16 02:38:44 2022 ] Batch(143/162) done. Loss: 0.0078 lr:0.001000 network_time: 0.0392 +[ Fri Sep 16 02:38:57 2022 ] Eval epoch: 89 +[ Fri Sep 16 02:40:47 2022 ] Mean test loss of 930 batches: 2.7270517349243164. +[ Fri Sep 16 02:40:47 2022 ] Top1: 54.51% +[ Fri Sep 16 02:40:48 2022 ] Top5: 81.10% +[ Fri Sep 16 02:40:48 2022 ] Training epoch: 90 +[ Fri Sep 16 02:41:50 2022 ] Batch(81/162) done. Loss: 0.0173 lr:0.001000 network_time: 0.0349 +[ Fri Sep 16 02:42:49 2022 ] Eval epoch: 90 +[ Fri Sep 16 02:44:39 2022 ] Mean test loss of 930 batches: 2.8094165325164795. +[ Fri Sep 16 02:44:39 2022 ] Top1: 54.99% +[ Fri Sep 16 02:44:40 2022 ] Top5: 81.39% +[ Fri Sep 16 02:44:40 2022 ] Training epoch: 91 +[ Fri Sep 16 02:44:57 2022 ] Batch(19/162) done. 
Loss: 0.0077 lr:0.001000 network_time: 0.0302 +[ Fri Sep 16 02:46:10 2022 ] Batch(119/162) done. Loss: 0.0108 lr:0.001000 network_time: 0.0271 +[ Fri Sep 16 02:46:41 2022 ] Eval epoch: 91 +[ Fri Sep 16 02:48:31 2022 ] Mean test loss of 930 batches: 2.7365074157714844. +[ Fri Sep 16 02:48:31 2022 ] Top1: 55.28% +[ Fri Sep 16 02:48:32 2022 ] Top5: 81.50% +[ Fri Sep 16 02:48:32 2022 ] Training epoch: 92 +[ Fri Sep 16 02:49:17 2022 ] Batch(57/162) done. Loss: 0.0151 lr:0.001000 network_time: 0.0319 +[ Fri Sep 16 02:50:30 2022 ] Batch(157/162) done. Loss: 0.0383 lr:0.001000 network_time: 0.0283 +[ Fri Sep 16 02:50:33 2022 ] Eval epoch: 92 +[ Fri Sep 16 02:52:22 2022 ] Mean test loss of 930 batches: 2.6518261432647705. +[ Fri Sep 16 02:52:23 2022 ] Top1: 54.07% +[ Fri Sep 16 02:52:23 2022 ] Top5: 81.02% +[ Fri Sep 16 02:52:23 2022 ] Training epoch: 93 +[ Fri Sep 16 02:53:36 2022 ] Batch(95/162) done. Loss: 0.0064 lr:0.001000 network_time: 0.0273 +[ Fri Sep 16 02:54:24 2022 ] Eval epoch: 93 +[ Fri Sep 16 02:56:14 2022 ] Mean test loss of 930 batches: 2.8818185329437256. +[ Fri Sep 16 02:56:14 2022 ] Top1: 54.94% +[ Fri Sep 16 02:56:15 2022 ] Top5: 81.20% +[ Fri Sep 16 02:56:15 2022 ] Training epoch: 94 +[ Fri Sep 16 02:56:43 2022 ] Batch(33/162) done. Loss: 0.0079 lr:0.001000 network_time: 0.0273 +[ Fri Sep 16 02:57:55 2022 ] Batch(133/162) done. Loss: 0.0125 lr:0.001000 network_time: 0.0282 +[ Fri Sep 16 02:58:16 2022 ] Eval epoch: 94 +[ Fri Sep 16 03:00:05 2022 ] Mean test loss of 930 batches: 2.7161922454833984. +[ Fri Sep 16 03:00:06 2022 ] Top1: 55.58% +[ Fri Sep 16 03:00:06 2022 ] Top5: 81.59% +[ Fri Sep 16 03:00:06 2022 ] Training epoch: 95 +[ Fri Sep 16 03:01:02 2022 ] Batch(71/162) done. Loss: 0.0110 lr:0.001000 network_time: 0.0322 +[ Fri Sep 16 03:02:07 2022 ] Eval epoch: 95 +[ Fri Sep 16 03:03:57 2022 ] Mean test loss of 930 batches: 2.7197964191436768. 
+[ Fri Sep 16 03:03:57 2022 ] Top1: 55.39% +[ Fri Sep 16 03:03:58 2022 ] Top5: 81.45% +[ Fri Sep 16 03:03:58 2022 ] Training epoch: 96 +[ Fri Sep 16 03:04:08 2022 ] Batch(9/162) done. Loss: 0.0097 lr:0.001000 network_time: 0.0274 +[ Fri Sep 16 03:05:21 2022 ] Batch(109/162) done. Loss: 0.0032 lr:0.001000 network_time: 0.0272 +[ Fri Sep 16 03:05:59 2022 ] Eval epoch: 96 +[ Fri Sep 16 03:07:49 2022 ] Mean test loss of 930 batches: 2.823249578475952. +[ Fri Sep 16 03:07:49 2022 ] Top1: 53.73% +[ Fri Sep 16 03:07:50 2022 ] Top5: 80.80% +[ Fri Sep 16 03:07:50 2022 ] Training epoch: 97 +[ Fri Sep 16 03:08:28 2022 ] Batch(47/162) done. Loss: 0.0206 lr:0.001000 network_time: 0.0288 +[ Fri Sep 16 03:09:41 2022 ] Batch(147/162) done. Loss: 0.0045 lr:0.001000 network_time: 0.0276 +[ Fri Sep 16 03:09:51 2022 ] Eval epoch: 97 +[ Fri Sep 16 03:11:41 2022 ] Mean test loss of 930 batches: 2.73584246635437. +[ Fri Sep 16 03:11:41 2022 ] Top1: 55.20% +[ Fri Sep 16 03:11:41 2022 ] Top5: 81.49% +[ Fri Sep 16 03:11:42 2022 ] Training epoch: 98 +[ Fri Sep 16 03:12:47 2022 ] Batch(85/162) done. Loss: 0.0076 lr:0.001000 network_time: 0.0250 +[ Fri Sep 16 03:13:43 2022 ] Eval epoch: 98 +[ Fri Sep 16 03:15:32 2022 ] Mean test loss of 930 batches: 2.759752035140991. +[ Fri Sep 16 03:15:33 2022 ] Top1: 54.60% +[ Fri Sep 16 03:15:33 2022 ] Top5: 81.10% +[ Fri Sep 16 03:15:33 2022 ] Training epoch: 99 +[ Fri Sep 16 03:15:54 2022 ] Batch(23/162) done. Loss: 0.0042 lr:0.001000 network_time: 0.0276 +[ Fri Sep 16 03:17:06 2022 ] Batch(123/162) done. Loss: 0.0093 lr:0.001000 network_time: 0.0273 +[ Fri Sep 16 03:17:34 2022 ] Eval epoch: 99 +[ Fri Sep 16 03:19:24 2022 ] Mean test loss of 930 batches: 2.6689939498901367. +[ Fri Sep 16 03:19:24 2022 ] Top1: 53.70% +[ Fri Sep 16 03:19:24 2022 ] Top5: 80.66% +[ Fri Sep 16 03:19:25 2022 ] Training epoch: 100 +[ Fri Sep 16 03:20:13 2022 ] Batch(61/162) done. Loss: 0.0058 lr:0.001000 network_time: 0.0324 +[ Fri Sep 16 03:21:25 2022 ] Batch(161/162) done. 
Loss: 0.0059 lr:0.001000 network_time: 0.0423 +[ Fri Sep 16 03:21:26 2022 ] Eval epoch: 100 +[ Fri Sep 16 03:23:15 2022 ] Mean test loss of 930 batches: 2.663466691970825. +[ Fri Sep 16 03:23:15 2022 ] Top1: 54.84% +[ Fri Sep 16 03:23:16 2022 ] Top5: 81.30% +[ Fri Sep 16 03:23:16 2022 ] Training epoch: 101 +[ Fri Sep 16 03:24:31 2022 ] Batch(99/162) done. Loss: 0.0043 lr:0.000100 network_time: 0.0320 +[ Fri Sep 16 03:25:17 2022 ] Eval epoch: 101 +[ Fri Sep 16 03:27:06 2022 ] Mean test loss of 930 batches: 2.7768349647521973. +[ Fri Sep 16 03:27:07 2022 ] Top1: 55.20% +[ Fri Sep 16 03:27:07 2022 ] Top5: 81.58% +[ Fri Sep 16 03:27:07 2022 ] Training epoch: 102 +[ Fri Sep 16 03:27:38 2022 ] Batch(37/162) done. Loss: 0.0066 lr:0.000100 network_time: 0.0269 +[ Fri Sep 16 03:28:51 2022 ] Batch(137/162) done. Loss: 0.0044 lr:0.000100 network_time: 0.0440 +[ Fri Sep 16 03:29:08 2022 ] Eval epoch: 102 +[ Fri Sep 16 03:30:58 2022 ] Mean test loss of 930 batches: 2.6662395000457764. +[ Fri Sep 16 03:30:58 2022 ] Top1: 54.99% +[ Fri Sep 16 03:30:58 2022 ] Top5: 81.35% +[ Fri Sep 16 03:30:59 2022 ] Training epoch: 103 +[ Fri Sep 16 03:31:57 2022 ] Batch(75/162) done. Loss: 0.0094 lr:0.000100 network_time: 0.0336 +[ Fri Sep 16 03:32:59 2022 ] Eval epoch: 103 +[ Fri Sep 16 03:34:49 2022 ] Mean test loss of 930 batches: 2.8566136360168457. +[ Fri Sep 16 03:34:50 2022 ] Top1: 55.30% +[ Fri Sep 16 03:34:50 2022 ] Top5: 81.46% +[ Fri Sep 16 03:34:50 2022 ] Training epoch: 104 +[ Fri Sep 16 03:35:04 2022 ] Batch(13/162) done. Loss: 0.0033 lr:0.000100 network_time: 0.0313 +[ Fri Sep 16 03:36:16 2022 ] Batch(113/162) done. Loss: 0.0040 lr:0.000100 network_time: 0.0251 +[ Fri Sep 16 03:36:52 2022 ] Eval epoch: 104 +[ Fri Sep 16 03:38:41 2022 ] Mean test loss of 930 batches: 2.852644681930542. +[ Fri Sep 16 03:38:41 2022 ] Top1: 55.15% +[ Fri Sep 16 03:38:42 2022 ] Top5: 81.41% +[ Fri Sep 16 03:38:42 2022 ] Training epoch: 105 +[ Fri Sep 16 03:39:23 2022 ] Batch(51/162) done. 
Loss: 0.0089 lr:0.000100 network_time: 0.0280 +[ Fri Sep 16 03:40:36 2022 ] Batch(151/162) done. Loss: 0.0130 lr:0.000100 network_time: 0.0296 +[ Fri Sep 16 03:40:43 2022 ] Eval epoch: 105 +[ Fri Sep 16 03:42:33 2022 ] Mean test loss of 930 batches: 2.7096853256225586. +[ Fri Sep 16 03:42:33 2022 ] Top1: 54.55% +[ Fri Sep 16 03:42:34 2022 ] Top5: 81.34% +[ Fri Sep 16 03:42:34 2022 ] Training epoch: 106 +[ Fri Sep 16 03:43:42 2022 ] Batch(89/162) done. Loss: 0.0056 lr:0.000100 network_time: 0.0260 +[ Fri Sep 16 03:44:35 2022 ] Eval epoch: 106 +[ Fri Sep 16 03:46:24 2022 ] Mean test loss of 930 batches: 2.6621322631835938. +[ Fri Sep 16 03:46:25 2022 ] Top1: 55.51% +[ Fri Sep 16 03:46:25 2022 ] Top5: 81.55% +[ Fri Sep 16 03:46:25 2022 ] Training epoch: 107 +[ Fri Sep 16 03:46:49 2022 ] Batch(27/162) done. Loss: 0.0082 lr:0.000100 network_time: 0.0310 +[ Fri Sep 16 03:48:02 2022 ] Batch(127/162) done. Loss: 0.0041 lr:0.000100 network_time: 0.0255 +[ Fri Sep 16 03:48:27 2022 ] Eval epoch: 107 +[ Fri Sep 16 03:50:16 2022 ] Mean test loss of 930 batches: 2.7165894508361816. +[ Fri Sep 16 03:50:16 2022 ] Top1: 55.25% +[ Fri Sep 16 03:50:17 2022 ] Top5: 81.56% +[ Fri Sep 16 03:50:17 2022 ] Training epoch: 108 +[ Fri Sep 16 03:51:08 2022 ] Batch(65/162) done. Loss: 0.0049 lr:0.000100 network_time: 0.0263 +[ Fri Sep 16 03:52:18 2022 ] Eval epoch: 108 +[ Fri Sep 16 03:54:08 2022 ] Mean test loss of 930 batches: 2.8998818397521973. +[ Fri Sep 16 03:54:08 2022 ] Top1: 55.12% +[ Fri Sep 16 03:54:08 2022 ] Top5: 81.39% +[ Fri Sep 16 03:54:09 2022 ] Training epoch: 109 +[ Fri Sep 16 03:54:15 2022 ] Batch(3/162) done. Loss: 0.0049 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 03:55:27 2022 ] Batch(103/162) done. Loss: 0.0049 lr:0.000100 network_time: 0.0318 +[ Fri Sep 16 03:56:10 2022 ] Eval epoch: 109 +[ Fri Sep 16 03:58:00 2022 ] Mean test loss of 930 batches: 2.7563014030456543. 
+[ Fri Sep 16 03:58:00 2022 ] Top1: 54.75% +[ Fri Sep 16 03:58:01 2022 ] Top5: 81.21% +[ Fri Sep 16 03:58:01 2022 ] Training epoch: 110 +[ Fri Sep 16 03:58:34 2022 ] Batch(41/162) done. Loss: 0.0130 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 03:59:47 2022 ] Batch(141/162) done. Loss: 0.0035 lr:0.000100 network_time: 0.0289 +[ Fri Sep 16 04:00:02 2022 ] Eval epoch: 110 +[ Fri Sep 16 04:01:51 2022 ] Mean test loss of 930 batches: 2.6887638568878174. +[ Fri Sep 16 04:01:52 2022 ] Top1: 54.75% +[ Fri Sep 16 04:01:52 2022 ] Top5: 81.20% +[ Fri Sep 16 04:01:53 2022 ] Training epoch: 111 +[ Fri Sep 16 04:02:54 2022 ] Batch(79/162) done. Loss: 0.0023 lr:0.000100 network_time: 0.0279 +[ Fri Sep 16 04:03:53 2022 ] Eval epoch: 111 +[ Fri Sep 16 04:05:43 2022 ] Mean test loss of 930 batches: 2.7811808586120605. +[ Fri Sep 16 04:05:43 2022 ] Top1: 53.83% +[ Fri Sep 16 04:05:43 2022 ] Top5: 80.74% +[ Fri Sep 16 04:05:44 2022 ] Training epoch: 112 +[ Fri Sep 16 04:05:59 2022 ] Batch(17/162) done. Loss: 0.0050 lr:0.000100 network_time: 0.0267 +[ Fri Sep 16 04:07:12 2022 ] Batch(117/162) done. Loss: 0.0069 lr:0.000100 network_time: 0.0230 +[ Fri Sep 16 04:07:44 2022 ] Eval epoch: 112 +[ Fri Sep 16 04:09:34 2022 ] Mean test loss of 930 batches: 2.7857918739318848. +[ Fri Sep 16 04:09:34 2022 ] Top1: 55.56% +[ Fri Sep 16 04:09:35 2022 ] Top5: 81.54% +[ Fri Sep 16 04:09:35 2022 ] Training epoch: 113 +[ Fri Sep 16 04:10:19 2022 ] Batch(55/162) done. Loss: 0.0080 lr:0.000100 network_time: 0.0274 +[ Fri Sep 16 04:11:31 2022 ] Batch(155/162) done. Loss: 0.0046 lr:0.000100 network_time: 0.0267 +[ Fri Sep 16 04:11:36 2022 ] Eval epoch: 113 +[ Fri Sep 16 04:13:25 2022 ] Mean test loss of 930 batches: 2.7522940635681152. +[ Fri Sep 16 04:13:26 2022 ] Top1: 52.42% +[ Fri Sep 16 04:13:26 2022 ] Top5: 79.89% +[ Fri Sep 16 04:13:27 2022 ] Training epoch: 114 +[ Fri Sep 16 04:14:38 2022 ] Batch(93/162) done. 
Loss: 0.0080 lr:0.000100 network_time: 0.0279 +[ Fri Sep 16 04:15:28 2022 ] Eval epoch: 114 +[ Fri Sep 16 04:17:17 2022 ] Mean test loss of 930 batches: 2.7601916790008545. +[ Fri Sep 16 04:17:18 2022 ] Top1: 55.44% +[ Fri Sep 16 04:17:18 2022 ] Top5: 81.62% +[ Fri Sep 16 04:17:18 2022 ] Training epoch: 115 +[ Fri Sep 16 04:17:44 2022 ] Batch(31/162) done. Loss: 0.0056 lr:0.000100 network_time: 0.0279 +[ Fri Sep 16 04:18:57 2022 ] Batch(131/162) done. Loss: 0.0066 lr:0.000100 network_time: 0.0255 +[ Fri Sep 16 04:19:19 2022 ] Eval epoch: 115 +[ Fri Sep 16 04:21:09 2022 ] Mean test loss of 930 batches: 2.7520415782928467. +[ Fri Sep 16 04:21:10 2022 ] Top1: 55.05% +[ Fri Sep 16 04:21:10 2022 ] Top5: 81.05% +[ Fri Sep 16 04:21:10 2022 ] Training epoch: 116 +[ Fri Sep 16 04:22:04 2022 ] Batch(69/162) done. Loss: 0.0031 lr:0.000100 network_time: 0.0274 +[ Fri Sep 16 04:23:11 2022 ] Eval epoch: 116 +[ Fri Sep 16 04:25:01 2022 ] Mean test loss of 930 batches: 2.7753899097442627. +[ Fri Sep 16 04:25:01 2022 ] Top1: 55.38% +[ Fri Sep 16 04:25:02 2022 ] Top5: 81.63% +[ Fri Sep 16 04:25:02 2022 ] Training epoch: 117 +[ Fri Sep 16 04:25:11 2022 ] Batch(7/162) done. Loss: 0.0121 lr:0.000100 network_time: 0.0308 +[ Fri Sep 16 04:26:24 2022 ] Batch(107/162) done. Loss: 0.0023 lr:0.000100 network_time: 0.0230 +[ Fri Sep 16 04:27:03 2022 ] Eval epoch: 117 +[ Fri Sep 16 04:28:52 2022 ] Mean test loss of 930 batches: 2.677675724029541. +[ Fri Sep 16 04:28:53 2022 ] Top1: 55.50% +[ Fri Sep 16 04:28:53 2022 ] Top5: 81.51% +[ Fri Sep 16 04:28:53 2022 ] Training epoch: 118 +[ Fri Sep 16 04:29:30 2022 ] Batch(45/162) done. Loss: 0.0061 lr:0.000100 network_time: 0.0275 +[ Fri Sep 16 04:30:42 2022 ] Batch(145/162) done. Loss: 0.0047 lr:0.000100 network_time: 0.0271 +[ Fri Sep 16 04:30:54 2022 ] Eval epoch: 118 +[ Fri Sep 16 04:32:44 2022 ] Mean test loss of 930 batches: 2.7359731197357178. 
+[ Fri Sep 16 04:32:44 2022 ] Top1: 55.38% +[ Fri Sep 16 04:32:45 2022 ] Top5: 81.53% +[ Fri Sep 16 04:32:45 2022 ] Training epoch: 119 +[ Fri Sep 16 04:33:49 2022 ] Batch(83/162) done. Loss: 0.0051 lr:0.000100 network_time: 0.0273 +[ Fri Sep 16 04:34:46 2022 ] Eval epoch: 119 +[ Fri Sep 16 04:36:36 2022 ] Mean test loss of 930 batches: 2.6921119689941406. +[ Fri Sep 16 04:36:36 2022 ] Top1: 55.72% +[ Fri Sep 16 04:36:36 2022 ] Top5: 81.66% +[ Fri Sep 16 04:36:37 2022 ] Training epoch: 120 +[ Fri Sep 16 04:36:56 2022 ] Batch(21/162) done. Loss: 0.0079 lr:0.000100 network_time: 0.0326 +[ Fri Sep 16 04:38:09 2022 ] Batch(121/162) done. Loss: 0.0039 lr:0.000100 network_time: 0.0281 +[ Fri Sep 16 04:38:38 2022 ] Eval epoch: 120 +[ Fri Sep 16 04:40:27 2022 ] Mean test loss of 930 batches: 2.6823599338531494. +[ Fri Sep 16 04:40:28 2022 ] Top1: 55.41% +[ Fri Sep 16 04:40:28 2022 ] Top5: 81.65% +[ Fri Sep 16 04:40:29 2022 ] Training epoch: 121 +[ Fri Sep 16 04:41:15 2022 ] Batch(59/162) done. Loss: 0.0046 lr:0.000100 network_time: 0.0319 +[ Fri Sep 16 04:42:28 2022 ] Batch(159/162) done. Loss: 0.0268 lr:0.000100 network_time: 0.0277 +[ Fri Sep 16 04:42:30 2022 ] Eval epoch: 121 +[ Fri Sep 16 04:44:19 2022 ] Mean test loss of 930 batches: 2.6829521656036377. +[ Fri Sep 16 04:44:20 2022 ] Top1: 55.41% +[ Fri Sep 16 04:44:20 2022 ] Top5: 81.51% +[ Fri Sep 16 04:44:20 2022 ] Training epoch: 122 +[ Fri Sep 16 04:45:35 2022 ] Batch(97/162) done. Loss: 0.0045 lr:0.000100 network_time: 0.0272 +[ Fri Sep 16 04:46:21 2022 ] Eval epoch: 122 +[ Fri Sep 16 04:48:11 2022 ] Mean test loss of 930 batches: 2.6855432987213135. +[ Fri Sep 16 04:48:12 2022 ] Top1: 55.09% +[ Fri Sep 16 04:48:12 2022 ] Top5: 81.43% +[ Fri Sep 16 04:48:12 2022 ] Training epoch: 123 +[ Fri Sep 16 04:48:41 2022 ] Batch(35/162) done. Loss: 0.0065 lr:0.000100 network_time: 0.0278 +[ Fri Sep 16 04:49:54 2022 ] Batch(135/162) done. 
Loss: 0.0055 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 04:50:13 2022 ] Eval epoch: 123 +[ Fri Sep 16 04:52:03 2022 ] Mean test loss of 930 batches: 2.808767557144165. +[ Fri Sep 16 04:52:03 2022 ] Top1: 54.43% +[ Fri Sep 16 04:52:04 2022 ] Top5: 81.16% +[ Fri Sep 16 04:52:04 2022 ] Training epoch: 124 +[ Fri Sep 16 04:53:01 2022 ] Batch(73/162) done. Loss: 0.0041 lr:0.000100 network_time: 0.0274 +[ Fri Sep 16 04:54:05 2022 ] Eval epoch: 124 +[ Fri Sep 16 04:55:55 2022 ] Mean test loss of 930 batches: 2.7997090816497803. +[ Fri Sep 16 04:55:55 2022 ] Top1: 55.73% +[ Fri Sep 16 04:55:56 2022 ] Top5: 81.60% +[ Fri Sep 16 04:55:56 2022 ] Training epoch: 125 +[ Fri Sep 16 04:56:08 2022 ] Batch(11/162) done. Loss: 0.0022 lr:0.000100 network_time: 0.0291 +[ Fri Sep 16 04:57:21 2022 ] Batch(111/162) done. Loss: 0.0030 lr:0.000100 network_time: 0.0370 +[ Fri Sep 16 04:57:57 2022 ] Eval epoch: 125 +[ Fri Sep 16 04:59:46 2022 ] Mean test loss of 930 batches: 2.8247811794281006. +[ Fri Sep 16 04:59:47 2022 ] Top1: 54.96% +[ Fri Sep 16 04:59:47 2022 ] Top5: 81.33% +[ Fri Sep 16 04:59:48 2022 ] Training epoch: 126 +[ Fri Sep 16 05:00:27 2022 ] Batch(49/162) done. Loss: 0.0083 lr:0.000100 network_time: 0.0265 +[ Fri Sep 16 05:01:40 2022 ] Batch(149/162) done. Loss: 0.0065 lr:0.000100 network_time: 0.0265 +[ Fri Sep 16 05:01:49 2022 ] Eval epoch: 126 +[ Fri Sep 16 05:03:38 2022 ] Mean test loss of 930 batches: 2.709444046020508. +[ Fri Sep 16 05:03:38 2022 ] Top1: 54.90% +[ Fri Sep 16 05:03:39 2022 ] Top5: 81.36% +[ Fri Sep 16 05:03:39 2022 ] Training epoch: 127 +[ Fri Sep 16 05:04:46 2022 ] Batch(87/162) done. Loss: 0.0068 lr:0.000100 network_time: 0.0284 +[ Fri Sep 16 05:05:40 2022 ] Eval epoch: 127 +[ Fri Sep 16 05:07:29 2022 ] Mean test loss of 930 batches: 2.721369743347168. +[ Fri Sep 16 05:07:30 2022 ] Top1: 55.40% +[ Fri Sep 16 05:07:30 2022 ] Top5: 81.58% +[ Fri Sep 16 05:07:31 2022 ] Training epoch: 128 +[ Fri Sep 16 05:07:52 2022 ] Batch(25/162) done. 
Loss: 0.0107 lr:0.000100 network_time: 0.0350 +[ Fri Sep 16 05:09:05 2022 ] Batch(125/162) done. Loss: 0.0059 lr:0.000100 network_time: 0.0300 +[ Fri Sep 16 05:09:32 2022 ] Eval epoch: 128 +[ Fri Sep 16 05:11:21 2022 ] Mean test loss of 930 batches: 2.696549892425537. +[ Fri Sep 16 05:11:21 2022 ] Top1: 55.42% +[ Fri Sep 16 05:11:22 2022 ] Top5: 81.51% +[ Fri Sep 16 05:11:22 2022 ] Training epoch: 129 +[ Fri Sep 16 05:12:11 2022 ] Batch(63/162) done. Loss: 0.0023 lr:0.000100 network_time: 0.0281 +[ Fri Sep 16 05:13:23 2022 ] Eval epoch: 129 +[ Fri Sep 16 05:15:12 2022 ] Mean test loss of 930 batches: 2.860775947570801. +[ Fri Sep 16 05:15:13 2022 ] Top1: 55.20% +[ Fri Sep 16 05:15:13 2022 ] Top5: 81.41% +[ Fri Sep 16 05:15:13 2022 ] Training epoch: 130 +[ Fri Sep 16 05:15:18 2022 ] Batch(1/162) done. Loss: 0.0050 lr:0.000100 network_time: 0.0282 +[ Fri Sep 16 05:16:30 2022 ] Batch(101/162) done. Loss: 0.0149 lr:0.000100 network_time: 0.0232 +[ Fri Sep 16 05:17:14 2022 ] Eval epoch: 130 +[ Fri Sep 16 05:19:04 2022 ] Mean test loss of 930 batches: 2.670889139175415. +[ Fri Sep 16 05:19:04 2022 ] Top1: 55.61% +[ Fri Sep 16 05:19:04 2022 ] Top5: 81.70% +[ Fri Sep 16 05:19:05 2022 ] Training epoch: 131 +[ Fri Sep 16 05:19:37 2022 ] Batch(39/162) done. Loss: 0.0060 lr:0.000100 network_time: 0.0342 +[ Fri Sep 16 05:20:50 2022 ] Batch(139/162) done. Loss: 0.0074 lr:0.000100 network_time: 0.0274 +[ Fri Sep 16 05:21:06 2022 ] Eval epoch: 131 +[ Fri Sep 16 05:22:55 2022 ] Mean test loss of 930 batches: 2.783825397491455. +[ Fri Sep 16 05:22:56 2022 ] Top1: 55.61% +[ Fri Sep 16 05:22:56 2022 ] Top5: 81.69% +[ Fri Sep 16 05:22:57 2022 ] Training epoch: 132 +[ Fri Sep 16 05:23:56 2022 ] Batch(77/162) done. Loss: 0.0043 lr:0.000100 network_time: 0.0273 +[ Fri Sep 16 05:24:58 2022 ] Eval epoch: 132 +[ Fri Sep 16 05:26:47 2022 ] Mean test loss of 930 batches: 2.690352201461792. 
+[ Fri Sep 16 05:26:47 2022 ] Top1: 54.85% +[ Fri Sep 16 05:26:48 2022 ] Top5: 81.45% +[ Fri Sep 16 05:26:48 2022 ] Training epoch: 133 +[ Fri Sep 16 05:27:03 2022 ] Batch(15/162) done. Loss: 0.0041 lr:0.000100 network_time: 0.0273 +[ Fri Sep 16 05:28:15 2022 ] Batch(115/162) done. Loss: 0.0088 lr:0.000100 network_time: 0.0270 +[ Fri Sep 16 05:28:49 2022 ] Eval epoch: 133 +[ Fri Sep 16 05:30:39 2022 ] Mean test loss of 930 batches: 2.7724499702453613. +[ Fri Sep 16 05:30:39 2022 ] Top1: 53.06% +[ Fri Sep 16 05:30:39 2022 ] Top5: 80.30% +[ Fri Sep 16 05:30:40 2022 ] Training epoch: 134 +[ Fri Sep 16 05:31:22 2022 ] Batch(53/162) done. Loss: 0.0106 lr:0.000100 network_time: 0.0274 +[ Fri Sep 16 05:32:35 2022 ] Batch(153/162) done. Loss: 0.0159 lr:0.000100 network_time: 0.0266 +[ Fri Sep 16 05:32:41 2022 ] Eval epoch: 134 +[ Fri Sep 16 05:34:30 2022 ] Mean test loss of 930 batches: 2.824082136154175. +[ Fri Sep 16 05:34:31 2022 ] Top1: 53.24% +[ Fri Sep 16 05:34:31 2022 ] Top5: 80.41% +[ Fri Sep 16 05:34:31 2022 ] Training epoch: 135 +[ Fri Sep 16 05:35:41 2022 ] Batch(91/162) done. Loss: 0.0025 lr:0.000100 network_time: 0.0316 +[ Fri Sep 16 05:36:32 2022 ] Eval epoch: 135 +[ Fri Sep 16 05:38:21 2022 ] Mean test loss of 930 batches: 2.7413904666900635. +[ Fri Sep 16 05:38:22 2022 ] Top1: 55.46% +[ Fri Sep 16 05:38:22 2022 ] Top5: 81.65% +[ Fri Sep 16 05:38:22 2022 ] Training epoch: 136 +[ Fri Sep 16 05:38:47 2022 ] Batch(29/162) done. Loss: 0.0073 lr:0.000100 network_time: 0.0287 +[ Fri Sep 16 05:40:00 2022 ] Batch(129/162) done. Loss: 0.0043 lr:0.000100 network_time: 0.0282 +[ Fri Sep 16 05:40:23 2022 ] Eval epoch: 136 +[ Fri Sep 16 05:42:13 2022 ] Mean test loss of 930 batches: 2.650205373764038. +[ Fri Sep 16 05:42:13 2022 ] Top1: 55.57% +[ Fri Sep 16 05:42:14 2022 ] Top5: 81.71% +[ Fri Sep 16 05:42:14 2022 ] Training epoch: 137 +[ Fri Sep 16 05:43:07 2022 ] Batch(67/162) done. 
Loss: 0.0036 lr:0.000100 network_time: 0.0311 +[ Fri Sep 16 05:44:16 2022 ] Eval epoch: 137 +[ Fri Sep 16 05:46:05 2022 ] Mean test loss of 930 batches: 2.8283350467681885. +[ Fri Sep 16 05:46:05 2022 ] Top1: 55.00% +[ Fri Sep 16 05:46:06 2022 ] Top5: 81.16% +[ Fri Sep 16 05:46:06 2022 ] Training epoch: 138 +[ Fri Sep 16 05:46:14 2022 ] Batch(5/162) done. Loss: 0.0037 lr:0.000100 network_time: 0.0520 +[ Fri Sep 16 05:47:26 2022 ] Batch(105/162) done. Loss: 0.0045 lr:0.000100 network_time: 0.0287 +[ Fri Sep 16 05:48:07 2022 ] Eval epoch: 138 +[ Fri Sep 16 05:49:57 2022 ] Mean test loss of 930 batches: 2.746467113494873. +[ Fri Sep 16 05:49:57 2022 ] Top1: 55.47% +[ Fri Sep 16 05:49:57 2022 ] Top5: 81.55% +[ Fri Sep 16 05:49:58 2022 ] Training epoch: 139 +[ Fri Sep 16 05:50:33 2022 ] Batch(43/162) done. Loss: 0.0108 lr:0.000100 network_time: 0.0343 +[ Fri Sep 16 05:51:46 2022 ] Batch(143/162) done. Loss: 0.0025 lr:0.000100 network_time: 0.0269 +[ Fri Sep 16 05:51:59 2022 ] Eval epoch: 139 +[ Fri Sep 16 05:53:48 2022 ] Mean test loss of 930 batches: 2.8351552486419678. +[ Fri Sep 16 05:53:49 2022 ] Top1: 54.96% +[ Fri Sep 16 05:53:49 2022 ] Top5: 81.28% +[ Fri Sep 16 05:53:49 2022 ] Training epoch: 140 +[ Fri Sep 16 05:54:52 2022 ] Batch(81/162) done. Loss: 0.0037 lr:0.000100 network_time: 0.0271 +[ Fri Sep 16 05:55:50 2022 ] Eval epoch: 140 +[ Fri Sep 16 05:57:39 2022 ] Mean test loss of 930 batches: 2.6571571826934814. 
+[ Fri Sep 16 05:57:39 2022 ] Top1: 54.60% +[ Fri Sep 16 05:57:40 2022 ] Top5: 81.13% diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_motion_xset/shift_gcn.py b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_motion_xset/shift_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..0731e82fdb8bd0ae2e4c4ef4f29f7aab0c458bf4 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_motion_xset/shift_gcn.py @@ -0,0 +1,216 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math + +import sys +sys.path.append("./model/Temporal_shift/") + +from cuda.shift import Shift + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class Shift_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(Shift_tcn, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.bn = nn.BatchNorm2d(in_channels) + self.bn2 = nn.BatchNorm2d(in_channels) + bn_init(self.bn2, 1) + self.relu = nn.ReLU(inplace=True) + self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1) + self.shift_out = 
Shift(channel=out_channels, stride=stride, init_scale=1) + + self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1) + nn.init.kaiming_normal(self.temporal_linear.weight, mode='fan_out') + + def forward(self, x): + x = self.bn(x) + # shift1 + x = self.shift_in(x) + x = self.temporal_linear(x) + x = self.relu(x) + # shift2 + x = self.shift_out(x) + x = self.bn2(x) + return x + + +class Shift_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3): + super(Shift_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0,math.sqrt(1.0/out_channels)) + + self.Linear_bias = nn.Parameter(torch.zeros(1,1,out_channels,requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Linear_bias, 0) + + self.Feature_Mask = nn.Parameter(torch.ones(1,25,in_channels, requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Feature_Mask, 0) + + self.bn = nn.BatchNorm1d(25*out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + + index_array = np.empty(25*in_channels).astype(np.int) + for i in range(25): + for j in range(in_channels): + index_array[i*in_channels + j] = (i*in_channels + j + j*in_channels)%(in_channels*25) + self.shift_in = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + index_array = np.empty(25*out_channels).astype(np.int) + for i in range(25): + for j in range(out_channels): + index_array[i*out_channels + j] = (i*out_channels + j - j*out_channels)%(out_channels*25) + 
self.shift_out = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + + def forward(self, x0): + n, c, t, v = x0.size() + x = x0.permute(0,2,3,1).contiguous() + + # shift1 + x = x.view(n*t,v*c) + x = torch.index_select(x, 1, self.shift_in) + x = x.view(n*t,v,c) + x = x * (torch.tanh(self.Feature_Mask)+1) + + x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous() # nt,v,c + x = x + self.Linear_bias + + # shift2 + x = x.view(n*t,-1) + x = torch.index_select(x, 1, self.shift_out) + x = self.bn(x) + x = x.view(n,t,v,self.out_channels).permute(0,3,1,2) # n,c,t,v + + x = x + self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = Shift_gcn(in_channels, out_channels, A) + self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride) + self.relu = nn.ReLU() + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + else: + self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride) + + def forward(self, x): + x = self.tcn1(self.gcn1(x)) + self.residual(x) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A) + self.l3 = TCN_GCN_unit(64, 64, A) + self.l4 = TCN_GCN_unit(64, 64, A) + self.l5 = TCN_GCN_unit(64, 128, A, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A) + self.l7 = TCN_GCN_unit(128, 128, A) + self.l8 = TCN_GCN_unit(128, 256, A, stride=2) + self.l9 = 
TCN_GCN_unit(256, 256, A) + self.l10 = TCN_GCN_unit(256, 256, A) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x) + x = self.l2(x) + x = self.l3(x) + x = self.l4(x) + x = self.l5(x) + x = self.l6(x) + x = self.l7(x) + x = self.l8(x) + x = self.l9(x) + x = self.l10(x) + + # N*M,C,T,V + c_new = x.size(1) + x = x.view(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_xset/config.yaml b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_xset/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f888cf8527396d608be2e02f79607079c9bc0f99 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_xset/config.yaml @@ -0,0 +1,56 @@ +Experiment_name: ntu120_joint_xset +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xset/train_joint.yaml +device: +- 4 +- 5 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_joint_xset +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_joint.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl +train_feeder_args: + data_path: 
/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_joint.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_joint_xset diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_xset/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_xset/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..b6b3fd132508588fbd019208eace4626d49cc915 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_xset/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:491e9c65f6add18507396ed4656525a266bcb9d27d032137752a84ee32646935 +size 34946665 diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_xset/log.txt b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_xset/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..10087f99f6718a992ad5aefb19e5b5757b8c0394 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_xset/log.txt @@ -0,0 +1,929 @@ +[ Thu Sep 15 20:53:21 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_joint_xset', 'model_saved_name': './save_models/ntu120_joint_xset', 'Experiment_name': 'ntu120_joint_xset', 'config': './config/ntu120_xset/train_joint.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 
'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xset/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [4, 5], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Thu Sep 15 20:53:21 2022 ] Training epoch: 1 +[ Thu Sep 15 20:54:39 2022 ] Batch(99/162) done. Loss: 3.0592 lr:0.100000 network_time: 0.0311 +[ Thu Sep 15 20:55:24 2022 ] Eval epoch: 1 +[ Thu Sep 15 20:57:13 2022 ] Mean test loss of 930 batches: 4.616232872009277. +[ Thu Sep 15 20:57:14 2022 ] Top1: 9.34% +[ Thu Sep 15 20:57:14 2022 ] Top5: 28.52% +[ Thu Sep 15 20:57:14 2022 ] Training epoch: 2 +[ Thu Sep 15 20:57:45 2022 ] Batch(37/162) done. Loss: 2.1872 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 20:58:58 2022 ] Batch(137/162) done. Loss: 2.5467 lr:0.100000 network_time: 0.0305 +[ Thu Sep 15 20:59:16 2022 ] Eval epoch: 2 +[ Thu Sep 15 21:01:04 2022 ] Mean test loss of 930 batches: 4.0941853523254395. +[ Thu Sep 15 21:01:04 2022 ] Top1: 17.66% +[ Thu Sep 15 21:01:05 2022 ] Top5: 40.94% +[ Thu Sep 15 21:01:05 2022 ] Training epoch: 3 +[ Thu Sep 15 21:02:04 2022 ] Batch(75/162) done. Loss: 2.1980 lr:0.100000 network_time: 0.0310 +[ Thu Sep 15 21:03:07 2022 ] Eval epoch: 3 +[ Thu Sep 15 21:04:56 2022 ] Mean test loss of 930 batches: 3.8105180263519287. +[ Thu Sep 15 21:04:57 2022 ] Top1: 22.06% +[ Thu Sep 15 21:04:57 2022 ] Top5: 46.13% +[ Thu Sep 15 21:04:57 2022 ] Training epoch: 4 +[ Thu Sep 15 21:05:11 2022 ] Batch(13/162) done. 
Loss: 2.1092 lr:0.100000 network_time: 0.0317 +[ Thu Sep 15 21:06:24 2022 ] Batch(113/162) done. Loss: 2.0003 lr:0.100000 network_time: 0.0312 +[ Thu Sep 15 21:06:59 2022 ] Eval epoch: 4 +[ Thu Sep 15 21:08:48 2022 ] Mean test loss of 930 batches: 3.274050235748291. +[ Thu Sep 15 21:08:48 2022 ] Top1: 25.47% +[ Thu Sep 15 21:08:49 2022 ] Top5: 52.02% +[ Thu Sep 15 21:08:49 2022 ] Training epoch: 5 +[ Thu Sep 15 21:09:30 2022 ] Batch(51/162) done. Loss: 1.7818 lr:0.100000 network_time: 0.0321 +[ Thu Sep 15 21:10:43 2022 ] Batch(151/162) done. Loss: 2.0315 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 21:10:50 2022 ] Eval epoch: 5 +[ Thu Sep 15 21:12:39 2022 ] Mean test loss of 930 batches: 3.1566548347473145. +[ Thu Sep 15 21:12:39 2022 ] Top1: 30.55% +[ Thu Sep 15 21:12:40 2022 ] Top5: 57.31% +[ Thu Sep 15 21:12:40 2022 ] Training epoch: 6 +[ Thu Sep 15 21:13:49 2022 ] Batch(89/162) done. Loss: 1.6567 lr:0.100000 network_time: 0.0264 +[ Thu Sep 15 21:14:41 2022 ] Eval epoch: 6 +[ Thu Sep 15 21:16:30 2022 ] Mean test loss of 930 batches: 3.1373534202575684. +[ Thu Sep 15 21:16:30 2022 ] Top1: 30.52% +[ Thu Sep 15 21:16:31 2022 ] Top5: 59.38% +[ Thu Sep 15 21:16:31 2022 ] Training epoch: 7 +[ Thu Sep 15 21:16:55 2022 ] Batch(27/162) done. Loss: 1.5083 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 21:18:07 2022 ] Batch(127/162) done. Loss: 1.1499 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 21:18:32 2022 ] Eval epoch: 7 +[ Thu Sep 15 21:20:21 2022 ] Mean test loss of 930 batches: 2.8039331436157227. +[ Thu Sep 15 21:20:22 2022 ] Top1: 33.04% +[ Thu Sep 15 21:20:22 2022 ] Top5: 64.04% +[ Thu Sep 15 21:20:22 2022 ] Training epoch: 8 +[ Thu Sep 15 21:21:13 2022 ] Batch(65/162) done. Loss: 1.4526 lr:0.100000 network_time: 0.0333 +[ Thu Sep 15 21:22:23 2022 ] Eval epoch: 8 +[ Thu Sep 15 21:24:13 2022 ] Mean test loss of 930 batches: 2.763702392578125. 
+[ Thu Sep 15 21:24:13 2022 ] Top1: 34.54% +[ Thu Sep 15 21:24:14 2022 ] Top5: 65.50% +[ Thu Sep 15 21:24:14 2022 ] Training epoch: 9 +[ Thu Sep 15 21:24:20 2022 ] Batch(3/162) done. Loss: 0.9275 lr:0.100000 network_time: 0.0312 +[ Thu Sep 15 21:25:33 2022 ] Batch(103/162) done. Loss: 1.3035 lr:0.100000 network_time: 0.0286 +[ Thu Sep 15 21:26:15 2022 ] Eval epoch: 9 +[ Thu Sep 15 21:28:04 2022 ] Mean test loss of 930 batches: 2.8759517669677734. +[ Thu Sep 15 21:28:04 2022 ] Top1: 35.49% +[ Thu Sep 15 21:28:05 2022 ] Top5: 66.09% +[ Thu Sep 15 21:28:05 2022 ] Training epoch: 10 +[ Thu Sep 15 21:28:39 2022 ] Batch(41/162) done. Loss: 1.0014 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 21:29:51 2022 ] Batch(141/162) done. Loss: 1.1646 lr:0.100000 network_time: 0.0262 +[ Thu Sep 15 21:30:06 2022 ] Eval epoch: 10 +[ Thu Sep 15 21:31:55 2022 ] Mean test loss of 930 batches: 2.6339988708496094. +[ Thu Sep 15 21:31:55 2022 ] Top1: 38.66% +[ Thu Sep 15 21:31:56 2022 ] Top5: 68.13% +[ Thu Sep 15 21:31:56 2022 ] Training epoch: 11 +[ Thu Sep 15 21:32:57 2022 ] Batch(79/162) done. Loss: 1.1789 lr:0.100000 network_time: 0.0293 +[ Thu Sep 15 21:33:57 2022 ] Eval epoch: 11 +[ Thu Sep 15 21:35:45 2022 ] Mean test loss of 930 batches: 3.1947619915008545. +[ Thu Sep 15 21:35:45 2022 ] Top1: 34.92% +[ Thu Sep 15 21:35:46 2022 ] Top5: 66.34% +[ Thu Sep 15 21:35:46 2022 ] Training epoch: 12 +[ Thu Sep 15 21:36:02 2022 ] Batch(17/162) done. Loss: 0.9280 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 21:37:15 2022 ] Batch(117/162) done. Loss: 1.1294 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 21:37:47 2022 ] Eval epoch: 12 +[ Thu Sep 15 21:39:35 2022 ] Mean test loss of 930 batches: 2.5820627212524414. +[ Thu Sep 15 21:39:36 2022 ] Top1: 40.86% +[ Thu Sep 15 21:39:36 2022 ] Top5: 71.07% +[ Thu Sep 15 21:39:36 2022 ] Training epoch: 13 +[ Thu Sep 15 21:40:20 2022 ] Batch(55/162) done. Loss: 0.7400 lr:0.100000 network_time: 0.0305 +[ Thu Sep 15 21:41:33 2022 ] Batch(155/162) done. 
Loss: 0.9805 lr:0.100000 network_time: 0.0278 +[ Thu Sep 15 21:41:38 2022 ] Eval epoch: 13 +[ Thu Sep 15 21:43:27 2022 ] Mean test loss of 930 batches: 2.6807053089141846. +[ Thu Sep 15 21:43:27 2022 ] Top1: 38.93% +[ Thu Sep 15 21:43:28 2022 ] Top5: 71.42% +[ Thu Sep 15 21:43:28 2022 ] Training epoch: 14 +[ Thu Sep 15 21:44:40 2022 ] Batch(93/162) done. Loss: 1.1675 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 21:45:29 2022 ] Eval epoch: 14 +[ Thu Sep 15 21:47:19 2022 ] Mean test loss of 930 batches: 2.6006383895874023. +[ Thu Sep 15 21:47:19 2022 ] Top1: 41.03% +[ Thu Sep 15 21:47:20 2022 ] Top5: 71.68% +[ Thu Sep 15 21:47:20 2022 ] Training epoch: 15 +[ Thu Sep 15 21:47:47 2022 ] Batch(31/162) done. Loss: 0.9053 lr:0.100000 network_time: 0.0286 +[ Thu Sep 15 21:48:59 2022 ] Batch(131/162) done. Loss: 0.7565 lr:0.100000 network_time: 0.0318 +[ Thu Sep 15 21:49:22 2022 ] Eval epoch: 15 +[ Thu Sep 15 21:51:11 2022 ] Mean test loss of 930 batches: 2.5753092765808105. +[ Thu Sep 15 21:51:11 2022 ] Top1: 41.86% +[ Thu Sep 15 21:51:12 2022 ] Top5: 71.00% +[ Thu Sep 15 21:51:12 2022 ] Training epoch: 16 +[ Thu Sep 15 21:52:06 2022 ] Batch(69/162) done. Loss: 0.9629 lr:0.100000 network_time: 0.0280 +[ Thu Sep 15 21:53:13 2022 ] Eval epoch: 16 +[ Thu Sep 15 21:55:02 2022 ] Mean test loss of 930 batches: 2.642726182937622. +[ Thu Sep 15 21:55:02 2022 ] Top1: 42.77% +[ Thu Sep 15 21:55:03 2022 ] Top5: 73.14% +[ Thu Sep 15 21:55:03 2022 ] Training epoch: 17 +[ Thu Sep 15 21:55:12 2022 ] Batch(7/162) done. Loss: 0.7531 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 21:56:25 2022 ] Batch(107/162) done. Loss: 0.9298 lr:0.100000 network_time: 0.0305 +[ Thu Sep 15 21:57:04 2022 ] Eval epoch: 17 +[ Thu Sep 15 21:58:52 2022 ] Mean test loss of 930 batches: 2.701462984085083. +[ Thu Sep 15 21:58:53 2022 ] Top1: 41.56% +[ Thu Sep 15 21:58:53 2022 ] Top5: 72.56% +[ Thu Sep 15 21:58:54 2022 ] Training epoch: 18 +[ Thu Sep 15 21:59:30 2022 ] Batch(45/162) done. 
Loss: 0.9319 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 22:00:43 2022 ] Batch(145/162) done. Loss: 0.5295 lr:0.100000 network_time: 0.0275 +[ Thu Sep 15 22:00:55 2022 ] Eval epoch: 18 +[ Thu Sep 15 22:02:43 2022 ] Mean test loss of 930 batches: 2.777517080307007. +[ Thu Sep 15 22:02:43 2022 ] Top1: 42.65% +[ Thu Sep 15 22:02:43 2022 ] Top5: 73.63% +[ Thu Sep 15 22:02:44 2022 ] Training epoch: 19 +[ Thu Sep 15 22:03:48 2022 ] Batch(83/162) done. Loss: 0.5384 lr:0.100000 network_time: 0.0337 +[ Thu Sep 15 22:04:45 2022 ] Eval epoch: 19 +[ Thu Sep 15 22:06:33 2022 ] Mean test loss of 930 batches: 2.634265422821045. +[ Thu Sep 15 22:06:34 2022 ] Top1: 43.27% +[ Thu Sep 15 22:06:34 2022 ] Top5: 73.45% +[ Thu Sep 15 22:06:34 2022 ] Training epoch: 20 +[ Thu Sep 15 22:06:54 2022 ] Batch(21/162) done. Loss: 0.9205 lr:0.100000 network_time: 0.0314 +[ Thu Sep 15 22:08:07 2022 ] Batch(121/162) done. Loss: 0.5672 lr:0.100000 network_time: 0.0334 +[ Thu Sep 15 22:08:36 2022 ] Eval epoch: 20 +[ Thu Sep 15 22:10:24 2022 ] Mean test loss of 930 batches: 2.6961050033569336. +[ Thu Sep 15 22:10:24 2022 ] Top1: 42.92% +[ Thu Sep 15 22:10:25 2022 ] Top5: 73.06% +[ Thu Sep 15 22:10:25 2022 ] Training epoch: 21 +[ Thu Sep 15 22:11:12 2022 ] Batch(59/162) done. Loss: 0.4838 lr:0.100000 network_time: 0.0336 +[ Thu Sep 15 22:12:25 2022 ] Batch(159/162) done. Loss: 0.8854 lr:0.100000 network_time: 0.0309 +[ Thu Sep 15 22:12:27 2022 ] Eval epoch: 21 +[ Thu Sep 15 22:14:15 2022 ] Mean test loss of 930 batches: 2.5363717079162598. +[ Thu Sep 15 22:14:16 2022 ] Top1: 43.11% +[ Thu Sep 15 22:14:16 2022 ] Top5: 73.08% +[ Thu Sep 15 22:14:16 2022 ] Training epoch: 22 +[ Thu Sep 15 22:15:31 2022 ] Batch(97/162) done. Loss: 0.6675 lr:0.100000 network_time: 0.0303 +[ Thu Sep 15 22:16:18 2022 ] Eval epoch: 22 +[ Thu Sep 15 22:18:07 2022 ] Mean test loss of 930 batches: 2.5331027507781982. 
+[ Thu Sep 15 22:18:07 2022 ] Top1: 45.46% +[ Thu Sep 15 22:18:07 2022 ] Top5: 73.80% +[ Thu Sep 15 22:18:08 2022 ] Training epoch: 23 +[ Thu Sep 15 22:18:37 2022 ] Batch(35/162) done. Loss: 0.6239 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 22:19:49 2022 ] Batch(135/162) done. Loss: 0.4228 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 22:20:08 2022 ] Eval epoch: 23 +[ Thu Sep 15 22:21:56 2022 ] Mean test loss of 930 batches: 2.4731662273406982. +[ Thu Sep 15 22:21:57 2022 ] Top1: 44.55% +[ Thu Sep 15 22:21:57 2022 ] Top5: 74.01% +[ Thu Sep 15 22:21:58 2022 ] Training epoch: 24 +[ Thu Sep 15 22:22:55 2022 ] Batch(73/162) done. Loss: 0.4733 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 22:23:59 2022 ] Eval epoch: 24 +[ Thu Sep 15 22:25:47 2022 ] Mean test loss of 930 batches: 2.79464054107666. +[ Thu Sep 15 22:25:47 2022 ] Top1: 44.01% +[ Thu Sep 15 22:25:48 2022 ] Top5: 73.97% +[ Thu Sep 15 22:25:48 2022 ] Training epoch: 25 +[ Thu Sep 15 22:26:00 2022 ] Batch(11/162) done. Loss: 0.4082 lr:0.100000 network_time: 0.0352 +[ Thu Sep 15 22:27:13 2022 ] Batch(111/162) done. Loss: 0.5356 lr:0.100000 network_time: 0.0281 +[ Thu Sep 15 22:27:49 2022 ] Eval epoch: 25 +[ Thu Sep 15 22:29:37 2022 ] Mean test loss of 930 batches: 2.302302598953247. +[ Thu Sep 15 22:29:38 2022 ] Top1: 46.27% +[ Thu Sep 15 22:29:38 2022 ] Top5: 76.14% +[ Thu Sep 15 22:29:38 2022 ] Training epoch: 26 +[ Thu Sep 15 22:30:18 2022 ] Batch(49/162) done. Loss: 0.4845 lr:0.100000 network_time: 0.0290 +[ Thu Sep 15 22:31:30 2022 ] Batch(149/162) done. Loss: 0.7163 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 22:31:39 2022 ] Eval epoch: 26 +[ Thu Sep 15 22:33:27 2022 ] Mean test loss of 930 batches: 2.5283055305480957. +[ Thu Sep 15 22:33:28 2022 ] Top1: 47.49% +[ Thu Sep 15 22:33:28 2022 ] Top5: 77.84% +[ Thu Sep 15 22:33:28 2022 ] Training epoch: 27 +[ Thu Sep 15 22:34:36 2022 ] Batch(87/162) done. 
Loss: 0.5433 lr:0.100000 network_time: 0.0330 +[ Thu Sep 15 22:35:30 2022 ] Eval epoch: 27 +[ Thu Sep 15 22:37:18 2022 ] Mean test loss of 930 batches: 2.756495714187622. +[ Thu Sep 15 22:37:18 2022 ] Top1: 43.80% +[ Thu Sep 15 22:37:19 2022 ] Top5: 73.11% +[ Thu Sep 15 22:37:19 2022 ] Training epoch: 28 +[ Thu Sep 15 22:37:41 2022 ] Batch(25/162) done. Loss: 0.3972 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 22:38:54 2022 ] Batch(125/162) done. Loss: 0.6571 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 22:39:20 2022 ] Eval epoch: 28 +[ Thu Sep 15 22:41:08 2022 ] Mean test loss of 930 batches: 2.6355648040771484. +[ Thu Sep 15 22:41:08 2022 ] Top1: 46.46% +[ Thu Sep 15 22:41:09 2022 ] Top5: 75.25% +[ Thu Sep 15 22:41:09 2022 ] Training epoch: 29 +[ Thu Sep 15 22:41:59 2022 ] Batch(63/162) done. Loss: 0.4874 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 22:43:10 2022 ] Eval epoch: 29 +[ Thu Sep 15 22:44:58 2022 ] Mean test loss of 930 batches: 2.8874144554138184. +[ Thu Sep 15 22:44:58 2022 ] Top1: 44.57% +[ Thu Sep 15 22:44:59 2022 ] Top5: 74.14% +[ Thu Sep 15 22:44:59 2022 ] Training epoch: 30 +[ Thu Sep 15 22:45:03 2022 ] Batch(1/162) done. Loss: 0.3427 lr:0.100000 network_time: 0.0323 +[ Thu Sep 15 22:46:16 2022 ] Batch(101/162) done. Loss: 0.4298 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 22:47:00 2022 ] Eval epoch: 30 +[ Thu Sep 15 22:48:48 2022 ] Mean test loss of 930 batches: 2.7167603969573975. +[ Thu Sep 15 22:48:48 2022 ] Top1: 45.86% +[ Thu Sep 15 22:48:48 2022 ] Top5: 74.97% +[ Thu Sep 15 22:48:49 2022 ] Training epoch: 31 +[ Thu Sep 15 22:49:21 2022 ] Batch(39/162) done. Loss: 0.3104 lr:0.100000 network_time: 0.0259 +[ Thu Sep 15 22:50:34 2022 ] Batch(139/162) done. Loss: 0.5252 lr:0.100000 network_time: 0.0329 +[ Thu Sep 15 22:50:50 2022 ] Eval epoch: 31 +[ Thu Sep 15 22:52:38 2022 ] Mean test loss of 930 batches: 2.8605589866638184. 
+[ Thu Sep 15 22:52:39 2022 ] Top1: 44.77% +[ Thu Sep 15 22:52:39 2022 ] Top5: 73.49% +[ Thu Sep 15 22:52:39 2022 ] Training epoch: 32 +[ Thu Sep 15 22:53:39 2022 ] Batch(77/162) done. Loss: 0.3644 lr:0.100000 network_time: 0.0301 +[ Thu Sep 15 22:54:40 2022 ] Eval epoch: 32 +[ Thu Sep 15 22:56:28 2022 ] Mean test loss of 930 batches: 2.744288682937622. +[ Thu Sep 15 22:56:29 2022 ] Top1: 45.83% +[ Thu Sep 15 22:56:29 2022 ] Top5: 74.21% +[ Thu Sep 15 22:56:29 2022 ] Training epoch: 33 +[ Thu Sep 15 22:56:44 2022 ] Batch(15/162) done. Loss: 0.2899 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 22:57:57 2022 ] Batch(115/162) done. Loss: 0.9063 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 22:58:30 2022 ] Eval epoch: 33 +[ Thu Sep 15 23:00:19 2022 ] Mean test loss of 930 batches: 2.8654515743255615. +[ Thu Sep 15 23:00:19 2022 ] Top1: 46.32% +[ Thu Sep 15 23:00:20 2022 ] Top5: 75.20% +[ Thu Sep 15 23:00:20 2022 ] Training epoch: 34 +[ Thu Sep 15 23:01:02 2022 ] Batch(53/162) done. Loss: 0.5861 lr:0.100000 network_time: 0.0301 +[ Thu Sep 15 23:02:15 2022 ] Batch(153/162) done. Loss: 0.5252 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 23:02:21 2022 ] Eval epoch: 34 +[ Thu Sep 15 23:04:10 2022 ] Mean test loss of 930 batches: 2.6413774490356445. +[ Thu Sep 15 23:04:10 2022 ] Top1: 48.05% +[ Thu Sep 15 23:04:10 2022 ] Top5: 76.22% +[ Thu Sep 15 23:04:11 2022 ] Training epoch: 35 +[ Thu Sep 15 23:05:20 2022 ] Batch(91/162) done. Loss: 0.7987 lr:0.100000 network_time: 0.0278 +[ Thu Sep 15 23:06:11 2022 ] Eval epoch: 35 +[ Thu Sep 15 23:07:59 2022 ] Mean test loss of 930 batches: 3.157620906829834. +[ Thu Sep 15 23:08:00 2022 ] Top1: 44.87% +[ Thu Sep 15 23:08:00 2022 ] Top5: 74.35% +[ Thu Sep 15 23:08:01 2022 ] Training epoch: 36 +[ Thu Sep 15 23:08:25 2022 ] Batch(29/162) done. Loss: 0.3202 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 23:09:38 2022 ] Batch(129/162) done. 
Loss: 0.3796 lr:0.100000 network_time: 0.0313 +[ Thu Sep 15 23:10:01 2022 ] Eval epoch: 36 +[ Thu Sep 15 23:11:49 2022 ] Mean test loss of 930 batches: 2.9143733978271484. +[ Thu Sep 15 23:11:50 2022 ] Top1: 46.78% +[ Thu Sep 15 23:11:50 2022 ] Top5: 75.46% +[ Thu Sep 15 23:11:50 2022 ] Training epoch: 37 +[ Thu Sep 15 23:12:43 2022 ] Batch(67/162) done. Loss: 0.3318 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 23:13:52 2022 ] Eval epoch: 37 +[ Thu Sep 15 23:15:40 2022 ] Mean test loss of 930 batches: 2.5134196281433105. +[ Thu Sep 15 23:15:40 2022 ] Top1: 47.01% +[ Thu Sep 15 23:15:40 2022 ] Top5: 75.64% +[ Thu Sep 15 23:15:41 2022 ] Training epoch: 38 +[ Thu Sep 15 23:15:48 2022 ] Batch(5/162) done. Loss: 0.4636 lr:0.100000 network_time: 0.0310 +[ Thu Sep 15 23:17:01 2022 ] Batch(105/162) done. Loss: 0.2994 lr:0.100000 network_time: 0.0606 +[ Thu Sep 15 23:17:42 2022 ] Eval epoch: 38 +[ Thu Sep 15 23:19:30 2022 ] Mean test loss of 930 batches: 2.8436739444732666. +[ Thu Sep 15 23:19:30 2022 ] Top1: 46.69% +[ Thu Sep 15 23:19:31 2022 ] Top5: 75.47% +[ Thu Sep 15 23:19:31 2022 ] Training epoch: 39 +[ Thu Sep 15 23:20:06 2022 ] Batch(43/162) done. Loss: 0.1927 lr:0.100000 network_time: 0.0289 +[ Thu Sep 15 23:21:19 2022 ] Batch(143/162) done. Loss: 0.4042 lr:0.100000 network_time: 0.0264 +[ Thu Sep 15 23:21:32 2022 ] Eval epoch: 39 +[ Thu Sep 15 23:23:20 2022 ] Mean test loss of 930 batches: 2.809511423110962. +[ Thu Sep 15 23:23:20 2022 ] Top1: 45.66% +[ Thu Sep 15 23:23:21 2022 ] Top5: 74.96% +[ Thu Sep 15 23:23:21 2022 ] Training epoch: 40 +[ Thu Sep 15 23:24:24 2022 ] Batch(81/162) done. Loss: 0.3760 lr:0.100000 network_time: 0.0407 +[ Thu Sep 15 23:25:22 2022 ] Eval epoch: 40 +[ Thu Sep 15 23:27:11 2022 ] Mean test loss of 930 batches: 2.7429559230804443. +[ Thu Sep 15 23:27:11 2022 ] Top1: 45.55% +[ Thu Sep 15 23:27:12 2022 ] Top5: 74.78% +[ Thu Sep 15 23:27:12 2022 ] Training epoch: 41 +[ Thu Sep 15 23:27:30 2022 ] Batch(19/162) done. 
Loss: 0.5173 lr:0.100000 network_time: 0.0312 +[ Thu Sep 15 23:28:43 2022 ] Batch(119/162) done. Loss: 0.6124 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 23:29:13 2022 ] Eval epoch: 41 +[ Thu Sep 15 23:31:01 2022 ] Mean test loss of 930 batches: 2.9963934421539307. +[ Thu Sep 15 23:31:01 2022 ] Top1: 45.14% +[ Thu Sep 15 23:31:02 2022 ] Top5: 74.59% +[ Thu Sep 15 23:31:02 2022 ] Training epoch: 42 +[ Thu Sep 15 23:31:47 2022 ] Batch(57/162) done. Loss: 0.4068 lr:0.100000 network_time: 0.0283 +[ Thu Sep 15 23:33:00 2022 ] Batch(157/162) done. Loss: 0.3797 lr:0.100000 network_time: 0.0312 +[ Thu Sep 15 23:33:03 2022 ] Eval epoch: 42 +[ Thu Sep 15 23:34:51 2022 ] Mean test loss of 930 batches: 2.8139801025390625. +[ Thu Sep 15 23:34:52 2022 ] Top1: 46.93% +[ Thu Sep 15 23:34:52 2022 ] Top5: 75.50% +[ Thu Sep 15 23:34:52 2022 ] Training epoch: 43 +[ Thu Sep 15 23:36:05 2022 ] Batch(95/162) done. Loss: 0.3201 lr:0.100000 network_time: 0.0307 +[ Thu Sep 15 23:36:53 2022 ] Eval epoch: 43 +[ Thu Sep 15 23:38:42 2022 ] Mean test loss of 930 batches: 3.052001953125. +[ Thu Sep 15 23:38:42 2022 ] Top1: 42.91% +[ Thu Sep 15 23:38:43 2022 ] Top5: 74.33% +[ Thu Sep 15 23:38:43 2022 ] Training epoch: 44 +[ Thu Sep 15 23:39:11 2022 ] Batch(33/162) done. Loss: 0.1518 lr:0.100000 network_time: 0.0299 +[ Thu Sep 15 23:40:24 2022 ] Batch(133/162) done. Loss: 0.3774 lr:0.100000 network_time: 0.0262 +[ Thu Sep 15 23:40:44 2022 ] Eval epoch: 44 +[ Thu Sep 15 23:42:33 2022 ] Mean test loss of 930 batches: 2.9345126152038574. +[ Thu Sep 15 23:42:33 2022 ] Top1: 47.30% +[ Thu Sep 15 23:42:34 2022 ] Top5: 75.75% +[ Thu Sep 15 23:42:34 2022 ] Training epoch: 45 +[ Thu Sep 15 23:43:29 2022 ] Batch(71/162) done. Loss: 0.4863 lr:0.100000 network_time: 0.0283 +[ Thu Sep 15 23:44:35 2022 ] Eval epoch: 45 +[ Thu Sep 15 23:46:23 2022 ] Mean test loss of 930 batches: 2.7627146244049072. 
+[ Thu Sep 15 23:46:24 2022 ] Top1: 47.54% +[ Thu Sep 15 23:46:24 2022 ] Top5: 76.17% +[ Thu Sep 15 23:46:24 2022 ] Training epoch: 46 +[ Thu Sep 15 23:46:34 2022 ] Batch(9/162) done. Loss: 0.2246 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 23:47:47 2022 ] Batch(109/162) done. Loss: 0.5166 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 23:48:25 2022 ] Eval epoch: 46 +[ Thu Sep 15 23:50:13 2022 ] Mean test loss of 930 batches: 2.7105462551116943. +[ Thu Sep 15 23:50:14 2022 ] Top1: 47.83% +[ Thu Sep 15 23:50:14 2022 ] Top5: 77.36% +[ Thu Sep 15 23:50:15 2022 ] Training epoch: 47 +[ Thu Sep 15 23:50:52 2022 ] Batch(47/162) done. Loss: 0.2201 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 23:52:05 2022 ] Batch(147/162) done. Loss: 0.3085 lr:0.100000 network_time: 0.0307 +[ Thu Sep 15 23:52:16 2022 ] Eval epoch: 47 +[ Thu Sep 15 23:54:04 2022 ] Mean test loss of 930 batches: 2.8468735218048096. +[ Thu Sep 15 23:54:04 2022 ] Top1: 46.37% +[ Thu Sep 15 23:54:05 2022 ] Top5: 75.79% +[ Thu Sep 15 23:54:05 2022 ] Training epoch: 48 +[ Thu Sep 15 23:55:11 2022 ] Batch(85/162) done. Loss: 0.4530 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 23:56:06 2022 ] Eval epoch: 48 +[ Thu Sep 15 23:57:54 2022 ] Mean test loss of 930 batches: 2.7094438076019287. +[ Thu Sep 15 23:57:55 2022 ] Top1: 47.79% +[ Thu Sep 15 23:57:55 2022 ] Top5: 76.91% +[ Thu Sep 15 23:57:55 2022 ] Training epoch: 49 +[ Thu Sep 15 23:58:16 2022 ] Batch(23/162) done. Loss: 0.3716 lr:0.100000 network_time: 0.0382 +[ Thu Sep 15 23:59:29 2022 ] Batch(123/162) done. Loss: 0.2848 lr:0.100000 network_time: 0.0328 +[ Thu Sep 15 23:59:57 2022 ] Eval epoch: 49 +[ Fri Sep 16 00:01:45 2022 ] Mean test loss of 930 batches: 3.2839035987854004. +[ Fri Sep 16 00:01:45 2022 ] Top1: 44.72% +[ Fri Sep 16 00:01:46 2022 ] Top5: 72.77% +[ Fri Sep 16 00:01:46 2022 ] Training epoch: 50 +[ Fri Sep 16 00:02:34 2022 ] Batch(61/162) done. Loss: 0.3227 lr:0.100000 network_time: 0.0309 +[ Fri Sep 16 00:03:47 2022 ] Batch(161/162) done. 
Loss: 0.3406 lr:0.100000 network_time: 0.0273 +[ Fri Sep 16 00:03:47 2022 ] Eval epoch: 50 +[ Fri Sep 16 00:05:35 2022 ] Mean test loss of 930 batches: 2.811931848526001. +[ Fri Sep 16 00:05:35 2022 ] Top1: 48.72% +[ Fri Sep 16 00:05:36 2022 ] Top5: 77.38% +[ Fri Sep 16 00:05:36 2022 ] Training epoch: 51 +[ Fri Sep 16 00:06:52 2022 ] Batch(99/162) done. Loss: 0.3076 lr:0.100000 network_time: 0.0271 +[ Fri Sep 16 00:07:37 2022 ] Eval epoch: 51 +[ Fri Sep 16 00:09:26 2022 ] Mean test loss of 930 batches: 3.179542064666748. +[ Fri Sep 16 00:09:26 2022 ] Top1: 45.92% +[ Fri Sep 16 00:09:27 2022 ] Top5: 74.47% +[ Fri Sep 16 00:09:27 2022 ] Training epoch: 52 +[ Fri Sep 16 00:09:57 2022 ] Batch(37/162) done. Loss: 0.1807 lr:0.100000 network_time: 0.0298 +[ Fri Sep 16 00:11:10 2022 ] Batch(137/162) done. Loss: 0.4173 lr:0.100000 network_time: 0.0274 +[ Fri Sep 16 00:11:28 2022 ] Eval epoch: 52 +[ Fri Sep 16 00:13:16 2022 ] Mean test loss of 930 batches: 2.9092044830322266. +[ Fri Sep 16 00:13:16 2022 ] Top1: 45.58% +[ Fri Sep 16 00:13:17 2022 ] Top5: 74.35% +[ Fri Sep 16 00:13:17 2022 ] Training epoch: 53 +[ Fri Sep 16 00:14:15 2022 ] Batch(75/162) done. Loss: 0.3810 lr:0.100000 network_time: 0.0316 +[ Fri Sep 16 00:15:18 2022 ] Eval epoch: 53 +[ Fri Sep 16 00:17:06 2022 ] Mean test loss of 930 batches: 3.3449065685272217. +[ Fri Sep 16 00:17:06 2022 ] Top1: 44.76% +[ Fri Sep 16 00:17:07 2022 ] Top5: 74.22% +[ Fri Sep 16 00:17:07 2022 ] Training epoch: 54 +[ Fri Sep 16 00:17:20 2022 ] Batch(13/162) done. Loss: 0.0814 lr:0.100000 network_time: 0.0300 +[ Fri Sep 16 00:18:33 2022 ] Batch(113/162) done. Loss: 0.2477 lr:0.100000 network_time: 0.0257 +[ Fri Sep 16 00:19:08 2022 ] Eval epoch: 54 +[ Fri Sep 16 00:20:56 2022 ] Mean test loss of 930 batches: 2.650818109512329. +[ Fri Sep 16 00:20:56 2022 ] Top1: 47.98% +[ Fri Sep 16 00:20:57 2022 ] Top5: 77.17% +[ Fri Sep 16 00:20:57 2022 ] Training epoch: 55 +[ Fri Sep 16 00:21:38 2022 ] Batch(51/162) done. 
Loss: 0.1965 lr:0.100000 network_time: 0.0275 +[ Fri Sep 16 00:22:51 2022 ] Batch(151/162) done. Loss: 0.2623 lr:0.100000 network_time: 0.0273 +[ Fri Sep 16 00:22:58 2022 ] Eval epoch: 55 +[ Fri Sep 16 00:24:46 2022 ] Mean test loss of 930 batches: 2.9719905853271484. +[ Fri Sep 16 00:24:47 2022 ] Top1: 47.19% +[ Fri Sep 16 00:24:47 2022 ] Top5: 75.58% +[ Fri Sep 16 00:24:47 2022 ] Training epoch: 56 +[ Fri Sep 16 00:25:56 2022 ] Batch(89/162) done. Loss: 0.1470 lr:0.100000 network_time: 0.0261 +[ Fri Sep 16 00:26:48 2022 ] Eval epoch: 56 +[ Fri Sep 16 00:28:37 2022 ] Mean test loss of 930 batches: 2.853518009185791. +[ Fri Sep 16 00:28:37 2022 ] Top1: 46.27% +[ Fri Sep 16 00:28:38 2022 ] Top5: 76.19% +[ Fri Sep 16 00:28:38 2022 ] Training epoch: 57 +[ Fri Sep 16 00:29:01 2022 ] Batch(27/162) done. Loss: 0.1605 lr:0.100000 network_time: 0.0288 +[ Fri Sep 16 00:30:14 2022 ] Batch(127/162) done. Loss: 0.2337 lr:0.100000 network_time: 0.0265 +[ Fri Sep 16 00:30:39 2022 ] Eval epoch: 57 +[ Fri Sep 16 00:32:27 2022 ] Mean test loss of 930 batches: 2.9228341579437256. +[ Fri Sep 16 00:32:27 2022 ] Top1: 45.10% +[ Fri Sep 16 00:32:28 2022 ] Top5: 75.00% +[ Fri Sep 16 00:32:28 2022 ] Training epoch: 58 +[ Fri Sep 16 00:33:19 2022 ] Batch(65/162) done. Loss: 0.1659 lr:0.100000 network_time: 0.0264 +[ Fri Sep 16 00:34:29 2022 ] Eval epoch: 58 +[ Fri Sep 16 00:36:17 2022 ] Mean test loss of 930 batches: 3.072599172592163. +[ Fri Sep 16 00:36:17 2022 ] Top1: 46.37% +[ Fri Sep 16 00:36:18 2022 ] Top5: 74.37% +[ Fri Sep 16 00:36:18 2022 ] Training epoch: 59 +[ Fri Sep 16 00:36:23 2022 ] Batch(3/162) done. Loss: 0.1019 lr:0.100000 network_time: 0.0279 +[ Fri Sep 16 00:37:36 2022 ] Batch(103/162) done. Loss: 0.2214 lr:0.100000 network_time: 0.0281 +[ Fri Sep 16 00:38:19 2022 ] Eval epoch: 59 +[ Fri Sep 16 00:40:07 2022 ] Mean test loss of 930 batches: 2.779177188873291. 
+[ Fri Sep 16 00:40:07 2022 ] Top1: 46.86% +[ Fri Sep 16 00:40:08 2022 ] Top5: 75.80% +[ Fri Sep 16 00:40:08 2022 ] Training epoch: 60 +[ Fri Sep 16 00:40:41 2022 ] Batch(41/162) done. Loss: 0.2597 lr:0.100000 network_time: 0.0280 +[ Fri Sep 16 00:41:54 2022 ] Batch(141/162) done. Loss: 0.1924 lr:0.100000 network_time: 0.0312 +[ Fri Sep 16 00:42:09 2022 ] Eval epoch: 60 +[ Fri Sep 16 00:43:57 2022 ] Mean test loss of 930 batches: 2.9837682247161865. +[ Fri Sep 16 00:43:58 2022 ] Top1: 47.59% +[ Fri Sep 16 00:43:58 2022 ] Top5: 76.13% +[ Fri Sep 16 00:43:58 2022 ] Training epoch: 61 +[ Fri Sep 16 00:44:59 2022 ] Batch(79/162) done. Loss: 0.1124 lr:0.010000 network_time: 0.0279 +[ Fri Sep 16 00:45:59 2022 ] Eval epoch: 61 +[ Fri Sep 16 00:47:48 2022 ] Mean test loss of 930 batches: 2.455381155014038. +[ Fri Sep 16 00:47:48 2022 ] Top1: 53.95% +[ Fri Sep 16 00:47:49 2022 ] Top5: 80.31% +[ Fri Sep 16 00:47:49 2022 ] Training epoch: 62 +[ Fri Sep 16 00:48:05 2022 ] Batch(17/162) done. Loss: 0.0396 lr:0.010000 network_time: 0.0237 +[ Fri Sep 16 00:49:18 2022 ] Batch(117/162) done. Loss: 0.0511 lr:0.010000 network_time: 0.0274 +[ Fri Sep 16 00:49:50 2022 ] Eval epoch: 62 +[ Fri Sep 16 00:51:39 2022 ] Mean test loss of 930 batches: 2.414081335067749. +[ Fri Sep 16 00:51:40 2022 ] Top1: 54.59% +[ Fri Sep 16 00:51:40 2022 ] Top5: 80.92% +[ Fri Sep 16 00:51:40 2022 ] Training epoch: 63 +[ Fri Sep 16 00:52:24 2022 ] Batch(55/162) done. Loss: 0.0494 lr:0.010000 network_time: 0.0276 +[ Fri Sep 16 00:53:37 2022 ] Batch(155/162) done. Loss: 0.0421 lr:0.010000 network_time: 0.0271 +[ Fri Sep 16 00:53:41 2022 ] Eval epoch: 63 +[ Fri Sep 16 00:55:30 2022 ] Mean test loss of 930 batches: 2.435600996017456. +[ Fri Sep 16 00:55:30 2022 ] Top1: 54.98% +[ Fri Sep 16 00:55:31 2022 ] Top5: 81.13% +[ Fri Sep 16 00:55:31 2022 ] Training epoch: 64 +[ Fri Sep 16 00:56:42 2022 ] Batch(93/162) done. 
Loss: 0.0951 lr:0.010000 network_time: 0.0278 +[ Fri Sep 16 00:57:32 2022 ] Eval epoch: 64 +[ Fri Sep 16 00:59:20 2022 ] Mean test loss of 930 batches: 2.467794418334961. +[ Fri Sep 16 00:59:21 2022 ] Top1: 54.78% +[ Fri Sep 16 00:59:21 2022 ] Top5: 81.15% +[ Fri Sep 16 00:59:21 2022 ] Training epoch: 65 +[ Fri Sep 16 00:59:47 2022 ] Batch(31/162) done. Loss: 0.0189 lr:0.010000 network_time: 0.0267 +[ Fri Sep 16 01:01:00 2022 ] Batch(131/162) done. Loss: 0.0475 lr:0.010000 network_time: 0.0311 +[ Fri Sep 16 01:01:22 2022 ] Eval epoch: 65 +[ Fri Sep 16 01:03:11 2022 ] Mean test loss of 930 batches: 2.487011194229126. +[ Fri Sep 16 01:03:11 2022 ] Top1: 54.64% +[ Fri Sep 16 01:03:12 2022 ] Top5: 80.91% +[ Fri Sep 16 01:03:12 2022 ] Training epoch: 66 +[ Fri Sep 16 01:04:06 2022 ] Batch(69/162) done. Loss: 0.0478 lr:0.010000 network_time: 0.0262 +[ Fri Sep 16 01:05:13 2022 ] Eval epoch: 66 +[ Fri Sep 16 01:07:00 2022 ] Mean test loss of 930 batches: 2.487764596939087. +[ Fri Sep 16 01:07:01 2022 ] Top1: 54.90% +[ Fri Sep 16 01:07:01 2022 ] Top5: 81.02% +[ Fri Sep 16 01:07:02 2022 ] Training epoch: 67 +[ Fri Sep 16 01:07:10 2022 ] Batch(7/162) done. Loss: 0.0119 lr:0.010000 network_time: 0.0318 +[ Fri Sep 16 01:08:23 2022 ] Batch(107/162) done. Loss: 0.0186 lr:0.010000 network_time: 0.0270 +[ Fri Sep 16 01:09:03 2022 ] Eval epoch: 67 +[ Fri Sep 16 01:10:51 2022 ] Mean test loss of 930 batches: 2.4871976375579834. +[ Fri Sep 16 01:10:51 2022 ] Top1: 55.07% +[ Fri Sep 16 01:10:52 2022 ] Top5: 81.23% +[ Fri Sep 16 01:10:52 2022 ] Training epoch: 68 +[ Fri Sep 16 01:11:29 2022 ] Batch(45/162) done. Loss: 0.0457 lr:0.010000 network_time: 0.0288 +[ Fri Sep 16 01:12:41 2022 ] Batch(145/162) done. Loss: 0.0222 lr:0.010000 network_time: 0.0276 +[ Fri Sep 16 01:12:53 2022 ] Eval epoch: 68 +[ Fri Sep 16 01:14:41 2022 ] Mean test loss of 930 batches: 2.5081300735473633. 
+[ Fri Sep 16 01:14:41 2022 ] Top1: 54.78% +[ Fri Sep 16 01:14:42 2022 ] Top5: 80.99% +[ Fri Sep 16 01:14:42 2022 ] Training epoch: 69 +[ Fri Sep 16 01:15:46 2022 ] Batch(83/162) done. Loss: 0.0238 lr:0.010000 network_time: 0.0341 +[ Fri Sep 16 01:16:43 2022 ] Eval epoch: 69 +[ Fri Sep 16 01:18:31 2022 ] Mean test loss of 930 batches: 2.4945333003997803. +[ Fri Sep 16 01:18:32 2022 ] Top1: 55.12% +[ Fri Sep 16 01:18:32 2022 ] Top5: 81.14% +[ Fri Sep 16 01:18:32 2022 ] Training epoch: 70 +[ Fri Sep 16 01:18:52 2022 ] Batch(21/162) done. Loss: 0.0233 lr:0.010000 network_time: 0.0275 +[ Fri Sep 16 01:20:05 2022 ] Batch(121/162) done. Loss: 0.0358 lr:0.010000 network_time: 0.0266 +[ Fri Sep 16 01:20:34 2022 ] Eval epoch: 70 +[ Fri Sep 16 01:22:22 2022 ] Mean test loss of 930 batches: 2.4906225204467773. +[ Fri Sep 16 01:22:23 2022 ] Top1: 55.35% +[ Fri Sep 16 01:22:23 2022 ] Top5: 81.29% +[ Fri Sep 16 01:22:23 2022 ] Training epoch: 71 +[ Fri Sep 16 01:23:10 2022 ] Batch(59/162) done. Loss: 0.0117 lr:0.010000 network_time: 0.0309 +[ Fri Sep 16 01:24:23 2022 ] Batch(159/162) done. Loss: 0.0134 lr:0.010000 network_time: 0.0263 +[ Fri Sep 16 01:24:24 2022 ] Eval epoch: 71 +[ Fri Sep 16 01:26:13 2022 ] Mean test loss of 930 batches: 2.5574259757995605. +[ Fri Sep 16 01:26:13 2022 ] Top1: 54.91% +[ Fri Sep 16 01:26:14 2022 ] Top5: 81.06% +[ Fri Sep 16 01:26:14 2022 ] Training epoch: 72 +[ Fri Sep 16 01:27:28 2022 ] Batch(97/162) done. Loss: 0.0187 lr:0.010000 network_time: 0.0268 +[ Fri Sep 16 01:28:15 2022 ] Eval epoch: 72 +[ Fri Sep 16 01:30:03 2022 ] Mean test loss of 930 batches: 2.5424442291259766. +[ Fri Sep 16 01:30:03 2022 ] Top1: 55.18% +[ Fri Sep 16 01:30:04 2022 ] Top5: 81.15% +[ Fri Sep 16 01:30:04 2022 ] Training epoch: 73 +[ Fri Sep 16 01:30:33 2022 ] Batch(35/162) done. Loss: 0.0086 lr:0.010000 network_time: 0.0283 +[ Fri Sep 16 01:31:46 2022 ] Batch(135/162) done. 
Loss: 0.0168 lr:0.010000 network_time: 0.0320 +[ Fri Sep 16 01:32:05 2022 ] Eval epoch: 73 +[ Fri Sep 16 01:33:53 2022 ] Mean test loss of 930 batches: 2.5362462997436523. +[ Fri Sep 16 01:33:53 2022 ] Top1: 55.36% +[ Fri Sep 16 01:33:54 2022 ] Top5: 81.27% +[ Fri Sep 16 01:33:54 2022 ] Training epoch: 74 +[ Fri Sep 16 01:34:51 2022 ] Batch(73/162) done. Loss: 0.0156 lr:0.010000 network_time: 0.0273 +[ Fri Sep 16 01:35:55 2022 ] Eval epoch: 74 +[ Fri Sep 16 01:37:43 2022 ] Mean test loss of 930 batches: 2.521191358566284. +[ Fri Sep 16 01:37:43 2022 ] Top1: 55.26% +[ Fri Sep 16 01:37:44 2022 ] Top5: 81.21% +[ Fri Sep 16 01:37:44 2022 ] Training epoch: 75 +[ Fri Sep 16 01:37:56 2022 ] Batch(11/162) done. Loss: 0.0207 lr:0.010000 network_time: 0.0285 +[ Fri Sep 16 01:39:08 2022 ] Batch(111/162) done. Loss: 0.0131 lr:0.010000 network_time: 0.0283 +[ Fri Sep 16 01:39:45 2022 ] Eval epoch: 75 +[ Fri Sep 16 01:41:33 2022 ] Mean test loss of 930 batches: 2.5355825424194336. +[ Fri Sep 16 01:41:33 2022 ] Top1: 55.33% +[ Fri Sep 16 01:41:34 2022 ] Top5: 81.14% +[ Fri Sep 16 01:41:34 2022 ] Training epoch: 76 +[ Fri Sep 16 01:42:13 2022 ] Batch(49/162) done. Loss: 0.0105 lr:0.010000 network_time: 0.0347 +[ Fri Sep 16 01:43:26 2022 ] Batch(149/162) done. Loss: 0.0263 lr:0.010000 network_time: 0.0265 +[ Fri Sep 16 01:43:35 2022 ] Eval epoch: 76 +[ Fri Sep 16 01:45:23 2022 ] Mean test loss of 930 batches: 2.538811683654785. +[ Fri Sep 16 01:45:24 2022 ] Top1: 55.36% +[ Fri Sep 16 01:45:24 2022 ] Top5: 81.18% +[ Fri Sep 16 01:45:24 2022 ] Training epoch: 77 +[ Fri Sep 16 01:46:32 2022 ] Batch(87/162) done. Loss: 0.0133 lr:0.010000 network_time: 0.0324 +[ Fri Sep 16 01:47:26 2022 ] Eval epoch: 77 +[ Fri Sep 16 01:49:14 2022 ] Mean test loss of 930 batches: 2.534885883331299. +[ Fri Sep 16 01:49:15 2022 ] Top1: 55.49% +[ Fri Sep 16 01:49:15 2022 ] Top5: 81.30% +[ Fri Sep 16 01:49:15 2022 ] Training epoch: 78 +[ Fri Sep 16 01:49:37 2022 ] Batch(25/162) done. 
Loss: 0.0092 lr:0.010000 network_time: 0.0270 +[ Fri Sep 16 01:50:50 2022 ] Batch(125/162) done. Loss: 0.0150 lr:0.010000 network_time: 0.0286 +[ Fri Sep 16 01:51:17 2022 ] Eval epoch: 78 +[ Fri Sep 16 01:53:07 2022 ] Mean test loss of 930 batches: 2.521869421005249. +[ Fri Sep 16 01:53:07 2022 ] Top1: 55.60% +[ Fri Sep 16 01:53:08 2022 ] Top5: 81.24% +[ Fri Sep 16 01:53:08 2022 ] Training epoch: 79 +[ Fri Sep 16 01:53:57 2022 ] Batch(63/162) done. Loss: 0.0122 lr:0.010000 network_time: 0.0437 +[ Fri Sep 16 01:55:08 2022 ] Eval epoch: 79 +[ Fri Sep 16 01:56:56 2022 ] Mean test loss of 930 batches: 2.5722897052764893. +[ Fri Sep 16 01:56:56 2022 ] Top1: 55.16% +[ Fri Sep 16 01:56:57 2022 ] Top5: 81.03% +[ Fri Sep 16 01:56:57 2022 ] Training epoch: 80 +[ Fri Sep 16 01:57:02 2022 ] Batch(1/162) done. Loss: 0.0119 lr:0.010000 network_time: 0.0294 +[ Fri Sep 16 01:58:14 2022 ] Batch(101/162) done. Loss: 0.0125 lr:0.010000 network_time: 0.0268 +[ Fri Sep 16 01:58:58 2022 ] Eval epoch: 80 +[ Fri Sep 16 02:00:46 2022 ] Mean test loss of 930 batches: 2.557574987411499. +[ Fri Sep 16 02:00:47 2022 ] Top1: 55.35% +[ Fri Sep 16 02:00:47 2022 ] Top5: 81.25% +[ Fri Sep 16 02:00:47 2022 ] Training epoch: 81 +[ Fri Sep 16 02:01:19 2022 ] Batch(39/162) done. Loss: 0.0079 lr:0.001000 network_time: 0.0346 +[ Fri Sep 16 02:02:32 2022 ] Batch(139/162) done. Loss: 0.0043 lr:0.001000 network_time: 0.0307 +[ Fri Sep 16 02:02:49 2022 ] Eval epoch: 81 +[ Fri Sep 16 02:04:36 2022 ] Mean test loss of 930 batches: 2.572951078414917. +[ Fri Sep 16 02:04:37 2022 ] Top1: 55.27% +[ Fri Sep 16 02:04:37 2022 ] Top5: 81.03% +[ Fri Sep 16 02:04:37 2022 ] Training epoch: 82 +[ Fri Sep 16 02:05:37 2022 ] Batch(77/162) done. Loss: 0.0055 lr:0.001000 network_time: 0.0275 +[ Fri Sep 16 02:06:38 2022 ] Eval epoch: 82 +[ Fri Sep 16 02:08:26 2022 ] Mean test loss of 930 batches: 2.530529022216797. 
+[ Fri Sep 16 02:08:27 2022 ] Top1: 55.61% +[ Fri Sep 16 02:08:27 2022 ] Top5: 81.47% +[ Fri Sep 16 02:08:27 2022 ] Training epoch: 83 +[ Fri Sep 16 02:08:42 2022 ] Batch(15/162) done. Loss: 0.0110 lr:0.001000 network_time: 0.0298 +[ Fri Sep 16 02:09:54 2022 ] Batch(115/162) done. Loss: 0.0387 lr:0.001000 network_time: 0.0316 +[ Fri Sep 16 02:10:28 2022 ] Eval epoch: 83 +[ Fri Sep 16 02:12:17 2022 ] Mean test loss of 930 batches: 2.545494794845581. +[ Fri Sep 16 02:12:17 2022 ] Top1: 55.61% +[ Fri Sep 16 02:12:17 2022 ] Top5: 81.21% +[ Fri Sep 16 02:12:18 2022 ] Training epoch: 84 +[ Fri Sep 16 02:13:00 2022 ] Batch(53/162) done. Loss: 0.0153 lr:0.001000 network_time: 0.0308 +[ Fri Sep 16 02:14:13 2022 ] Batch(153/162) done. Loss: 0.0306 lr:0.001000 network_time: 0.0297 +[ Fri Sep 16 02:14:19 2022 ] Eval epoch: 84 +[ Fri Sep 16 02:16:07 2022 ] Mean test loss of 930 batches: 2.524808883666992. +[ Fri Sep 16 02:16:08 2022 ] Top1: 55.55% +[ Fri Sep 16 02:16:08 2022 ] Top5: 81.22% +[ Fri Sep 16 02:16:08 2022 ] Training epoch: 85 +[ Fri Sep 16 02:17:18 2022 ] Batch(91/162) done. Loss: 0.0149 lr:0.001000 network_time: 0.0278 +[ Fri Sep 16 02:18:10 2022 ] Eval epoch: 85 +[ Fri Sep 16 02:19:58 2022 ] Mean test loss of 930 batches: 2.586280345916748. +[ Fri Sep 16 02:19:58 2022 ] Top1: 55.22% +[ Fri Sep 16 02:19:59 2022 ] Top5: 81.15% +[ Fri Sep 16 02:19:59 2022 ] Training epoch: 86 +[ Fri Sep 16 02:20:24 2022 ] Batch(29/162) done. Loss: 0.0106 lr:0.001000 network_time: 0.0550 +[ Fri Sep 16 02:21:37 2022 ] Batch(129/162) done. Loss: 0.0091 lr:0.001000 network_time: 0.0295 +[ Fri Sep 16 02:22:00 2022 ] Eval epoch: 86 +[ Fri Sep 16 02:23:48 2022 ] Mean test loss of 930 batches: 2.5408122539520264. +[ Fri Sep 16 02:23:49 2022 ] Top1: 55.49% +[ Fri Sep 16 02:23:49 2022 ] Top5: 81.15% +[ Fri Sep 16 02:23:49 2022 ] Training epoch: 87 +[ Fri Sep 16 02:24:41 2022 ] Batch(67/162) done. 
Loss: 0.0092 lr:0.001000 network_time: 0.0257 +[ Fri Sep 16 02:25:50 2022 ] Eval epoch: 87 +[ Fri Sep 16 02:27:38 2022 ] Mean test loss of 930 batches: 2.536407232284546. +[ Fri Sep 16 02:27:38 2022 ] Top1: 55.42% +[ Fri Sep 16 02:27:39 2022 ] Top5: 81.31% +[ Fri Sep 16 02:27:39 2022 ] Training epoch: 88 +[ Fri Sep 16 02:27:46 2022 ] Batch(5/162) done. Loss: 0.0140 lr:0.001000 network_time: 0.0274 +[ Fri Sep 16 02:28:59 2022 ] Batch(105/162) done. Loss: 0.0098 lr:0.001000 network_time: 0.0277 +[ Fri Sep 16 02:29:40 2022 ] Eval epoch: 88 +[ Fri Sep 16 02:31:28 2022 ] Mean test loss of 930 batches: 2.528388023376465. +[ Fri Sep 16 02:31:29 2022 ] Top1: 55.43% +[ Fri Sep 16 02:31:29 2022 ] Top5: 81.16% +[ Fri Sep 16 02:31:29 2022 ] Training epoch: 89 +[ Fri Sep 16 02:32:05 2022 ] Batch(43/162) done. Loss: 0.0132 lr:0.001000 network_time: 0.0303 +[ Fri Sep 16 02:33:17 2022 ] Batch(143/162) done. Loss: 0.0126 lr:0.001000 network_time: 0.0281 +[ Fri Sep 16 02:33:31 2022 ] Eval epoch: 89 +[ Fri Sep 16 02:35:18 2022 ] Mean test loss of 930 batches: 2.5503320693969727. +[ Fri Sep 16 02:35:19 2022 ] Top1: 55.38% +[ Fri Sep 16 02:35:19 2022 ] Top5: 81.19% +[ Fri Sep 16 02:35:20 2022 ] Training epoch: 90 +[ Fri Sep 16 02:36:22 2022 ] Batch(81/162) done. Loss: 0.0094 lr:0.001000 network_time: 0.0324 +[ Fri Sep 16 02:37:21 2022 ] Eval epoch: 90 +[ Fri Sep 16 02:39:09 2022 ] Mean test loss of 930 batches: 2.556290864944458. +[ Fri Sep 16 02:39:09 2022 ] Top1: 55.48% +[ Fri Sep 16 02:39:10 2022 ] Top5: 81.22% +[ Fri Sep 16 02:39:10 2022 ] Training epoch: 91 +[ Fri Sep 16 02:39:28 2022 ] Batch(19/162) done. Loss: 0.0091 lr:0.001000 network_time: 0.0280 +[ Fri Sep 16 02:40:40 2022 ] Batch(119/162) done. Loss: 0.0052 lr:0.001000 network_time: 0.0256 +[ Fri Sep 16 02:41:11 2022 ] Eval epoch: 91 +[ Fri Sep 16 02:42:59 2022 ] Mean test loss of 930 batches: 2.58315372467041. 
+[ Fri Sep 16 02:43:00 2022 ] Top1: 55.23% +[ Fri Sep 16 02:43:00 2022 ] Top5: 81.20% +[ Fri Sep 16 02:43:00 2022 ] Training epoch: 92 +[ Fri Sep 16 02:43:46 2022 ] Batch(57/162) done. Loss: 0.0191 lr:0.001000 network_time: 0.0328 +[ Fri Sep 16 02:44:58 2022 ] Batch(157/162) done. Loss: 0.0156 lr:0.001000 network_time: 0.0282 +[ Fri Sep 16 02:45:02 2022 ] Eval epoch: 92 +[ Fri Sep 16 02:46:50 2022 ] Mean test loss of 930 batches: 2.5625314712524414. +[ Fri Sep 16 02:46:50 2022 ] Top1: 55.23% +[ Fri Sep 16 02:46:50 2022 ] Top5: 81.03% +[ Fri Sep 16 02:46:51 2022 ] Training epoch: 93 +[ Fri Sep 16 02:48:04 2022 ] Batch(95/162) done. Loss: 0.0076 lr:0.001000 network_time: 0.0268 +[ Fri Sep 16 02:48:52 2022 ] Eval epoch: 93 +[ Fri Sep 16 02:50:40 2022 ] Mean test loss of 930 batches: 2.5470073223114014. +[ Fri Sep 16 02:50:40 2022 ] Top1: 55.65% +[ Fri Sep 16 02:50:41 2022 ] Top5: 81.37% +[ Fri Sep 16 02:50:41 2022 ] Training epoch: 94 +[ Fri Sep 16 02:51:09 2022 ] Batch(33/162) done. Loss: 0.0263 lr:0.001000 network_time: 0.0274 +[ Fri Sep 16 02:52:22 2022 ] Batch(133/162) done. Loss: 0.0480 lr:0.001000 network_time: 0.0274 +[ Fri Sep 16 02:52:42 2022 ] Eval epoch: 94 +[ Fri Sep 16 02:54:30 2022 ] Mean test loss of 930 batches: 2.5392682552337646. +[ Fri Sep 16 02:54:30 2022 ] Top1: 55.68% +[ Fri Sep 16 02:54:31 2022 ] Top5: 81.33% +[ Fri Sep 16 02:54:31 2022 ] Training epoch: 95 +[ Fri Sep 16 02:55:26 2022 ] Batch(71/162) done. Loss: 0.0408 lr:0.001000 network_time: 0.0315 +[ Fri Sep 16 02:56:32 2022 ] Eval epoch: 95 +[ Fri Sep 16 02:58:20 2022 ] Mean test loss of 930 batches: 2.55815052986145. +[ Fri Sep 16 02:58:21 2022 ] Top1: 55.28% +[ Fri Sep 16 02:58:21 2022 ] Top5: 81.19% +[ Fri Sep 16 02:58:21 2022 ] Training epoch: 96 +[ Fri Sep 16 02:58:32 2022 ] Batch(9/162) done. Loss: 0.0116 lr:0.001000 network_time: 0.0290 +[ Fri Sep 16 02:59:44 2022 ] Batch(109/162) done. 
Loss: 0.0113 lr:0.001000 network_time: 0.0284 +[ Fri Sep 16 03:00:23 2022 ] Eval epoch: 96 +[ Fri Sep 16 03:02:10 2022 ] Mean test loss of 930 batches: 2.5667190551757812. +[ Fri Sep 16 03:02:11 2022 ] Top1: 55.56% +[ Fri Sep 16 03:02:11 2022 ] Top5: 81.15% +[ Fri Sep 16 03:02:11 2022 ] Training epoch: 97 +[ Fri Sep 16 03:02:49 2022 ] Batch(47/162) done. Loss: 0.0686 lr:0.001000 network_time: 0.0280 +[ Fri Sep 16 03:04:02 2022 ] Batch(147/162) done. Loss: 0.0074 lr:0.001000 network_time: 0.0278 +[ Fri Sep 16 03:04:12 2022 ] Eval epoch: 97 +[ Fri Sep 16 03:06:00 2022 ] Mean test loss of 930 batches: 2.544494867324829. +[ Fri Sep 16 03:06:00 2022 ] Top1: 55.48% +[ Fri Sep 16 03:06:01 2022 ] Top5: 81.22% +[ Fri Sep 16 03:06:01 2022 ] Training epoch: 98 +[ Fri Sep 16 03:07:07 2022 ] Batch(85/162) done. Loss: 0.0252 lr:0.001000 network_time: 0.0278 +[ Fri Sep 16 03:08:02 2022 ] Eval epoch: 98 +[ Fri Sep 16 03:09:50 2022 ] Mean test loss of 930 batches: 2.5435867309570312. +[ Fri Sep 16 03:09:50 2022 ] Top1: 55.66% +[ Fri Sep 16 03:09:51 2022 ] Top5: 81.36% +[ Fri Sep 16 03:09:51 2022 ] Training epoch: 99 +[ Fri Sep 16 03:10:12 2022 ] Batch(23/162) done. Loss: 0.0143 lr:0.001000 network_time: 0.0323 +[ Fri Sep 16 03:11:24 2022 ] Batch(123/162) done. Loss: 0.0108 lr:0.001000 network_time: 0.0284 +[ Fri Sep 16 03:11:52 2022 ] Eval epoch: 99 +[ Fri Sep 16 03:13:40 2022 ] Mean test loss of 930 batches: 2.562251567840576. +[ Fri Sep 16 03:13:40 2022 ] Top1: 55.43% +[ Fri Sep 16 03:13:41 2022 ] Top5: 81.23% +[ Fri Sep 16 03:13:41 2022 ] Training epoch: 100 +[ Fri Sep 16 03:14:29 2022 ] Batch(61/162) done. Loss: 0.0118 lr:0.001000 network_time: 0.0284 +[ Fri Sep 16 03:15:42 2022 ] Batch(161/162) done. Loss: 0.0102 lr:0.001000 network_time: 0.0315 +[ Fri Sep 16 03:15:42 2022 ] Eval epoch: 100 +[ Fri Sep 16 03:17:30 2022 ] Mean test loss of 930 batches: 2.5743367671966553. 
+[ Fri Sep 16 03:17:31 2022 ] Top1: 55.23% +[ Fri Sep 16 03:17:31 2022 ] Top5: 80.93% +[ Fri Sep 16 03:17:31 2022 ] Training epoch: 101 +[ Fri Sep 16 03:18:47 2022 ] Batch(99/162) done. Loss: 0.0172 lr:0.000100 network_time: 0.0292 +[ Fri Sep 16 03:19:32 2022 ] Eval epoch: 101 +[ Fri Sep 16 03:21:21 2022 ] Mean test loss of 930 batches: 2.547807455062866. +[ Fri Sep 16 03:21:21 2022 ] Top1: 55.69% +[ Fri Sep 16 03:21:22 2022 ] Top5: 81.43% +[ Fri Sep 16 03:21:22 2022 ] Training epoch: 102 +[ Fri Sep 16 03:21:52 2022 ] Batch(37/162) done. Loss: 0.0108 lr:0.000100 network_time: 0.0309 +[ Fri Sep 16 03:23:05 2022 ] Batch(137/162) done. Loss: 0.0219 lr:0.000100 network_time: 0.0266 +[ Fri Sep 16 03:23:23 2022 ] Eval epoch: 102 +[ Fri Sep 16 03:25:10 2022 ] Mean test loss of 930 batches: 2.5248377323150635. +[ Fri Sep 16 03:25:11 2022 ] Top1: 55.67% +[ Fri Sep 16 03:25:11 2022 ] Top5: 81.42% +[ Fri Sep 16 03:25:12 2022 ] Training epoch: 103 +[ Fri Sep 16 03:26:10 2022 ] Batch(75/162) done. Loss: 0.0303 lr:0.000100 network_time: 0.0273 +[ Fri Sep 16 03:27:13 2022 ] Eval epoch: 103 +[ Fri Sep 16 03:29:00 2022 ] Mean test loss of 930 batches: 2.5692896842956543. +[ Fri Sep 16 03:29:01 2022 ] Top1: 55.27% +[ Fri Sep 16 03:29:01 2022 ] Top5: 81.23% +[ Fri Sep 16 03:29:01 2022 ] Training epoch: 104 +[ Fri Sep 16 03:29:14 2022 ] Batch(13/162) done. Loss: 0.0176 lr:0.000100 network_time: 0.0322 +[ Fri Sep 16 03:30:27 2022 ] Batch(113/162) done. Loss: 0.0143 lr:0.000100 network_time: 0.0262 +[ Fri Sep 16 03:31:02 2022 ] Eval epoch: 104 +[ Fri Sep 16 03:32:51 2022 ] Mean test loss of 930 batches: 2.5471296310424805. +[ Fri Sep 16 03:32:51 2022 ] Top1: 55.70% +[ Fri Sep 16 03:32:51 2022 ] Top5: 81.42% +[ Fri Sep 16 03:32:52 2022 ] Training epoch: 105 +[ Fri Sep 16 03:33:33 2022 ] Batch(51/162) done. Loss: 0.0230 lr:0.000100 network_time: 0.0288 +[ Fri Sep 16 03:34:45 2022 ] Batch(151/162) done. 
Loss: 0.0163 lr:0.000100 network_time: 0.0269 +[ Fri Sep 16 03:34:53 2022 ] Eval epoch: 105 +[ Fri Sep 16 03:36:41 2022 ] Mean test loss of 930 batches: 2.578871488571167. +[ Fri Sep 16 03:36:41 2022 ] Top1: 55.30% +[ Fri Sep 16 03:36:42 2022 ] Top5: 81.05% +[ Fri Sep 16 03:36:42 2022 ] Training epoch: 106 +[ Fri Sep 16 03:37:50 2022 ] Batch(89/162) done. Loss: 0.0128 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 03:38:43 2022 ] Eval epoch: 106 +[ Fri Sep 16 03:40:30 2022 ] Mean test loss of 930 batches: 2.5462167263031006. +[ Fri Sep 16 03:40:31 2022 ] Top1: 55.46% +[ Fri Sep 16 03:40:31 2022 ] Top5: 81.28% +[ Fri Sep 16 03:40:32 2022 ] Training epoch: 107 +[ Fri Sep 16 03:40:55 2022 ] Batch(27/162) done. Loss: 0.0202 lr:0.000100 network_time: 0.0264 +[ Fri Sep 16 03:42:08 2022 ] Batch(127/162) done. Loss: 0.0215 lr:0.000100 network_time: 0.0324 +[ Fri Sep 16 03:42:33 2022 ] Eval epoch: 107 +[ Fri Sep 16 03:44:21 2022 ] Mean test loss of 930 batches: 2.5570015907287598. +[ Fri Sep 16 03:44:21 2022 ] Top1: 55.48% +[ Fri Sep 16 03:44:21 2022 ] Top5: 81.23% +[ Fri Sep 16 03:44:22 2022 ] Training epoch: 108 +[ Fri Sep 16 03:45:13 2022 ] Batch(65/162) done. Loss: 0.0059 lr:0.000100 network_time: 0.0286 +[ Fri Sep 16 03:46:23 2022 ] Eval epoch: 108 +[ Fri Sep 16 03:48:10 2022 ] Mean test loss of 930 batches: 2.585301637649536. +[ Fri Sep 16 03:48:11 2022 ] Top1: 55.24% +[ Fri Sep 16 03:48:11 2022 ] Top5: 81.05% +[ Fri Sep 16 03:48:12 2022 ] Training epoch: 109 +[ Fri Sep 16 03:48:17 2022 ] Batch(3/162) done. Loss: 0.0050 lr:0.000100 network_time: 0.0293 +[ Fri Sep 16 03:49:30 2022 ] Batch(103/162) done. Loss: 0.0114 lr:0.000100 network_time: 0.0265 +[ Fri Sep 16 03:50:13 2022 ] Eval epoch: 109 +[ Fri Sep 16 03:52:00 2022 ] Mean test loss of 930 batches: 2.557034492492676. +[ Fri Sep 16 03:52:01 2022 ] Top1: 55.44% +[ Fri Sep 16 03:52:01 2022 ] Top5: 81.18% +[ Fri Sep 16 03:52:02 2022 ] Training epoch: 110 +[ Fri Sep 16 03:52:35 2022 ] Batch(41/162) done. 
Loss: 0.0297 lr:0.000100 network_time: 0.0346 +[ Fri Sep 16 03:53:48 2022 ] Batch(141/162) done. Loss: 0.0063 lr:0.000100 network_time: 0.0528 +[ Fri Sep 16 03:54:02 2022 ] Eval epoch: 110 +[ Fri Sep 16 03:55:51 2022 ] Mean test loss of 930 batches: 2.576395034790039. +[ Fri Sep 16 03:55:51 2022 ] Top1: 55.13% +[ Fri Sep 16 03:55:52 2022 ] Top5: 81.20% +[ Fri Sep 16 03:55:52 2022 ] Training epoch: 111 +[ Fri Sep 16 03:56:53 2022 ] Batch(79/162) done. Loss: 0.0034 lr:0.000100 network_time: 0.0263 +[ Fri Sep 16 03:57:53 2022 ] Eval epoch: 111 +[ Fri Sep 16 03:59:41 2022 ] Mean test loss of 930 batches: 2.577402114868164. +[ Fri Sep 16 03:59:41 2022 ] Top1: 55.06% +[ Fri Sep 16 03:59:42 2022 ] Top5: 80.97% +[ Fri Sep 16 03:59:42 2022 ] Training epoch: 112 +[ Fri Sep 16 03:59:58 2022 ] Batch(17/162) done. Loss: 0.0095 lr:0.000100 network_time: 0.0267 +[ Fri Sep 16 04:01:11 2022 ] Batch(117/162) done. Loss: 0.0455 lr:0.000100 network_time: 0.0268 +[ Fri Sep 16 04:01:43 2022 ] Eval epoch: 112 +[ Fri Sep 16 04:03:30 2022 ] Mean test loss of 930 batches: 2.5660643577575684. +[ Fri Sep 16 04:03:31 2022 ] Top1: 55.42% +[ Fri Sep 16 04:03:31 2022 ] Top5: 81.13% +[ Fri Sep 16 04:03:32 2022 ] Training epoch: 113 +[ Fri Sep 16 04:04:15 2022 ] Batch(55/162) done. Loss: 0.0135 lr:0.000100 network_time: 0.0284 +[ Fri Sep 16 04:05:28 2022 ] Batch(155/162) done. Loss: 0.0128 lr:0.000100 network_time: 0.0269 +[ Fri Sep 16 04:05:33 2022 ] Eval epoch: 113 +[ Fri Sep 16 04:07:20 2022 ] Mean test loss of 930 batches: 2.570066213607788. +[ Fri Sep 16 04:07:21 2022 ] Top1: 54.85% +[ Fri Sep 16 04:07:21 2022 ] Top5: 81.01% +[ Fri Sep 16 04:07:21 2022 ] Training epoch: 114 +[ Fri Sep 16 04:08:33 2022 ] Batch(93/162) done. Loss: 0.0188 lr:0.000100 network_time: 0.0278 +[ Fri Sep 16 04:09:23 2022 ] Eval epoch: 114 +[ Fri Sep 16 04:11:11 2022 ] Mean test loss of 930 batches: 2.5548155307769775. 
+[ Fri Sep 16 04:11:11 2022 ] Top1: 55.34% +[ Fri Sep 16 04:11:12 2022 ] Top5: 81.18% +[ Fri Sep 16 04:11:12 2022 ] Training epoch: 115 +[ Fri Sep 16 04:11:38 2022 ] Batch(31/162) done. Loss: 0.0057 lr:0.000100 network_time: 0.0267 +[ Fri Sep 16 04:12:51 2022 ] Batch(131/162) done. Loss: 0.0136 lr:0.000100 network_time: 0.0275 +[ Fri Sep 16 04:13:13 2022 ] Eval epoch: 115 +[ Fri Sep 16 04:15:00 2022 ] Mean test loss of 930 batches: 2.551961898803711. +[ Fri Sep 16 04:15:01 2022 ] Top1: 55.49% +[ Fri Sep 16 04:15:01 2022 ] Top5: 81.27% +[ Fri Sep 16 04:15:02 2022 ] Training epoch: 116 +[ Fri Sep 16 04:15:55 2022 ] Batch(69/162) done. Loss: 0.0155 lr:0.000100 network_time: 0.0265 +[ Fri Sep 16 04:17:02 2022 ] Eval epoch: 116 +[ Fri Sep 16 04:18:50 2022 ] Mean test loss of 930 batches: 2.55898118019104. +[ Fri Sep 16 04:18:51 2022 ] Top1: 55.49% +[ Fri Sep 16 04:18:51 2022 ] Top5: 81.27% +[ Fri Sep 16 04:18:52 2022 ] Training epoch: 117 +[ Fri Sep 16 04:19:00 2022 ] Batch(7/162) done. Loss: 0.0189 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 04:20:13 2022 ] Batch(107/162) done. Loss: 0.0054 lr:0.000100 network_time: 0.0276 +[ Fri Sep 16 04:20:53 2022 ] Eval epoch: 117 +[ Fri Sep 16 04:22:40 2022 ] Mean test loss of 930 batches: 2.54909348487854. +[ Fri Sep 16 04:22:40 2022 ] Top1: 55.64% +[ Fri Sep 16 04:22:41 2022 ] Top5: 81.25% +[ Fri Sep 16 04:22:41 2022 ] Training epoch: 118 +[ Fri Sep 16 04:23:18 2022 ] Batch(45/162) done. Loss: 0.0172 lr:0.000100 network_time: 0.0312 +[ Fri Sep 16 04:24:31 2022 ] Batch(145/162) done. Loss: 0.0081 lr:0.000100 network_time: 0.0258 +[ Fri Sep 16 04:24:43 2022 ] Eval epoch: 118 +[ Fri Sep 16 04:26:30 2022 ] Mean test loss of 930 batches: 2.568805456161499. +[ Fri Sep 16 04:26:31 2022 ] Top1: 55.38% +[ Fri Sep 16 04:26:31 2022 ] Top5: 81.23% +[ Fri Sep 16 04:26:32 2022 ] Training epoch: 119 +[ Fri Sep 16 04:27:36 2022 ] Batch(83/162) done. 
Loss: 0.0195 lr:0.000100 network_time: 0.0252 +[ Fri Sep 16 04:28:33 2022 ] Eval epoch: 119 +[ Fri Sep 16 04:30:21 2022 ] Mean test loss of 930 batches: 2.5613887310028076. +[ Fri Sep 16 04:30:22 2022 ] Top1: 55.40% +[ Fri Sep 16 04:30:22 2022 ] Top5: 81.13% +[ Fri Sep 16 04:30:22 2022 ] Training epoch: 120 +[ Fri Sep 16 04:30:42 2022 ] Batch(21/162) done. Loss: 0.0305 lr:0.000100 network_time: 0.0549 +[ Fri Sep 16 04:31:54 2022 ] Batch(121/162) done. Loss: 0.0053 lr:0.000100 network_time: 0.0306 +[ Fri Sep 16 04:32:24 2022 ] Eval epoch: 120 +[ Fri Sep 16 04:34:11 2022 ] Mean test loss of 930 batches: 2.565460443496704. +[ Fri Sep 16 04:34:12 2022 ] Top1: 55.60% +[ Fri Sep 16 04:34:12 2022 ] Top5: 81.27% +[ Fri Sep 16 04:34:13 2022 ] Training epoch: 121 +[ Fri Sep 16 04:34:59 2022 ] Batch(59/162) done. Loss: 0.0073 lr:0.000100 network_time: 0.0274 +[ Fri Sep 16 04:36:12 2022 ] Batch(159/162) done. Loss: 0.0292 lr:0.000100 network_time: 0.0318 +[ Fri Sep 16 04:36:14 2022 ] Eval epoch: 121 +[ Fri Sep 16 04:38:02 2022 ] Mean test loss of 930 batches: 2.552762985229492. +[ Fri Sep 16 04:38:02 2022 ] Top1: 55.36% +[ Fri Sep 16 04:38:03 2022 ] Top5: 81.15% +[ Fri Sep 16 04:38:03 2022 ] Training epoch: 122 +[ Fri Sep 16 04:39:17 2022 ] Batch(97/162) done. Loss: 0.0103 lr:0.000100 network_time: 0.0281 +[ Fri Sep 16 04:40:04 2022 ] Eval epoch: 122 +[ Fri Sep 16 04:41:52 2022 ] Mean test loss of 930 batches: 2.5489044189453125. +[ Fri Sep 16 04:41:53 2022 ] Top1: 55.62% +[ Fri Sep 16 04:41:53 2022 ] Top5: 81.31% +[ Fri Sep 16 04:41:53 2022 ] Training epoch: 123 +[ Fri Sep 16 04:42:23 2022 ] Batch(35/162) done. Loss: 0.0388 lr:0.000100 network_time: 0.0267 +[ Fri Sep 16 04:43:35 2022 ] Batch(135/162) done. Loss: 0.0089 lr:0.000100 network_time: 0.0272 +[ Fri Sep 16 04:43:55 2022 ] Eval epoch: 123 +[ Fri Sep 16 04:45:42 2022 ] Mean test loss of 930 batches: 2.5398478507995605. 
+[ Fri Sep 16 04:45:43 2022 ] Top1: 55.33% +[ Fri Sep 16 04:45:43 2022 ] Top5: 81.14% +[ Fri Sep 16 04:45:43 2022 ] Training epoch: 124 +[ Fri Sep 16 04:46:40 2022 ] Batch(73/162) done. Loss: 0.0072 lr:0.000100 network_time: 0.0264 +[ Fri Sep 16 04:47:44 2022 ] Eval epoch: 124 +[ Fri Sep 16 04:49:33 2022 ] Mean test loss of 930 batches: 2.5149850845336914. +[ Fri Sep 16 04:49:33 2022 ] Top1: 55.92% +[ Fri Sep 16 04:49:34 2022 ] Top5: 81.55% +[ Fri Sep 16 04:49:34 2022 ] Training epoch: 125 +[ Fri Sep 16 04:49:46 2022 ] Batch(11/162) done. Loss: 0.0073 lr:0.000100 network_time: 0.0301 +[ Fri Sep 16 04:50:59 2022 ] Batch(111/162) done. Loss: 0.0125 lr:0.000100 network_time: 0.0331 +[ Fri Sep 16 04:51:35 2022 ] Eval epoch: 125 +[ Fri Sep 16 04:53:23 2022 ] Mean test loss of 930 batches: 2.5590920448303223. +[ Fri Sep 16 04:53:23 2022 ] Top1: 55.52% +[ Fri Sep 16 04:53:24 2022 ] Top5: 81.25% +[ Fri Sep 16 04:53:24 2022 ] Training epoch: 126 +[ Fri Sep 16 04:54:03 2022 ] Batch(49/162) done. Loss: 0.0049 lr:0.000100 network_time: 0.0409 +[ Fri Sep 16 04:55:16 2022 ] Batch(149/162) done. Loss: 0.0071 lr:0.000100 network_time: 0.0270 +[ Fri Sep 16 04:55:25 2022 ] Eval epoch: 126 +[ Fri Sep 16 04:57:13 2022 ] Mean test loss of 930 batches: 2.547355890274048. +[ Fri Sep 16 04:57:14 2022 ] Top1: 55.43% +[ Fri Sep 16 04:57:14 2022 ] Top5: 81.17% +[ Fri Sep 16 04:57:14 2022 ] Training epoch: 127 +[ Fri Sep 16 04:58:21 2022 ] Batch(87/162) done. Loss: 0.0054 lr:0.000100 network_time: 0.0279 +[ Fri Sep 16 04:59:15 2022 ] Eval epoch: 127 +[ Fri Sep 16 05:01:03 2022 ] Mean test loss of 930 batches: 2.557677745819092. +[ Fri Sep 16 05:01:04 2022 ] Top1: 55.55% +[ Fri Sep 16 05:01:04 2022 ] Top5: 81.28% +[ Fri Sep 16 05:01:04 2022 ] Training epoch: 128 +[ Fri Sep 16 05:01:26 2022 ] Batch(25/162) done. Loss: 0.0053 lr:0.000100 network_time: 0.0262 +[ Fri Sep 16 05:02:39 2022 ] Batch(125/162) done. 
Loss: 0.0104 lr:0.000100 network_time: 0.0308 +[ Fri Sep 16 05:03:06 2022 ] Eval epoch: 128 +[ Fri Sep 16 05:04:53 2022 ] Mean test loss of 930 batches: 2.564342498779297. +[ Fri Sep 16 05:04:54 2022 ] Top1: 55.33% +[ Fri Sep 16 05:04:54 2022 ] Top5: 81.05% +[ Fri Sep 16 05:04:54 2022 ] Training epoch: 129 +[ Fri Sep 16 05:05:44 2022 ] Batch(63/162) done. Loss: 0.0051 lr:0.000100 network_time: 0.0265 +[ Fri Sep 16 05:06:56 2022 ] Eval epoch: 129 +[ Fri Sep 16 05:08:43 2022 ] Mean test loss of 930 batches: 2.5628936290740967. +[ Fri Sep 16 05:08:43 2022 ] Top1: 55.28% +[ Fri Sep 16 05:08:44 2022 ] Top5: 81.18% +[ Fri Sep 16 05:08:44 2022 ] Training epoch: 130 +[ Fri Sep 16 05:08:48 2022 ] Batch(1/162) done. Loss: 0.0198 lr:0.000100 network_time: 0.0259 +[ Fri Sep 16 05:10:01 2022 ] Batch(101/162) done. Loss: 0.0120 lr:0.000100 network_time: 0.0274 +[ Fri Sep 16 05:10:45 2022 ] Eval epoch: 130 +[ Fri Sep 16 05:12:33 2022 ] Mean test loss of 930 batches: 2.5282130241394043. +[ Fri Sep 16 05:12:33 2022 ] Top1: 55.60% +[ Fri Sep 16 05:12:34 2022 ] Top5: 81.39% +[ Fri Sep 16 05:12:34 2022 ] Training epoch: 131 +[ Fri Sep 16 05:13:06 2022 ] Batch(39/162) done. Loss: 0.0072 lr:0.000100 network_time: 0.0219 +[ Fri Sep 16 05:14:19 2022 ] Batch(139/162) done. Loss: 0.0100 lr:0.000100 network_time: 0.0271 +[ Fri Sep 16 05:14:35 2022 ] Eval epoch: 131 +[ Fri Sep 16 05:16:23 2022 ] Mean test loss of 930 batches: 2.547071933746338. +[ Fri Sep 16 05:16:23 2022 ] Top1: 55.65% +[ Fri Sep 16 05:16:23 2022 ] Top5: 81.40% +[ Fri Sep 16 05:16:24 2022 ] Training epoch: 132 +[ Fri Sep 16 05:17:23 2022 ] Batch(77/162) done. Loss: 0.0125 lr:0.000100 network_time: 0.0322 +[ Fri Sep 16 05:18:25 2022 ] Eval epoch: 132 +[ Fri Sep 16 05:20:12 2022 ] Mean test loss of 930 batches: 2.5421905517578125. +[ Fri Sep 16 05:20:13 2022 ] Top1: 55.51% +[ Fri Sep 16 05:20:13 2022 ] Top5: 81.40% +[ Fri Sep 16 05:20:13 2022 ] Training epoch: 133 +[ Fri Sep 16 05:20:28 2022 ] Batch(15/162) done. 
Loss: 0.0080 lr:0.000100 network_time: 0.0302 +[ Fri Sep 16 05:21:41 2022 ] Batch(115/162) done. Loss: 0.0089 lr:0.000100 network_time: 0.0285 +[ Fri Sep 16 05:22:15 2022 ] Eval epoch: 133 +[ Fri Sep 16 05:24:04 2022 ] Mean test loss of 930 batches: 2.5551788806915283. +[ Fri Sep 16 05:24:04 2022 ] Top1: 55.47% +[ Fri Sep 16 05:24:05 2022 ] Top5: 81.22% +[ Fri Sep 16 05:24:05 2022 ] Training epoch: 134 +[ Fri Sep 16 05:24:47 2022 ] Batch(53/162) done. Loss: 0.0142 lr:0.000100 network_time: 0.0318 +[ Fri Sep 16 05:26:00 2022 ] Batch(153/162) done. Loss: 0.0062 lr:0.000100 network_time: 0.0561 +[ Fri Sep 16 05:26:06 2022 ] Eval epoch: 134 +[ Fri Sep 16 05:27:53 2022 ] Mean test loss of 930 batches: 2.6278786659240723. +[ Fri Sep 16 05:27:54 2022 ] Top1: 54.84% +[ Fri Sep 16 05:27:54 2022 ] Top5: 80.96% +[ Fri Sep 16 05:27:55 2022 ] Training epoch: 135 +[ Fri Sep 16 05:29:05 2022 ] Batch(91/162) done. Loss: 0.0093 lr:0.000100 network_time: 0.0310 +[ Fri Sep 16 05:29:56 2022 ] Eval epoch: 135 +[ Fri Sep 16 05:31:44 2022 ] Mean test loss of 930 batches: 2.53376841545105. +[ Fri Sep 16 05:31:45 2022 ] Top1: 55.78% +[ Fri Sep 16 05:31:45 2022 ] Top5: 81.33% +[ Fri Sep 16 05:31:46 2022 ] Training epoch: 136 +[ Fri Sep 16 05:32:11 2022 ] Batch(29/162) done. Loss: 0.0107 lr:0.000100 network_time: 0.0274 +[ Fri Sep 16 05:33:23 2022 ] Batch(129/162) done. Loss: 0.0118 lr:0.000100 network_time: 0.0265 +[ Fri Sep 16 05:33:47 2022 ] Eval epoch: 136 +[ Fri Sep 16 05:35:34 2022 ] Mean test loss of 930 batches: 2.548306703567505. +[ Fri Sep 16 05:35:35 2022 ] Top1: 55.70% +[ Fri Sep 16 05:35:35 2022 ] Top5: 81.47% +[ Fri Sep 16 05:35:35 2022 ] Training epoch: 137 +[ Fri Sep 16 05:36:28 2022 ] Batch(67/162) done. Loss: 0.0078 lr:0.000100 network_time: 0.0264 +[ Fri Sep 16 05:37:36 2022 ] Eval epoch: 137 +[ Fri Sep 16 05:39:24 2022 ] Mean test loss of 930 batches: 2.568533182144165. 
+[ Fri Sep 16 05:39:24 2022 ] Top1: 55.41% +[ Fri Sep 16 05:39:25 2022 ] Top5: 81.25% +[ Fri Sep 16 05:39:25 2022 ] Training epoch: 138 +[ Fri Sep 16 05:39:33 2022 ] Batch(5/162) done. Loss: 0.0063 lr:0.000100 network_time: 0.0273 +[ Fri Sep 16 05:40:45 2022 ] Batch(105/162) done. Loss: 0.0060 lr:0.000100 network_time: 0.0467 +[ Fri Sep 16 05:41:26 2022 ] Eval epoch: 138 +[ Fri Sep 16 05:43:14 2022 ] Mean test loss of 930 batches: 2.5520284175872803. +[ Fri Sep 16 05:43:15 2022 ] Top1: 55.65% +[ Fri Sep 16 05:43:15 2022 ] Top5: 81.48% +[ Fri Sep 16 05:43:15 2022 ] Training epoch: 139 +[ Fri Sep 16 05:43:50 2022 ] Batch(43/162) done. Loss: 0.0217 lr:0.000100 network_time: 0.0277 +[ Fri Sep 16 05:45:03 2022 ] Batch(143/162) done. Loss: 0.0124 lr:0.000100 network_time: 0.0297 +[ Fri Sep 16 05:45:16 2022 ] Eval epoch: 139 +[ Fri Sep 16 05:47:05 2022 ] Mean test loss of 930 batches: 2.5655524730682373. +[ Fri Sep 16 05:47:05 2022 ] Top1: 55.30% +[ Fri Sep 16 05:47:06 2022 ] Top5: 81.07% +[ Fri Sep 16 05:47:06 2022 ] Training epoch: 140 +[ Fri Sep 16 05:48:08 2022 ] Batch(81/162) done. Loss: 0.0033 lr:0.000100 network_time: 0.0285 +[ Fri Sep 16 05:49:07 2022 ] Eval epoch: 140 +[ Fri Sep 16 05:50:55 2022 ] Mean test loss of 930 batches: 2.5775716304779053. 
+[ Fri Sep 16 05:50:55 2022 ] Top1: 55.40% +[ Fri Sep 16 05:50:56 2022 ] Top5: 81.10% diff --git a/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_xset/shift_gcn.py b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_xset/shift_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..0731e82fdb8bd0ae2e4c4ef4f29f7aab0c458bf4 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xset/ntu120_joint_xset/shift_gcn.py @@ -0,0 +1,216 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math + +import sys +sys.path.append("./model/Temporal_shift/") + +from cuda.shift import Shift + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class Shift_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(Shift_tcn, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.bn = nn.BatchNorm2d(in_channels) + self.bn2 = nn.BatchNorm2d(in_channels) + bn_init(self.bn2, 1) + self.relu = nn.ReLU(inplace=True) + self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1) + self.shift_out = Shift(channel=out_channels, stride=stride, 
init_scale=1) + + self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1) + nn.init.kaiming_normal(self.temporal_linear.weight, mode='fan_out') + + def forward(self, x): + x = self.bn(x) + # shift1 + x = self.shift_in(x) + x = self.temporal_linear(x) + x = self.relu(x) + # shift2 + x = self.shift_out(x) + x = self.bn2(x) + return x + + +class Shift_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3): + super(Shift_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0,math.sqrt(1.0/out_channels)) + + self.Linear_bias = nn.Parameter(torch.zeros(1,1,out_channels,requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Linear_bias, 0) + + self.Feature_Mask = nn.Parameter(torch.ones(1,25,in_channels, requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Feature_Mask, 0) + + self.bn = nn.BatchNorm1d(25*out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + + index_array = np.empty(25*in_channels).astype(np.int) + for i in range(25): + for j in range(in_channels): + index_array[i*in_channels + j] = (i*in_channels + j + j*in_channels)%(in_channels*25) + self.shift_in = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + index_array = np.empty(25*out_channels).astype(np.int) + for i in range(25): + for j in range(out_channels): + index_array[i*out_channels + j] = (i*out_channels + j - j*out_channels)%(out_channels*25) + self.shift_out = 
nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + + def forward(self, x0): + n, c, t, v = x0.size() + x = x0.permute(0,2,3,1).contiguous() + + # shift1 + x = x.view(n*t,v*c) + x = torch.index_select(x, 1, self.shift_in) + x = x.view(n*t,v,c) + x = x * (torch.tanh(self.Feature_Mask)+1) + + x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous() # nt,v,c + x = x + self.Linear_bias + + # shift2 + x = x.view(n*t,-1) + x = torch.index_select(x, 1, self.shift_out) + x = self.bn(x) + x = x.view(n,t,v,self.out_channels).permute(0,3,1,2) # n,c,t,v + + x = x + self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = Shift_gcn(in_channels, out_channels, A) + self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride) + self.relu = nn.ReLU() + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + else: + self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride) + + def forward(self, x): + x = self.tcn1(self.gcn1(x)) + self.residual(x) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A) + self.l3 = TCN_GCN_unit(64, 64, A) + self.l4 = TCN_GCN_unit(64, 64, A) + self.l5 = TCN_GCN_unit(64, 128, A, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A) + self.l7 = TCN_GCN_unit(128, 128, A) + self.l8 = TCN_GCN_unit(128, 256, A, stride=2) + self.l9 = TCN_GCN_unit(256, 256, 
A) + self.l10 = TCN_GCN_unit(256, 256, A) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x) + x = self.l2(x) + x = self.l3(x) + x = self.l4(x) + x = self.l5(x) + x = self.l6(x) + x = self.l7(x) + x = self.l8(x) + x = self.l9(x) + x = self.l10(x) + + # N*M,C,T,V + c_new = x.size(1) + x = x.view(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_motion_xsub/config.yaml b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_motion_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..985338f4372c8182b19517c4297214cd3208162e --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_motion_xsub/config.yaml @@ -0,0 +1,56 @@ +Experiment_name: ntu120_bone_motion_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xsub/train_bone_motion.yaml +device: +- 2 +- 3 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_bone_motion_xsub +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_bone_motion.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl 
+train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_bone_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_bone_motion_xsub diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_motion_xsub/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_motion_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..7b5c1ed874e0f1664214d1c19ecde8552f4c6f07 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_motion_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74a9778b7fd6291e4b9dc1beaa1efba338b1ce78ee62400e6010224782ba6c2f +size 29946137 diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_motion_xsub/log.txt b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_motion_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..8d9c4c8cbe61cec0bae5a7ae70d5983411d4e5bc --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_motion_xsub/log.txt @@ -0,0 +1,1043 @@ +[ Wed Sep 14 18:31:36 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_bone_motion_xsub', 'model_saved_name': './save_models/ntu120_bone_motion_xsub', 'Experiment_name': 'ntu120_bone_motion_xsub', 'config': './config/ntu120_xsub/train_bone_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_bone_motion.npy', 'label_path': 
'/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [2, 3], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Wed Sep 14 18:31:36 2022 ] Training epoch: 1 +[ Wed Sep 14 18:32:54 2022 ] Batch(99/243) done. Loss: 3.7551 lr:0.100000 network_time: 0.0252 +[ Wed Sep 14 18:34:07 2022 ] Batch(199/243) done. Loss: 3.0830 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 18:34:38 2022 ] Eval epoch: 1 +[ Wed Sep 14 18:36:12 2022 ] Mean test loss of 796 batches: 5.711165904998779. +[ Wed Sep 14 18:36:12 2022 ] Top1: 8.25% +[ Wed Sep 14 18:36:12 2022 ] Top5: 22.59% +[ Wed Sep 14 18:36:13 2022 ] Training epoch: 2 +[ Wed Sep 14 18:36:58 2022 ] Batch(56/243) done. Loss: 2.6547 lr:0.100000 network_time: 0.0297 +[ Wed Sep 14 18:38:10 2022 ] Batch(156/243) done. Loss: 2.3958 lr:0.100000 network_time: 0.0310 +[ Wed Sep 14 18:39:13 2022 ] Eval epoch: 2 +[ Wed Sep 14 18:40:47 2022 ] Mean test loss of 796 batches: 4.712782859802246. +[ Wed Sep 14 18:40:47 2022 ] Top1: 14.20% +[ Wed Sep 14 18:40:48 2022 ] Top5: 36.21% +[ Wed Sep 14 18:40:48 2022 ] Training epoch: 3 +[ Wed Sep 14 18:41:01 2022 ] Batch(13/243) done. 
Loss: 1.9252 lr:0.100000 network_time: 0.0304 +[ Wed Sep 14 18:42:14 2022 ] Batch(113/243) done. Loss: 1.6989 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 18:43:27 2022 ] Batch(213/243) done. Loss: 1.8036 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 18:43:49 2022 ] Eval epoch: 3 +[ Wed Sep 14 18:45:23 2022 ] Mean test loss of 796 batches: 4.215649127960205. +[ Wed Sep 14 18:45:23 2022 ] Top1: 20.46% +[ Wed Sep 14 18:45:24 2022 ] Top5: 45.79% +[ Wed Sep 14 18:45:24 2022 ] Training epoch: 4 +[ Wed Sep 14 18:46:19 2022 ] Batch(70/243) done. Loss: 1.2138 lr:0.100000 network_time: 0.0311 +[ Wed Sep 14 18:47:32 2022 ] Batch(170/243) done. Loss: 1.4167 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 18:48:24 2022 ] Eval epoch: 4 +[ Wed Sep 14 18:49:58 2022 ] Mean test loss of 796 batches: 3.361840009689331. +[ Wed Sep 14 18:49:58 2022 ] Top1: 22.90% +[ Wed Sep 14 18:49:59 2022 ] Top5: 50.33% +[ Wed Sep 14 18:49:59 2022 ] Training epoch: 5 +[ Wed Sep 14 18:50:23 2022 ] Batch(27/243) done. Loss: 1.5494 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 18:51:35 2022 ] Batch(127/243) done. Loss: 1.2381 lr:0.100000 network_time: 0.0260 +[ Wed Sep 14 18:52:48 2022 ] Batch(227/243) done. Loss: 1.5641 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 18:52:59 2022 ] Eval epoch: 5 +[ Wed Sep 14 18:54:33 2022 ] Mean test loss of 796 batches: 3.790829658508301. +[ Wed Sep 14 18:54:34 2022 ] Top1: 20.54% +[ Wed Sep 14 18:54:34 2022 ] Top5: 45.11% +[ Wed Sep 14 18:54:34 2022 ] Training epoch: 6 +[ Wed Sep 14 18:55:39 2022 ] Batch(84/243) done. Loss: 1.1953 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 18:56:52 2022 ] Batch(184/243) done. Loss: 1.1292 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 18:57:34 2022 ] Eval epoch: 6 +[ Wed Sep 14 18:59:08 2022 ] Mean test loss of 796 batches: 3.924736499786377. +[ Wed Sep 14 18:59:09 2022 ] Top1: 24.96% +[ Wed Sep 14 18:59:09 2022 ] Top5: 56.70% +[ Wed Sep 14 18:59:09 2022 ] Training epoch: 7 +[ Wed Sep 14 18:59:43 2022 ] Batch(41/243) done. 
Loss: 1.1832 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 19:00:55 2022 ] Batch(141/243) done. Loss: 1.0630 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 19:02:08 2022 ] Batch(241/243) done. Loss: 0.9897 lr:0.100000 network_time: 0.0301 +[ Wed Sep 14 19:02:09 2022 ] Eval epoch: 7 +[ Wed Sep 14 19:03:43 2022 ] Mean test loss of 796 batches: 2.814431667327881. +[ Wed Sep 14 19:03:44 2022 ] Top1: 30.49% +[ Wed Sep 14 19:03:44 2022 ] Top5: 64.70% +[ Wed Sep 14 19:03:44 2022 ] Training epoch: 8 +[ Wed Sep 14 19:04:59 2022 ] Batch(98/243) done. Loss: 0.7183 lr:0.100000 network_time: 0.0310 +[ Wed Sep 14 19:06:12 2022 ] Batch(198/243) done. Loss: 0.9279 lr:0.100000 network_time: 0.0288 +[ Wed Sep 14 19:06:44 2022 ] Eval epoch: 8 +[ Wed Sep 14 19:08:17 2022 ] Mean test loss of 796 batches: 3.179093599319458. +[ Wed Sep 14 19:08:18 2022 ] Top1: 27.80% +[ Wed Sep 14 19:08:18 2022 ] Top5: 60.67% +[ Wed Sep 14 19:08:18 2022 ] Training epoch: 9 +[ Wed Sep 14 19:09:02 2022 ] Batch(55/243) done. Loss: 0.9159 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 19:10:15 2022 ] Batch(155/243) done. Loss: 0.8653 lr:0.100000 network_time: 0.0302 +[ Wed Sep 14 19:11:18 2022 ] Eval epoch: 9 +[ Wed Sep 14 19:12:53 2022 ] Mean test loss of 796 batches: 3.112769603729248. +[ Wed Sep 14 19:12:53 2022 ] Top1: 29.67% +[ Wed Sep 14 19:12:53 2022 ] Top5: 64.43% +[ Wed Sep 14 19:12:54 2022 ] Training epoch: 10 +[ Wed Sep 14 19:13:06 2022 ] Batch(12/243) done. Loss: 1.1210 lr:0.100000 network_time: 0.0301 +[ Wed Sep 14 19:14:19 2022 ] Batch(112/243) done. Loss: 1.0336 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 19:15:32 2022 ] Batch(212/243) done. Loss: 1.4129 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 19:15:54 2022 ] Eval epoch: 10 +[ Wed Sep 14 19:17:28 2022 ] Mean test loss of 796 batches: 2.717728614807129. 
+[ Wed Sep 14 19:17:28 2022 ] Top1: 36.83% +[ Wed Sep 14 19:17:28 2022 ] Top5: 71.44% +[ Wed Sep 14 19:17:29 2022 ] Training epoch: 11 +[ Wed Sep 14 19:18:23 2022 ] Batch(69/243) done. Loss: 0.8001 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 19:19:36 2022 ] Batch(169/243) done. Loss: 0.6964 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 19:20:29 2022 ] Eval epoch: 11 +[ Wed Sep 14 19:22:02 2022 ] Mean test loss of 796 batches: 2.9447152614593506. +[ Wed Sep 14 19:22:03 2022 ] Top1: 35.68% +[ Wed Sep 14 19:22:03 2022 ] Top5: 69.27% +[ Wed Sep 14 19:22:03 2022 ] Training epoch: 12 +[ Wed Sep 14 19:22:26 2022 ] Batch(26/243) done. Loss: 0.4444 lr:0.100000 network_time: 0.0287 +[ Wed Sep 14 19:23:39 2022 ] Batch(126/243) done. Loss: 0.7001 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 19:24:52 2022 ] Batch(226/243) done. Loss: 1.1727 lr:0.100000 network_time: 0.0411 +[ Wed Sep 14 19:25:03 2022 ] Eval epoch: 12 +[ Wed Sep 14 19:26:37 2022 ] Mean test loss of 796 batches: 2.774251699447632. +[ Wed Sep 14 19:26:37 2022 ] Top1: 36.09% +[ Wed Sep 14 19:26:38 2022 ] Top5: 70.66% +[ Wed Sep 14 19:26:38 2022 ] Training epoch: 13 +[ Wed Sep 14 19:27:42 2022 ] Batch(83/243) done. Loss: 0.6696 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 19:28:55 2022 ] Batch(183/243) done. Loss: 0.6523 lr:0.100000 network_time: 0.0307 +[ Wed Sep 14 19:29:38 2022 ] Eval epoch: 13 +[ Wed Sep 14 19:31:12 2022 ] Mean test loss of 796 batches: 3.165262460708618. +[ Wed Sep 14 19:31:12 2022 ] Top1: 31.68% +[ Wed Sep 14 19:31:13 2022 ] Top5: 66.37% +[ Wed Sep 14 19:31:13 2022 ] Training epoch: 14 +[ Wed Sep 14 19:31:45 2022 ] Batch(40/243) done. Loss: 0.4915 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 19:32:58 2022 ] Batch(140/243) done. Loss: 0.9805 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 19:34:11 2022 ] Batch(240/243) done. 
Loss: 0.8167 lr:0.100000 network_time: 0.0306 +[ Wed Sep 14 19:34:13 2022 ] Eval epoch: 14 +[ Wed Sep 14 19:35:47 2022 ] Mean test loss of 796 batches: 2.915220022201538. +[ Wed Sep 14 19:35:47 2022 ] Top1: 37.93% +[ Wed Sep 14 19:35:47 2022 ] Top5: 70.42% +[ Wed Sep 14 19:35:48 2022 ] Training epoch: 15 +[ Wed Sep 14 19:37:02 2022 ] Batch(97/243) done. Loss: 0.7217 lr:0.100000 network_time: 0.0317 +[ Wed Sep 14 19:38:15 2022 ] Batch(197/243) done. Loss: 0.4206 lr:0.100000 network_time: 0.0292 +[ Wed Sep 14 19:38:48 2022 ] Eval epoch: 15 +[ Wed Sep 14 19:40:22 2022 ] Mean test loss of 796 batches: 3.5348544120788574. +[ Wed Sep 14 19:40:22 2022 ] Top1: 31.55% +[ Wed Sep 14 19:40:23 2022 ] Top5: 65.92% +[ Wed Sep 14 19:40:23 2022 ] Training epoch: 16 +[ Wed Sep 14 19:41:06 2022 ] Batch(54/243) done. Loss: 0.5286 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 19:42:19 2022 ] Batch(154/243) done. Loss: 0.6408 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 19:43:23 2022 ] Eval epoch: 16 +[ Wed Sep 14 19:44:57 2022 ] Mean test loss of 796 batches: 3.115510940551758. +[ Wed Sep 14 19:44:57 2022 ] Top1: 35.87% +[ Wed Sep 14 19:44:58 2022 ] Top5: 69.59% +[ Wed Sep 14 19:44:58 2022 ] Training epoch: 17 +[ Wed Sep 14 19:45:10 2022 ] Batch(11/243) done. Loss: 0.7054 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 19:46:22 2022 ] Batch(111/243) done. Loss: 0.6154 lr:0.100000 network_time: 0.0281 +[ Wed Sep 14 19:47:35 2022 ] Batch(211/243) done. Loss: 0.9023 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 19:47:58 2022 ] Eval epoch: 17 +[ Wed Sep 14 19:49:32 2022 ] Mean test loss of 796 batches: 3.3053030967712402. +[ Wed Sep 14 19:49:33 2022 ] Top1: 35.22% +[ Wed Sep 14 19:49:33 2022 ] Top5: 68.90% +[ Wed Sep 14 19:49:34 2022 ] Training epoch: 18 +[ Wed Sep 14 19:50:27 2022 ] Batch(68/243) done. Loss: 0.7232 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 19:51:40 2022 ] Batch(168/243) done. 
Loss: 0.4455 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 19:52:34 2022 ] Eval epoch: 18 +[ Wed Sep 14 19:54:07 2022 ] Mean test loss of 796 batches: 2.651683807373047. +[ Wed Sep 14 19:54:08 2022 ] Top1: 40.03% +[ Wed Sep 14 19:54:08 2022 ] Top5: 74.53% +[ Wed Sep 14 19:54:08 2022 ] Training epoch: 19 +[ Wed Sep 14 19:54:30 2022 ] Batch(25/243) done. Loss: 0.5252 lr:0.100000 network_time: 0.0248 +[ Wed Sep 14 19:55:43 2022 ] Batch(125/243) done. Loss: 0.5577 lr:0.100000 network_time: 0.0294 +[ Wed Sep 14 19:56:56 2022 ] Batch(225/243) done. Loss: 0.8240 lr:0.100000 network_time: 0.0261 +[ Wed Sep 14 19:57:08 2022 ] Eval epoch: 19 +[ Wed Sep 14 19:58:42 2022 ] Mean test loss of 796 batches: 2.9685568809509277. +[ Wed Sep 14 19:58:42 2022 ] Top1: 39.34% +[ Wed Sep 14 19:58:43 2022 ] Top5: 74.07% +[ Wed Sep 14 19:58:43 2022 ] Training epoch: 20 +[ Wed Sep 14 19:59:46 2022 ] Batch(82/243) done. Loss: 0.5369 lr:0.100000 network_time: 0.0301 +[ Wed Sep 14 20:00:59 2022 ] Batch(182/243) done. Loss: 0.5425 lr:0.100000 network_time: 0.0310 +[ Wed Sep 14 20:01:43 2022 ] Eval epoch: 20 +[ Wed Sep 14 20:03:17 2022 ] Mean test loss of 796 batches: 2.9139246940612793. +[ Wed Sep 14 20:03:18 2022 ] Top1: 38.98% +[ Wed Sep 14 20:03:18 2022 ] Top5: 73.25% +[ Wed Sep 14 20:03:19 2022 ] Training epoch: 21 +[ Wed Sep 14 20:03:51 2022 ] Batch(39/243) done. Loss: 0.3098 lr:0.100000 network_time: 0.0345 +[ Wed Sep 14 20:05:04 2022 ] Batch(139/243) done. Loss: 0.5166 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 20:06:16 2022 ] Batch(239/243) done. Loss: 0.6568 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 20:06:19 2022 ] Eval epoch: 21 +[ Wed Sep 14 20:07:52 2022 ] Mean test loss of 796 batches: 3.3424649238586426. +[ Wed Sep 14 20:07:53 2022 ] Top1: 36.41% +[ Wed Sep 14 20:07:54 2022 ] Top5: 70.73% +[ Wed Sep 14 20:07:54 2022 ] Training epoch: 22 +[ Wed Sep 14 20:09:08 2022 ] Batch(96/243) done. 
Loss: 0.4496 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 20:10:21 2022 ] Batch(196/243) done. Loss: 0.4193 lr:0.100000 network_time: 0.0317 +[ Wed Sep 14 20:10:54 2022 ] Eval epoch: 22 +[ Wed Sep 14 20:12:28 2022 ] Mean test loss of 796 batches: 2.9043772220611572. +[ Wed Sep 14 20:12:28 2022 ] Top1: 40.20% +[ Wed Sep 14 20:12:29 2022 ] Top5: 73.53% +[ Wed Sep 14 20:12:29 2022 ] Training epoch: 23 +[ Wed Sep 14 20:13:11 2022 ] Batch(53/243) done. Loss: 0.3166 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 20:14:24 2022 ] Batch(153/243) done. Loss: 0.5057 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 20:15:29 2022 ] Eval epoch: 23 +[ Wed Sep 14 20:17:02 2022 ] Mean test loss of 796 batches: 2.937326669692993. +[ Wed Sep 14 20:17:03 2022 ] Top1: 39.94% +[ Wed Sep 14 20:17:04 2022 ] Top5: 73.01% +[ Wed Sep 14 20:17:04 2022 ] Training epoch: 24 +[ Wed Sep 14 20:17:15 2022 ] Batch(10/243) done. Loss: 0.3329 lr:0.100000 network_time: 0.0259 +[ Wed Sep 14 20:18:27 2022 ] Batch(110/243) done. Loss: 0.6170 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 20:19:40 2022 ] Batch(210/243) done. Loss: 0.5816 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 20:20:04 2022 ] Eval epoch: 24 +[ Wed Sep 14 20:21:37 2022 ] Mean test loss of 796 batches: 3.271789312362671. +[ Wed Sep 14 20:21:37 2022 ] Top1: 39.86% +[ Wed Sep 14 20:21:38 2022 ] Top5: 73.14% +[ Wed Sep 14 20:21:38 2022 ] Training epoch: 25 +[ Wed Sep 14 20:22:30 2022 ] Batch(67/243) done. Loss: 0.4282 lr:0.100000 network_time: 0.0282 +[ Wed Sep 14 20:23:43 2022 ] Batch(167/243) done. Loss: 0.4217 lr:0.100000 network_time: 0.0306 +[ Wed Sep 14 20:24:38 2022 ] Eval epoch: 25 +[ Wed Sep 14 20:26:11 2022 ] Mean test loss of 796 batches: 3.2710368633270264. +[ Wed Sep 14 20:26:12 2022 ] Top1: 38.82% +[ Wed Sep 14 20:26:12 2022 ] Top5: 73.17% +[ Wed Sep 14 20:26:12 2022 ] Training epoch: 26 +[ Wed Sep 14 20:26:33 2022 ] Batch(24/243) done. 
Loss: 0.4654 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 20:27:46 2022 ] Batch(124/243) done. Loss: 0.5090 lr:0.100000 network_time: 0.0255 +[ Wed Sep 14 20:28:59 2022 ] Batch(224/243) done. Loss: 0.3150 lr:0.100000 network_time: 0.0314 +[ Wed Sep 14 20:29:12 2022 ] Eval epoch: 26 +[ Wed Sep 14 20:30:46 2022 ] Mean test loss of 796 batches: 3.2111380100250244. +[ Wed Sep 14 20:30:47 2022 ] Top1: 36.32% +[ Wed Sep 14 20:30:47 2022 ] Top5: 71.37% +[ Wed Sep 14 20:30:47 2022 ] Training epoch: 27 +[ Wed Sep 14 20:31:50 2022 ] Batch(81/243) done. Loss: 0.4068 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 20:33:03 2022 ] Batch(181/243) done. Loss: 0.3918 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 20:33:48 2022 ] Eval epoch: 27 +[ Wed Sep 14 20:35:22 2022 ] Mean test loss of 796 batches: 2.908709764480591. +[ Wed Sep 14 20:35:22 2022 ] Top1: 39.33% +[ Wed Sep 14 20:35:23 2022 ] Top5: 74.25% +[ Wed Sep 14 20:35:23 2022 ] Training epoch: 28 +[ Wed Sep 14 20:35:55 2022 ] Batch(38/243) done. Loss: 0.2538 lr:0.100000 network_time: 0.0294 +[ Wed Sep 14 20:37:08 2022 ] Batch(138/243) done. Loss: 0.4274 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 20:38:20 2022 ] Batch(238/243) done. Loss: 0.5532 lr:0.100000 network_time: 0.0310 +[ Wed Sep 14 20:38:24 2022 ] Eval epoch: 28 +[ Wed Sep 14 20:39:57 2022 ] Mean test loss of 796 batches: 3.379927396774292. +[ Wed Sep 14 20:39:58 2022 ] Top1: 38.54% +[ Wed Sep 14 20:39:58 2022 ] Top5: 72.27% +[ Wed Sep 14 20:39:59 2022 ] Training epoch: 29 +[ Wed Sep 14 20:41:12 2022 ] Batch(95/243) done. Loss: 0.4147 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 20:42:25 2022 ] Batch(195/243) done. Loss: 0.4782 lr:0.100000 network_time: 0.0301 +[ Wed Sep 14 20:42:59 2022 ] Eval epoch: 29 +[ Wed Sep 14 20:44:33 2022 ] Mean test loss of 796 batches: 2.9250919818878174. 
+[ Wed Sep 14 20:44:33 2022 ] Top1: 43.48% +[ Wed Sep 14 20:44:34 2022 ] Top5: 76.24% +[ Wed Sep 14 20:44:34 2022 ] Training epoch: 30 +[ Wed Sep 14 20:45:16 2022 ] Batch(52/243) done. Loss: 0.1942 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 20:46:29 2022 ] Batch(152/243) done. Loss: 0.2228 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 20:47:35 2022 ] Eval epoch: 30 +[ Wed Sep 14 20:49:09 2022 ] Mean test loss of 796 batches: 2.907973289489746. +[ Wed Sep 14 20:49:09 2022 ] Top1: 43.96% +[ Wed Sep 14 20:49:10 2022 ] Top5: 76.37% +[ Wed Sep 14 20:49:10 2022 ] Training epoch: 31 +[ Wed Sep 14 20:49:20 2022 ] Batch(9/243) done. Loss: 0.3925 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 20:50:33 2022 ] Batch(109/243) done. Loss: 0.3682 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 20:51:46 2022 ] Batch(209/243) done. Loss: 0.2874 lr:0.100000 network_time: 0.0314 +[ Wed Sep 14 20:52:10 2022 ] Eval epoch: 31 +[ Wed Sep 14 20:53:44 2022 ] Mean test loss of 796 batches: 3.1756463050842285. +[ Wed Sep 14 20:53:44 2022 ] Top1: 40.28% +[ Wed Sep 14 20:53:44 2022 ] Top5: 73.68% +[ Wed Sep 14 20:53:45 2022 ] Training epoch: 32 +[ Wed Sep 14 20:54:37 2022 ] Batch(66/243) done. Loss: 0.3473 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 20:55:49 2022 ] Batch(166/243) done. Loss: 0.5482 lr:0.100000 network_time: 0.0281 +[ Wed Sep 14 20:56:45 2022 ] Eval epoch: 32 +[ Wed Sep 14 20:58:18 2022 ] Mean test loss of 796 batches: 2.8646881580352783. +[ Wed Sep 14 20:58:18 2022 ] Top1: 42.91% +[ Wed Sep 14 20:58:19 2022 ] Top5: 75.13% +[ Wed Sep 14 20:58:19 2022 ] Training epoch: 33 +[ Wed Sep 14 20:58:39 2022 ] Batch(23/243) done. Loss: 0.2494 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 20:59:52 2022 ] Batch(123/243) done. Loss: 0.6065 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 21:01:05 2022 ] Batch(223/243) done. 
Loss: 0.6795 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 21:01:19 2022 ] Eval epoch: 33 +[ Wed Sep 14 21:02:53 2022 ] Mean test loss of 796 batches: 3.292654037475586. +[ Wed Sep 14 21:02:53 2022 ] Top1: 36.59% +[ Wed Sep 14 21:02:54 2022 ] Top5: 70.07% +[ Wed Sep 14 21:02:54 2022 ] Training epoch: 34 +[ Wed Sep 14 21:03:56 2022 ] Batch(80/243) done. Loss: 0.3388 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 21:05:09 2022 ] Batch(180/243) done. Loss: 0.3675 lr:0.100000 network_time: 0.0472 +[ Wed Sep 14 21:05:54 2022 ] Eval epoch: 34 +[ Wed Sep 14 21:07:27 2022 ] Mean test loss of 796 batches: 3.1438148021698. +[ Wed Sep 14 21:07:28 2022 ] Top1: 43.58% +[ Wed Sep 14 21:07:28 2022 ] Top5: 75.90% +[ Wed Sep 14 21:07:28 2022 ] Training epoch: 35 +[ Wed Sep 14 21:07:59 2022 ] Batch(37/243) done. Loss: 0.2174 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 21:09:12 2022 ] Batch(137/243) done. Loss: 0.2229 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 21:10:25 2022 ] Batch(237/243) done. Loss: 0.4025 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 21:10:29 2022 ] Eval epoch: 35 +[ Wed Sep 14 21:12:02 2022 ] Mean test loss of 796 batches: 3.187361717224121. +[ Wed Sep 14 21:12:03 2022 ] Top1: 41.11% +[ Wed Sep 14 21:12:03 2022 ] Top5: 74.95% +[ Wed Sep 14 21:12:04 2022 ] Training epoch: 36 +[ Wed Sep 14 21:13:16 2022 ] Batch(94/243) done. Loss: 0.3044 lr:0.100000 network_time: 0.0252 +[ Wed Sep 14 21:14:28 2022 ] Batch(194/243) done. Loss: 0.1637 lr:0.100000 network_time: 0.0259 +[ Wed Sep 14 21:15:04 2022 ] Eval epoch: 36 +[ Wed Sep 14 21:16:37 2022 ] Mean test loss of 796 batches: 2.897453546524048. +[ Wed Sep 14 21:16:37 2022 ] Top1: 41.74% +[ Wed Sep 14 21:16:38 2022 ] Top5: 75.11% +[ Wed Sep 14 21:16:38 2022 ] Training epoch: 37 +[ Wed Sep 14 21:17:19 2022 ] Batch(51/243) done. Loss: 0.2264 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 21:18:31 2022 ] Batch(151/243) done. 
Loss: 0.2391 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 21:19:38 2022 ] Eval epoch: 37 +[ Wed Sep 14 21:21:11 2022 ] Mean test loss of 796 batches: 3.246755838394165. +[ Wed Sep 14 21:21:12 2022 ] Top1: 42.70% +[ Wed Sep 14 21:21:12 2022 ] Top5: 75.04% +[ Wed Sep 14 21:21:13 2022 ] Training epoch: 38 +[ Wed Sep 14 21:21:22 2022 ] Batch(8/243) done. Loss: 0.2202 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 21:22:35 2022 ] Batch(108/243) done. Loss: 0.1588 lr:0.100000 network_time: 0.0324 +[ Wed Sep 14 21:23:48 2022 ] Batch(208/243) done. Loss: 0.3176 lr:0.100000 network_time: 0.0308 +[ Wed Sep 14 21:24:13 2022 ] Eval epoch: 38 +[ Wed Sep 14 21:25:47 2022 ] Mean test loss of 796 batches: 3.2101247310638428. +[ Wed Sep 14 21:25:47 2022 ] Top1: 41.23% +[ Wed Sep 14 21:25:48 2022 ] Top5: 73.09% +[ Wed Sep 14 21:25:48 2022 ] Training epoch: 39 +[ Wed Sep 14 21:26:39 2022 ] Batch(65/243) done. Loss: 0.4692 lr:0.100000 network_time: 0.0313 +[ Wed Sep 14 21:27:52 2022 ] Batch(165/243) done. Loss: 0.2537 lr:0.100000 network_time: 0.0323 +[ Wed Sep 14 21:28:48 2022 ] Eval epoch: 39 +[ Wed Sep 14 21:30:21 2022 ] Mean test loss of 796 batches: 3.237701177597046. +[ Wed Sep 14 21:30:21 2022 ] Top1: 41.29% +[ Wed Sep 14 21:30:22 2022 ] Top5: 75.22% +[ Wed Sep 14 21:30:22 2022 ] Training epoch: 40 +[ Wed Sep 14 21:30:42 2022 ] Batch(22/243) done. Loss: 0.2544 lr:0.100000 network_time: 0.0315 +[ Wed Sep 14 21:31:55 2022 ] Batch(122/243) done. Loss: 0.4558 lr:0.100000 network_time: 0.0282 +[ Wed Sep 14 21:33:08 2022 ] Batch(222/243) done. Loss: 0.2960 lr:0.100000 network_time: 0.0316 +[ Wed Sep 14 21:33:23 2022 ] Eval epoch: 40 +[ Wed Sep 14 21:34:56 2022 ] Mean test loss of 796 batches: 3.0716347694396973. +[ Wed Sep 14 21:34:56 2022 ] Top1: 43.59% +[ Wed Sep 14 21:34:57 2022 ] Top5: 77.24% +[ Wed Sep 14 21:34:57 2022 ] Training epoch: 41 +[ Wed Sep 14 21:35:58 2022 ] Batch(79/243) done. 
Loss: 0.1472 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 21:37:11 2022 ] Batch(179/243) done. Loss: 0.2046 lr:0.100000 network_time: 0.0315 +[ Wed Sep 14 21:37:57 2022 ] Eval epoch: 41 +[ Wed Sep 14 21:39:31 2022 ] Mean test loss of 796 batches: 3.687352180480957. +[ Wed Sep 14 21:39:31 2022 ] Top1: 35.92% +[ Wed Sep 14 21:39:31 2022 ] Top5: 70.46% +[ Wed Sep 14 21:39:32 2022 ] Training epoch: 42 +[ Wed Sep 14 21:40:02 2022 ] Batch(36/243) done. Loss: 0.2597 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 21:41:14 2022 ] Batch(136/243) done. Loss: 0.2971 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 21:42:27 2022 ] Batch(236/243) done. Loss: 0.2494 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 21:42:32 2022 ] Eval epoch: 42 +[ Wed Sep 14 21:44:06 2022 ] Mean test loss of 796 batches: 2.7111852169036865. +[ Wed Sep 14 21:44:06 2022 ] Top1: 45.89% +[ Wed Sep 14 21:44:06 2022 ] Top5: 78.77% +[ Wed Sep 14 21:44:07 2022 ] Training epoch: 43 +[ Wed Sep 14 21:45:18 2022 ] Batch(93/243) done. Loss: 0.1261 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 21:46:31 2022 ] Batch(193/243) done. Loss: 0.2820 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 21:47:07 2022 ] Eval epoch: 43 +[ Wed Sep 14 21:48:40 2022 ] Mean test loss of 796 batches: 3.5541832447052. +[ Wed Sep 14 21:48:41 2022 ] Top1: 38.99% +[ Wed Sep 14 21:48:41 2022 ] Top5: 71.08% +[ Wed Sep 14 21:48:41 2022 ] Training epoch: 44 +[ Wed Sep 14 21:49:21 2022 ] Batch(50/243) done. Loss: 0.1867 lr:0.100000 network_time: 0.0317 +[ Wed Sep 14 21:50:34 2022 ] Batch(150/243) done. Loss: 0.1899 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 21:51:41 2022 ] Eval epoch: 44 +[ Wed Sep 14 21:53:15 2022 ] Mean test loss of 796 batches: 3.259566307067871. +[ Wed Sep 14 21:53:15 2022 ] Top1: 39.92% +[ Wed Sep 14 21:53:16 2022 ] Top5: 72.24% +[ Wed Sep 14 21:53:16 2022 ] Training epoch: 45 +[ Wed Sep 14 21:53:25 2022 ] Batch(7/243) done. 
Loss: 0.1851 lr:0.100000 network_time: 0.0324 +[ Wed Sep 14 21:54:37 2022 ] Batch(107/243) done. Loss: 0.1600 lr:0.100000 network_time: 0.0284 +[ Wed Sep 14 21:55:50 2022 ] Batch(207/243) done. Loss: 0.1799 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 21:56:16 2022 ] Eval epoch: 45 +[ Wed Sep 14 21:57:49 2022 ] Mean test loss of 796 batches: 3.1690220832824707. +[ Wed Sep 14 21:57:50 2022 ] Top1: 42.68% +[ Wed Sep 14 21:57:50 2022 ] Top5: 75.85% +[ Wed Sep 14 21:57:50 2022 ] Training epoch: 46 +[ Wed Sep 14 21:58:41 2022 ] Batch(64/243) done. Loss: 0.3256 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 21:59:54 2022 ] Batch(164/243) done. Loss: 0.2898 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 22:00:51 2022 ] Eval epoch: 46 +[ Wed Sep 14 22:02:24 2022 ] Mean test loss of 796 batches: 3.144570827484131. +[ Wed Sep 14 22:02:24 2022 ] Top1: 43.82% +[ Wed Sep 14 22:02:25 2022 ] Top5: 75.41% +[ Wed Sep 14 22:02:25 2022 ] Training epoch: 47 +[ Wed Sep 14 22:02:44 2022 ] Batch(21/243) done. Loss: 0.2113 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 22:03:57 2022 ] Batch(121/243) done. Loss: 0.3029 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 22:05:09 2022 ] Batch(221/243) done. Loss: 0.2827 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 22:05:25 2022 ] Eval epoch: 47 +[ Wed Sep 14 22:06:59 2022 ] Mean test loss of 796 batches: 3.208387613296509. +[ Wed Sep 14 22:06:59 2022 ] Top1: 43.43% +[ Wed Sep 14 22:06:59 2022 ] Top5: 75.61% +[ Wed Sep 14 22:07:00 2022 ] Training epoch: 48 +[ Wed Sep 14 22:08:00 2022 ] Batch(78/243) done. Loss: 0.2341 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 22:09:13 2022 ] Batch(178/243) done. Loss: 0.2231 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 22:10:00 2022 ] Eval epoch: 48 +[ Wed Sep 14 22:11:33 2022 ] Mean test loss of 796 batches: 3.4265151023864746. 
+[ Wed Sep 14 22:11:34 2022 ] Top1: 41.23% +[ Wed Sep 14 22:11:34 2022 ] Top5: 73.94% +[ Wed Sep 14 22:11:34 2022 ] Training epoch: 49 +[ Wed Sep 14 22:12:03 2022 ] Batch(35/243) done. Loss: 0.3541 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 22:13:16 2022 ] Batch(135/243) done. Loss: 0.2556 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 22:14:29 2022 ] Batch(235/243) done. Loss: 0.1613 lr:0.100000 network_time: 0.0310 +[ Wed Sep 14 22:14:34 2022 ] Eval epoch: 49 +[ Wed Sep 14 22:16:08 2022 ] Mean test loss of 796 batches: 3.0899510383605957. +[ Wed Sep 14 22:16:08 2022 ] Top1: 42.97% +[ Wed Sep 14 22:16:09 2022 ] Top5: 76.07% +[ Wed Sep 14 22:16:09 2022 ] Training epoch: 50 +[ Wed Sep 14 22:17:19 2022 ] Batch(92/243) done. Loss: 0.2175 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 22:18:32 2022 ] Batch(192/243) done. Loss: 0.3184 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 22:19:09 2022 ] Eval epoch: 50 +[ Wed Sep 14 22:20:42 2022 ] Mean test loss of 796 batches: 3.1481566429138184. +[ Wed Sep 14 22:20:42 2022 ] Top1: 41.54% +[ Wed Sep 14 22:20:43 2022 ] Top5: 73.69% +[ Wed Sep 14 22:20:43 2022 ] Training epoch: 51 +[ Wed Sep 14 22:21:23 2022 ] Batch(49/243) done. Loss: 0.2181 lr:0.100000 network_time: 0.0286 +[ Wed Sep 14 22:22:36 2022 ] Batch(149/243) done. Loss: 0.2625 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 22:23:44 2022 ] Eval epoch: 51 +[ Wed Sep 14 22:25:17 2022 ] Mean test loss of 796 batches: 2.987004041671753. +[ Wed Sep 14 22:25:18 2022 ] Top1: 46.25% +[ Wed Sep 14 22:25:18 2022 ] Top5: 77.31% +[ Wed Sep 14 22:25:18 2022 ] Training epoch: 52 +[ Wed Sep 14 22:25:26 2022 ] Batch(6/243) done. Loss: 0.1527 lr:0.100000 network_time: 0.0295 +[ Wed Sep 14 22:26:39 2022 ] Batch(106/243) done. Loss: 0.1457 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 22:27:52 2022 ] Batch(206/243) done. 
Loss: 0.2779 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 22:28:18 2022 ] Eval epoch: 52 +[ Wed Sep 14 22:29:51 2022 ] Mean test loss of 796 batches: 3.231384515762329. +[ Wed Sep 14 22:29:52 2022 ] Top1: 42.97% +[ Wed Sep 14 22:29:52 2022 ] Top5: 75.65% +[ Wed Sep 14 22:29:52 2022 ] Training epoch: 53 +[ Wed Sep 14 22:30:42 2022 ] Batch(63/243) done. Loss: 0.2154 lr:0.100000 network_time: 0.0283 +[ Wed Sep 14 22:31:55 2022 ] Batch(163/243) done. Loss: 0.3158 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 22:32:53 2022 ] Eval epoch: 53 +[ Wed Sep 14 22:34:26 2022 ] Mean test loss of 796 batches: 2.9344334602355957. +[ Wed Sep 14 22:34:27 2022 ] Top1: 45.77% +[ Wed Sep 14 22:34:27 2022 ] Top5: 78.17% +[ Wed Sep 14 22:34:27 2022 ] Training epoch: 54 +[ Wed Sep 14 22:34:45 2022 ] Batch(20/243) done. Loss: 0.2143 lr:0.100000 network_time: 0.0281 +[ Wed Sep 14 22:35:58 2022 ] Batch(120/243) done. Loss: 0.1561 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 22:37:11 2022 ] Batch(220/243) done. Loss: 0.1489 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 22:37:28 2022 ] Eval epoch: 54 +[ Wed Sep 14 22:39:01 2022 ] Mean test loss of 796 batches: 3.3158111572265625. +[ Wed Sep 14 22:39:01 2022 ] Top1: 43.19% +[ Wed Sep 14 22:39:02 2022 ] Top5: 76.21% +[ Wed Sep 14 22:39:02 2022 ] Training epoch: 55 +[ Wed Sep 14 22:40:02 2022 ] Batch(77/243) done. Loss: 0.2478 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 22:41:15 2022 ] Batch(177/243) done. Loss: 0.3717 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 22:42:02 2022 ] Eval epoch: 55 +[ Wed Sep 14 22:43:36 2022 ] Mean test loss of 796 batches: 3.565183401107788. +[ Wed Sep 14 22:43:36 2022 ] Top1: 40.67% +[ Wed Sep 14 22:43:37 2022 ] Top5: 74.40% +[ Wed Sep 14 22:43:37 2022 ] Training epoch: 56 +[ Wed Sep 14 22:44:05 2022 ] Batch(34/243) done. Loss: 0.1565 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 22:45:18 2022 ] Batch(134/243) done. 
Loss: 0.3081 lr:0.100000 network_time: 0.0287 +[ Wed Sep 14 22:46:31 2022 ] Batch(234/243) done. Loss: 0.1216 lr:0.100000 network_time: 0.0253 +[ Wed Sep 14 22:46:37 2022 ] Eval epoch: 56 +[ Wed Sep 14 22:48:10 2022 ] Mean test loss of 796 batches: 3.3822531700134277. +[ Wed Sep 14 22:48:11 2022 ] Top1: 42.46% +[ Wed Sep 14 22:48:11 2022 ] Top5: 75.58% +[ Wed Sep 14 22:48:11 2022 ] Training epoch: 57 +[ Wed Sep 14 22:49:21 2022 ] Batch(91/243) done. Loss: 0.1433 lr:0.100000 network_time: 0.0309 +[ Wed Sep 14 22:50:34 2022 ] Batch(191/243) done. Loss: 0.2230 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 22:51:12 2022 ] Eval epoch: 57 +[ Wed Sep 14 22:52:46 2022 ] Mean test loss of 796 batches: 3.307635545730591. +[ Wed Sep 14 22:52:47 2022 ] Top1: 41.26% +[ Wed Sep 14 22:52:47 2022 ] Top5: 73.64% +[ Wed Sep 14 22:52:48 2022 ] Training epoch: 58 +[ Wed Sep 14 22:53:26 2022 ] Batch(48/243) done. Loss: 0.2950 lr:0.100000 network_time: 0.0260 +[ Wed Sep 14 22:54:39 2022 ] Batch(148/243) done. Loss: 0.1734 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 22:55:48 2022 ] Eval epoch: 58 +[ Wed Sep 14 22:57:21 2022 ] Mean test loss of 796 batches: 3.419945240020752. +[ Wed Sep 14 22:57:22 2022 ] Top1: 42.66% +[ Wed Sep 14 22:57:22 2022 ] Top5: 74.95% +[ Wed Sep 14 22:57:22 2022 ] Training epoch: 59 +[ Wed Sep 14 22:57:30 2022 ] Batch(5/243) done. Loss: 0.3432 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 22:58:43 2022 ] Batch(105/243) done. Loss: 0.3520 lr:0.100000 network_time: 0.0338 +[ Wed Sep 14 22:59:56 2022 ] Batch(205/243) done. Loss: 0.1924 lr:0.100000 network_time: 0.0313 +[ Wed Sep 14 23:00:23 2022 ] Eval epoch: 59 +[ Wed Sep 14 23:01:57 2022 ] Mean test loss of 796 batches: 2.8412914276123047. +[ Wed Sep 14 23:01:57 2022 ] Top1: 47.84% +[ Wed Sep 14 23:01:58 2022 ] Top5: 79.38% +[ Wed Sep 14 23:01:58 2022 ] Training epoch: 60 +[ Wed Sep 14 23:02:47 2022 ] Batch(62/243) done. 
Loss: 0.1149 lr:0.100000 network_time: 0.0304 +[ Wed Sep 14 23:04:00 2022 ] Batch(162/243) done. Loss: 0.2884 lr:0.100000 network_time: 0.0282 +[ Wed Sep 14 23:04:58 2022 ] Eval epoch: 60 +[ Wed Sep 14 23:06:32 2022 ] Mean test loss of 796 batches: 3.3663244247436523. +[ Wed Sep 14 23:06:33 2022 ] Top1: 42.97% +[ Wed Sep 14 23:06:33 2022 ] Top5: 75.24% +[ Wed Sep 14 23:06:33 2022 ] Training epoch: 61 +[ Wed Sep 14 23:06:51 2022 ] Batch(19/243) done. Loss: 0.1210 lr:0.010000 network_time: 0.0301 +[ Wed Sep 14 23:08:04 2022 ] Batch(119/243) done. Loss: 0.0949 lr:0.010000 network_time: 0.0270 +[ Wed Sep 14 23:09:16 2022 ] Batch(219/243) done. Loss: 0.0451 lr:0.010000 network_time: 0.0265 +[ Wed Sep 14 23:09:33 2022 ] Eval epoch: 61 +[ Wed Sep 14 23:11:07 2022 ] Mean test loss of 796 batches: 2.6556432247161865. +[ Wed Sep 14 23:11:08 2022 ] Top1: 51.52% +[ Wed Sep 14 23:11:09 2022 ] Top5: 81.85% +[ Wed Sep 14 23:11:09 2022 ] Training epoch: 62 +[ Wed Sep 14 23:12:08 2022 ] Batch(76/243) done. Loss: 0.0246 lr:0.010000 network_time: 0.0308 +[ Wed Sep 14 23:13:21 2022 ] Batch(176/243) done. Loss: 0.0368 lr:0.010000 network_time: 0.0273 +[ Wed Sep 14 23:14:09 2022 ] Eval epoch: 62 +[ Wed Sep 14 23:15:43 2022 ] Mean test loss of 796 batches: 2.673084259033203. +[ Wed Sep 14 23:15:43 2022 ] Top1: 51.91% +[ Wed Sep 14 23:15:43 2022 ] Top5: 82.23% +[ Wed Sep 14 23:15:44 2022 ] Training epoch: 63 +[ Wed Sep 14 23:16:12 2022 ] Batch(33/243) done. Loss: 0.0230 lr:0.010000 network_time: 0.0300 +[ Wed Sep 14 23:17:24 2022 ] Batch(133/243) done. Loss: 0.0565 lr:0.010000 network_time: 0.0332 +[ Wed Sep 14 23:18:37 2022 ] Batch(233/243) done. Loss: 0.0326 lr:0.010000 network_time: 0.0257 +[ Wed Sep 14 23:18:44 2022 ] Eval epoch: 63 +[ Wed Sep 14 23:20:17 2022 ] Mean test loss of 796 batches: 2.6055307388305664. 
+[ Wed Sep 14 23:20:18 2022 ] Top1: 52.54% +[ Wed Sep 14 23:20:19 2022 ] Top5: 82.33% +[ Wed Sep 14 23:20:19 2022 ] Training epoch: 64 +[ Wed Sep 14 23:21:28 2022 ] Batch(90/243) done. Loss: 0.0097 lr:0.010000 network_time: 0.0307 +[ Wed Sep 14 23:22:41 2022 ] Batch(190/243) done. Loss: 0.0286 lr:0.010000 network_time: 0.0270 +[ Wed Sep 14 23:23:19 2022 ] Eval epoch: 64 +[ Wed Sep 14 23:24:52 2022 ] Mean test loss of 796 batches: 2.6478676795959473. +[ Wed Sep 14 23:24:53 2022 ] Top1: 51.81% +[ Wed Sep 14 23:24:53 2022 ] Top5: 82.13% +[ Wed Sep 14 23:24:53 2022 ] Training epoch: 65 +[ Wed Sep 14 23:25:31 2022 ] Batch(47/243) done. Loss: 0.0156 lr:0.010000 network_time: 0.0270 +[ Wed Sep 14 23:26:44 2022 ] Batch(147/243) done. Loss: 0.0217 lr:0.010000 network_time: 0.0272 +[ Wed Sep 14 23:27:54 2022 ] Eval epoch: 65 +[ Wed Sep 14 23:29:27 2022 ] Mean test loss of 796 batches: 2.712385892868042. +[ Wed Sep 14 23:29:28 2022 ] Top1: 52.71% +[ Wed Sep 14 23:29:29 2022 ] Top5: 82.57% +[ Wed Sep 14 23:29:29 2022 ] Training epoch: 66 +[ Wed Sep 14 23:29:35 2022 ] Batch(4/243) done. Loss: 0.0296 lr:0.010000 network_time: 0.0305 +[ Wed Sep 14 23:30:48 2022 ] Batch(104/243) done. Loss: 0.0082 lr:0.010000 network_time: 0.0283 +[ Wed Sep 14 23:32:01 2022 ] Batch(204/243) done. Loss: 0.0840 lr:0.010000 network_time: 0.0287 +[ Wed Sep 14 23:32:29 2022 ] Eval epoch: 66 +[ Wed Sep 14 23:34:03 2022 ] Mean test loss of 796 batches: 2.6864709854125977. +[ Wed Sep 14 23:34:03 2022 ] Top1: 52.50% +[ Wed Sep 14 23:34:03 2022 ] Top5: 82.39% +[ Wed Sep 14 23:34:04 2022 ] Training epoch: 67 +[ Wed Sep 14 23:34:52 2022 ] Batch(61/243) done. Loss: 0.0235 lr:0.010000 network_time: 0.0258 +[ Wed Sep 14 23:36:05 2022 ] Batch(161/243) done. Loss: 0.0380 lr:0.010000 network_time: 0.0283 +[ Wed Sep 14 23:37:04 2022 ] Eval epoch: 67 +[ Wed Sep 14 23:38:37 2022 ] Mean test loss of 796 batches: 2.689728260040283. 
+[ Wed Sep 14 23:38:37 2022 ] Top1: 52.35% +[ Wed Sep 14 23:38:38 2022 ] Top5: 82.37% +[ Wed Sep 14 23:38:38 2022 ] Training epoch: 68 +[ Wed Sep 14 23:38:55 2022 ] Batch(18/243) done. Loss: 0.0303 lr:0.010000 network_time: 0.0293 +[ Wed Sep 14 23:40:08 2022 ] Batch(118/243) done. Loss: 0.0173 lr:0.010000 network_time: 0.0286 +[ Wed Sep 14 23:41:20 2022 ] Batch(218/243) done. Loss: 0.0388 lr:0.010000 network_time: 0.0266 +[ Wed Sep 14 23:41:38 2022 ] Eval epoch: 68 +[ Wed Sep 14 23:43:12 2022 ] Mean test loss of 796 batches: 2.7163398265838623. +[ Wed Sep 14 23:43:13 2022 ] Top1: 51.46% +[ Wed Sep 14 23:43:13 2022 ] Top5: 81.70% +[ Wed Sep 14 23:43:13 2022 ] Training epoch: 69 +[ Wed Sep 14 23:44:12 2022 ] Batch(75/243) done. Loss: 0.0145 lr:0.010000 network_time: 0.0321 +[ Wed Sep 14 23:45:24 2022 ] Batch(175/243) done. Loss: 0.0141 lr:0.010000 network_time: 0.0321 +[ Wed Sep 14 23:46:14 2022 ] Eval epoch: 69 +[ Wed Sep 14 23:47:47 2022 ] Mean test loss of 796 batches: 2.7593424320220947. +[ Wed Sep 14 23:47:47 2022 ] Top1: 51.67% +[ Wed Sep 14 23:47:48 2022 ] Top5: 81.67% +[ Wed Sep 14 23:47:48 2022 ] Training epoch: 70 +[ Wed Sep 14 23:48:15 2022 ] Batch(32/243) done. Loss: 0.0063 lr:0.010000 network_time: 0.0278 +[ Wed Sep 14 23:49:28 2022 ] Batch(132/243) done. Loss: 0.0079 lr:0.010000 network_time: 0.0273 +[ Wed Sep 14 23:50:41 2022 ] Batch(232/243) done. Loss: 0.0117 lr:0.010000 network_time: 0.0312 +[ Wed Sep 14 23:50:48 2022 ] Eval epoch: 70 +[ Wed Sep 14 23:52:22 2022 ] Mean test loss of 796 batches: 2.6540184020996094. +[ Wed Sep 14 23:52:23 2022 ] Top1: 53.21% +[ Wed Sep 14 23:52:23 2022 ] Top5: 82.79% +[ Wed Sep 14 23:52:24 2022 ] Training epoch: 71 +[ Wed Sep 14 23:53:32 2022 ] Batch(89/243) done. Loss: 0.0049 lr:0.010000 network_time: 0.0321 +[ Wed Sep 14 23:54:45 2022 ] Batch(189/243) done. 
Loss: 0.0227 lr:0.010000 network_time: 0.0279 +[ Wed Sep 14 23:55:24 2022 ] Eval epoch: 71 +[ Wed Sep 14 23:56:57 2022 ] Mean test loss of 796 batches: 2.771545648574829. +[ Wed Sep 14 23:56:58 2022 ] Top1: 50.36% +[ Wed Sep 14 23:56:58 2022 ] Top5: 81.09% +[ Wed Sep 14 23:56:59 2022 ] Training epoch: 72 +[ Wed Sep 14 23:57:36 2022 ] Batch(46/243) done. Loss: 0.0092 lr:0.010000 network_time: 0.0273 +[ Wed Sep 14 23:58:49 2022 ] Batch(146/243) done. Loss: 0.0121 lr:0.010000 network_time: 0.0303 +[ Wed Sep 14 23:59:59 2022 ] Eval epoch: 72 +[ Thu Sep 15 00:01:33 2022 ] Mean test loss of 796 batches: 2.7130022048950195. +[ Thu Sep 15 00:01:33 2022 ] Top1: 52.53% +[ Thu Sep 15 00:01:34 2022 ] Top5: 82.31% +[ Thu Sep 15 00:01:34 2022 ] Training epoch: 73 +[ Thu Sep 15 00:01:40 2022 ] Batch(3/243) done. Loss: 0.0085 lr:0.010000 network_time: 0.0277 +[ Thu Sep 15 00:02:53 2022 ] Batch(103/243) done. Loss: 0.0057 lr:0.010000 network_time: 0.0288 +[ Thu Sep 15 00:04:05 2022 ] Batch(203/243) done. Loss: 0.0071 lr:0.010000 network_time: 0.0289 +[ Thu Sep 15 00:04:34 2022 ] Eval epoch: 73 +[ Thu Sep 15 00:06:07 2022 ] Mean test loss of 796 batches: 2.7051308155059814. +[ Thu Sep 15 00:06:07 2022 ] Top1: 52.59% +[ Thu Sep 15 00:06:08 2022 ] Top5: 82.37% +[ Thu Sep 15 00:06:08 2022 ] Training epoch: 74 +[ Thu Sep 15 00:06:55 2022 ] Batch(60/243) done. Loss: 0.0133 lr:0.010000 network_time: 0.0303 +[ Thu Sep 15 00:08:08 2022 ] Batch(160/243) done. Loss: 0.0161 lr:0.010000 network_time: 0.0272 +[ Thu Sep 15 00:09:08 2022 ] Eval epoch: 74 +[ Thu Sep 15 00:10:42 2022 ] Mean test loss of 796 batches: 2.7454113960266113. +[ Thu Sep 15 00:10:43 2022 ] Top1: 53.42% +[ Thu Sep 15 00:10:43 2022 ] Top5: 82.85% +[ Thu Sep 15 00:10:43 2022 ] Training epoch: 75 +[ Thu Sep 15 00:10:59 2022 ] Batch(17/243) done. Loss: 0.0106 lr:0.010000 network_time: 0.0302 +[ Thu Sep 15 00:12:12 2022 ] Batch(117/243) done. 
Loss: 0.0101 lr:0.010000 network_time: 0.0315 +[ Thu Sep 15 00:13:25 2022 ] Batch(217/243) done. Loss: 0.0074 lr:0.010000 network_time: 0.0309 +[ Thu Sep 15 00:13:44 2022 ] Eval epoch: 75 +[ Thu Sep 15 00:15:17 2022 ] Mean test loss of 796 batches: 2.6957931518554688. +[ Thu Sep 15 00:15:17 2022 ] Top1: 53.34% +[ Thu Sep 15 00:15:18 2022 ] Top5: 82.92% +[ Thu Sep 15 00:15:18 2022 ] Training epoch: 76 +[ Thu Sep 15 00:16:15 2022 ] Batch(74/243) done. Loss: 0.0067 lr:0.010000 network_time: 0.0269 +[ Thu Sep 15 00:17:28 2022 ] Batch(174/243) done. Loss: 0.0176 lr:0.010000 network_time: 0.0273 +[ Thu Sep 15 00:18:18 2022 ] Eval epoch: 76 +[ Thu Sep 15 00:19:51 2022 ] Mean test loss of 796 batches: 2.671660900115967. +[ Thu Sep 15 00:19:52 2022 ] Top1: 52.39% +[ Thu Sep 15 00:19:52 2022 ] Top5: 82.38% +[ Thu Sep 15 00:19:52 2022 ] Training epoch: 77 +[ Thu Sep 15 00:20:18 2022 ] Batch(31/243) done. Loss: 0.0083 lr:0.010000 network_time: 0.0286 +[ Thu Sep 15 00:21:31 2022 ] Batch(131/243) done. Loss: 0.0144 lr:0.010000 network_time: 0.0280 +[ Thu Sep 15 00:22:44 2022 ] Batch(231/243) done. Loss: 0.0086 lr:0.010000 network_time: 0.0455 +[ Thu Sep 15 00:22:53 2022 ] Eval epoch: 77 +[ Thu Sep 15 00:24:25 2022 ] Mean test loss of 796 batches: 2.7283172607421875. +[ Thu Sep 15 00:24:26 2022 ] Top1: 52.29% +[ Thu Sep 15 00:24:26 2022 ] Top5: 82.03% +[ Thu Sep 15 00:24:26 2022 ] Training epoch: 78 +[ Thu Sep 15 00:25:34 2022 ] Batch(88/243) done. Loss: 0.0064 lr:0.010000 network_time: 0.0271 +[ Thu Sep 15 00:26:47 2022 ] Batch(188/243) done. Loss: 0.0194 lr:0.010000 network_time: 0.0329 +[ Thu Sep 15 00:27:27 2022 ] Eval epoch: 78 +[ Thu Sep 15 00:29:00 2022 ] Mean test loss of 796 batches: 2.826481580734253. +[ Thu Sep 15 00:29:00 2022 ] Top1: 51.79% +[ Thu Sep 15 00:29:00 2022 ] Top5: 81.89% +[ Thu Sep 15 00:29:01 2022 ] Training epoch: 79 +[ Thu Sep 15 00:29:37 2022 ] Batch(45/243) done. 
Loss: 0.0074 lr:0.010000 network_time: 0.0269 +[ Thu Sep 15 00:30:50 2022 ] Batch(145/243) done. Loss: 0.0053 lr:0.010000 network_time: 0.0278 +[ Thu Sep 15 00:32:01 2022 ] Eval epoch: 79 +[ Thu Sep 15 00:33:34 2022 ] Mean test loss of 796 batches: 2.7605230808258057. +[ Thu Sep 15 00:33:34 2022 ] Top1: 52.66% +[ Thu Sep 15 00:33:35 2022 ] Top5: 82.48% +[ Thu Sep 15 00:33:35 2022 ] Training epoch: 80 +[ Thu Sep 15 00:33:40 2022 ] Batch(2/243) done. Loss: 0.0047 lr:0.010000 network_time: 0.0335 +[ Thu Sep 15 00:34:53 2022 ] Batch(102/243) done. Loss: 0.0056 lr:0.010000 network_time: 0.0275 +[ Thu Sep 15 00:36:06 2022 ] Batch(202/243) done. Loss: 0.0122 lr:0.010000 network_time: 0.0267 +[ Thu Sep 15 00:36:35 2022 ] Eval epoch: 80 +[ Thu Sep 15 00:38:08 2022 ] Mean test loss of 796 batches: 2.718104600906372. +[ Thu Sep 15 00:38:09 2022 ] Top1: 52.58% +[ Thu Sep 15 00:38:09 2022 ] Top5: 82.38% +[ Thu Sep 15 00:38:09 2022 ] Training epoch: 81 +[ Thu Sep 15 00:38:56 2022 ] Batch(59/243) done. Loss: 0.0089 lr:0.001000 network_time: 0.0282 +[ Thu Sep 15 00:40:09 2022 ] Batch(159/243) done. Loss: 0.0033 lr:0.001000 network_time: 0.0361 +[ Thu Sep 15 00:41:09 2022 ] Eval epoch: 81 +[ Thu Sep 15 00:42:43 2022 ] Mean test loss of 796 batches: 2.718365430831909. +[ Thu Sep 15 00:42:43 2022 ] Top1: 52.48% +[ Thu Sep 15 00:42:44 2022 ] Top5: 82.39% +[ Thu Sep 15 00:42:44 2022 ] Training epoch: 82 +[ Thu Sep 15 00:42:59 2022 ] Batch(16/243) done. Loss: 0.0101 lr:0.001000 network_time: 0.0280 +[ Thu Sep 15 00:44:12 2022 ] Batch(116/243) done. Loss: 0.0033 lr:0.001000 network_time: 0.0307 +[ Thu Sep 15 00:45:25 2022 ] Batch(216/243) done. Loss: 0.0155 lr:0.001000 network_time: 0.0332 +[ Thu Sep 15 00:45:44 2022 ] Eval epoch: 82 +[ Thu Sep 15 00:47:17 2022 ] Mean test loss of 796 batches: 2.7449679374694824. 
+[ Thu Sep 15 00:47:17 2022 ] Top1: 52.72% +[ Thu Sep 15 00:47:17 2022 ] Top5: 82.64% +[ Thu Sep 15 00:47:18 2022 ] Training epoch: 83 +[ Thu Sep 15 00:48:15 2022 ] Batch(73/243) done. Loss: 0.0083 lr:0.001000 network_time: 0.0306 +[ Thu Sep 15 00:49:27 2022 ] Batch(173/243) done. Loss: 0.0050 lr:0.001000 network_time: 0.0270 +[ Thu Sep 15 00:50:18 2022 ] Eval epoch: 83 +[ Thu Sep 15 00:51:51 2022 ] Mean test loss of 796 batches: 2.822268486022949. +[ Thu Sep 15 00:51:51 2022 ] Top1: 52.45% +[ Thu Sep 15 00:51:52 2022 ] Top5: 82.06% +[ Thu Sep 15 00:51:52 2022 ] Training epoch: 84 +[ Thu Sep 15 00:52:18 2022 ] Batch(30/243) done. Loss: 0.0095 lr:0.001000 network_time: 0.0279 +[ Thu Sep 15 00:53:30 2022 ] Batch(130/243) done. Loss: 0.0173 lr:0.001000 network_time: 0.0269 +[ Thu Sep 15 00:54:43 2022 ] Batch(230/243) done. Loss: 0.0036 lr:0.001000 network_time: 0.0300 +[ Thu Sep 15 00:54:52 2022 ] Eval epoch: 84 +[ Thu Sep 15 00:56:26 2022 ] Mean test loss of 796 batches: 2.6593453884124756. +[ Thu Sep 15 00:56:27 2022 ] Top1: 53.58% +[ Thu Sep 15 00:56:27 2022 ] Top5: 83.14% +[ Thu Sep 15 00:56:27 2022 ] Training epoch: 85 +[ Thu Sep 15 00:57:34 2022 ] Batch(87/243) done. Loss: 0.0185 lr:0.001000 network_time: 0.0267 +[ Thu Sep 15 00:58:47 2022 ] Batch(187/243) done. Loss: 0.0083 lr:0.001000 network_time: 0.0271 +[ Thu Sep 15 00:59:28 2022 ] Eval epoch: 85 +[ Thu Sep 15 01:01:01 2022 ] Mean test loss of 796 batches: 2.702404737472534. +[ Thu Sep 15 01:01:01 2022 ] Top1: 53.10% +[ Thu Sep 15 01:01:02 2022 ] Top5: 82.78% +[ Thu Sep 15 01:01:02 2022 ] Training epoch: 86 +[ Thu Sep 15 01:01:38 2022 ] Batch(44/243) done. Loss: 0.0119 lr:0.001000 network_time: 0.0265 +[ Thu Sep 15 01:02:50 2022 ] Batch(144/243) done. Loss: 0.0163 lr:0.001000 network_time: 0.0269 +[ Thu Sep 15 01:04:02 2022 ] Eval epoch: 86 +[ Thu Sep 15 01:05:35 2022 ] Mean test loss of 796 batches: 2.6936655044555664. 
+[ Thu Sep 15 01:05:36 2022 ] Top1: 52.86% +[ Thu Sep 15 01:05:36 2022 ] Top5: 82.57% +[ Thu Sep 15 01:05:36 2022 ] Training epoch: 87 +[ Thu Sep 15 01:05:41 2022 ] Batch(1/243) done. Loss: 0.0072 lr:0.001000 network_time: 0.0245 +[ Thu Sep 15 01:06:54 2022 ] Batch(101/243) done. Loss: 0.0078 lr:0.001000 network_time: 0.0275 +[ Thu Sep 15 01:08:06 2022 ] Batch(201/243) done. Loss: 0.0066 lr:0.001000 network_time: 0.0276 +[ Thu Sep 15 01:08:37 2022 ] Eval epoch: 87 +[ Thu Sep 15 01:10:10 2022 ] Mean test loss of 796 batches: 2.7110776901245117. +[ Thu Sep 15 01:10:11 2022 ] Top1: 52.20% +[ Thu Sep 15 01:10:11 2022 ] Top5: 82.28% +[ Thu Sep 15 01:10:11 2022 ] Training epoch: 88 +[ Thu Sep 15 01:10:57 2022 ] Batch(58/243) done. Loss: 0.0064 lr:0.001000 network_time: 0.0315 +[ Thu Sep 15 01:12:10 2022 ] Batch(158/243) done. Loss: 0.0125 lr:0.001000 network_time: 0.0266 +[ Thu Sep 15 01:13:11 2022 ] Eval epoch: 88 +[ Thu Sep 15 01:14:44 2022 ] Mean test loss of 796 batches: 2.755002498626709. +[ Thu Sep 15 01:14:45 2022 ] Top1: 52.74% +[ Thu Sep 15 01:14:45 2022 ] Top5: 82.36% +[ Thu Sep 15 01:14:45 2022 ] Training epoch: 89 +[ Thu Sep 15 01:15:00 2022 ] Batch(15/243) done. Loss: 0.0059 lr:0.001000 network_time: 0.0282 +[ Thu Sep 15 01:16:13 2022 ] Batch(115/243) done. Loss: 0.0055 lr:0.001000 network_time: 0.0269 +[ Thu Sep 15 01:17:26 2022 ] Batch(215/243) done. Loss: 0.0053 lr:0.001000 network_time: 0.0314 +[ Thu Sep 15 01:17:46 2022 ] Eval epoch: 89 +[ Thu Sep 15 01:19:19 2022 ] Mean test loss of 796 batches: 2.7093260288238525. +[ Thu Sep 15 01:19:19 2022 ] Top1: 52.87% +[ Thu Sep 15 01:19:20 2022 ] Top5: 82.58% +[ Thu Sep 15 01:19:20 2022 ] Training epoch: 90 +[ Thu Sep 15 01:20:16 2022 ] Batch(72/243) done. Loss: 0.0056 lr:0.001000 network_time: 0.0516 +[ Thu Sep 15 01:21:29 2022 ] Batch(172/243) done. 
Loss: 0.0102 lr:0.001000 network_time: 0.0275 +[ Thu Sep 15 01:22:20 2022 ] Eval epoch: 90 +[ Thu Sep 15 01:23:53 2022 ] Mean test loss of 796 batches: 2.71431303024292. +[ Thu Sep 15 01:23:54 2022 ] Top1: 53.00% +[ Thu Sep 15 01:23:54 2022 ] Top5: 82.56% +[ Thu Sep 15 01:23:54 2022 ] Training epoch: 91 +[ Thu Sep 15 01:24:19 2022 ] Batch(29/243) done. Loss: 0.0107 lr:0.001000 network_time: 0.0299 +[ Thu Sep 15 01:25:32 2022 ] Batch(129/243) done. Loss: 0.0092 lr:0.001000 network_time: 0.0271 +[ Thu Sep 15 01:26:45 2022 ] Batch(229/243) done. Loss: 0.0047 lr:0.001000 network_time: 0.0313 +[ Thu Sep 15 01:26:54 2022 ] Eval epoch: 91 +[ Thu Sep 15 01:28:28 2022 ] Mean test loss of 796 batches: 2.732354164123535. +[ Thu Sep 15 01:28:28 2022 ] Top1: 52.52% +[ Thu Sep 15 01:28:29 2022 ] Top5: 82.25% +[ Thu Sep 15 01:28:29 2022 ] Training epoch: 92 +[ Thu Sep 15 01:29:35 2022 ] Batch(86/243) done. Loss: 0.0046 lr:0.001000 network_time: 0.0280 +[ Thu Sep 15 01:30:48 2022 ] Batch(186/243) done. Loss: 0.0053 lr:0.001000 network_time: 0.0270 +[ Thu Sep 15 01:31:29 2022 ] Eval epoch: 92 +[ Thu Sep 15 01:33:03 2022 ] Mean test loss of 796 batches: 2.710573196411133. +[ Thu Sep 15 01:33:03 2022 ] Top1: 53.56% +[ Thu Sep 15 01:33:04 2022 ] Top5: 83.14% +[ Thu Sep 15 01:33:04 2022 ] Training epoch: 93 +[ Thu Sep 15 01:33:39 2022 ] Batch(43/243) done. Loss: 0.0047 lr:0.001000 network_time: 0.0268 +[ Thu Sep 15 01:34:52 2022 ] Batch(143/243) done. Loss: 0.0108 lr:0.001000 network_time: 0.0303 +[ Thu Sep 15 01:36:04 2022 ] Eval epoch: 93 +[ Thu Sep 15 01:37:37 2022 ] Mean test loss of 796 batches: 2.7435684204101562. +[ Thu Sep 15 01:37:38 2022 ] Top1: 52.97% +[ Thu Sep 15 01:37:38 2022 ] Top5: 82.68% +[ Thu Sep 15 01:37:38 2022 ] Training epoch: 94 +[ Thu Sep 15 01:37:42 2022 ] Batch(0/243) done. Loss: 0.0251 lr:0.001000 network_time: 0.0575 +[ Thu Sep 15 01:38:55 2022 ] Batch(100/243) done. 
Loss: 0.0031 lr:0.001000 network_time: 0.0354 +[ Thu Sep 15 01:40:08 2022 ] Batch(200/243) done. Loss: 0.0062 lr:0.001000 network_time: 0.0263 +[ Thu Sep 15 01:40:39 2022 ] Eval epoch: 94 +[ Thu Sep 15 01:42:12 2022 ] Mean test loss of 796 batches: 2.8339927196502686. +[ Thu Sep 15 01:42:13 2022 ] Top1: 49.99% +[ Thu Sep 15 01:42:13 2022 ] Top5: 80.98% +[ Thu Sep 15 01:42:13 2022 ] Training epoch: 95 +[ Thu Sep 15 01:42:58 2022 ] Batch(57/243) done. Loss: 0.0037 lr:0.001000 network_time: 0.0306 +[ Thu Sep 15 01:44:11 2022 ] Batch(157/243) done. Loss: 0.0046 lr:0.001000 network_time: 0.0276 +[ Thu Sep 15 01:45:14 2022 ] Eval epoch: 95 +[ Thu Sep 15 01:46:47 2022 ] Mean test loss of 796 batches: 2.682378053665161. +[ Thu Sep 15 01:46:47 2022 ] Top1: 52.81% +[ Thu Sep 15 01:46:48 2022 ] Top5: 82.45% +[ Thu Sep 15 01:46:48 2022 ] Training epoch: 96 +[ Thu Sep 15 01:47:02 2022 ] Batch(14/243) done. Loss: 0.0071 lr:0.001000 network_time: 0.0259 +[ Thu Sep 15 01:48:15 2022 ] Batch(114/243) done. Loss: 0.0159 lr:0.001000 network_time: 0.0266 +[ Thu Sep 15 01:49:28 2022 ] Batch(214/243) done. Loss: 0.0060 lr:0.001000 network_time: 0.0269 +[ Thu Sep 15 01:49:48 2022 ] Eval epoch: 96 +[ Thu Sep 15 01:51:22 2022 ] Mean test loss of 796 batches: 2.7707955837249756. +[ Thu Sep 15 01:51:22 2022 ] Top1: 52.46% +[ Thu Sep 15 01:51:23 2022 ] Top5: 82.37% +[ Thu Sep 15 01:51:23 2022 ] Training epoch: 97 +[ Thu Sep 15 01:52:18 2022 ] Batch(71/243) done. Loss: 0.0095 lr:0.001000 network_time: 0.0325 +[ Thu Sep 15 01:53:31 2022 ] Batch(171/243) done. Loss: 0.0051 lr:0.001000 network_time: 0.0266 +[ Thu Sep 15 01:54:23 2022 ] Eval epoch: 97 +[ Thu Sep 15 01:55:56 2022 ] Mean test loss of 796 batches: 2.725510597229004. +[ Thu Sep 15 01:55:57 2022 ] Top1: 52.79% +[ Thu Sep 15 01:55:57 2022 ] Top5: 82.58% +[ Thu Sep 15 01:55:58 2022 ] Training epoch: 98 +[ Thu Sep 15 01:56:22 2022 ] Batch(28/243) done. 
Loss: 0.0046 lr:0.001000 network_time: 0.0261 +[ Thu Sep 15 01:57:34 2022 ] Batch(128/243) done. Loss: 0.0079 lr:0.001000 network_time: 0.0325 +[ Thu Sep 15 01:58:47 2022 ] Batch(228/243) done. Loss: 0.0101 lr:0.001000 network_time: 0.0305 +[ Thu Sep 15 01:58:58 2022 ] Eval epoch: 98 +[ Thu Sep 15 02:00:31 2022 ] Mean test loss of 796 batches: 2.7130801677703857. +[ Thu Sep 15 02:00:32 2022 ] Top1: 53.37% +[ Thu Sep 15 02:00:32 2022 ] Top5: 82.82% +[ Thu Sep 15 02:00:32 2022 ] Training epoch: 99 +[ Thu Sep 15 02:01:38 2022 ] Batch(85/243) done. Loss: 0.0145 lr:0.001000 network_time: 0.0272 +[ Thu Sep 15 02:02:51 2022 ] Batch(185/243) done. Loss: 0.0849 lr:0.001000 network_time: 0.0275 +[ Thu Sep 15 02:03:32 2022 ] Eval epoch: 99 +[ Thu Sep 15 02:05:06 2022 ] Mean test loss of 796 batches: 2.702904224395752. +[ Thu Sep 15 02:05:06 2022 ] Top1: 53.17% +[ Thu Sep 15 02:05:07 2022 ] Top5: 82.73% +[ Thu Sep 15 02:05:07 2022 ] Training epoch: 100 +[ Thu Sep 15 02:05:41 2022 ] Batch(42/243) done. Loss: 0.0102 lr:0.001000 network_time: 0.0273 +[ Thu Sep 15 02:06:54 2022 ] Batch(142/243) done. Loss: 0.0022 lr:0.001000 network_time: 0.0318 +[ Thu Sep 15 02:08:07 2022 ] Batch(242/243) done. Loss: 0.0068 lr:0.001000 network_time: 0.0308 +[ Thu Sep 15 02:08:07 2022 ] Eval epoch: 100 +[ Thu Sep 15 02:09:40 2022 ] Mean test loss of 796 batches: 2.74697208404541. +[ Thu Sep 15 02:09:41 2022 ] Top1: 53.47% +[ Thu Sep 15 02:09:42 2022 ] Top5: 82.99% +[ Thu Sep 15 02:09:42 2022 ] Training epoch: 101 +[ Thu Sep 15 02:10:58 2022 ] Batch(99/243) done. Loss: 0.0039 lr:0.000100 network_time: 0.0306 +[ Thu Sep 15 02:12:11 2022 ] Batch(199/243) done. Loss: 0.0048 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 02:12:42 2022 ] Eval epoch: 101 +[ Thu Sep 15 02:14:16 2022 ] Mean test loss of 796 batches: 2.763756275177002. 
+[ Thu Sep 15 02:14:16 2022 ] Top1: 53.25% +[ Thu Sep 15 02:14:17 2022 ] Top5: 82.73% +[ Thu Sep 15 02:14:17 2022 ] Training epoch: 102 +[ Thu Sep 15 02:15:02 2022 ] Batch(56/243) done. Loss: 0.0045 lr:0.000100 network_time: 0.0257 +[ Thu Sep 15 02:16:15 2022 ] Batch(156/243) done. Loss: 0.0088 lr:0.000100 network_time: 0.0262 +[ Thu Sep 15 02:17:18 2022 ] Eval epoch: 102 +[ Thu Sep 15 02:18:52 2022 ] Mean test loss of 796 batches: 2.7571358680725098. +[ Thu Sep 15 02:18:52 2022 ] Top1: 52.87% +[ Thu Sep 15 02:18:52 2022 ] Top5: 82.69% +[ Thu Sep 15 02:18:53 2022 ] Training epoch: 103 +[ Thu Sep 15 02:19:06 2022 ] Batch(13/243) done. Loss: 0.0021 lr:0.000100 network_time: 0.0316 +[ Thu Sep 15 02:20:19 2022 ] Batch(113/243) done. Loss: 0.0048 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 02:21:31 2022 ] Batch(213/243) done. Loss: 0.0023 lr:0.000100 network_time: 0.0336 +[ Thu Sep 15 02:21:53 2022 ] Eval epoch: 103 +[ Thu Sep 15 02:23:26 2022 ] Mean test loss of 796 batches: 2.692415952682495. +[ Thu Sep 15 02:23:26 2022 ] Top1: 52.04% +[ Thu Sep 15 02:23:27 2022 ] Top5: 82.16% +[ Thu Sep 15 02:23:27 2022 ] Training epoch: 104 +[ Thu Sep 15 02:24:21 2022 ] Batch(70/243) done. Loss: 0.0102 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 02:25:34 2022 ] Batch(170/243) done. Loss: 0.0059 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 02:26:27 2022 ] Eval epoch: 104 +[ Thu Sep 15 02:28:00 2022 ] Mean test loss of 796 batches: 2.7434446811676025. +[ Thu Sep 15 02:28:00 2022 ] Top1: 53.44% +[ Thu Sep 15 02:28:01 2022 ] Top5: 82.78% +[ Thu Sep 15 02:28:01 2022 ] Training epoch: 105 +[ Thu Sep 15 02:28:25 2022 ] Batch(27/243) done. Loss: 0.0211 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 02:29:38 2022 ] Batch(127/243) done. Loss: 0.0127 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 02:30:50 2022 ] Batch(227/243) done. 
Loss: 0.0139 lr:0.000100 network_time: 0.0337 +[ Thu Sep 15 02:31:02 2022 ] Eval epoch: 105 +[ Thu Sep 15 02:32:35 2022 ] Mean test loss of 796 batches: 2.795769691467285. +[ Thu Sep 15 02:32:35 2022 ] Top1: 52.07% +[ Thu Sep 15 02:32:36 2022 ] Top5: 82.01% +[ Thu Sep 15 02:32:36 2022 ] Training epoch: 106 +[ Thu Sep 15 02:33:41 2022 ] Batch(84/243) done. Loss: 0.0073 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 02:34:54 2022 ] Batch(184/243) done. Loss: 0.0051 lr:0.000100 network_time: 0.0267 +[ Thu Sep 15 02:35:37 2022 ] Eval epoch: 106 +[ Thu Sep 15 02:37:10 2022 ] Mean test loss of 796 batches: 2.712216377258301. +[ Thu Sep 15 02:37:11 2022 ] Top1: 53.32% +[ Thu Sep 15 02:37:11 2022 ] Top5: 82.78% +[ Thu Sep 15 02:37:11 2022 ] Training epoch: 107 +[ Thu Sep 15 02:37:45 2022 ] Batch(41/243) done. Loss: 0.0143 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 02:38:58 2022 ] Batch(141/243) done. Loss: 0.0063 lr:0.000100 network_time: 0.0290 +[ Thu Sep 15 02:40:11 2022 ] Batch(241/243) done. Loss: 0.0055 lr:0.000100 network_time: 0.0302 +[ Thu Sep 15 02:40:12 2022 ] Eval epoch: 107 +[ Thu Sep 15 02:41:45 2022 ] Mean test loss of 796 batches: 2.7313079833984375. +[ Thu Sep 15 02:41:46 2022 ] Top1: 52.23% +[ Thu Sep 15 02:41:47 2022 ] Top5: 82.21% +[ Thu Sep 15 02:41:47 2022 ] Training epoch: 108 +[ Thu Sep 15 02:43:02 2022 ] Batch(98/243) done. Loss: 0.0028 lr:0.000100 network_time: 0.0365 +[ Thu Sep 15 02:44:15 2022 ] Batch(198/243) done. Loss: 0.0055 lr:0.000100 network_time: 0.0281 +[ Thu Sep 15 02:44:48 2022 ] Eval epoch: 108 +[ Thu Sep 15 02:46:21 2022 ] Mean test loss of 796 batches: 2.706906318664551. +[ Thu Sep 15 02:46:22 2022 ] Top1: 53.04% +[ Thu Sep 15 02:46:22 2022 ] Top5: 82.57% +[ Thu Sep 15 02:46:22 2022 ] Training epoch: 109 +[ Thu Sep 15 02:47:06 2022 ] Batch(55/243) done. Loss: 0.0072 lr:0.000100 network_time: 0.0318 +[ Thu Sep 15 02:48:18 2022 ] Batch(155/243) done. 
Loss: 0.0065 lr:0.000100 network_time: 0.0317 +[ Thu Sep 15 02:49:22 2022 ] Eval epoch: 109 +[ Thu Sep 15 02:50:56 2022 ] Mean test loss of 796 batches: 2.666076898574829. +[ Thu Sep 15 02:50:56 2022 ] Top1: 53.15% +[ Thu Sep 15 02:50:56 2022 ] Top5: 82.74% +[ Thu Sep 15 02:50:57 2022 ] Training epoch: 110 +[ Thu Sep 15 02:51:09 2022 ] Batch(12/243) done. Loss: 0.0069 lr:0.000100 network_time: 0.0263 +[ Thu Sep 15 02:52:22 2022 ] Batch(112/243) done. Loss: 0.0100 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 02:53:35 2022 ] Batch(212/243) done. Loss: 0.0072 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 02:53:57 2022 ] Eval epoch: 110 +[ Thu Sep 15 02:55:30 2022 ] Mean test loss of 796 batches: 2.782313585281372. +[ Thu Sep 15 02:55:31 2022 ] Top1: 50.79% +[ Thu Sep 15 02:55:31 2022 ] Top5: 81.20% +[ Thu Sep 15 02:55:31 2022 ] Training epoch: 111 +[ Thu Sep 15 02:56:25 2022 ] Batch(69/243) done. Loss: 0.0072 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 02:57:38 2022 ] Batch(169/243) done. Loss: 0.0077 lr:0.000100 network_time: 0.0303 +[ Thu Sep 15 02:58:31 2022 ] Eval epoch: 111 +[ Thu Sep 15 03:00:05 2022 ] Mean test loss of 796 batches: 2.7520694732666016. +[ Thu Sep 15 03:00:06 2022 ] Top1: 52.88% +[ Thu Sep 15 03:00:06 2022 ] Top5: 82.42% +[ Thu Sep 15 03:00:06 2022 ] Training epoch: 112 +[ Thu Sep 15 03:00:29 2022 ] Batch(26/243) done. Loss: 0.0079 lr:0.000100 network_time: 0.0264 +[ Thu Sep 15 03:01:42 2022 ] Batch(126/243) done. Loss: 0.0070 lr:0.000100 network_time: 0.0292 +[ Thu Sep 15 03:02:54 2022 ] Batch(226/243) done. Loss: 0.0054 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 03:03:06 2022 ] Eval epoch: 112 +[ Thu Sep 15 03:04:39 2022 ] Mean test loss of 796 batches: 2.70778226852417. +[ Thu Sep 15 03:04:40 2022 ] Top1: 52.64% +[ Thu Sep 15 03:04:40 2022 ] Top5: 82.31% +[ Thu Sep 15 03:04:40 2022 ] Training epoch: 113 +[ Thu Sep 15 03:05:45 2022 ] Batch(83/243) done. 
Loss: 0.0080 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 03:06:57 2022 ] Batch(183/243) done. Loss: 0.0024 lr:0.000100 network_time: 0.0257 +[ Thu Sep 15 03:07:41 2022 ] Eval epoch: 113 +[ Thu Sep 15 03:09:14 2022 ] Mean test loss of 796 batches: 2.731163740158081. +[ Thu Sep 15 03:09:15 2022 ] Top1: 52.66% +[ Thu Sep 15 03:09:15 2022 ] Top5: 82.24% +[ Thu Sep 15 03:09:16 2022 ] Training epoch: 114 +[ Thu Sep 15 03:09:48 2022 ] Batch(40/243) done. Loss: 0.0099 lr:0.000100 network_time: 0.0255 +[ Thu Sep 15 03:11:01 2022 ] Batch(140/243) done. Loss: 0.0066 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 03:12:14 2022 ] Batch(240/243) done. Loss: 0.0070 lr:0.000100 network_time: 0.0321 +[ Thu Sep 15 03:12:16 2022 ] Eval epoch: 114 +[ Thu Sep 15 03:13:49 2022 ] Mean test loss of 796 batches: 2.6991231441497803. +[ Thu Sep 15 03:13:49 2022 ] Top1: 52.93% +[ Thu Sep 15 03:13:50 2022 ] Top5: 82.75% +[ Thu Sep 15 03:13:50 2022 ] Training epoch: 115 +[ Thu Sep 15 03:15:04 2022 ] Batch(97/243) done. Loss: 0.0059 lr:0.000100 network_time: 0.0321 +[ Thu Sep 15 03:16:17 2022 ] Batch(197/243) done. Loss: 0.0041 lr:0.000100 network_time: 0.0268 +[ Thu Sep 15 03:16:50 2022 ] Eval epoch: 115 +[ Thu Sep 15 03:18:23 2022 ] Mean test loss of 796 batches: 2.6701998710632324. +[ Thu Sep 15 03:18:24 2022 ] Top1: 53.25% +[ Thu Sep 15 03:18:24 2022 ] Top5: 82.79% +[ Thu Sep 15 03:18:24 2022 ] Training epoch: 116 +[ Thu Sep 15 03:19:07 2022 ] Batch(54/243) done. Loss: 0.0048 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 03:20:20 2022 ] Batch(154/243) done. Loss: 0.0053 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 03:21:24 2022 ] Eval epoch: 116 +[ Thu Sep 15 03:22:57 2022 ] Mean test loss of 796 batches: 2.6070761680603027. +[ Thu Sep 15 03:22:58 2022 ] Top1: 53.54% +[ Thu Sep 15 03:22:58 2022 ] Top5: 83.07% +[ Thu Sep 15 03:22:58 2022 ] Training epoch: 117 +[ Thu Sep 15 03:23:10 2022 ] Batch(11/243) done. 
Loss: 0.0049 lr:0.000100 network_time: 0.0337 +[ Thu Sep 15 03:24:23 2022 ] Batch(111/243) done. Loss: 0.0064 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 03:25:36 2022 ] Batch(211/243) done. Loss: 0.0091 lr:0.000100 network_time: 0.0264 +[ Thu Sep 15 03:25:59 2022 ] Eval epoch: 117 +[ Thu Sep 15 03:27:32 2022 ] Mean test loss of 796 batches: 2.7557594776153564. +[ Thu Sep 15 03:27:33 2022 ] Top1: 53.14% +[ Thu Sep 15 03:27:34 2022 ] Top5: 82.57% +[ Thu Sep 15 03:27:34 2022 ] Training epoch: 118 +[ Thu Sep 15 03:28:27 2022 ] Batch(68/243) done. Loss: 0.0038 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 03:29:40 2022 ] Batch(168/243) done. Loss: 0.0060 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 03:30:34 2022 ] Eval epoch: 118 +[ Thu Sep 15 03:32:08 2022 ] Mean test loss of 796 batches: 2.7875142097473145. +[ Thu Sep 15 03:32:08 2022 ] Top1: 51.65% +[ Thu Sep 15 03:32:09 2022 ] Top5: 81.81% +[ Thu Sep 15 03:32:09 2022 ] Training epoch: 119 +[ Thu Sep 15 03:32:31 2022 ] Batch(25/243) done. Loss: 0.0088 lr:0.000100 network_time: 0.0341 +[ Thu Sep 15 03:33:44 2022 ] Batch(125/243) done. Loss: 0.0112 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 03:34:57 2022 ] Batch(225/243) done. Loss: 0.0063 lr:0.000100 network_time: 0.0305 +[ Thu Sep 15 03:35:09 2022 ] Eval epoch: 119 +[ Thu Sep 15 03:36:42 2022 ] Mean test loss of 796 batches: 2.72087025642395. +[ Thu Sep 15 03:36:43 2022 ] Top1: 52.42% +[ Thu Sep 15 03:36:43 2022 ] Top5: 82.27% +[ Thu Sep 15 03:36:44 2022 ] Training epoch: 120 +[ Thu Sep 15 03:37:47 2022 ] Batch(82/243) done. Loss: 0.0040 lr:0.000100 network_time: 0.0303 +[ Thu Sep 15 03:39:00 2022 ] Batch(182/243) done. Loss: 0.0095 lr:0.000100 network_time: 0.0283 +[ Thu Sep 15 03:39:44 2022 ] Eval epoch: 120 +[ Thu Sep 15 03:41:17 2022 ] Mean test loss of 796 batches: 2.7935328483581543. 
+[ Thu Sep 15 03:41:17 2022 ] Top1: 53.19% +[ Thu Sep 15 03:41:17 2022 ] Top5: 82.61% +[ Thu Sep 15 03:41:18 2022 ] Training epoch: 121 +[ Thu Sep 15 03:41:50 2022 ] Batch(39/243) done. Loss: 0.0101 lr:0.000100 network_time: 0.0292 +[ Thu Sep 15 03:43:03 2022 ] Batch(139/243) done. Loss: 0.0053 lr:0.000100 network_time: 0.0309 +[ Thu Sep 15 03:44:16 2022 ] Batch(239/243) done. Loss: 0.0054 lr:0.000100 network_time: 0.0265 +[ Thu Sep 15 03:44:18 2022 ] Eval epoch: 121 +[ Thu Sep 15 03:45:51 2022 ] Mean test loss of 796 batches: 2.624255657196045. +[ Thu Sep 15 03:45:52 2022 ] Top1: 53.57% +[ Thu Sep 15 03:45:52 2022 ] Top5: 83.05% +[ Thu Sep 15 03:45:53 2022 ] Training epoch: 122 +[ Thu Sep 15 03:47:06 2022 ] Batch(96/243) done. Loss: 0.0058 lr:0.000100 network_time: 0.0307 +[ Thu Sep 15 03:48:19 2022 ] Batch(196/243) done. Loss: 0.0084 lr:0.000100 network_time: 0.0303 +[ Thu Sep 15 03:48:53 2022 ] Eval epoch: 122 +[ Thu Sep 15 03:50:26 2022 ] Mean test loss of 796 batches: 2.694089889526367. +[ Thu Sep 15 03:50:26 2022 ] Top1: 52.66% +[ Thu Sep 15 03:50:27 2022 ] Top5: 82.58% +[ Thu Sep 15 03:50:27 2022 ] Training epoch: 123 +[ Thu Sep 15 03:51:09 2022 ] Batch(53/243) done. Loss: 0.0045 lr:0.000100 network_time: 0.0282 +[ Thu Sep 15 03:52:22 2022 ] Batch(153/243) done. Loss: 0.0060 lr:0.000100 network_time: 0.0311 +[ Thu Sep 15 03:53:27 2022 ] Eval epoch: 123 +[ Thu Sep 15 03:55:01 2022 ] Mean test loss of 796 batches: 2.6755058765411377. +[ Thu Sep 15 03:55:01 2022 ] Top1: 52.82% +[ Thu Sep 15 03:55:01 2022 ] Top5: 82.64% +[ Thu Sep 15 03:55:02 2022 ] Training epoch: 124 +[ Thu Sep 15 03:55:13 2022 ] Batch(10/243) done. Loss: 0.0052 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 03:56:26 2022 ] Batch(110/243) done. Loss: 0.0048 lr:0.000100 network_time: 0.0298 +[ Thu Sep 15 03:57:39 2022 ] Batch(210/243) done. 
Loss: 0.0032 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 03:58:02 2022 ] Eval epoch: 124 +[ Thu Sep 15 03:59:35 2022 ] Mean test loss of 796 batches: 2.736020803451538. +[ Thu Sep 15 03:59:36 2022 ] Top1: 53.41% +[ Thu Sep 15 03:59:36 2022 ] Top5: 82.84% +[ Thu Sep 15 03:59:36 2022 ] Training epoch: 125 +[ Thu Sep 15 04:00:29 2022 ] Batch(67/243) done. Loss: 0.0056 lr:0.000100 network_time: 0.0287 +[ Thu Sep 15 04:01:42 2022 ] Batch(167/243) done. Loss: 0.0105 lr:0.000100 network_time: 0.0271 +[ Thu Sep 15 04:02:37 2022 ] Eval epoch: 125 +[ Thu Sep 15 04:04:10 2022 ] Mean test loss of 796 batches: 2.765360116958618. +[ Thu Sep 15 04:04:10 2022 ] Top1: 51.01% +[ Thu Sep 15 04:04:10 2022 ] Top5: 81.69% +[ Thu Sep 15 04:04:11 2022 ] Training epoch: 126 +[ Thu Sep 15 04:04:32 2022 ] Batch(24/243) done. Loss: 0.0049 lr:0.000100 network_time: 0.0307 +[ Thu Sep 15 04:05:45 2022 ] Batch(124/243) done. Loss: 0.0100 lr:0.000100 network_time: 0.0299 +[ Thu Sep 15 04:06:58 2022 ] Batch(224/243) done. Loss: 0.0066 lr:0.000100 network_time: 0.0310 +[ Thu Sep 15 04:07:11 2022 ] Eval epoch: 126 +[ Thu Sep 15 04:08:44 2022 ] Mean test loss of 796 batches: 2.742004156112671. +[ Thu Sep 15 04:08:44 2022 ] Top1: 53.03% +[ Thu Sep 15 04:08:45 2022 ] Top5: 82.69% +[ Thu Sep 15 04:08:45 2022 ] Training epoch: 127 +[ Thu Sep 15 04:09:48 2022 ] Batch(81/243) done. Loss: 0.0123 lr:0.000100 network_time: 0.0330 +[ Thu Sep 15 04:11:01 2022 ] Batch(181/243) done. Loss: 0.0086 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 04:11:45 2022 ] Eval epoch: 127 +[ Thu Sep 15 04:13:19 2022 ] Mean test loss of 796 batches: 2.7168538570404053. +[ Thu Sep 15 04:13:19 2022 ] Top1: 52.94% +[ Thu Sep 15 04:13:19 2022 ] Top5: 82.64% +[ Thu Sep 15 04:13:20 2022 ] Training epoch: 128 +[ Thu Sep 15 04:13:51 2022 ] Batch(38/243) done. Loss: 0.0030 lr:0.000100 network_time: 0.0319 +[ Thu Sep 15 04:15:04 2022 ] Batch(138/243) done. 
Loss: 0.0057 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 04:16:17 2022 ] Batch(238/243) done. Loss: 0.0104 lr:0.000100 network_time: 0.0268 +[ Thu Sep 15 04:16:20 2022 ] Eval epoch: 128 +[ Thu Sep 15 04:17:53 2022 ] Mean test loss of 796 batches: 2.6489810943603516. +[ Thu Sep 15 04:17:53 2022 ] Top1: 53.19% +[ Thu Sep 15 04:17:54 2022 ] Top5: 82.92% +[ Thu Sep 15 04:17:54 2022 ] Training epoch: 129 +[ Thu Sep 15 04:19:07 2022 ] Batch(95/243) done. Loss: 0.0094 lr:0.000100 network_time: 0.0305 +[ Thu Sep 15 04:20:19 2022 ] Batch(195/243) done. Loss: 0.0079 lr:0.000100 network_time: 0.0319 +[ Thu Sep 15 04:20:54 2022 ] Eval epoch: 129 +[ Thu Sep 15 04:22:28 2022 ] Mean test loss of 796 batches: 2.7015435695648193. +[ Thu Sep 15 04:22:28 2022 ] Top1: 51.93% +[ Thu Sep 15 04:22:29 2022 ] Top5: 82.12% +[ Thu Sep 15 04:22:29 2022 ] Training epoch: 130 +[ Thu Sep 15 04:23:11 2022 ] Batch(52/243) done. Loss: 0.0055 lr:0.000100 network_time: 0.0307 +[ Thu Sep 15 04:24:23 2022 ] Batch(152/243) done. Loss: 0.0051 lr:0.000100 network_time: 0.0332 +[ Thu Sep 15 04:25:29 2022 ] Eval epoch: 130 +[ Thu Sep 15 04:27:03 2022 ] Mean test loss of 796 batches: 2.6610214710235596. +[ Thu Sep 15 04:27:04 2022 ] Top1: 53.26% +[ Thu Sep 15 04:27:04 2022 ] Top5: 82.82% +[ Thu Sep 15 04:27:04 2022 ] Training epoch: 131 +[ Thu Sep 15 04:27:15 2022 ] Batch(9/243) done. Loss: 0.0044 lr:0.000100 network_time: 0.0285 +[ Thu Sep 15 04:28:28 2022 ] Batch(109/243) done. Loss: 0.0049 lr:0.000100 network_time: 0.0311 +[ Thu Sep 15 04:29:41 2022 ] Batch(209/243) done. Loss: 0.0111 lr:0.000100 network_time: 0.0398 +[ Thu Sep 15 04:30:05 2022 ] Eval epoch: 131 +[ Thu Sep 15 04:31:39 2022 ] Mean test loss of 796 batches: 2.6898159980773926. +[ Thu Sep 15 04:31:39 2022 ] Top1: 53.02% +[ Thu Sep 15 04:31:40 2022 ] Top5: 82.60% +[ Thu Sep 15 04:31:40 2022 ] Training epoch: 132 +[ Thu Sep 15 04:32:32 2022 ] Batch(66/243) done. 
Loss: 0.0019 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 04:33:45 2022 ] Batch(166/243) done. Loss: 0.0094 lr:0.000100 network_time: 0.0226 +[ Thu Sep 15 04:34:40 2022 ] Eval epoch: 132 +[ Thu Sep 15 04:36:14 2022 ] Mean test loss of 796 batches: 2.6874382495880127. +[ Thu Sep 15 04:36:14 2022 ] Top1: 52.61% +[ Thu Sep 15 04:36:15 2022 ] Top5: 82.28% +[ Thu Sep 15 04:36:15 2022 ] Training epoch: 133 +[ Thu Sep 15 04:36:35 2022 ] Batch(23/243) done. Loss: 0.0040 lr:0.000100 network_time: 0.0257 +[ Thu Sep 15 04:37:48 2022 ] Batch(123/243) done. Loss: 0.0057 lr:0.000100 network_time: 0.0328 +[ Thu Sep 15 04:39:01 2022 ] Batch(223/243) done. Loss: 0.0105 lr:0.000100 network_time: 0.0271 +[ Thu Sep 15 04:39:15 2022 ] Eval epoch: 133 +[ Thu Sep 15 04:40:48 2022 ] Mean test loss of 796 batches: 2.885190725326538. +[ Thu Sep 15 04:40:48 2022 ] Top1: 49.37% +[ Thu Sep 15 04:40:49 2022 ] Top5: 80.21% +[ Thu Sep 15 04:40:49 2022 ] Training epoch: 134 +[ Thu Sep 15 04:41:51 2022 ] Batch(80/243) done. Loss: 0.0058 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 04:43:04 2022 ] Batch(180/243) done. Loss: 0.0040 lr:0.000100 network_time: 0.0283 +[ Thu Sep 15 04:43:49 2022 ] Eval epoch: 134 +[ Thu Sep 15 04:45:22 2022 ] Mean test loss of 796 batches: 2.6978790760040283. +[ Thu Sep 15 04:45:23 2022 ] Top1: 52.91% +[ Thu Sep 15 04:45:23 2022 ] Top5: 82.54% +[ Thu Sep 15 04:45:23 2022 ] Training epoch: 135 +[ Thu Sep 15 04:45:54 2022 ] Batch(37/243) done. Loss: 0.0038 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 04:47:07 2022 ] Batch(137/243) done. Loss: 0.0059 lr:0.000100 network_time: 0.0311 +[ Thu Sep 15 04:48:20 2022 ] Batch(237/243) done. Loss: 0.0091 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 04:48:24 2022 ] Eval epoch: 135 +[ Thu Sep 15 04:49:58 2022 ] Mean test loss of 796 batches: 2.644139289855957. 
+[ Thu Sep 15 04:49:58 2022 ] Top1: 53.61% +[ Thu Sep 15 04:49:58 2022 ] Top5: 83.10% +[ Thu Sep 15 04:49:58 2022 ] Training epoch: 136 +[ Thu Sep 15 04:51:11 2022 ] Batch(94/243) done. Loss: 0.0077 lr:0.000100 network_time: 0.0305 +[ Thu Sep 15 04:52:24 2022 ] Batch(194/243) done. Loss: 0.0081 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 04:52:59 2022 ] Eval epoch: 136 +[ Thu Sep 15 04:54:32 2022 ] Mean test loss of 796 batches: 2.7351062297821045. +[ Thu Sep 15 04:54:33 2022 ] Top1: 53.33% +[ Thu Sep 15 04:54:34 2022 ] Top5: 82.60% +[ Thu Sep 15 04:54:34 2022 ] Training epoch: 137 +[ Thu Sep 15 04:55:15 2022 ] Batch(51/243) done. Loss: 0.0118 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 04:56:27 2022 ] Batch(151/243) done. Loss: 0.0092 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 04:57:34 2022 ] Eval epoch: 137 +[ Thu Sep 15 04:59:07 2022 ] Mean test loss of 796 batches: 2.6785876750946045. +[ Thu Sep 15 04:59:07 2022 ] Top1: 53.81% +[ Thu Sep 15 04:59:08 2022 ] Top5: 83.08% +[ Thu Sep 15 04:59:08 2022 ] Training epoch: 138 +[ Thu Sep 15 04:59:18 2022 ] Batch(8/243) done. Loss: 0.0041 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 05:00:30 2022 ] Batch(108/243) done. Loss: 0.0059 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 05:01:43 2022 ] Batch(208/243) done. Loss: 0.0105 lr:0.000100 network_time: 0.0305 +[ Thu Sep 15 05:02:08 2022 ] Eval epoch: 138 +[ Thu Sep 15 05:03:42 2022 ] Mean test loss of 796 batches: 2.7690837383270264. +[ Thu Sep 15 05:03:42 2022 ] Top1: 52.37% +[ Thu Sep 15 05:03:43 2022 ] Top5: 82.15% +[ Thu Sep 15 05:03:43 2022 ] Training epoch: 139 +[ Thu Sep 15 05:04:34 2022 ] Batch(65/243) done. Loss: 0.0055 lr:0.000100 network_time: 0.0451 +[ Thu Sep 15 05:05:47 2022 ] Batch(165/243) done. Loss: 0.0025 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 05:06:43 2022 ] Eval epoch: 139 +[ Thu Sep 15 05:08:16 2022 ] Mean test loss of 796 batches: 2.723361015319824. 
+[ Thu Sep 15 05:08:16 2022 ] Top1: 52.81% +[ Thu Sep 15 05:08:17 2022 ] Top5: 82.44% +[ Thu Sep 15 05:08:17 2022 ] Training epoch: 140 +[ Thu Sep 15 05:08:36 2022 ] Batch(22/243) done. Loss: 0.0100 lr:0.000100 network_time: 0.0315 +[ Thu Sep 15 05:09:49 2022 ] Batch(122/243) done. Loss: 0.0170 lr:0.000100 network_time: 0.0297 +[ Thu Sep 15 05:11:02 2022 ] Batch(222/243) done. Loss: 0.0133 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 05:11:17 2022 ] Eval epoch: 140 +[ Thu Sep 15 05:12:49 2022 ] Mean test loss of 796 batches: 2.719053030014038. +[ Thu Sep 15 05:12:50 2022 ] Top1: 53.15% +[ Thu Sep 15 05:12:50 2022 ] Top5: 82.74% diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_motion_xsub/shift_gcn.py b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_motion_xsub/shift_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..0731e82fdb8bd0ae2e4c4ef4f29f7aab0c458bf4 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_motion_xsub/shift_gcn.py @@ -0,0 +1,216 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math + +import sys +sys.path.append("./model/Temporal_shift/") + +from cuda.shift import Shift + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + 
bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class Shift_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(Shift_tcn, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.bn = nn.BatchNorm2d(in_channels) + self.bn2 = nn.BatchNorm2d(in_channels) + bn_init(self.bn2, 1) + self.relu = nn.ReLU(inplace=True) + self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1) + self.shift_out = Shift(channel=out_channels, stride=stride, init_scale=1) + + self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1) + nn.init.kaiming_normal(self.temporal_linear.weight, mode='fan_out') + + def forward(self, x): + x = self.bn(x) + # shift1 + x = self.shift_in(x) + x = self.temporal_linear(x) + x = self.relu(x) + # shift2 + x = self.shift_out(x) + x = self.bn2(x) + return x + + +class Shift_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3): + super(Shift_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0,math.sqrt(1.0/out_channels)) + + self.Linear_bias = nn.Parameter(torch.zeros(1,1,out_channels,requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Linear_bias, 0) + + self.Feature_Mask = nn.Parameter(torch.ones(1,25,in_channels, requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Feature_Mask, 0) + + self.bn = nn.BatchNorm1d(25*out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, 
nn.BatchNorm2d): + bn_init(m, 1) + + index_array = np.empty(25*in_channels).astype(np.int) + for i in range(25): + for j in range(in_channels): + index_array[i*in_channels + j] = (i*in_channels + j + j*in_channels)%(in_channels*25) + self.shift_in = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + index_array = np.empty(25*out_channels).astype(np.int) + for i in range(25): + for j in range(out_channels): + index_array[i*out_channels + j] = (i*out_channels + j - j*out_channels)%(out_channels*25) + self.shift_out = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + + def forward(self, x0): + n, c, t, v = x0.size() + x = x0.permute(0,2,3,1).contiguous() + + # shift1 + x = x.view(n*t,v*c) + x = torch.index_select(x, 1, self.shift_in) + x = x.view(n*t,v,c) + x = x * (torch.tanh(self.Feature_Mask)+1) + + x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous() # nt,v,c + x = x + self.Linear_bias + + # shift2 + x = x.view(n*t,-1) + x = torch.index_select(x, 1, self.shift_out) + x = self.bn(x) + x = x.view(n,t,v,self.out_channels).permute(0,3,1,2) # n,c,t,v + + x = x + self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = Shift_gcn(in_channels, out_channels, A) + self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride) + self.relu = nn.ReLU() + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + else: + self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride) + + def forward(self, x): + x = self.tcn1(self.gcn1(x)) + self.residual(x) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise 
ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A) + self.l3 = TCN_GCN_unit(64, 64, A) + self.l4 = TCN_GCN_unit(64, 64, A) + self.l5 = TCN_GCN_unit(64, 128, A, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A) + self.l7 = TCN_GCN_unit(128, 128, A) + self.l8 = TCN_GCN_unit(128, 256, A, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A) + self.l10 = TCN_GCN_unit(256, 256, A) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x) + x = self.l2(x) + x = self.l3(x) + x = self.l4(x) + x = self.l5(x) + x = self.l6(x) + x = self.l7(x) + x = self.l8(x) + x = self.l9(x) + x = self.l10(x) + + # N*M,C,T,V + c_new = x.size(1) + x = x.view(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_xsub/config.yaml b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2222a051888e5b9fd1cc87bf8af74c9e10cae093 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_xsub/config.yaml @@ -0,0 +1,56 @@ +Experiment_name: ntu120_bone_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xsub/train_bone.yaml +device: +- 0 +- 1 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: 
./save_models/ntu120_bone_xsub +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_bone.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_bone.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_bone_xsub diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_xsub/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..79e2dc63aa4d3c955567191786f16e503693dcd8 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1052d9d03789bd0448c62585aae1cc2edbab1c92ceaf3e29ecfb558c94dd972 +size 29946137 diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_xsub/log.txt b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..f5e79d375266789836b1e22fff4c5ada506ddb32 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_xsub/log.txt @@ -0,0 +1,1043 @@ +[ Wed Sep 14 18:31:34 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_bone_xsub', 'model_saved_name': './save_models/ntu120_bone_xsub', 'Experiment_name': 'ntu120_bone_xsub', 'config': 
'./config/ntu120_xsub/train_bone.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [0, 1], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Wed Sep 14 18:31:34 2022 ] Training epoch: 1 +[ Wed Sep 14 18:32:53 2022 ] Batch(99/243) done. Loss: 4.0333 lr:0.100000 network_time: 0.0258 +[ Wed Sep 14 18:34:05 2022 ] Batch(199/243) done. Loss: 2.7616 lr:0.100000 network_time: 0.0307 +[ Wed Sep 14 18:34:37 2022 ] Eval epoch: 1 +[ Wed Sep 14 18:36:11 2022 ] Mean test loss of 796 batches: 5.216921806335449. +[ Wed Sep 14 18:36:12 2022 ] Top1: 8.18% +[ Wed Sep 14 18:36:12 2022 ] Top5: 22.12% +[ Wed Sep 14 18:36:12 2022 ] Training epoch: 2 +[ Wed Sep 14 18:36:57 2022 ] Batch(56/243) done. Loss: 2.9609 lr:0.100000 network_time: 0.0478 +[ Wed Sep 14 18:38:09 2022 ] Batch(156/243) done. 
Loss: 2.6439 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 18:39:12 2022 ] Eval epoch: 2 +[ Wed Sep 14 18:40:46 2022 ] Mean test loss of 796 batches: 4.073986053466797. +[ Wed Sep 14 18:40:46 2022 ] Top1: 16.06% +[ Wed Sep 14 18:40:46 2022 ] Top5: 34.36% +[ Wed Sep 14 18:40:47 2022 ] Training epoch: 3 +[ Wed Sep 14 18:41:00 2022 ] Batch(13/243) done. Loss: 1.7683 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 18:42:13 2022 ] Batch(113/243) done. Loss: 1.9985 lr:0.100000 network_time: 0.0261 +[ Wed Sep 14 18:43:25 2022 ] Batch(213/243) done. Loss: 2.0935 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 18:43:46 2022 ] Eval epoch: 3 +[ Wed Sep 14 18:45:21 2022 ] Mean test loss of 796 batches: 3.8530473709106445. +[ Wed Sep 14 18:45:21 2022 ] Top1: 17.67% +[ Wed Sep 14 18:45:21 2022 ] Top5: 40.53% +[ Wed Sep 14 18:45:22 2022 ] Training epoch: 4 +[ Wed Sep 14 18:46:16 2022 ] Batch(70/243) done. Loss: 1.3666 lr:0.100000 network_time: 0.0311 +[ Wed Sep 14 18:47:28 2022 ] Batch(170/243) done. Loss: 1.5859 lr:0.100000 network_time: 0.0311 +[ Wed Sep 14 18:48:21 2022 ] Eval epoch: 4 +[ Wed Sep 14 18:49:55 2022 ] Mean test loss of 796 batches: 3.4396579265594482. +[ Wed Sep 14 18:49:56 2022 ] Top1: 22.98% +[ Wed Sep 14 18:49:56 2022 ] Top5: 51.04% +[ Wed Sep 14 18:49:56 2022 ] Training epoch: 5 +[ Wed Sep 14 18:50:20 2022 ] Batch(27/243) done. Loss: 1.4973 lr:0.100000 network_time: 0.0293 +[ Wed Sep 14 18:51:32 2022 ] Batch(127/243) done. Loss: 1.3234 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 18:52:45 2022 ] Batch(227/243) done. Loss: 1.5324 lr:0.100000 network_time: 0.0314 +[ Wed Sep 14 18:52:56 2022 ] Eval epoch: 5 +[ Wed Sep 14 18:54:30 2022 ] Mean test loss of 796 batches: 3.1425249576568604. +[ Wed Sep 14 18:54:30 2022 ] Top1: 26.86% +[ Wed Sep 14 18:54:31 2022 ] Top5: 56.16% +[ Wed Sep 14 18:54:31 2022 ] Training epoch: 6 +[ Wed Sep 14 18:55:35 2022 ] Batch(84/243) done. 
Loss: 1.4138 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 18:56:48 2022 ] Batch(184/243) done. Loss: 1.0573 lr:0.100000 network_time: 0.0303 +[ Wed Sep 14 18:57:30 2022 ] Eval epoch: 6 +[ Wed Sep 14 18:59:05 2022 ] Mean test loss of 796 batches: 3.0790634155273438. +[ Wed Sep 14 18:59:05 2022 ] Top1: 29.51% +[ Wed Sep 14 18:59:06 2022 ] Top5: 62.24% +[ Wed Sep 14 18:59:06 2022 ] Training epoch: 7 +[ Wed Sep 14 18:59:39 2022 ] Batch(41/243) done. Loss: 1.3634 lr:0.100000 network_time: 0.0323 +[ Wed Sep 14 19:00:52 2022 ] Batch(141/243) done. Loss: 0.9326 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 19:02:05 2022 ] Batch(241/243) done. Loss: 1.1295 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 19:02:05 2022 ] Eval epoch: 7 +[ Wed Sep 14 19:03:39 2022 ] Mean test loss of 796 batches: 2.927468776702881. +[ Wed Sep 14 19:03:40 2022 ] Top1: 30.35% +[ Wed Sep 14 19:03:40 2022 ] Top5: 64.28% +[ Wed Sep 14 19:03:40 2022 ] Training epoch: 8 +[ Wed Sep 14 19:04:55 2022 ] Batch(98/243) done. Loss: 0.7392 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 19:06:07 2022 ] Batch(198/243) done. Loss: 0.8407 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 19:06:40 2022 ] Eval epoch: 8 +[ Wed Sep 14 19:08:14 2022 ] Mean test loss of 796 batches: 2.743701457977295. +[ Wed Sep 14 19:08:14 2022 ] Top1: 33.74% +[ Wed Sep 14 19:08:15 2022 ] Top5: 66.94% +[ Wed Sep 14 19:08:15 2022 ] Training epoch: 9 +[ Wed Sep 14 19:08:58 2022 ] Batch(55/243) done. Loss: 1.3084 lr:0.100000 network_time: 0.0283 +[ Wed Sep 14 19:10:11 2022 ] Batch(155/243) done. Loss: 1.2042 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 19:11:14 2022 ] Eval epoch: 9 +[ Wed Sep 14 19:12:48 2022 ] Mean test loss of 796 batches: 3.0599684715270996. +[ Wed Sep 14 19:12:48 2022 ] Top1: 29.85% +[ Wed Sep 14 19:12:49 2022 ] Top5: 60.50% +[ Wed Sep 14 19:12:49 2022 ] Training epoch: 10 +[ Wed Sep 14 19:13:01 2022 ] Batch(12/243) done. 
Loss: 1.1141 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 19:14:14 2022 ] Batch(112/243) done. Loss: 1.2123 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 19:15:26 2022 ] Batch(212/243) done. Loss: 1.2073 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 19:15:48 2022 ] Eval epoch: 10 +[ Wed Sep 14 19:17:23 2022 ] Mean test loss of 796 batches: 2.714928388595581. +[ Wed Sep 14 19:17:23 2022 ] Top1: 36.63% +[ Wed Sep 14 19:17:24 2022 ] Top5: 69.90% +[ Wed Sep 14 19:17:24 2022 ] Training epoch: 11 +[ Wed Sep 14 19:18:17 2022 ] Batch(69/243) done. Loss: 1.0347 lr:0.100000 network_time: 0.0312 +[ Wed Sep 14 19:19:30 2022 ] Batch(169/243) done. Loss: 0.8520 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 19:20:23 2022 ] Eval epoch: 11 +[ Wed Sep 14 19:21:58 2022 ] Mean test loss of 796 batches: 2.830739736557007. +[ Wed Sep 14 19:21:58 2022 ] Top1: 32.54% +[ Wed Sep 14 19:21:59 2022 ] Top5: 67.73% +[ Wed Sep 14 19:21:59 2022 ] Training epoch: 12 +[ Wed Sep 14 19:22:21 2022 ] Batch(26/243) done. Loss: 0.6660 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 19:23:34 2022 ] Batch(126/243) done. Loss: 0.9330 lr:0.100000 network_time: 0.0293 +[ Wed Sep 14 19:24:46 2022 ] Batch(226/243) done. Loss: 1.0268 lr:0.100000 network_time: 0.0297 +[ Wed Sep 14 19:24:58 2022 ] Eval epoch: 12 +[ Wed Sep 14 19:26:32 2022 ] Mean test loss of 796 batches: 3.2985918521881104. +[ Wed Sep 14 19:26:33 2022 ] Top1: 29.22% +[ Wed Sep 14 19:26:33 2022 ] Top5: 63.29% +[ Wed Sep 14 19:26:33 2022 ] Training epoch: 13 +[ Wed Sep 14 19:27:37 2022 ] Batch(83/243) done. Loss: 0.7545 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 19:28:50 2022 ] Batch(183/243) done. Loss: 0.6929 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 19:29:33 2022 ] Eval epoch: 13 +[ Wed Sep 14 19:31:08 2022 ] Mean test loss of 796 batches: 2.562110424041748. 
+[ Wed Sep 14 19:31:08 2022 ] Top1: 38.93% +[ Wed Sep 14 19:31:09 2022 ] Top5: 72.60% +[ Wed Sep 14 19:31:09 2022 ] Training epoch: 14 +[ Wed Sep 14 19:31:42 2022 ] Batch(40/243) done. Loss: 0.5393 lr:0.100000 network_time: 0.0281 +[ Wed Sep 14 19:32:54 2022 ] Batch(140/243) done. Loss: 0.9207 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 19:34:07 2022 ] Batch(240/243) done. Loss: 0.8997 lr:0.100000 network_time: 0.0302 +[ Wed Sep 14 19:34:09 2022 ] Eval epoch: 14 +[ Wed Sep 14 19:35:43 2022 ] Mean test loss of 796 batches: 2.663928508758545. +[ Wed Sep 14 19:35:43 2022 ] Top1: 37.57% +[ Wed Sep 14 19:35:44 2022 ] Top5: 71.04% +[ Wed Sep 14 19:35:44 2022 ] Training epoch: 15 +[ Wed Sep 14 19:36:58 2022 ] Batch(97/243) done. Loss: 0.6897 lr:0.100000 network_time: 0.0320 +[ Wed Sep 14 19:38:10 2022 ] Batch(197/243) done. Loss: 0.6975 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 19:38:43 2022 ] Eval epoch: 15 +[ Wed Sep 14 19:40:17 2022 ] Mean test loss of 796 batches: 2.4152395725250244. +[ Wed Sep 14 19:40:18 2022 ] Top1: 40.93% +[ Wed Sep 14 19:40:18 2022 ] Top5: 75.42% +[ Wed Sep 14 19:40:18 2022 ] Training epoch: 16 +[ Wed Sep 14 19:41:01 2022 ] Batch(54/243) done. Loss: 0.3969 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 19:42:14 2022 ] Batch(154/243) done. Loss: 0.7158 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 19:43:18 2022 ] Eval epoch: 16 +[ Wed Sep 14 19:44:52 2022 ] Mean test loss of 796 batches: 2.5785508155822754. +[ Wed Sep 14 19:44:53 2022 ] Top1: 38.90% +[ Wed Sep 14 19:44:53 2022 ] Top5: 72.23% +[ Wed Sep 14 19:44:53 2022 ] Training epoch: 17 +[ Wed Sep 14 19:45:04 2022 ] Batch(11/243) done. Loss: 0.7059 lr:0.100000 network_time: 0.0301 +[ Wed Sep 14 19:46:17 2022 ] Batch(111/243) done. Loss: 0.7283 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 19:47:30 2022 ] Batch(211/243) done. 
Loss: 0.9291 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 19:47:52 2022 ] Eval epoch: 17 +[ Wed Sep 14 19:49:26 2022 ] Mean test loss of 796 batches: 2.557190179824829. +[ Wed Sep 14 19:49:27 2022 ] Top1: 40.76% +[ Wed Sep 14 19:49:27 2022 ] Top5: 74.72% +[ Wed Sep 14 19:49:27 2022 ] Training epoch: 18 +[ Wed Sep 14 19:50:20 2022 ] Batch(68/243) done. Loss: 0.5587 lr:0.100000 network_time: 0.0295 +[ Wed Sep 14 19:51:32 2022 ] Batch(168/243) done. Loss: 0.3922 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 19:52:26 2022 ] Eval epoch: 18 +[ Wed Sep 14 19:54:00 2022 ] Mean test loss of 796 batches: 3.573277235031128. +[ Wed Sep 14 19:54:01 2022 ] Top1: 33.62% +[ Wed Sep 14 19:54:01 2022 ] Top5: 67.25% +[ Wed Sep 14 19:54:01 2022 ] Training epoch: 19 +[ Wed Sep 14 19:54:23 2022 ] Batch(25/243) done. Loss: 0.6683 lr:0.100000 network_time: 0.0347 +[ Wed Sep 14 19:55:35 2022 ] Batch(125/243) done. Loss: 0.7018 lr:0.100000 network_time: 0.0299 +[ Wed Sep 14 19:56:48 2022 ] Batch(225/243) done. Loss: 0.6643 lr:0.100000 network_time: 0.0296 +[ Wed Sep 14 19:57:01 2022 ] Eval epoch: 19 +[ Wed Sep 14 19:58:35 2022 ] Mean test loss of 796 batches: 2.8546314239501953. +[ Wed Sep 14 19:58:35 2022 ] Top1: 39.64% +[ Wed Sep 14 19:58:35 2022 ] Top5: 72.18% +[ Wed Sep 14 19:58:36 2022 ] Training epoch: 20 +[ Wed Sep 14 19:59:39 2022 ] Batch(82/243) done. Loss: 0.4966 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 20:00:51 2022 ] Batch(182/243) done. Loss: 0.4008 lr:0.100000 network_time: 0.0315 +[ Wed Sep 14 20:01:35 2022 ] Eval epoch: 20 +[ Wed Sep 14 20:03:09 2022 ] Mean test loss of 796 batches: 2.6202304363250732. +[ Wed Sep 14 20:03:10 2022 ] Top1: 40.66% +[ Wed Sep 14 20:03:10 2022 ] Top5: 74.78% +[ Wed Sep 14 20:03:10 2022 ] Training epoch: 21 +[ Wed Sep 14 20:03:42 2022 ] Batch(39/243) done. Loss: 0.3239 lr:0.100000 network_time: 0.0299 +[ Wed Sep 14 20:04:55 2022 ] Batch(139/243) done. 
Loss: 0.4198 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 20:06:07 2022 ] Batch(239/243) done. Loss: 0.7220 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 20:06:09 2022 ] Eval epoch: 21 +[ Wed Sep 14 20:07:44 2022 ] Mean test loss of 796 batches: 2.5382893085479736. +[ Wed Sep 14 20:07:44 2022 ] Top1: 41.41% +[ Wed Sep 14 20:07:45 2022 ] Top5: 75.24% +[ Wed Sep 14 20:07:45 2022 ] Training epoch: 22 +[ Wed Sep 14 20:08:58 2022 ] Batch(96/243) done. Loss: 0.4248 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 20:10:11 2022 ] Batch(196/243) done. Loss: 0.4894 lr:0.100000 network_time: 0.0313 +[ Wed Sep 14 20:10:44 2022 ] Eval epoch: 22 +[ Wed Sep 14 20:12:19 2022 ] Mean test loss of 796 batches: 2.62750244140625. +[ Wed Sep 14 20:12:19 2022 ] Top1: 42.49% +[ Wed Sep 14 20:12:20 2022 ] Top5: 75.99% +[ Wed Sep 14 20:12:20 2022 ] Training epoch: 23 +[ Wed Sep 14 20:13:02 2022 ] Batch(53/243) done. Loss: 0.3055 lr:0.100000 network_time: 0.0299 +[ Wed Sep 14 20:14:15 2022 ] Batch(153/243) done. Loss: 0.6483 lr:0.100000 network_time: 0.0312 +[ Wed Sep 14 20:15:19 2022 ] Eval epoch: 23 +[ Wed Sep 14 20:16:55 2022 ] Mean test loss of 796 batches: 2.8056583404541016. +[ Wed Sep 14 20:16:55 2022 ] Top1: 39.39% +[ Wed Sep 14 20:16:56 2022 ] Top5: 72.92% +[ Wed Sep 14 20:16:56 2022 ] Training epoch: 24 +[ Wed Sep 14 20:17:06 2022 ] Batch(10/243) done. Loss: 0.4323 lr:0.100000 network_time: 0.0284 +[ Wed Sep 14 20:18:19 2022 ] Batch(110/243) done. Loss: 0.8153 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 20:19:32 2022 ] Batch(210/243) done. Loss: 0.4780 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 20:19:55 2022 ] Eval epoch: 24 +[ Wed Sep 14 20:21:30 2022 ] Mean test loss of 796 batches: 2.699596405029297. +[ Wed Sep 14 20:21:30 2022 ] Top1: 42.59% +[ Wed Sep 14 20:21:31 2022 ] Top5: 74.85% +[ Wed Sep 14 20:21:31 2022 ] Training epoch: 25 +[ Wed Sep 14 20:22:23 2022 ] Batch(67/243) done. 
Loss: 0.5537 lr:0.100000 network_time: 0.0284 +[ Wed Sep 14 20:23:36 2022 ] Batch(167/243) done. Loss: 0.4563 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 20:24:30 2022 ] Eval epoch: 25 +[ Wed Sep 14 20:26:04 2022 ] Mean test loss of 796 batches: 2.6026360988616943. +[ Wed Sep 14 20:26:05 2022 ] Top1: 40.69% +[ Wed Sep 14 20:26:06 2022 ] Top5: 74.59% +[ Wed Sep 14 20:26:06 2022 ] Training epoch: 26 +[ Wed Sep 14 20:26:27 2022 ] Batch(24/243) done. Loss: 0.4626 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 20:27:39 2022 ] Batch(124/243) done. Loss: 0.4110 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 20:28:52 2022 ] Batch(224/243) done. Loss: 0.6452 lr:0.100000 network_time: 0.0310 +[ Wed Sep 14 20:29:05 2022 ] Eval epoch: 26 +[ Wed Sep 14 20:30:39 2022 ] Mean test loss of 796 batches: 2.294908046722412. +[ Wed Sep 14 20:30:40 2022 ] Top1: 45.39% +[ Wed Sep 14 20:30:40 2022 ] Top5: 78.63% +[ Wed Sep 14 20:30:40 2022 ] Training epoch: 27 +[ Wed Sep 14 20:31:42 2022 ] Batch(81/243) done. Loss: 0.5657 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 20:32:55 2022 ] Batch(181/243) done. Loss: 0.4664 lr:0.100000 network_time: 0.0307 +[ Wed Sep 14 20:33:39 2022 ] Eval epoch: 27 +[ Wed Sep 14 20:35:14 2022 ] Mean test loss of 796 batches: 2.6094589233398438. +[ Wed Sep 14 20:35:14 2022 ] Top1: 39.99% +[ Wed Sep 14 20:35:14 2022 ] Top5: 73.88% +[ Wed Sep 14 20:35:15 2022 ] Training epoch: 28 +[ Wed Sep 14 20:35:45 2022 ] Batch(38/243) done. Loss: 0.2889 lr:0.100000 network_time: 0.0319 +[ Wed Sep 14 20:36:58 2022 ] Batch(138/243) done. Loss: 0.5320 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 20:38:11 2022 ] Batch(238/243) done. Loss: 0.4978 lr:0.100000 network_time: 0.0313 +[ Wed Sep 14 20:38:14 2022 ] Eval epoch: 28 +[ Wed Sep 14 20:39:48 2022 ] Mean test loss of 796 batches: 2.759406089782715. 
+[ Wed Sep 14 20:39:48 2022 ] Top1: 40.93% +[ Wed Sep 14 20:39:49 2022 ] Top5: 73.80% +[ Wed Sep 14 20:39:49 2022 ] Training epoch: 29 +[ Wed Sep 14 20:41:01 2022 ] Batch(95/243) done. Loss: 0.5033 lr:0.100000 network_time: 0.0310 +[ Wed Sep 14 20:42:14 2022 ] Batch(195/243) done. Loss: 0.6511 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 20:42:48 2022 ] Eval epoch: 29 +[ Wed Sep 14 20:44:22 2022 ] Mean test loss of 796 batches: 2.587949275970459. +[ Wed Sep 14 20:44:23 2022 ] Top1: 42.27% +[ Wed Sep 14 20:44:24 2022 ] Top5: 75.12% +[ Wed Sep 14 20:44:24 2022 ] Training epoch: 30 +[ Wed Sep 14 20:45:05 2022 ] Batch(52/243) done. Loss: 0.2909 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 20:46:17 2022 ] Batch(152/243) done. Loss: 0.3604 lr:0.100000 network_time: 0.0255 +[ Wed Sep 14 20:47:23 2022 ] Eval epoch: 30 +[ Wed Sep 14 20:48:57 2022 ] Mean test loss of 796 batches: 2.8285579681396484. +[ Wed Sep 14 20:48:57 2022 ] Top1: 42.68% +[ Wed Sep 14 20:48:58 2022 ] Top5: 76.09% +[ Wed Sep 14 20:48:58 2022 ] Training epoch: 31 +[ Wed Sep 14 20:49:08 2022 ] Batch(9/243) done. Loss: 0.3543 lr:0.100000 network_time: 0.0257 +[ Wed Sep 14 20:50:20 2022 ] Batch(109/243) done. Loss: 0.2743 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 20:51:33 2022 ] Batch(209/243) done. Loss: 0.5345 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 20:51:57 2022 ] Eval epoch: 31 +[ Wed Sep 14 20:53:31 2022 ] Mean test loss of 796 batches: 2.879929542541504. +[ Wed Sep 14 20:53:32 2022 ] Top1: 41.34% +[ Wed Sep 14 20:53:32 2022 ] Top5: 74.41% +[ Wed Sep 14 20:53:32 2022 ] Training epoch: 32 +[ Wed Sep 14 20:54:24 2022 ] Batch(66/243) done. Loss: 0.4370 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 20:55:37 2022 ] Batch(166/243) done. Loss: 0.5667 lr:0.100000 network_time: 0.0312 +[ Wed Sep 14 20:56:32 2022 ] Eval epoch: 32 +[ Wed Sep 14 20:58:06 2022 ] Mean test loss of 796 batches: 2.7939627170562744. 
+[ Wed Sep 14 20:58:06 2022 ] Top1: 41.09% +[ Wed Sep 14 20:58:07 2022 ] Top5: 74.35% +[ Wed Sep 14 20:58:07 2022 ] Training epoch: 33 +[ Wed Sep 14 20:58:27 2022 ] Batch(23/243) done. Loss: 0.4639 lr:0.100000 network_time: 0.0295 +[ Wed Sep 14 20:59:40 2022 ] Batch(123/243) done. Loss: 0.6343 lr:0.100000 network_time: 0.0281 +[ Wed Sep 14 21:00:52 2022 ] Batch(223/243) done. Loss: 0.6872 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 21:01:06 2022 ] Eval epoch: 33 +[ Wed Sep 14 21:02:41 2022 ] Mean test loss of 796 batches: 2.557569980621338. +[ Wed Sep 14 21:02:41 2022 ] Top1: 43.26% +[ Wed Sep 14 21:02:42 2022 ] Top5: 75.92% +[ Wed Sep 14 21:02:42 2022 ] Training epoch: 34 +[ Wed Sep 14 21:03:43 2022 ] Batch(80/243) done. Loss: 0.2682 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 21:04:56 2022 ] Batch(180/243) done. Loss: 0.3253 lr:0.100000 network_time: 0.0424 +[ Wed Sep 14 21:05:41 2022 ] Eval epoch: 34 +[ Wed Sep 14 21:07:15 2022 ] Mean test loss of 796 batches: 2.8215174674987793. +[ Wed Sep 14 21:07:16 2022 ] Top1: 44.03% +[ Wed Sep 14 21:07:16 2022 ] Top5: 75.30% +[ Wed Sep 14 21:07:17 2022 ] Training epoch: 35 +[ Wed Sep 14 21:07:47 2022 ] Batch(37/243) done. Loss: 0.3251 lr:0.100000 network_time: 0.0257 +[ Wed Sep 14 21:08:59 2022 ] Batch(137/243) done. Loss: 0.3628 lr:0.100000 network_time: 0.0301 +[ Wed Sep 14 21:10:12 2022 ] Batch(237/243) done. Loss: 0.4573 lr:0.100000 network_time: 0.0282 +[ Wed Sep 14 21:10:16 2022 ] Eval epoch: 35 +[ Wed Sep 14 21:11:50 2022 ] Mean test loss of 796 batches: 2.5117599964141846. +[ Wed Sep 14 21:11:51 2022 ] Top1: 46.17% +[ Wed Sep 14 21:11:51 2022 ] Top5: 78.81% +[ Wed Sep 14 21:11:51 2022 ] Training epoch: 36 +[ Wed Sep 14 21:13:03 2022 ] Batch(94/243) done. Loss: 0.4862 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 21:14:16 2022 ] Batch(194/243) done. 
Loss: 0.3950 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 21:14:51 2022 ] Eval epoch: 36 +[ Wed Sep 14 21:16:25 2022 ] Mean test loss of 796 batches: 2.567964553833008. +[ Wed Sep 14 21:16:25 2022 ] Top1: 45.67% +[ Wed Sep 14 21:16:26 2022 ] Top5: 77.48% +[ Wed Sep 14 21:16:26 2022 ] Training epoch: 37 +[ Wed Sep 14 21:17:06 2022 ] Batch(51/243) done. Loss: 0.2134 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 21:18:19 2022 ] Batch(151/243) done. Loss: 0.3702 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 21:19:25 2022 ] Eval epoch: 37 +[ Wed Sep 14 21:21:00 2022 ] Mean test loss of 796 batches: 2.9781603813171387. +[ Wed Sep 14 21:21:00 2022 ] Top1: 41.35% +[ Wed Sep 14 21:21:01 2022 ] Top5: 74.87% +[ Wed Sep 14 21:21:01 2022 ] Training epoch: 38 +[ Wed Sep 14 21:21:10 2022 ] Batch(8/243) done. Loss: 0.4388 lr:0.100000 network_time: 0.0315 +[ Wed Sep 14 21:22:22 2022 ] Batch(108/243) done. Loss: 0.3741 lr:0.100000 network_time: 0.0425 +[ Wed Sep 14 21:23:35 2022 ] Batch(208/243) done. Loss: 0.3047 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 21:24:00 2022 ] Eval epoch: 38 +[ Wed Sep 14 21:25:34 2022 ] Mean test loss of 796 batches: 2.579361915588379. +[ Wed Sep 14 21:25:35 2022 ] Top1: 46.73% +[ Wed Sep 14 21:25:35 2022 ] Top5: 77.56% +[ Wed Sep 14 21:25:35 2022 ] Training epoch: 39 +[ Wed Sep 14 21:26:25 2022 ] Batch(65/243) done. Loss: 0.5252 lr:0.100000 network_time: 0.0289 +[ Wed Sep 14 21:27:38 2022 ] Batch(165/243) done. Loss: 0.3791 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 21:28:34 2022 ] Eval epoch: 39 +[ Wed Sep 14 21:30:08 2022 ] Mean test loss of 796 batches: 3.3219752311706543. +[ Wed Sep 14 21:30:09 2022 ] Top1: 37.83% +[ Wed Sep 14 21:30:10 2022 ] Top5: 71.83% +[ Wed Sep 14 21:30:10 2022 ] Training epoch: 40 +[ Wed Sep 14 21:30:29 2022 ] Batch(22/243) done. Loss: 0.2710 lr:0.100000 network_time: 0.0310 +[ Wed Sep 14 21:31:42 2022 ] Batch(122/243) done. 
Loss: 0.2538 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 21:32:54 2022 ] Batch(222/243) done. Loss: 0.2920 lr:0.100000 network_time: 0.0306 +[ Wed Sep 14 21:33:09 2022 ] Eval epoch: 40 +[ Wed Sep 14 21:34:43 2022 ] Mean test loss of 796 batches: 3.5191311836242676. +[ Wed Sep 14 21:34:44 2022 ] Top1: 38.85% +[ Wed Sep 14 21:34:44 2022 ] Top5: 71.95% +[ Wed Sep 14 21:34:45 2022 ] Training epoch: 41 +[ Wed Sep 14 21:35:45 2022 ] Batch(79/243) done. Loss: 0.3196 lr:0.100000 network_time: 0.0346 +[ Wed Sep 14 21:36:58 2022 ] Batch(179/243) done. Loss: 0.3180 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 21:37:44 2022 ] Eval epoch: 41 +[ Wed Sep 14 21:39:19 2022 ] Mean test loss of 796 batches: 2.699187994003296. +[ Wed Sep 14 21:39:19 2022 ] Top1: 44.14% +[ Wed Sep 14 21:39:20 2022 ] Top5: 76.58% +[ Wed Sep 14 21:39:20 2022 ] Training epoch: 42 +[ Wed Sep 14 21:39:49 2022 ] Batch(36/243) done. Loss: 0.3361 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 21:41:02 2022 ] Batch(136/243) done. Loss: 0.3384 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 21:42:15 2022 ] Batch(236/243) done. Loss: 0.3603 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 21:42:19 2022 ] Eval epoch: 42 +[ Wed Sep 14 21:43:54 2022 ] Mean test loss of 796 batches: 2.974973678588867. +[ Wed Sep 14 21:43:54 2022 ] Top1: 41.72% +[ Wed Sep 14 21:43:55 2022 ] Top5: 74.08% +[ Wed Sep 14 21:43:55 2022 ] Training epoch: 43 +[ Wed Sep 14 21:45:06 2022 ] Batch(93/243) done. Loss: 0.1706 lr:0.100000 network_time: 0.0312 +[ Wed Sep 14 21:46:19 2022 ] Batch(193/243) done. Loss: 0.4081 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 21:46:54 2022 ] Eval epoch: 43 +[ Wed Sep 14 21:48:28 2022 ] Mean test loss of 796 batches: 3.3875744342803955. +[ Wed Sep 14 21:48:29 2022 ] Top1: 39.21% +[ Wed Sep 14 21:48:29 2022 ] Top5: 71.23% +[ Wed Sep 14 21:48:29 2022 ] Training epoch: 44 +[ Wed Sep 14 21:49:09 2022 ] Batch(50/243) done. 
Loss: 0.2592 lr:0.100000 network_time: 0.0343 +[ Wed Sep 14 21:50:22 2022 ] Batch(150/243) done. Loss: 0.3864 lr:0.100000 network_time: 0.0309 +[ Wed Sep 14 21:51:29 2022 ] Eval epoch: 44 +[ Wed Sep 14 21:53:03 2022 ] Mean test loss of 796 batches: 2.42459774017334. +[ Wed Sep 14 21:53:04 2022 ] Top1: 45.90% +[ Wed Sep 14 21:53:04 2022 ] Top5: 78.51% +[ Wed Sep 14 21:53:04 2022 ] Training epoch: 45 +[ Wed Sep 14 21:53:12 2022 ] Batch(7/243) done. Loss: 0.2629 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 21:54:25 2022 ] Batch(107/243) done. Loss: 0.3106 lr:0.100000 network_time: 0.0310 +[ Wed Sep 14 21:55:37 2022 ] Batch(207/243) done. Loss: 0.3254 lr:0.100000 network_time: 0.0313 +[ Wed Sep 14 21:56:03 2022 ] Eval epoch: 45 +[ Wed Sep 14 21:57:38 2022 ] Mean test loss of 796 batches: 2.780808925628662. +[ Wed Sep 14 21:57:38 2022 ] Top1: 41.82% +[ Wed Sep 14 21:57:39 2022 ] Top5: 75.46% +[ Wed Sep 14 21:57:39 2022 ] Training epoch: 46 +[ Wed Sep 14 21:58:29 2022 ] Batch(64/243) done. Loss: 0.1354 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 21:59:42 2022 ] Batch(164/243) done. Loss: 0.3815 lr:0.100000 network_time: 0.0284 +[ Wed Sep 14 22:00:38 2022 ] Eval epoch: 46 +[ Wed Sep 14 22:02:13 2022 ] Mean test loss of 796 batches: 2.7003347873687744. +[ Wed Sep 14 22:02:13 2022 ] Top1: 45.25% +[ Wed Sep 14 22:02:13 2022 ] Top5: 76.78% +[ Wed Sep 14 22:02:14 2022 ] Training epoch: 47 +[ Wed Sep 14 22:02:32 2022 ] Batch(21/243) done. Loss: 0.1768 lr:0.100000 network_time: 0.0306 +[ Wed Sep 14 22:03:45 2022 ] Batch(121/243) done. Loss: 0.2490 lr:0.100000 network_time: 0.0309 +[ Wed Sep 14 22:04:57 2022 ] Batch(221/243) done. Loss: 0.2552 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 22:05:13 2022 ] Eval epoch: 47 +[ Wed Sep 14 22:06:47 2022 ] Mean test loss of 796 batches: 2.9509265422821045. 
+[ Wed Sep 14 22:06:48 2022 ] Top1: 44.42% +[ Wed Sep 14 22:06:48 2022 ] Top5: 76.73% +[ Wed Sep 14 22:06:48 2022 ] Training epoch: 48 +[ Wed Sep 14 22:07:49 2022 ] Batch(78/243) done. Loss: 0.3004 lr:0.100000 network_time: 0.0286 +[ Wed Sep 14 22:09:01 2022 ] Batch(178/243) done. Loss: 0.5320 lr:0.100000 network_time: 0.0322 +[ Wed Sep 14 22:09:48 2022 ] Eval epoch: 48 +[ Wed Sep 14 22:11:22 2022 ] Mean test loss of 796 batches: 2.4312541484832764. +[ Wed Sep 14 22:11:23 2022 ] Top1: 47.98% +[ Wed Sep 14 22:11:23 2022 ] Top5: 79.85% +[ Wed Sep 14 22:11:23 2022 ] Training epoch: 49 +[ Wed Sep 14 22:11:52 2022 ] Batch(35/243) done. Loss: 0.2771 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 22:13:05 2022 ] Batch(135/243) done. Loss: 0.2653 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 22:14:18 2022 ] Batch(235/243) done. Loss: 0.4622 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 22:14:23 2022 ] Eval epoch: 49 +[ Wed Sep 14 22:15:57 2022 ] Mean test loss of 796 batches: 2.64970064163208. +[ Wed Sep 14 22:15:58 2022 ] Top1: 44.03% +[ Wed Sep 14 22:15:58 2022 ] Top5: 77.05% +[ Wed Sep 14 22:15:58 2022 ] Training epoch: 50 +[ Wed Sep 14 22:17:08 2022 ] Batch(92/243) done. Loss: 0.3453 lr:0.100000 network_time: 0.0259 +[ Wed Sep 14 22:18:21 2022 ] Batch(192/243) done. Loss: 0.3537 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 22:18:58 2022 ] Eval epoch: 50 +[ Wed Sep 14 22:20:32 2022 ] Mean test loss of 796 batches: 2.8408055305480957. +[ Wed Sep 14 22:20:32 2022 ] Top1: 46.56% +[ Wed Sep 14 22:20:33 2022 ] Top5: 77.21% +[ Wed Sep 14 22:20:33 2022 ] Training epoch: 51 +[ Wed Sep 14 22:21:12 2022 ] Batch(49/243) done. Loss: 0.2958 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 22:22:25 2022 ] Batch(149/243) done. Loss: 0.3390 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 22:23:32 2022 ] Eval epoch: 51 +[ Wed Sep 14 22:25:07 2022 ] Mean test loss of 796 batches: 2.830892324447632. 
+[ Wed Sep 14 22:25:07 2022 ] Top1: 44.75% +[ Wed Sep 14 22:25:08 2022 ] Top5: 76.30% +[ Wed Sep 14 22:25:08 2022 ] Training epoch: 52 +[ Wed Sep 14 22:25:15 2022 ] Batch(6/243) done. Loss: 0.3292 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 22:26:28 2022 ] Batch(106/243) done. Loss: 0.2284 lr:0.100000 network_time: 0.0329 +[ Wed Sep 14 22:27:41 2022 ] Batch(206/243) done. Loss: 0.3900 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 22:28:07 2022 ] Eval epoch: 52 +[ Wed Sep 14 22:29:42 2022 ] Mean test loss of 796 batches: 2.779292583465576. +[ Wed Sep 14 22:29:42 2022 ] Top1: 43.91% +[ Wed Sep 14 22:29:42 2022 ] Top5: 76.34% +[ Wed Sep 14 22:29:43 2022 ] Training epoch: 53 +[ Wed Sep 14 22:30:32 2022 ] Batch(63/243) done. Loss: 0.1874 lr:0.100000 network_time: 0.0328 +[ Wed Sep 14 22:31:45 2022 ] Batch(163/243) done. Loss: 0.3138 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 22:32:42 2022 ] Eval epoch: 53 +[ Wed Sep 14 22:34:16 2022 ] Mean test loss of 796 batches: 2.5557665824890137. +[ Wed Sep 14 22:34:17 2022 ] Top1: 47.09% +[ Wed Sep 14 22:34:17 2022 ] Top5: 79.82% +[ Wed Sep 14 22:34:18 2022 ] Training epoch: 54 +[ Wed Sep 14 22:34:35 2022 ] Batch(20/243) done. Loss: 0.3232 lr:0.100000 network_time: 0.0292 +[ Wed Sep 14 22:35:48 2022 ] Batch(120/243) done. Loss: 0.2636 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 22:37:01 2022 ] Batch(220/243) done. Loss: 0.4322 lr:0.100000 network_time: 0.0311 +[ Wed Sep 14 22:37:17 2022 ] Eval epoch: 54 +[ Wed Sep 14 22:38:51 2022 ] Mean test loss of 796 batches: 2.8570163249969482. +[ Wed Sep 14 22:38:52 2022 ] Top1: 44.39% +[ Wed Sep 14 22:38:52 2022 ] Top5: 76.48% +[ Wed Sep 14 22:38:52 2022 ] Training epoch: 55 +[ Wed Sep 14 22:39:52 2022 ] Batch(77/243) done. Loss: 0.3885 lr:0.100000 network_time: 0.0258 +[ Wed Sep 14 22:41:04 2022 ] Batch(177/243) done. 
Loss: 0.3764 lr:0.100000 network_time: 0.0309 +[ Wed Sep 14 22:41:52 2022 ] Eval epoch: 55 +[ Wed Sep 14 22:43:26 2022 ] Mean test loss of 796 batches: 2.8412747383117676. +[ Wed Sep 14 22:43:26 2022 ] Top1: 45.56% +[ Wed Sep 14 22:43:27 2022 ] Top5: 77.42% +[ Wed Sep 14 22:43:27 2022 ] Training epoch: 56 +[ Wed Sep 14 22:43:55 2022 ] Batch(34/243) done. Loss: 0.2089 lr:0.100000 network_time: 0.0321 +[ Wed Sep 14 22:45:07 2022 ] Batch(134/243) done. Loss: 0.4471 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 22:46:20 2022 ] Batch(234/243) done. Loss: 0.3233 lr:0.100000 network_time: 0.0344 +[ Wed Sep 14 22:46:26 2022 ] Eval epoch: 56 +[ Wed Sep 14 22:48:00 2022 ] Mean test loss of 796 batches: 3.0499653816223145. +[ Wed Sep 14 22:48:01 2022 ] Top1: 42.66% +[ Wed Sep 14 22:48:01 2022 ] Top5: 74.60% +[ Wed Sep 14 22:48:01 2022 ] Training epoch: 57 +[ Wed Sep 14 22:49:11 2022 ] Batch(91/243) done. Loss: 0.4092 lr:0.100000 network_time: 0.0315 +[ Wed Sep 14 22:50:24 2022 ] Batch(191/243) done. Loss: 0.2308 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 22:51:01 2022 ] Eval epoch: 57 +[ Wed Sep 14 22:52:35 2022 ] Mean test loss of 796 batches: 3.305689811706543. +[ Wed Sep 14 22:52:36 2022 ] Top1: 41.40% +[ Wed Sep 14 22:52:36 2022 ] Top5: 73.17% +[ Wed Sep 14 22:52:36 2022 ] Training epoch: 58 +[ Wed Sep 14 22:53:15 2022 ] Batch(48/243) done. Loss: 0.2153 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 22:54:27 2022 ] Batch(148/243) done. Loss: 0.1952 lr:0.100000 network_time: 0.0259 +[ Wed Sep 14 22:55:36 2022 ] Eval epoch: 58 +[ Wed Sep 14 22:57:10 2022 ] Mean test loss of 796 batches: 2.48647403717041. +[ Wed Sep 14 22:57:10 2022 ] Top1: 47.80% +[ Wed Sep 14 22:57:11 2022 ] Top5: 78.56% +[ Wed Sep 14 22:57:11 2022 ] Training epoch: 59 +[ Wed Sep 14 22:57:18 2022 ] Batch(5/243) done. Loss: 0.2518 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 22:58:30 2022 ] Batch(105/243) done. 
Loss: 0.3340 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 22:59:43 2022 ] Batch(205/243) done. Loss: 0.2099 lr:0.100000 network_time: 0.0283 +[ Wed Sep 14 23:00:10 2022 ] Eval epoch: 59 +[ Wed Sep 14 23:01:45 2022 ] Mean test loss of 796 batches: 3.046830177307129. +[ Wed Sep 14 23:01:46 2022 ] Top1: 43.71% +[ Wed Sep 14 23:01:46 2022 ] Top5: 76.29% +[ Wed Sep 14 23:01:46 2022 ] Training epoch: 60 +[ Wed Sep 14 23:02:34 2022 ] Batch(62/243) done. Loss: 0.2846 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 23:03:47 2022 ] Batch(162/243) done. Loss: 0.2350 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 23:04:45 2022 ] Eval epoch: 60 +[ Wed Sep 14 23:06:19 2022 ] Mean test loss of 796 batches: 2.672731399536133. +[ Wed Sep 14 23:06:20 2022 ] Top1: 45.65% +[ Wed Sep 14 23:06:20 2022 ] Top5: 77.72% +[ Wed Sep 14 23:06:21 2022 ] Training epoch: 61 +[ Wed Sep 14 23:06:38 2022 ] Batch(19/243) done. Loss: 0.2963 lr:0.010000 network_time: 0.0297 +[ Wed Sep 14 23:07:50 2022 ] Batch(119/243) done. Loss: 0.1329 lr:0.010000 network_time: 0.0278 +[ Wed Sep 14 23:09:03 2022 ] Batch(219/243) done. Loss: 0.0661 lr:0.010000 network_time: 0.0258 +[ Wed Sep 14 23:09:20 2022 ] Eval epoch: 61 +[ Wed Sep 14 23:10:54 2022 ] Mean test loss of 796 batches: 2.2649903297424316. +[ Wed Sep 14 23:10:55 2022 ] Top1: 52.45% +[ Wed Sep 14 23:10:55 2022 ] Top5: 82.87% +[ Wed Sep 14 23:10:55 2022 ] Training epoch: 62 +[ Wed Sep 14 23:11:54 2022 ] Batch(76/243) done. Loss: 0.0351 lr:0.010000 network_time: 0.0317 +[ Wed Sep 14 23:13:07 2022 ] Batch(176/243) done. Loss: 0.1236 lr:0.010000 network_time: 0.0274 +[ Wed Sep 14 23:13:55 2022 ] Eval epoch: 62 +[ Wed Sep 14 23:15:29 2022 ] Mean test loss of 796 batches: 2.304346799850464. +[ Wed Sep 14 23:15:30 2022 ] Top1: 52.60% +[ Wed Sep 14 23:15:30 2022 ] Top5: 82.88% +[ Wed Sep 14 23:15:30 2022 ] Training epoch: 63 +[ Wed Sep 14 23:15:58 2022 ] Batch(33/243) done. 
Loss: 0.0665 lr:0.010000 network_time: 0.0267 +[ Wed Sep 14 23:17:10 2022 ] Batch(133/243) done. Loss: 0.0376 lr:0.010000 network_time: 0.0266 +[ Wed Sep 14 23:18:23 2022 ] Batch(233/243) done. Loss: 0.0498 lr:0.010000 network_time: 0.0271 +[ Wed Sep 14 23:18:30 2022 ] Eval epoch: 63 +[ Wed Sep 14 23:20:04 2022 ] Mean test loss of 796 batches: 2.3135626316070557. +[ Wed Sep 14 23:20:05 2022 ] Top1: 53.06% +[ Wed Sep 14 23:20:05 2022 ] Top5: 83.14% +[ Wed Sep 14 23:20:05 2022 ] Training epoch: 64 +[ Wed Sep 14 23:21:14 2022 ] Batch(90/243) done. Loss: 0.0455 lr:0.010000 network_time: 0.0272 +[ Wed Sep 14 23:22:27 2022 ] Batch(190/243) done. Loss: 0.0182 lr:0.010000 network_time: 0.0251 +[ Wed Sep 14 23:23:05 2022 ] Eval epoch: 64 +[ Wed Sep 14 23:24:39 2022 ] Mean test loss of 796 batches: 2.324078321456909. +[ Wed Sep 14 23:24:40 2022 ] Top1: 53.30% +[ Wed Sep 14 23:24:40 2022 ] Top5: 83.26% +[ Wed Sep 14 23:24:41 2022 ] Training epoch: 65 +[ Wed Sep 14 23:25:18 2022 ] Batch(47/243) done. Loss: 0.0222 lr:0.010000 network_time: 0.0284 +[ Wed Sep 14 23:26:31 2022 ] Batch(147/243) done. Loss: 0.1275 lr:0.010000 network_time: 0.0282 +[ Wed Sep 14 23:27:40 2022 ] Eval epoch: 65 +[ Wed Sep 14 23:29:14 2022 ] Mean test loss of 796 batches: 2.2940661907196045. +[ Wed Sep 14 23:29:15 2022 ] Top1: 53.94% +[ Wed Sep 14 23:29:15 2022 ] Top5: 83.59% +[ Wed Sep 14 23:29:15 2022 ] Training epoch: 66 +[ Wed Sep 14 23:29:21 2022 ] Batch(4/243) done. Loss: 0.0503 lr:0.010000 network_time: 0.0273 +[ Wed Sep 14 23:30:34 2022 ] Batch(104/243) done. Loss: 0.0199 lr:0.010000 network_time: 0.0265 +[ Wed Sep 14 23:31:47 2022 ] Batch(204/243) done. Loss: 0.0553 lr:0.010000 network_time: 0.0272 +[ Wed Sep 14 23:32:15 2022 ] Eval epoch: 66 +[ Wed Sep 14 23:33:49 2022 ] Mean test loss of 796 batches: 2.326700448989868. 
+[ Wed Sep 14 23:33:50 2022 ] Top1: 53.68% +[ Wed Sep 14 23:33:50 2022 ] Top5: 83.44% +[ Wed Sep 14 23:33:50 2022 ] Training epoch: 67 +[ Wed Sep 14 23:34:38 2022 ] Batch(61/243) done. Loss: 0.0315 lr:0.010000 network_time: 0.0284 +[ Wed Sep 14 23:35:51 2022 ] Batch(161/243) done. Loss: 0.0202 lr:0.010000 network_time: 0.0287 +[ Wed Sep 14 23:36:50 2022 ] Eval epoch: 67 +[ Wed Sep 14 23:38:24 2022 ] Mean test loss of 796 batches: 2.369642972946167. +[ Wed Sep 14 23:38:24 2022 ] Top1: 53.27% +[ Wed Sep 14 23:38:25 2022 ] Top5: 83.38% +[ Wed Sep 14 23:38:25 2022 ] Training epoch: 68 +[ Wed Sep 14 23:38:41 2022 ] Batch(18/243) done. Loss: 0.0330 lr:0.010000 network_time: 0.0361 +[ Wed Sep 14 23:39:54 2022 ] Batch(118/243) done. Loss: 0.0168 lr:0.010000 network_time: 0.0333 +[ Wed Sep 14 23:41:07 2022 ] Batch(218/243) done. Loss: 0.0458 lr:0.010000 network_time: 0.0287 +[ Wed Sep 14 23:41:24 2022 ] Eval epoch: 68 +[ Wed Sep 14 23:42:58 2022 ] Mean test loss of 796 batches: 2.3233730792999268. +[ Wed Sep 14 23:42:58 2022 ] Top1: 53.88% +[ Wed Sep 14 23:42:59 2022 ] Top5: 83.64% +[ Wed Sep 14 23:42:59 2022 ] Training epoch: 69 +[ Wed Sep 14 23:43:57 2022 ] Batch(75/243) done. Loss: 0.0249 lr:0.010000 network_time: 0.0281 +[ Wed Sep 14 23:45:10 2022 ] Batch(175/243) done. Loss: 0.0364 lr:0.010000 network_time: 0.0272 +[ Wed Sep 14 23:45:59 2022 ] Eval epoch: 69 +[ Wed Sep 14 23:47:33 2022 ] Mean test loss of 796 batches: 2.3240044116973877. +[ Wed Sep 14 23:47:34 2022 ] Top1: 53.81% +[ Wed Sep 14 23:47:34 2022 ] Top5: 83.58% +[ Wed Sep 14 23:47:34 2022 ] Training epoch: 70 +[ Wed Sep 14 23:48:00 2022 ] Batch(32/243) done. Loss: 0.0142 lr:0.010000 network_time: 0.0316 +[ Wed Sep 14 23:49:13 2022 ] Batch(132/243) done. Loss: 0.0259 lr:0.010000 network_time: 0.0279 +[ Wed Sep 14 23:50:26 2022 ] Batch(232/243) done. 
Loss: 0.0082 lr:0.010000 network_time: 0.0283 +[ Wed Sep 14 23:50:33 2022 ] Eval epoch: 70 +[ Wed Sep 14 23:52:08 2022 ] Mean test loss of 796 batches: 2.3367578983306885. +[ Wed Sep 14 23:52:08 2022 ] Top1: 54.09% +[ Wed Sep 14 23:52:09 2022 ] Top5: 83.63% +[ Wed Sep 14 23:52:09 2022 ] Training epoch: 71 +[ Wed Sep 14 23:53:17 2022 ] Batch(89/243) done. Loss: 0.0175 lr:0.010000 network_time: 0.0269 +[ Wed Sep 14 23:54:30 2022 ] Batch(189/243) done. Loss: 0.0308 lr:0.010000 network_time: 0.0320 +[ Wed Sep 14 23:55:08 2022 ] Eval epoch: 71 +[ Wed Sep 14 23:56:43 2022 ] Mean test loss of 796 batches: 2.4088292121887207. +[ Wed Sep 14 23:56:43 2022 ] Top1: 52.97% +[ Wed Sep 14 23:56:43 2022 ] Top5: 82.94% +[ Wed Sep 14 23:56:43 2022 ] Training epoch: 72 +[ Wed Sep 14 23:57:20 2022 ] Batch(46/243) done. Loss: 0.0128 lr:0.010000 network_time: 0.0322 +[ Wed Sep 14 23:58:33 2022 ] Batch(146/243) done. Loss: 0.0299 lr:0.010000 network_time: 0.0311 +[ Wed Sep 14 23:59:43 2022 ] Eval epoch: 72 +[ Thu Sep 15 00:01:17 2022 ] Mean test loss of 796 batches: 2.378607749938965. +[ Thu Sep 15 00:01:18 2022 ] Top1: 53.90% +[ Thu Sep 15 00:01:18 2022 ] Top5: 83.48% +[ Thu Sep 15 00:01:18 2022 ] Training epoch: 73 +[ Thu Sep 15 00:01:24 2022 ] Batch(3/243) done. Loss: 0.0092 lr:0.010000 network_time: 0.0333 +[ Thu Sep 15 00:02:36 2022 ] Batch(103/243) done. Loss: 0.0151 lr:0.010000 network_time: 0.0275 +[ Thu Sep 15 00:03:49 2022 ] Batch(203/243) done. Loss: 0.0208 lr:0.010000 network_time: 0.0284 +[ Thu Sep 15 00:04:18 2022 ] Eval epoch: 73 +[ Thu Sep 15 00:05:52 2022 ] Mean test loss of 796 batches: 2.3335354328155518. +[ Thu Sep 15 00:05:53 2022 ] Top1: 54.40% +[ Thu Sep 15 00:05:53 2022 ] Top5: 83.74% +[ Thu Sep 15 00:05:53 2022 ] Training epoch: 74 +[ Thu Sep 15 00:06:40 2022 ] Batch(60/243) done. Loss: 0.0226 lr:0.010000 network_time: 0.0270 +[ Thu Sep 15 00:07:53 2022 ] Batch(160/243) done. 
Loss: 0.0229 lr:0.010000 network_time: 0.0280 +[ Thu Sep 15 00:08:53 2022 ] Eval epoch: 74 +[ Thu Sep 15 00:10:27 2022 ] Mean test loss of 796 batches: 2.417653799057007. +[ Thu Sep 15 00:10:27 2022 ] Top1: 53.48% +[ Thu Sep 15 00:10:28 2022 ] Top5: 83.25% +[ Thu Sep 15 00:10:29 2022 ] Training epoch: 75 +[ Thu Sep 15 00:10:44 2022 ] Batch(17/243) done. Loss: 0.0171 lr:0.010000 network_time: 0.0279 +[ Thu Sep 15 00:11:57 2022 ] Batch(117/243) done. Loss: 0.0161 lr:0.010000 network_time: 0.0274 +[ Thu Sep 15 00:13:10 2022 ] Batch(217/243) done. Loss: 0.0076 lr:0.010000 network_time: 0.0333 +[ Thu Sep 15 00:13:28 2022 ] Eval epoch: 75 +[ Thu Sep 15 00:15:02 2022 ] Mean test loss of 796 batches: 2.3814995288848877. +[ Thu Sep 15 00:15:02 2022 ] Top1: 53.89% +[ Thu Sep 15 00:15:03 2022 ] Top5: 83.51% +[ Thu Sep 15 00:15:03 2022 ] Training epoch: 76 +[ Thu Sep 15 00:16:00 2022 ] Batch(74/243) done. Loss: 0.0306 lr:0.010000 network_time: 0.0282 +[ Thu Sep 15 00:17:13 2022 ] Batch(174/243) done. Loss: 0.0138 lr:0.010000 network_time: 0.0271 +[ Thu Sep 15 00:18:02 2022 ] Eval epoch: 76 +[ Thu Sep 15 00:19:36 2022 ] Mean test loss of 796 batches: 2.4096171855926514. +[ Thu Sep 15 00:19:37 2022 ] Top1: 53.70% +[ Thu Sep 15 00:19:37 2022 ] Top5: 83.48% +[ Thu Sep 15 00:19:37 2022 ] Training epoch: 77 +[ Thu Sep 15 00:20:03 2022 ] Batch(31/243) done. Loss: 0.0179 lr:0.010000 network_time: 0.0348 +[ Thu Sep 15 00:21:16 2022 ] Batch(131/243) done. Loss: 0.0148 lr:0.010000 network_time: 0.0276 +[ Thu Sep 15 00:22:29 2022 ] Batch(231/243) done. Loss: 0.0238 lr:0.010000 network_time: 0.0473 +[ Thu Sep 15 00:22:37 2022 ] Eval epoch: 77 +[ Thu Sep 15 00:24:11 2022 ] Mean test loss of 796 batches: 2.3831140995025635. +[ Thu Sep 15 00:24:11 2022 ] Top1: 54.25% +[ Thu Sep 15 00:24:12 2022 ] Top5: 83.61% +[ Thu Sep 15 00:24:12 2022 ] Training epoch: 78 +[ Thu Sep 15 00:25:19 2022 ] Batch(88/243) done. 
Loss: 0.0447 lr:0.010000 network_time: 0.0364 +[ Thu Sep 15 00:26:32 2022 ] Batch(188/243) done. Loss: 0.0100 lr:0.010000 network_time: 0.0273 +[ Thu Sep 15 00:27:12 2022 ] Eval epoch: 78 +[ Thu Sep 15 00:28:46 2022 ] Mean test loss of 796 batches: 2.429424285888672. +[ Thu Sep 15 00:28:46 2022 ] Top1: 53.82% +[ Thu Sep 15 00:28:47 2022 ] Top5: 83.37% +[ Thu Sep 15 00:28:47 2022 ] Training epoch: 79 +[ Thu Sep 15 00:29:23 2022 ] Batch(45/243) done. Loss: 0.0190 lr:0.010000 network_time: 0.0356 +[ Thu Sep 15 00:30:36 2022 ] Batch(145/243) done. Loss: 0.0106 lr:0.010000 network_time: 0.0277 +[ Thu Sep 15 00:31:47 2022 ] Eval epoch: 79 +[ Thu Sep 15 00:33:21 2022 ] Mean test loss of 796 batches: 2.470181465148926. +[ Thu Sep 15 00:33:21 2022 ] Top1: 53.71% +[ Thu Sep 15 00:33:22 2022 ] Top5: 83.12% +[ Thu Sep 15 00:33:22 2022 ] Training epoch: 80 +[ Thu Sep 15 00:33:27 2022 ] Batch(2/243) done. Loss: 0.0179 lr:0.010000 network_time: 0.0323 +[ Thu Sep 15 00:34:39 2022 ] Batch(102/243) done. Loss: 0.0104 lr:0.010000 network_time: 0.0280 +[ Thu Sep 15 00:35:52 2022 ] Batch(202/243) done. Loss: 0.0254 lr:0.010000 network_time: 0.0310 +[ Thu Sep 15 00:36:21 2022 ] Eval epoch: 80 +[ Thu Sep 15 00:37:56 2022 ] Mean test loss of 796 batches: 2.436082601547241. +[ Thu Sep 15 00:37:57 2022 ] Top1: 53.77% +[ Thu Sep 15 00:37:57 2022 ] Top5: 83.38% +[ Thu Sep 15 00:37:58 2022 ] Training epoch: 81 +[ Thu Sep 15 00:38:44 2022 ] Batch(59/243) done. Loss: 0.0275 lr:0.001000 network_time: 0.0314 +[ Thu Sep 15 00:39:56 2022 ] Batch(159/243) done. Loss: 0.0067 lr:0.001000 network_time: 0.0274 +[ Thu Sep 15 00:40:57 2022 ] Eval epoch: 81 +[ Thu Sep 15 00:42:31 2022 ] Mean test loss of 796 batches: 2.405709981918335. +[ Thu Sep 15 00:42:32 2022 ] Top1: 53.71% +[ Thu Sep 15 00:42:33 2022 ] Top5: 83.41% +[ Thu Sep 15 00:42:33 2022 ] Training epoch: 82 +[ Thu Sep 15 00:42:48 2022 ] Batch(16/243) done. 
Loss: 0.0293 lr:0.001000 network_time: 0.0645 +[ Thu Sep 15 00:44:00 2022 ] Batch(116/243) done. Loss: 0.0058 lr:0.001000 network_time: 0.0276 +[ Thu Sep 15 00:45:13 2022 ] Batch(216/243) done. Loss: 0.0168 lr:0.001000 network_time: 0.0275 +[ Thu Sep 15 00:45:32 2022 ] Eval epoch: 82 +[ Thu Sep 15 00:47:07 2022 ] Mean test loss of 796 batches: 2.4298956394195557. +[ Thu Sep 15 00:47:07 2022 ] Top1: 53.66% +[ Thu Sep 15 00:47:07 2022 ] Top5: 83.36% +[ Thu Sep 15 00:47:08 2022 ] Training epoch: 83 +[ Thu Sep 15 00:48:04 2022 ] Batch(73/243) done. Loss: 0.0350 lr:0.001000 network_time: 0.0306 +[ Thu Sep 15 00:49:16 2022 ] Batch(173/243) done. Loss: 0.0067 lr:0.001000 network_time: 0.0314 +[ Thu Sep 15 00:50:07 2022 ] Eval epoch: 83 +[ Thu Sep 15 00:51:41 2022 ] Mean test loss of 796 batches: 2.4424381256103516. +[ Thu Sep 15 00:51:41 2022 ] Top1: 53.44% +[ Thu Sep 15 00:51:41 2022 ] Top5: 83.01% +[ Thu Sep 15 00:51:42 2022 ] Training epoch: 84 +[ Thu Sep 15 00:52:07 2022 ] Batch(30/243) done. Loss: 0.0322 lr:0.001000 network_time: 0.0310 +[ Thu Sep 15 00:53:19 2022 ] Batch(130/243) done. Loss: 0.0077 lr:0.001000 network_time: 0.0262 +[ Thu Sep 15 00:54:32 2022 ] Batch(230/243) done. Loss: 0.0145 lr:0.001000 network_time: 0.0306 +[ Thu Sep 15 00:54:41 2022 ] Eval epoch: 84 +[ Thu Sep 15 00:56:16 2022 ] Mean test loss of 796 batches: 2.3991408348083496. +[ Thu Sep 15 00:56:16 2022 ] Top1: 54.34% +[ Thu Sep 15 00:56:16 2022 ] Top5: 83.64% +[ Thu Sep 15 00:56:17 2022 ] Training epoch: 85 +[ Thu Sep 15 00:57:23 2022 ] Batch(87/243) done. Loss: 0.0150 lr:0.001000 network_time: 0.0257 +[ Thu Sep 15 00:58:36 2022 ] Batch(187/243) done. Loss: 0.0192 lr:0.001000 network_time: 0.0315 +[ Thu Sep 15 00:59:16 2022 ] Eval epoch: 85 +[ Thu Sep 15 01:00:50 2022 ] Mean test loss of 796 batches: 2.388627290725708. 
+[ Thu Sep 15 01:00:51 2022 ] Top1: 54.17% +[ Thu Sep 15 01:00:51 2022 ] Top5: 83.64% +[ Thu Sep 15 01:00:51 2022 ] Training epoch: 86 +[ Thu Sep 15 01:01:26 2022 ] Batch(44/243) done. Loss: 0.0126 lr:0.001000 network_time: 0.0272 +[ Thu Sep 15 01:02:39 2022 ] Batch(144/243) done. Loss: 0.0083 lr:0.001000 network_time: 0.0273 +[ Thu Sep 15 01:03:51 2022 ] Eval epoch: 86 +[ Thu Sep 15 01:05:25 2022 ] Mean test loss of 796 batches: 2.4415183067321777. +[ Thu Sep 15 01:05:25 2022 ] Top1: 53.66% +[ Thu Sep 15 01:05:25 2022 ] Top5: 83.21% +[ Thu Sep 15 01:05:26 2022 ] Training epoch: 87 +[ Thu Sep 15 01:05:30 2022 ] Batch(1/243) done. Loss: 0.0151 lr:0.001000 network_time: 0.0328 +[ Thu Sep 15 01:06:42 2022 ] Batch(101/243) done. Loss: 0.0092 lr:0.001000 network_time: 0.0268 +[ Thu Sep 15 01:07:55 2022 ] Batch(201/243) done. Loss: 0.0195 lr:0.001000 network_time: 0.0277 +[ Thu Sep 15 01:08:25 2022 ] Eval epoch: 87 +[ Thu Sep 15 01:09:59 2022 ] Mean test loss of 796 batches: 2.3931989669799805. +[ Thu Sep 15 01:09:59 2022 ] Top1: 54.22% +[ Thu Sep 15 01:10:00 2022 ] Top5: 83.68% +[ Thu Sep 15 01:10:00 2022 ] Training epoch: 88 +[ Thu Sep 15 01:10:45 2022 ] Batch(58/243) done. Loss: 0.0041 lr:0.001000 network_time: 0.0272 +[ Thu Sep 15 01:11:58 2022 ] Batch(158/243) done. Loss: 0.0124 lr:0.001000 network_time: 0.0261 +[ Thu Sep 15 01:12:59 2022 ] Eval epoch: 88 +[ Thu Sep 15 01:14:33 2022 ] Mean test loss of 796 batches: 2.4158730506896973. +[ Thu Sep 15 01:14:34 2022 ] Top1: 53.63% +[ Thu Sep 15 01:14:34 2022 ] Top5: 83.43% +[ Thu Sep 15 01:14:34 2022 ] Training epoch: 89 +[ Thu Sep 15 01:14:49 2022 ] Batch(15/243) done. Loss: 0.0167 lr:0.001000 network_time: 0.0307 +[ Thu Sep 15 01:16:01 2022 ] Batch(115/243) done. Loss: 0.0156 lr:0.001000 network_time: 0.0321 +[ Thu Sep 15 01:17:14 2022 ] Batch(215/243) done. 
Loss: 0.0198 lr:0.001000 network_time: 0.0273 +[ Thu Sep 15 01:17:34 2022 ] Eval epoch: 89 +[ Thu Sep 15 01:19:08 2022 ] Mean test loss of 796 batches: 2.423422336578369. +[ Thu Sep 15 01:19:08 2022 ] Top1: 53.97% +[ Thu Sep 15 01:19:08 2022 ] Top5: 83.38% +[ Thu Sep 15 01:19:09 2022 ] Training epoch: 90 +[ Thu Sep 15 01:20:04 2022 ] Batch(72/243) done. Loss: 0.0045 lr:0.001000 network_time: 0.0310 +[ Thu Sep 15 01:21:17 2022 ] Batch(172/243) done. Loss: 0.0155 lr:0.001000 network_time: 0.0261 +[ Thu Sep 15 01:22:08 2022 ] Eval epoch: 90 +[ Thu Sep 15 01:23:42 2022 ] Mean test loss of 796 batches: 2.404895782470703. +[ Thu Sep 15 01:23:43 2022 ] Top1: 54.01% +[ Thu Sep 15 01:23:43 2022 ] Top5: 83.52% +[ Thu Sep 15 01:23:43 2022 ] Training epoch: 91 +[ Thu Sep 15 01:24:08 2022 ] Batch(29/243) done. Loss: 0.0065 lr:0.001000 network_time: 0.0261 +[ Thu Sep 15 01:25:21 2022 ] Batch(129/243) done. Loss: 0.0080 lr:0.001000 network_time: 0.0266 +[ Thu Sep 15 01:26:33 2022 ] Batch(229/243) done. Loss: 0.0078 lr:0.001000 network_time: 0.0277 +[ Thu Sep 15 01:26:43 2022 ] Eval epoch: 91 +[ Thu Sep 15 01:28:17 2022 ] Mean test loss of 796 batches: 2.4108152389526367. +[ Thu Sep 15 01:28:17 2022 ] Top1: 54.08% +[ Thu Sep 15 01:28:18 2022 ] Top5: 83.47% +[ Thu Sep 15 01:28:18 2022 ] Training epoch: 92 +[ Thu Sep 15 01:29:24 2022 ] Batch(86/243) done. Loss: 0.0084 lr:0.001000 network_time: 0.0262 +[ Thu Sep 15 01:30:36 2022 ] Batch(186/243) done. Loss: 0.0131 lr:0.001000 network_time: 0.0280 +[ Thu Sep 15 01:31:17 2022 ] Eval epoch: 92 +[ Thu Sep 15 01:32:52 2022 ] Mean test loss of 796 batches: 2.399904727935791. +[ Thu Sep 15 01:32:52 2022 ] Top1: 54.41% +[ Thu Sep 15 01:32:53 2022 ] Top5: 83.67% +[ Thu Sep 15 01:32:53 2022 ] Training epoch: 93 +[ Thu Sep 15 01:33:27 2022 ] Batch(43/243) done. Loss: 0.0037 lr:0.001000 network_time: 0.0279 +[ Thu Sep 15 01:34:40 2022 ] Batch(143/243) done. 
Loss: 0.0218 lr:0.001000 network_time: 0.0304 +[ Thu Sep 15 01:35:52 2022 ] Eval epoch: 93 +[ Thu Sep 15 01:37:26 2022 ] Mean test loss of 796 batches: 2.442288398742676. +[ Thu Sep 15 01:37:26 2022 ] Top1: 53.80% +[ Thu Sep 15 01:37:27 2022 ] Top5: 83.27% +[ Thu Sep 15 01:37:27 2022 ] Training epoch: 94 +[ Thu Sep 15 01:37:30 2022 ] Batch(0/243) done. Loss: 0.0418 lr:0.001000 network_time: 0.0714 +[ Thu Sep 15 01:38:43 2022 ] Batch(100/243) done. Loss: 0.0049 lr:0.001000 network_time: 0.0263 +[ Thu Sep 15 01:39:56 2022 ] Batch(200/243) done. Loss: 0.0088 lr:0.001000 network_time: 0.0297 +[ Thu Sep 15 01:40:26 2022 ] Eval epoch: 94 +[ Thu Sep 15 01:42:01 2022 ] Mean test loss of 796 batches: 2.4109578132629395. +[ Thu Sep 15 01:42:02 2022 ] Top1: 53.79% +[ Thu Sep 15 01:42:02 2022 ] Top5: 83.39% +[ Thu Sep 15 01:42:02 2022 ] Training epoch: 95 +[ Thu Sep 15 01:42:47 2022 ] Batch(57/243) done. Loss: 0.0056 lr:0.001000 network_time: 0.0256 +[ Thu Sep 15 01:44:00 2022 ] Batch(157/243) done. Loss: 0.0071 lr:0.001000 network_time: 0.0262 +[ Thu Sep 15 01:45:02 2022 ] Eval epoch: 95 +[ Thu Sep 15 01:46:37 2022 ] Mean test loss of 796 batches: 2.416574001312256. +[ Thu Sep 15 01:46:37 2022 ] Top1: 53.87% +[ Thu Sep 15 01:46:38 2022 ] Top5: 83.41% +[ Thu Sep 15 01:46:38 2022 ] Training epoch: 96 +[ Thu Sep 15 01:46:51 2022 ] Batch(14/243) done. Loss: 0.0216 lr:0.001000 network_time: 0.0610 +[ Thu Sep 15 01:48:04 2022 ] Batch(114/243) done. Loss: 0.0123 lr:0.001000 network_time: 0.0275 +[ Thu Sep 15 01:49:17 2022 ] Batch(214/243) done. Loss: 0.0047 lr:0.001000 network_time: 0.0274 +[ Thu Sep 15 01:49:37 2022 ] Eval epoch: 96 +[ Thu Sep 15 01:51:11 2022 ] Mean test loss of 796 batches: 2.443953514099121. +[ Thu Sep 15 01:51:12 2022 ] Top1: 53.56% +[ Thu Sep 15 01:51:12 2022 ] Top5: 83.21% +[ Thu Sep 15 01:51:12 2022 ] Training epoch: 97 +[ Thu Sep 15 01:52:07 2022 ] Batch(71/243) done. 
Loss: 0.0031 lr:0.001000 network_time: 0.0323 +[ Thu Sep 15 01:53:20 2022 ] Batch(171/243) done. Loss: 0.0060 lr:0.001000 network_time: 0.0280 +[ Thu Sep 15 01:54:12 2022 ] Eval epoch: 97 +[ Thu Sep 15 01:55:47 2022 ] Mean test loss of 796 batches: 2.4506213665008545. +[ Thu Sep 15 01:55:47 2022 ] Top1: 53.61% +[ Thu Sep 15 01:55:47 2022 ] Top5: 83.26% +[ Thu Sep 15 01:55:48 2022 ] Training epoch: 98 +[ Thu Sep 15 01:56:11 2022 ] Batch(28/243) done. Loss: 0.0082 lr:0.001000 network_time: 0.0274 +[ Thu Sep 15 01:57:24 2022 ] Batch(128/243) done. Loss: 0.0251 lr:0.001000 network_time: 0.0305 +[ Thu Sep 15 01:58:37 2022 ] Batch(228/243) done. Loss: 0.0272 lr:0.001000 network_time: 0.0311 +[ Thu Sep 15 01:58:47 2022 ] Eval epoch: 98 +[ Thu Sep 15 02:00:21 2022 ] Mean test loss of 796 batches: 2.4064249992370605. +[ Thu Sep 15 02:00:22 2022 ] Top1: 54.19% +[ Thu Sep 15 02:00:22 2022 ] Top5: 83.56% +[ Thu Sep 15 02:00:22 2022 ] Training epoch: 99 +[ Thu Sep 15 02:01:27 2022 ] Batch(85/243) done. Loss: 0.0195 lr:0.001000 network_time: 0.0282 +[ Thu Sep 15 02:02:40 2022 ] Batch(185/243) done. Loss: 0.0101 lr:0.001000 network_time: 0.0315 +[ Thu Sep 15 02:03:21 2022 ] Eval epoch: 99 +[ Thu Sep 15 02:04:55 2022 ] Mean test loss of 796 batches: 2.4065332412719727. +[ Thu Sep 15 02:04:56 2022 ] Top1: 54.13% +[ Thu Sep 15 02:04:56 2022 ] Top5: 83.58% +[ Thu Sep 15 02:04:56 2022 ] Training epoch: 100 +[ Thu Sep 15 02:05:30 2022 ] Batch(42/243) done. Loss: 0.0187 lr:0.001000 network_time: 0.0292 +[ Thu Sep 15 02:06:43 2022 ] Batch(142/243) done. Loss: 0.0032 lr:0.001000 network_time: 0.0285 +[ Thu Sep 15 02:07:55 2022 ] Batch(242/243) done. Loss: 0.0084 lr:0.001000 network_time: 0.0274 +[ Thu Sep 15 02:07:56 2022 ] Eval epoch: 100 +[ Thu Sep 15 02:09:30 2022 ] Mean test loss of 796 batches: 2.4123075008392334. 
+[ Thu Sep 15 02:09:30 2022 ] Top1: 54.21% +[ Thu Sep 15 02:09:30 2022 ] Top5: 83.67% +[ Thu Sep 15 02:09:31 2022 ] Training epoch: 101 +[ Thu Sep 15 02:10:46 2022 ] Batch(99/243) done. Loss: 0.0112 lr:0.000100 network_time: 0.0311 +[ Thu Sep 15 02:11:58 2022 ] Batch(199/243) done. Loss: 0.0069 lr:0.000100 network_time: 0.0312 +[ Thu Sep 15 02:12:30 2022 ] Eval epoch: 101 +[ Thu Sep 15 02:14:04 2022 ] Mean test loss of 796 batches: 2.413454055786133. +[ Thu Sep 15 02:14:04 2022 ] Top1: 54.20% +[ Thu Sep 15 02:14:05 2022 ] Top5: 83.54% +[ Thu Sep 15 02:14:05 2022 ] Training epoch: 102 +[ Thu Sep 15 02:14:49 2022 ] Batch(56/243) done. Loss: 0.0052 lr:0.000100 network_time: 0.0307 +[ Thu Sep 15 02:16:01 2022 ] Batch(156/243) done. Loss: 0.0132 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 02:17:04 2022 ] Eval epoch: 102 +[ Thu Sep 15 02:18:38 2022 ] Mean test loss of 796 batches: 2.4486000537872314. +[ Thu Sep 15 02:18:39 2022 ] Top1: 53.84% +[ Thu Sep 15 02:18:39 2022 ] Top5: 83.29% +[ Thu Sep 15 02:18:39 2022 ] Training epoch: 103 +[ Thu Sep 15 02:18:52 2022 ] Batch(13/243) done. Loss: 0.0056 lr:0.000100 network_time: 0.0281 +[ Thu Sep 15 02:20:05 2022 ] Batch(113/243) done. Loss: 0.0058 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 02:21:17 2022 ] Batch(213/243) done. Loss: 0.0091 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 02:21:39 2022 ] Eval epoch: 103 +[ Thu Sep 15 02:23:12 2022 ] Mean test loss of 796 batches: 2.4214422702789307. +[ Thu Sep 15 02:23:13 2022 ] Top1: 53.80% +[ Thu Sep 15 02:23:14 2022 ] Top5: 83.33% +[ Thu Sep 15 02:23:14 2022 ] Training epoch: 104 +[ Thu Sep 15 02:24:08 2022 ] Batch(70/243) done. Loss: 0.0093 lr:0.000100 network_time: 0.0286 +[ Thu Sep 15 02:25:21 2022 ] Batch(170/243) done. Loss: 0.0136 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 02:26:13 2022 ] Eval epoch: 104 +[ Thu Sep 15 02:27:47 2022 ] Mean test loss of 796 batches: 2.4433953762054443. 
+[ Thu Sep 15 02:27:47 2022 ] Top1: 53.95% +[ Thu Sep 15 02:27:48 2022 ] Top5: 83.43% +[ Thu Sep 15 02:27:48 2022 ] Training epoch: 105 +[ Thu Sep 15 02:28:11 2022 ] Batch(27/243) done. Loss: 0.0233 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 02:29:24 2022 ] Batch(127/243) done. Loss: 0.0035 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 02:30:36 2022 ] Batch(227/243) done. Loss: 0.0237 lr:0.000100 network_time: 0.0299 +[ Thu Sep 15 02:30:47 2022 ] Eval epoch: 105 +[ Thu Sep 15 02:32:21 2022 ] Mean test loss of 796 batches: 2.4214446544647217. +[ Thu Sep 15 02:32:21 2022 ] Top1: 53.84% +[ Thu Sep 15 02:32:22 2022 ] Top5: 83.38% +[ Thu Sep 15 02:32:22 2022 ] Training epoch: 106 +[ Thu Sep 15 02:33:26 2022 ] Batch(84/243) done. Loss: 0.0060 lr:0.000100 network_time: 0.0311 +[ Thu Sep 15 02:34:39 2022 ] Batch(184/243) done. Loss: 0.0091 lr:0.000100 network_time: 0.0271 +[ Thu Sep 15 02:35:21 2022 ] Eval epoch: 106 +[ Thu Sep 15 02:36:55 2022 ] Mean test loss of 796 batches: 2.4069089889526367. +[ Thu Sep 15 02:36:55 2022 ] Top1: 54.07% +[ Thu Sep 15 02:36:56 2022 ] Top5: 83.57% +[ Thu Sep 15 02:36:56 2022 ] Training epoch: 107 +[ Thu Sep 15 02:37:30 2022 ] Batch(41/243) done. Loss: 0.0087 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 02:38:42 2022 ] Batch(141/243) done. Loss: 0.0113 lr:0.000100 network_time: 0.0438 +[ Thu Sep 15 02:39:55 2022 ] Batch(241/243) done. Loss: 0.0101 lr:0.000100 network_time: 0.0268 +[ Thu Sep 15 02:39:56 2022 ] Eval epoch: 107 +[ Thu Sep 15 02:41:30 2022 ] Mean test loss of 796 batches: 2.4322874546051025. +[ Thu Sep 15 02:41:31 2022 ] Top1: 53.90% +[ Thu Sep 15 02:41:31 2022 ] Top5: 83.52% +[ Thu Sep 15 02:41:31 2022 ] Training epoch: 108 +[ Thu Sep 15 02:42:46 2022 ] Batch(98/243) done. Loss: 0.0044 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 02:43:58 2022 ] Batch(198/243) done. 
Loss: 0.0058 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 02:44:30 2022 ] Eval epoch: 108 +[ Thu Sep 15 02:46:04 2022 ] Mean test loss of 796 batches: 2.3948800563812256. +[ Thu Sep 15 02:46:04 2022 ] Top1: 54.13% +[ Thu Sep 15 02:46:05 2022 ] Top5: 83.52% +[ Thu Sep 15 02:46:05 2022 ] Training epoch: 109 +[ Thu Sep 15 02:46:49 2022 ] Batch(55/243) done. Loss: 0.0107 lr:0.000100 network_time: 0.0332 +[ Thu Sep 15 02:48:01 2022 ] Batch(155/243) done. Loss: 0.0194 lr:0.000100 network_time: 0.0330 +[ Thu Sep 15 02:49:05 2022 ] Eval epoch: 109 +[ Thu Sep 15 02:50:38 2022 ] Mean test loss of 796 batches: 2.4319751262664795. +[ Thu Sep 15 02:50:39 2022 ] Top1: 53.66% +[ Thu Sep 15 02:50:40 2022 ] Top5: 83.33% +[ Thu Sep 15 02:50:40 2022 ] Training epoch: 110 +[ Thu Sep 15 02:50:52 2022 ] Batch(12/243) done. Loss: 0.0097 lr:0.000100 network_time: 0.0330 +[ Thu Sep 15 02:52:05 2022 ] Batch(112/243) done. Loss: 0.0098 lr:0.000100 network_time: 0.0284 +[ Thu Sep 15 02:53:17 2022 ] Batch(212/243) done. Loss: 0.0089 lr:0.000100 network_time: 0.0325 +[ Thu Sep 15 02:53:39 2022 ] Eval epoch: 110 +[ Thu Sep 15 02:55:14 2022 ] Mean test loss of 796 batches: 2.4360392093658447. +[ Thu Sep 15 02:55:14 2022 ] Top1: 53.55% +[ Thu Sep 15 02:55:14 2022 ] Top5: 83.17% +[ Thu Sep 15 02:55:15 2022 ] Training epoch: 111 +[ Thu Sep 15 02:56:08 2022 ] Batch(69/243) done. Loss: 0.0112 lr:0.000100 network_time: 0.0260 +[ Thu Sep 15 02:57:21 2022 ] Batch(169/243) done. Loss: 0.0484 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 02:58:14 2022 ] Eval epoch: 111 +[ Thu Sep 15 02:59:48 2022 ] Mean test loss of 796 batches: 2.4042062759399414. +[ Thu Sep 15 02:59:48 2022 ] Top1: 53.91% +[ Thu Sep 15 02:59:48 2022 ] Top5: 83.49% +[ Thu Sep 15 02:59:49 2022 ] Training epoch: 112 +[ Thu Sep 15 03:00:11 2022 ] Batch(26/243) done. Loss: 0.0138 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 03:01:23 2022 ] Batch(126/243) done. 
Loss: 0.0057 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 03:02:36 2022 ] Batch(226/243) done. Loss: 0.0137 lr:0.000100 network_time: 0.0321 +[ Thu Sep 15 03:02:48 2022 ] Eval epoch: 112 +[ Thu Sep 15 03:04:22 2022 ] Mean test loss of 796 batches: 2.4613728523254395. +[ Thu Sep 15 03:04:23 2022 ] Top1: 53.36% +[ Thu Sep 15 03:04:23 2022 ] Top5: 82.97% +[ Thu Sep 15 03:04:23 2022 ] Training epoch: 113 +[ Thu Sep 15 03:05:27 2022 ] Batch(83/243) done. Loss: 0.0049 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 03:06:40 2022 ] Batch(183/243) done. Loss: 0.0038 lr:0.000100 network_time: 0.0254 +[ Thu Sep 15 03:07:23 2022 ] Eval epoch: 113 +[ Thu Sep 15 03:08:56 2022 ] Mean test loss of 796 batches: 2.4727606773376465. +[ Thu Sep 15 03:08:56 2022 ] Top1: 53.37% +[ Thu Sep 15 03:08:57 2022 ] Top5: 83.12% +[ Thu Sep 15 03:08:57 2022 ] Training epoch: 114 +[ Thu Sep 15 03:09:29 2022 ] Batch(40/243) done. Loss: 0.0036 lr:0.000100 network_time: 0.0271 +[ Thu Sep 15 03:10:42 2022 ] Batch(140/243) done. Loss: 0.0052 lr:0.000100 network_time: 0.0302 +[ Thu Sep 15 03:11:54 2022 ] Batch(240/243) done. Loss: 0.0067 lr:0.000100 network_time: 0.0295 +[ Thu Sep 15 03:11:56 2022 ] Eval epoch: 114 +[ Thu Sep 15 03:13:29 2022 ] Mean test loss of 796 batches: 2.4121384620666504. +[ Thu Sep 15 03:13:30 2022 ] Top1: 53.94% +[ Thu Sep 15 03:13:30 2022 ] Top5: 83.51% +[ Thu Sep 15 03:13:30 2022 ] Training epoch: 115 +[ Thu Sep 15 03:14:44 2022 ] Batch(97/243) done. Loss: 0.0103 lr:0.000100 network_time: 0.0335 +[ Thu Sep 15 03:15:57 2022 ] Batch(197/243) done. Loss: 0.0115 lr:0.000100 network_time: 0.0317 +[ Thu Sep 15 03:16:30 2022 ] Eval epoch: 115 +[ Thu Sep 15 03:18:04 2022 ] Mean test loss of 796 batches: 2.38508939743042. +[ Thu Sep 15 03:18:04 2022 ] Top1: 54.29% +[ Thu Sep 15 03:18:04 2022 ] Top5: 83.57% +[ Thu Sep 15 03:18:05 2022 ] Training epoch: 116 +[ Thu Sep 15 03:18:47 2022 ] Batch(54/243) done. 
Loss: 0.0095 lr:0.000100 network_time: 0.0289 +[ Thu Sep 15 03:20:00 2022 ] Batch(154/243) done. Loss: 0.0051 lr:0.000100 network_time: 0.0321 +[ Thu Sep 15 03:21:04 2022 ] Eval epoch: 116 +[ Thu Sep 15 03:22:38 2022 ] Mean test loss of 796 batches: 2.406360387802124. +[ Thu Sep 15 03:22:38 2022 ] Top1: 54.08% +[ Thu Sep 15 03:22:39 2022 ] Top5: 83.59% +[ Thu Sep 15 03:22:39 2022 ] Training epoch: 117 +[ Thu Sep 15 03:22:50 2022 ] Batch(11/243) done. Loss: 0.0072 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 03:24:03 2022 ] Batch(111/243) done. Loss: 0.0053 lr:0.000100 network_time: 0.0322 +[ Thu Sep 15 03:25:16 2022 ] Batch(211/243) done. Loss: 0.0159 lr:0.000100 network_time: 0.0300 +[ Thu Sep 15 03:25:38 2022 ] Eval epoch: 117 +[ Thu Sep 15 03:27:12 2022 ] Mean test loss of 796 batches: 2.4006166458129883. +[ Thu Sep 15 03:27:12 2022 ] Top1: 54.11% +[ Thu Sep 15 03:27:13 2022 ] Top5: 83.51% +[ Thu Sep 15 03:27:13 2022 ] Training epoch: 118 +[ Thu Sep 15 03:28:06 2022 ] Batch(68/243) done. Loss: 0.0068 lr:0.000100 network_time: 0.0282 +[ Thu Sep 15 03:29:18 2022 ] Batch(168/243) done. Loss: 0.0164 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 03:30:12 2022 ] Eval epoch: 118 +[ Thu Sep 15 03:31:46 2022 ] Mean test loss of 796 batches: 2.438443183898926. +[ Thu Sep 15 03:31:47 2022 ] Top1: 53.62% +[ Thu Sep 15 03:31:47 2022 ] Top5: 83.11% +[ Thu Sep 15 03:31:47 2022 ] Training epoch: 119 +[ Thu Sep 15 03:32:09 2022 ] Batch(25/243) done. Loss: 0.0219 lr:0.000100 network_time: 0.0284 +[ Thu Sep 15 03:33:21 2022 ] Batch(125/243) done. Loss: 0.0118 lr:0.000100 network_time: 0.0332 +[ Thu Sep 15 03:34:34 2022 ] Batch(225/243) done. Loss: 0.0198 lr:0.000100 network_time: 0.0322 +[ Thu Sep 15 03:34:47 2022 ] Eval epoch: 119 +[ Thu Sep 15 03:36:21 2022 ] Mean test loss of 796 batches: 2.43454909324646. 
+[ Thu Sep 15 03:36:21 2022 ] Top1: 53.57% +[ Thu Sep 15 03:36:22 2022 ] Top5: 83.23% +[ Thu Sep 15 03:36:22 2022 ] Training epoch: 120 +[ Thu Sep 15 03:37:25 2022 ] Batch(82/243) done. Loss: 0.0118 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 03:38:37 2022 ] Batch(182/243) done. Loss: 0.0137 lr:0.000100 network_time: 0.0315 +[ Thu Sep 15 03:39:21 2022 ] Eval epoch: 120 +[ Thu Sep 15 03:40:55 2022 ] Mean test loss of 796 batches: 2.455756902694702. +[ Thu Sep 15 03:40:56 2022 ] Top1: 53.77% +[ Thu Sep 15 03:40:56 2022 ] Top5: 83.27% +[ Thu Sep 15 03:40:57 2022 ] Training epoch: 121 +[ Thu Sep 15 03:41:28 2022 ] Batch(39/243) done. Loss: 0.0172 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 03:42:41 2022 ] Batch(139/243) done. Loss: 0.0125 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 03:43:53 2022 ] Batch(239/243) done. Loss: 0.0273 lr:0.000100 network_time: 0.0302 +[ Thu Sep 15 03:43:56 2022 ] Eval epoch: 121 +[ Thu Sep 15 03:45:30 2022 ] Mean test loss of 796 batches: 2.4042556285858154. +[ Thu Sep 15 03:45:30 2022 ] Top1: 54.06% +[ Thu Sep 15 03:45:30 2022 ] Top5: 83.58% +[ Thu Sep 15 03:45:31 2022 ] Training epoch: 122 +[ Thu Sep 15 03:46:44 2022 ] Batch(96/243) done. Loss: 0.0129 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 03:47:56 2022 ] Batch(196/243) done. Loss: 0.0073 lr:0.000100 network_time: 0.0320 +[ Thu Sep 15 03:48:30 2022 ] Eval epoch: 122 +[ Thu Sep 15 03:50:04 2022 ] Mean test loss of 796 batches: 2.415961980819702. +[ Thu Sep 15 03:50:04 2022 ] Top1: 54.06% +[ Thu Sep 15 03:50:05 2022 ] Top5: 83.58% +[ Thu Sep 15 03:50:05 2022 ] Training epoch: 123 +[ Thu Sep 15 03:50:47 2022 ] Batch(53/243) done. Loss: 0.0038 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 03:51:59 2022 ] Batch(153/243) done. Loss: 0.0056 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 03:53:04 2022 ] Eval epoch: 123 +[ Thu Sep 15 03:54:38 2022 ] Mean test loss of 796 batches: 2.43465518951416. 
+[ Thu Sep 15 03:54:39 2022 ] Top1: 53.76% +[ Thu Sep 15 03:54:39 2022 ] Top5: 83.39% +[ Thu Sep 15 03:54:39 2022 ] Training epoch: 124 +[ Thu Sep 15 03:54:50 2022 ] Batch(10/243) done. Loss: 0.0133 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 03:56:02 2022 ] Batch(110/243) done. Loss: 0.0074 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 03:57:15 2022 ] Batch(210/243) done. Loss: 0.0071 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 03:57:39 2022 ] Eval epoch: 124 +[ Thu Sep 15 03:59:13 2022 ] Mean test loss of 796 batches: 2.3815648555755615. +[ Thu Sep 15 03:59:13 2022 ] Top1: 54.19% +[ Thu Sep 15 03:59:13 2022 ] Top5: 83.69% +[ Thu Sep 15 03:59:14 2022 ] Training epoch: 125 +[ Thu Sep 15 04:00:06 2022 ] Batch(67/243) done. Loss: 0.0084 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 04:01:18 2022 ] Batch(167/243) done. Loss: 0.0476 lr:0.000100 network_time: 0.0261 +[ Thu Sep 15 04:02:13 2022 ] Eval epoch: 125 +[ Thu Sep 15 04:03:47 2022 ] Mean test loss of 796 batches: 2.405188798904419. +[ Thu Sep 15 04:03:47 2022 ] Top1: 53.87% +[ Thu Sep 15 04:03:48 2022 ] Top5: 83.34% +[ Thu Sep 15 04:03:48 2022 ] Training epoch: 126 +[ Thu Sep 15 04:04:08 2022 ] Batch(24/243) done. Loss: 0.0119 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 04:05:21 2022 ] Batch(124/243) done. Loss: 0.0069 lr:0.000100 network_time: 0.0319 +[ Thu Sep 15 04:06:34 2022 ] Batch(224/243) done. Loss: 0.0133 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 04:06:47 2022 ] Eval epoch: 126 +[ Thu Sep 15 04:08:21 2022 ] Mean test loss of 796 batches: 2.470874309539795. +[ Thu Sep 15 04:08:21 2022 ] Top1: 53.76% +[ Thu Sep 15 04:08:22 2022 ] Top5: 83.41% +[ Thu Sep 15 04:08:22 2022 ] Training epoch: 127 +[ Thu Sep 15 04:09:24 2022 ] Batch(81/243) done. Loss: 0.0116 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 04:10:37 2022 ] Batch(181/243) done. 
Loss: 0.0150 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 04:11:21 2022 ] Eval epoch: 127 +[ Thu Sep 15 04:12:54 2022 ] Mean test loss of 796 batches: 2.4488444328308105. +[ Thu Sep 15 04:12:55 2022 ] Top1: 53.71% +[ Thu Sep 15 04:12:56 2022 ] Top5: 83.29% +[ Thu Sep 15 04:12:56 2022 ] Training epoch: 128 +[ Thu Sep 15 04:13:27 2022 ] Batch(38/243) done. Loss: 0.0214 lr:0.000100 network_time: 0.0290 +[ Thu Sep 15 04:14:40 2022 ] Batch(138/243) done. Loss: 0.0040 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 04:15:52 2022 ] Batch(238/243) done. Loss: 0.0129 lr:0.000100 network_time: 0.0233 +[ Thu Sep 15 04:15:55 2022 ] Eval epoch: 128 +[ Thu Sep 15 04:17:30 2022 ] Mean test loss of 796 batches: 2.4242472648620605. +[ Thu Sep 15 04:17:30 2022 ] Top1: 53.89% +[ Thu Sep 15 04:17:31 2022 ] Top5: 83.42% +[ Thu Sep 15 04:17:31 2022 ] Training epoch: 129 +[ Thu Sep 15 04:18:43 2022 ] Batch(95/243) done. Loss: 0.0464 lr:0.000100 network_time: 0.0281 +[ Thu Sep 15 04:19:56 2022 ] Batch(195/243) done. Loss: 0.0134 lr:0.000100 network_time: 0.0312 +[ Thu Sep 15 04:20:30 2022 ] Eval epoch: 129 +[ Thu Sep 15 04:22:05 2022 ] Mean test loss of 796 batches: 2.4200432300567627. +[ Thu Sep 15 04:22:05 2022 ] Top1: 53.90% +[ Thu Sep 15 04:22:06 2022 ] Top5: 83.39% +[ Thu Sep 15 04:22:06 2022 ] Training epoch: 130 +[ Thu Sep 15 04:22:47 2022 ] Batch(52/243) done. Loss: 0.0061 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 04:24:00 2022 ] Batch(152/243) done. Loss: 0.0041 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 04:25:05 2022 ] Eval epoch: 130 +[ Thu Sep 15 04:26:39 2022 ] Mean test loss of 796 batches: 2.39705491065979. +[ Thu Sep 15 04:26:39 2022 ] Top1: 54.33% +[ Thu Sep 15 04:26:40 2022 ] Top5: 83.68% +[ Thu Sep 15 04:26:40 2022 ] Training epoch: 131 +[ Thu Sep 15 04:26:50 2022 ] Batch(9/243) done. Loss: 0.0103 lr:0.000100 network_time: 0.0366 +[ Thu Sep 15 04:28:03 2022 ] Batch(109/243) done. 
Loss: 0.0039 lr:0.000100 network_time: 0.0299 +[ Thu Sep 15 04:29:15 2022 ] Batch(209/243) done. Loss: 0.0089 lr:0.000100 network_time: 0.0311 +[ Thu Sep 15 04:29:39 2022 ] Eval epoch: 131 +[ Thu Sep 15 04:31:13 2022 ] Mean test loss of 796 batches: 2.4424407482147217. +[ Thu Sep 15 04:31:13 2022 ] Top1: 53.93% +[ Thu Sep 15 04:31:14 2022 ] Top5: 83.32% +[ Thu Sep 15 04:31:14 2022 ] Training epoch: 132 +[ Thu Sep 15 04:32:05 2022 ] Batch(66/243) done. Loss: 0.0062 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 04:33:18 2022 ] Batch(166/243) done. Loss: 0.0148 lr:0.000100 network_time: 0.0305 +[ Thu Sep 15 04:34:13 2022 ] Eval epoch: 132 +[ Thu Sep 15 04:35:47 2022 ] Mean test loss of 796 batches: 2.369658946990967. +[ Thu Sep 15 04:35:47 2022 ] Top1: 54.38% +[ Thu Sep 15 04:35:48 2022 ] Top5: 83.72% +[ Thu Sep 15 04:35:48 2022 ] Training epoch: 133 +[ Thu Sep 15 04:36:08 2022 ] Batch(23/243) done. Loss: 0.0091 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 04:37:20 2022 ] Batch(123/243) done. Loss: 0.0055 lr:0.000100 network_time: 0.0268 +[ Thu Sep 15 04:38:33 2022 ] Batch(223/243) done. Loss: 0.0116 lr:0.000100 network_time: 0.0262 +[ Thu Sep 15 04:38:47 2022 ] Eval epoch: 133 +[ Thu Sep 15 04:40:21 2022 ] Mean test loss of 796 batches: 2.389495611190796. +[ Thu Sep 15 04:40:22 2022 ] Top1: 53.83% +[ Thu Sep 15 04:40:22 2022 ] Top5: 83.35% +[ Thu Sep 15 04:40:23 2022 ] Training epoch: 134 +[ Thu Sep 15 04:41:24 2022 ] Batch(80/243) done. Loss: 0.0105 lr:0.000100 network_time: 0.0282 +[ Thu Sep 15 04:42:37 2022 ] Batch(180/243) done. Loss: 0.0188 lr:0.000100 network_time: 0.0320 +[ Thu Sep 15 04:43:22 2022 ] Eval epoch: 134 +[ Thu Sep 15 04:44:56 2022 ] Mean test loss of 796 batches: 2.4094290733337402. +[ Thu Sep 15 04:44:57 2022 ] Top1: 54.14% +[ Thu Sep 15 04:44:58 2022 ] Top5: 83.63% +[ Thu Sep 15 04:44:58 2022 ] Training epoch: 135 +[ Thu Sep 15 04:45:28 2022 ] Batch(37/243) done. 
Loss: 0.0060 lr:0.000100 network_time: 0.0298 +[ Thu Sep 15 04:46:40 2022 ] Batch(137/243) done. Loss: 0.0224 lr:0.000100 network_time: 0.0307 +[ Thu Sep 15 04:47:53 2022 ] Batch(237/243) done. Loss: 0.0060 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 04:47:57 2022 ] Eval epoch: 135 +[ Thu Sep 15 04:49:32 2022 ] Mean test loss of 796 batches: 2.4336893558502197. +[ Thu Sep 15 04:49:32 2022 ] Top1: 53.72% +[ Thu Sep 15 04:49:33 2022 ] Top5: 83.31% +[ Thu Sep 15 04:49:33 2022 ] Training epoch: 136 +[ Thu Sep 15 04:50:45 2022 ] Batch(94/243) done. Loss: 0.0068 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 04:51:57 2022 ] Batch(194/243) done. Loss: 0.0108 lr:0.000100 network_time: 0.0319 +[ Thu Sep 15 04:52:33 2022 ] Eval epoch: 136 +[ Thu Sep 15 04:54:07 2022 ] Mean test loss of 796 batches: 2.4274051189422607. +[ Thu Sep 15 04:54:07 2022 ] Top1: 53.80% +[ Thu Sep 15 04:54:07 2022 ] Top5: 83.48% +[ Thu Sep 15 04:54:08 2022 ] Training epoch: 137 +[ Thu Sep 15 04:54:48 2022 ] Batch(51/243) done. Loss: 0.0082 lr:0.000100 network_time: 0.0320 +[ Thu Sep 15 04:56:01 2022 ] Batch(151/243) done. Loss: 0.0078 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 04:57:07 2022 ] Eval epoch: 137 +[ Thu Sep 15 04:58:41 2022 ] Mean test loss of 796 batches: 2.395327091217041. +[ Thu Sep 15 04:58:41 2022 ] Top1: 54.43% +[ Thu Sep 15 04:58:42 2022 ] Top5: 83.79% +[ Thu Sep 15 04:58:42 2022 ] Training epoch: 138 +[ Thu Sep 15 04:58:51 2022 ] Batch(8/243) done. Loss: 0.0090 lr:0.000100 network_time: 0.0316 +[ Thu Sep 15 05:00:04 2022 ] Batch(108/243) done. Loss: 0.0243 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 05:01:17 2022 ] Batch(208/243) done. Loss: 0.0360 lr:0.000100 network_time: 0.0318 +[ Thu Sep 15 05:01:42 2022 ] Eval epoch: 138 +[ Thu Sep 15 05:03:16 2022 ] Mean test loss of 796 batches: 2.4107162952423096. 
+[ Thu Sep 15 05:03:16 2022 ] Top1: 53.67% +[ Thu Sep 15 05:03:17 2022 ] Top5: 83.25% +[ Thu Sep 15 05:03:17 2022 ] Training epoch: 139 +[ Thu Sep 15 05:04:07 2022 ] Batch(65/243) done. Loss: 0.0301 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 05:05:20 2022 ] Batch(165/243) done. Loss: 0.0074 lr:0.000100 network_time: 0.0282 +[ Thu Sep 15 05:06:16 2022 ] Eval epoch: 139 +[ Thu Sep 15 05:07:50 2022 ] Mean test loss of 796 batches: 2.4515626430511475. +[ Thu Sep 15 05:07:50 2022 ] Top1: 53.41% +[ Thu Sep 15 05:07:51 2022 ] Top5: 83.25% +[ Thu Sep 15 05:07:51 2022 ] Training epoch: 140 +[ Thu Sep 15 05:08:11 2022 ] Batch(22/243) done. Loss: 0.0049 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 05:09:23 2022 ] Batch(122/243) done. Loss: 0.0135 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 05:10:36 2022 ] Batch(222/243) done. Loss: 0.0049 lr:0.000100 network_time: 0.0306 +[ Thu Sep 15 05:10:51 2022 ] Eval epoch: 140 +[ Thu Sep 15 05:12:25 2022 ] Mean test loss of 796 batches: 2.40610671043396. 
+[ Thu Sep 15 05:12:26 2022 ] Top1: 54.18% +[ Thu Sep 15 05:12:26 2022 ] Top5: 83.66% diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_xsub/shift_gcn.py b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_xsub/shift_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..0731e82fdb8bd0ae2e4c4ef4f29f7aab0c458bf4 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_bone_xsub/shift_gcn.py @@ -0,0 +1,216 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math + +import sys +sys.path.append("./model/Temporal_shift/") + +from cuda.shift import Shift + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class Shift_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(Shift_tcn, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.bn = nn.BatchNorm2d(in_channels) + self.bn2 = nn.BatchNorm2d(in_channels) + bn_init(self.bn2, 1) + self.relu = nn.ReLU(inplace=True) + self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1) + self.shift_out = Shift(channel=out_channels, stride=stride, 
init_scale=1) + + self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1) + nn.init.kaiming_normal(self.temporal_linear.weight, mode='fan_out') + + def forward(self, x): + x = self.bn(x) + # shift1 + x = self.shift_in(x) + x = self.temporal_linear(x) + x = self.relu(x) + # shift2 + x = self.shift_out(x) + x = self.bn2(x) + return x + + +class Shift_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3): + super(Shift_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0,math.sqrt(1.0/out_channels)) + + self.Linear_bias = nn.Parameter(torch.zeros(1,1,out_channels,requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Linear_bias, 0) + + self.Feature_Mask = nn.Parameter(torch.ones(1,25,in_channels, requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Feature_Mask, 0) + + self.bn = nn.BatchNorm1d(25*out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + + index_array = np.empty(25*in_channels).astype(np.int) + for i in range(25): + for j in range(in_channels): + index_array[i*in_channels + j] = (i*in_channels + j + j*in_channels)%(in_channels*25) + self.shift_in = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + index_array = np.empty(25*out_channels).astype(np.int) + for i in range(25): + for j in range(out_channels): + index_array[i*out_channels + j] = (i*out_channels + j - j*out_channels)%(out_channels*25) + self.shift_out = 
nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + + def forward(self, x0): + n, c, t, v = x0.size() + x = x0.permute(0,2,3,1).contiguous() + + # shift1 + x = x.view(n*t,v*c) + x = torch.index_select(x, 1, self.shift_in) + x = x.view(n*t,v,c) + x = x * (torch.tanh(self.Feature_Mask)+1) + + x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous() # nt,v,c + x = x + self.Linear_bias + + # shift2 + x = x.view(n*t,-1) + x = torch.index_select(x, 1, self.shift_out) + x = self.bn(x) + x = x.view(n,t,v,self.out_channels).permute(0,3,1,2) # n,c,t,v + + x = x + self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = Shift_gcn(in_channels, out_channels, A) + self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride) + self.relu = nn.ReLU() + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + else: + self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride) + + def forward(self, x): + x = self.tcn1(self.gcn1(x)) + self.residual(x) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A) + self.l3 = TCN_GCN_unit(64, 64, A) + self.l4 = TCN_GCN_unit(64, 64, A) + self.l5 = TCN_GCN_unit(64, 128, A, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A) + self.l7 = TCN_GCN_unit(128, 128, A) + self.l8 = TCN_GCN_unit(128, 256, A, stride=2) + self.l9 = TCN_GCN_unit(256, 256, 
A) + self.l10 = TCN_GCN_unit(256, 256, A) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x) + x = self.l2(x) + x = self.l3(x) + x = self.l4(x) + x = self.l5(x) + x = self.l6(x) + x = self.l7(x) + x = self.l8(x) + x = self.l9(x) + x = self.l10(x) + + # N*M,C,T,V + c_new = x.size(1) + x = x.view(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_motion_xsub/config.yaml b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_motion_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..111341780d9de746d2bdc14b223633f00f89802c --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_motion_xsub/config.yaml @@ -0,0 +1,56 @@ +Experiment_name: ntu120_joint_motion_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xsub/train_joint_motion.yaml +device: +- 6 +- 7 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu120_joint_motion_xsub +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_joint_motion.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl 
+train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_joint_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_joint_motion_xsub diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_motion_xsub/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_motion_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..a13845a58d8744d3f8685fea8e8fdd07d55d47ae --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_motion_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de10912eab25311307f44b8b05e3e3d54ffcf6211fc9bef161e5f600473a2510 +size 29946137 diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_motion_xsub/log.txt b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_motion_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..1ea3d9bc108646872c941de60b4cf7bcc5acc485 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_motion_xsub/log.txt @@ -0,0 +1,1043 @@ +[ Wed Sep 14 18:31:51 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_joint_motion_xsub', 'model_saved_name': './save_models/ntu120_joint_motion_xsub', 'Experiment_name': 'ntu120_joint_motion_xsub', 'config': './config/ntu120_xsub/train_joint_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_joint_motion.npy', 'label_path': 
'/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [6, 7], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Wed Sep 14 18:31:51 2022 ] Training epoch: 1 +[ Wed Sep 14 18:33:10 2022 ] Batch(99/243) done. Loss: 4.0202 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 18:34:23 2022 ] Batch(199/243) done. Loss: 3.0476 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 18:34:55 2022 ] Eval epoch: 1 +[ Wed Sep 14 18:36:30 2022 ] Mean test loss of 796 batches: 5.69941520690918. +[ Wed Sep 14 18:36:30 2022 ] Top1: 8.02% +[ Wed Sep 14 18:36:31 2022 ] Top5: 20.03% +[ Wed Sep 14 18:36:31 2022 ] Training epoch: 2 +[ Wed Sep 14 18:37:15 2022 ] Batch(56/243) done. Loss: 2.7135 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 18:38:28 2022 ] Batch(156/243) done. Loss: 2.2763 lr:0.100000 network_time: 0.0318 +[ Wed Sep 14 18:39:31 2022 ] Eval epoch: 2 +[ Wed Sep 14 18:41:06 2022 ] Mean test loss of 796 batches: 4.1878180503845215. +[ Wed Sep 14 18:41:06 2022 ] Top1: 16.12% +[ Wed Sep 14 18:41:06 2022 ] Top5: 36.34% +[ Wed Sep 14 18:41:07 2022 ] Training epoch: 3 +[ Wed Sep 14 18:41:20 2022 ] Batch(13/243) done. 
Loss: 1.7547 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 18:42:33 2022 ] Batch(113/243) done. Loss: 1.4097 lr:0.100000 network_time: 0.0317 +[ Wed Sep 14 18:43:45 2022 ] Batch(213/243) done. Loss: 1.7054 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 18:44:07 2022 ] Eval epoch: 3 +[ Wed Sep 14 18:45:41 2022 ] Mean test loss of 796 batches: 4.271505355834961. +[ Wed Sep 14 18:45:41 2022 ] Top1: 18.78% +[ Wed Sep 14 18:45:42 2022 ] Top5: 44.19% +[ Wed Sep 14 18:45:42 2022 ] Training epoch: 4 +[ Wed Sep 14 18:46:37 2022 ] Batch(70/243) done. Loss: 1.4182 lr:0.100000 network_time: 0.0281 +[ Wed Sep 14 18:47:49 2022 ] Batch(170/243) done. Loss: 1.4677 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 18:48:42 2022 ] Eval epoch: 4 +[ Wed Sep 14 18:50:17 2022 ] Mean test loss of 796 batches: 3.8030338287353516. +[ Wed Sep 14 18:50:17 2022 ] Top1: 23.18% +[ Wed Sep 14 18:50:17 2022 ] Top5: 49.43% +[ Wed Sep 14 18:50:18 2022 ] Training epoch: 5 +[ Wed Sep 14 18:50:41 2022 ] Batch(27/243) done. Loss: 1.4289 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 18:51:54 2022 ] Batch(127/243) done. Loss: 1.2314 lr:0.100000 network_time: 0.0284 +[ Wed Sep 14 18:53:07 2022 ] Batch(227/243) done. Loss: 1.4400 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 18:53:18 2022 ] Eval epoch: 5 +[ Wed Sep 14 18:54:52 2022 ] Mean test loss of 796 batches: 3.845407247543335. +[ Wed Sep 14 18:54:53 2022 ] Top1: 21.89% +[ Wed Sep 14 18:54:53 2022 ] Top5: 49.36% +[ Wed Sep 14 18:54:53 2022 ] Training epoch: 6 +[ Wed Sep 14 18:55:58 2022 ] Batch(84/243) done. Loss: 1.2397 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 18:57:11 2022 ] Batch(184/243) done. Loss: 1.0354 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 18:57:53 2022 ] Eval epoch: 6 +[ Wed Sep 14 18:59:28 2022 ] Mean test loss of 796 batches: 3.845848560333252. +[ Wed Sep 14 18:59:28 2022 ] Top1: 26.16% +[ Wed Sep 14 18:59:29 2022 ] Top5: 56.40% +[ Wed Sep 14 18:59:29 2022 ] Training epoch: 7 +[ Wed Sep 14 19:00:02 2022 ] Batch(41/243) done. 
Loss: 1.1685 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 19:01:15 2022 ] Batch(141/243) done. Loss: 0.8339 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 19:02:28 2022 ] Batch(241/243) done. Loss: 0.9553 lr:0.100000 network_time: 0.0281 +[ Wed Sep 14 19:02:29 2022 ] Eval epoch: 7 +[ Wed Sep 14 19:04:04 2022 ] Mean test loss of 796 batches: 3.6132612228393555. +[ Wed Sep 14 19:04:04 2022 ] Top1: 26.16% +[ Wed Sep 14 19:04:05 2022 ] Top5: 57.86% +[ Wed Sep 14 19:04:05 2022 ] Training epoch: 8 +[ Wed Sep 14 19:05:20 2022 ] Batch(98/243) done. Loss: 0.7015 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 19:06:33 2022 ] Batch(198/243) done. Loss: 0.8786 lr:0.100000 network_time: 0.0285 +[ Wed Sep 14 19:07:05 2022 ] Eval epoch: 8 +[ Wed Sep 14 19:08:40 2022 ] Mean test loss of 796 batches: 3.2343382835388184. +[ Wed Sep 14 19:08:40 2022 ] Top1: 29.52% +[ Wed Sep 14 19:08:41 2022 ] Top5: 62.62% +[ Wed Sep 14 19:08:41 2022 ] Training epoch: 9 +[ Wed Sep 14 19:09:24 2022 ] Batch(55/243) done. Loss: 1.0992 lr:0.100000 network_time: 0.0314 +[ Wed Sep 14 19:10:37 2022 ] Batch(155/243) done. Loss: 0.8874 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 19:11:41 2022 ] Eval epoch: 9 +[ Wed Sep 14 19:13:16 2022 ] Mean test loss of 796 batches: 3.12418270111084. +[ Wed Sep 14 19:13:16 2022 ] Top1: 34.95% +[ Wed Sep 14 19:13:16 2022 ] Top5: 66.12% +[ Wed Sep 14 19:13:17 2022 ] Training epoch: 10 +[ Wed Sep 14 19:13:29 2022 ] Batch(12/243) done. Loss: 1.0292 lr:0.100000 network_time: 0.0220 +[ Wed Sep 14 19:14:42 2022 ] Batch(112/243) done. Loss: 0.8647 lr:0.100000 network_time: 0.0256 +[ Wed Sep 14 19:15:55 2022 ] Batch(212/243) done. Loss: 1.3004 lr:0.100000 network_time: 0.0311 +[ Wed Sep 14 19:16:17 2022 ] Eval epoch: 10 +[ Wed Sep 14 19:17:51 2022 ] Mean test loss of 796 batches: 3.8285813331604004. 
+[ Wed Sep 14 19:17:51 2022 ] Top1: 30.90% +[ Wed Sep 14 19:17:52 2022 ] Top5: 63.59% +[ Wed Sep 14 19:17:52 2022 ] Training epoch: 11 +[ Wed Sep 14 19:18:46 2022 ] Batch(69/243) done. Loss: 0.8339 lr:0.100000 network_time: 0.0323 +[ Wed Sep 14 19:19:59 2022 ] Batch(169/243) done. Loss: 0.7797 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 19:20:52 2022 ] Eval epoch: 11 +[ Wed Sep 14 19:22:26 2022 ] Mean test loss of 796 batches: 3.172834634780884. +[ Wed Sep 14 19:22:27 2022 ] Top1: 36.06% +[ Wed Sep 14 19:22:27 2022 ] Top5: 70.82% +[ Wed Sep 14 19:22:27 2022 ] Training epoch: 12 +[ Wed Sep 14 19:22:50 2022 ] Batch(26/243) done. Loss: 0.3926 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 19:24:03 2022 ] Batch(126/243) done. Loss: 0.9194 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 19:25:16 2022 ] Batch(226/243) done. Loss: 0.9495 lr:0.100000 network_time: 0.0307 +[ Wed Sep 14 19:25:27 2022 ] Eval epoch: 12 +[ Wed Sep 14 19:27:02 2022 ] Mean test loss of 796 batches: 3.429459810256958. +[ Wed Sep 14 19:27:02 2022 ] Top1: 33.83% +[ Wed Sep 14 19:27:02 2022 ] Top5: 67.91% +[ Wed Sep 14 19:27:03 2022 ] Training epoch: 13 +[ Wed Sep 14 19:28:07 2022 ] Batch(83/243) done. Loss: 0.5501 lr:0.100000 network_time: 0.0260 +[ Wed Sep 14 19:29:20 2022 ] Batch(183/243) done. Loss: 0.8154 lr:0.100000 network_time: 0.0303 +[ Wed Sep 14 19:30:03 2022 ] Eval epoch: 13 +[ Wed Sep 14 19:31:37 2022 ] Mean test loss of 796 batches: 3.606862783432007. +[ Wed Sep 14 19:31:37 2022 ] Top1: 27.25% +[ Wed Sep 14 19:31:38 2022 ] Top5: 57.85% +[ Wed Sep 14 19:31:38 2022 ] Training epoch: 14 +[ Wed Sep 14 19:32:11 2022 ] Batch(40/243) done. Loss: 0.4214 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 19:33:24 2022 ] Batch(140/243) done. Loss: 0.8487 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 19:34:37 2022 ] Batch(240/243) done. 
Loss: 0.6233 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 19:34:38 2022 ] Eval epoch: 14 +[ Wed Sep 14 19:36:13 2022 ] Mean test loss of 796 batches: 4.171570777893066. +[ Wed Sep 14 19:36:13 2022 ] Top1: 29.66% +[ Wed Sep 14 19:36:14 2022 ] Top5: 58.28% +[ Wed Sep 14 19:36:14 2022 ] Training epoch: 15 +[ Wed Sep 14 19:37:28 2022 ] Batch(97/243) done. Loss: 0.5808 lr:0.100000 network_time: 0.0312 +[ Wed Sep 14 19:38:41 2022 ] Batch(197/243) done. Loss: 0.5147 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 19:39:14 2022 ] Eval epoch: 15 +[ Wed Sep 14 19:40:48 2022 ] Mean test loss of 796 batches: 3.827831506729126. +[ Wed Sep 14 19:40:49 2022 ] Top1: 33.09% +[ Wed Sep 14 19:40:49 2022 ] Top5: 68.44% +[ Wed Sep 14 19:40:49 2022 ] Training epoch: 16 +[ Wed Sep 14 19:41:32 2022 ] Batch(54/243) done. Loss: 0.4489 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 19:42:45 2022 ] Batch(154/243) done. Loss: 0.5374 lr:0.100000 network_time: 0.0310 +[ Wed Sep 14 19:43:50 2022 ] Eval epoch: 16 +[ Wed Sep 14 19:45:24 2022 ] Mean test loss of 796 batches: 3.337961196899414. +[ Wed Sep 14 19:45:25 2022 ] Top1: 36.75% +[ Wed Sep 14 19:45:25 2022 ] Top5: 68.71% +[ Wed Sep 14 19:45:25 2022 ] Training epoch: 17 +[ Wed Sep 14 19:45:37 2022 ] Batch(11/243) done. Loss: 0.6355 lr:0.100000 network_time: 0.0260 +[ Wed Sep 14 19:46:50 2022 ] Batch(111/243) done. Loss: 0.5497 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 19:48:02 2022 ] Batch(211/243) done. Loss: 0.9412 lr:0.100000 network_time: 0.0284 +[ Wed Sep 14 19:48:25 2022 ] Eval epoch: 17 +[ Wed Sep 14 19:50:00 2022 ] Mean test loss of 796 batches: 3.2908499240875244. +[ Wed Sep 14 19:50:00 2022 ] Top1: 37.76% +[ Wed Sep 14 19:50:01 2022 ] Top5: 71.83% +[ Wed Sep 14 19:50:01 2022 ] Training epoch: 18 +[ Wed Sep 14 19:50:54 2022 ] Batch(68/243) done. Loss: 0.5626 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 19:52:07 2022 ] Batch(168/243) done. 
Loss: 0.5032 lr:0.100000 network_time: 0.0250 +[ Wed Sep 14 19:53:01 2022 ] Eval epoch: 18 +[ Wed Sep 14 19:54:35 2022 ] Mean test loss of 796 batches: 3.725764513015747. +[ Wed Sep 14 19:54:36 2022 ] Top1: 38.57% +[ Wed Sep 14 19:54:36 2022 ] Top5: 69.67% +[ Wed Sep 14 19:54:36 2022 ] Training epoch: 19 +[ Wed Sep 14 19:54:58 2022 ] Batch(25/243) done. Loss: 0.5624 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 19:56:11 2022 ] Batch(125/243) done. Loss: 0.6199 lr:0.100000 network_time: 0.0314 +[ Wed Sep 14 19:57:24 2022 ] Batch(225/243) done. Loss: 0.6703 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 19:57:36 2022 ] Eval epoch: 19 +[ Wed Sep 14 19:59:11 2022 ] Mean test loss of 796 batches: 3.639695882797241. +[ Wed Sep 14 19:59:11 2022 ] Top1: 36.78% +[ Wed Sep 14 19:59:12 2022 ] Top5: 71.43% +[ Wed Sep 14 19:59:12 2022 ] Training epoch: 20 +[ Wed Sep 14 20:00:16 2022 ] Batch(82/243) done. Loss: 0.4942 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 20:01:29 2022 ] Batch(182/243) done. Loss: 0.4595 lr:0.100000 network_time: 0.0283 +[ Wed Sep 14 20:02:13 2022 ] Eval epoch: 20 +[ Wed Sep 14 20:03:47 2022 ] Mean test loss of 796 batches: 3.102022886276245. +[ Wed Sep 14 20:03:47 2022 ] Top1: 37.25% +[ Wed Sep 14 20:03:48 2022 ] Top5: 69.94% +[ Wed Sep 14 20:03:48 2022 ] Training epoch: 21 +[ Wed Sep 14 20:04:20 2022 ] Batch(39/243) done. Loss: 0.2484 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 20:05:33 2022 ] Batch(139/243) done. Loss: 0.5053 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 20:06:46 2022 ] Batch(239/243) done. Loss: 0.5454 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 20:06:48 2022 ] Eval epoch: 21 +[ Wed Sep 14 20:08:22 2022 ] Mean test loss of 796 batches: 3.5374462604522705. +[ Wed Sep 14 20:08:23 2022 ] Top1: 35.02% +[ Wed Sep 14 20:08:23 2022 ] Top5: 69.82% +[ Wed Sep 14 20:08:23 2022 ] Training epoch: 22 +[ Wed Sep 14 20:09:37 2022 ] Batch(96/243) done. 
Loss: 0.5373 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 20:10:50 2022 ] Batch(196/243) done. Loss: 0.4305 lr:0.100000 network_time: 0.0249 +[ Wed Sep 14 20:11:24 2022 ] Eval epoch: 22 +[ Wed Sep 14 20:12:58 2022 ] Mean test loss of 796 batches: 3.3867008686065674. +[ Wed Sep 14 20:12:58 2022 ] Top1: 42.28% +[ Wed Sep 14 20:12:59 2022 ] Top5: 74.57% +[ Wed Sep 14 20:12:59 2022 ] Training epoch: 23 +[ Wed Sep 14 20:13:41 2022 ] Batch(53/243) done. Loss: 0.4115 lr:0.100000 network_time: 0.0342 +[ Wed Sep 14 20:14:54 2022 ] Batch(153/243) done. Loss: 0.7559 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 20:15:59 2022 ] Eval epoch: 23 +[ Wed Sep 14 20:17:32 2022 ] Mean test loss of 796 batches: 3.638932228088379. +[ Wed Sep 14 20:17:33 2022 ] Top1: 38.48% +[ Wed Sep 14 20:17:33 2022 ] Top5: 71.98% +[ Wed Sep 14 20:17:33 2022 ] Training epoch: 24 +[ Wed Sep 14 20:17:44 2022 ] Batch(10/243) done. Loss: 0.3779 lr:0.100000 network_time: 0.0281 +[ Wed Sep 14 20:18:57 2022 ] Batch(110/243) done. Loss: 0.4044 lr:0.100000 network_time: 0.0282 +[ Wed Sep 14 20:20:10 2022 ] Batch(210/243) done. Loss: 0.4047 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 20:20:34 2022 ] Eval epoch: 24 +[ Wed Sep 14 20:22:07 2022 ] Mean test loss of 796 batches: 3.194326162338257. +[ Wed Sep 14 20:22:08 2022 ] Top1: 41.16% +[ Wed Sep 14 20:22:08 2022 ] Top5: 73.29% +[ Wed Sep 14 20:22:08 2022 ] Training epoch: 25 +[ Wed Sep 14 20:23:01 2022 ] Batch(67/243) done. Loss: 0.3711 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 20:24:13 2022 ] Batch(167/243) done. Loss: 0.4264 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 20:25:08 2022 ] Eval epoch: 25 +[ Wed Sep 14 20:26:42 2022 ] Mean test loss of 796 batches: 3.711545705795288. +[ Wed Sep 14 20:26:43 2022 ] Top1: 37.13% +[ Wed Sep 14 20:26:43 2022 ] Top5: 68.57% +[ Wed Sep 14 20:26:43 2022 ] Training epoch: 26 +[ Wed Sep 14 20:27:04 2022 ] Batch(24/243) done. 
Loss: 0.4519 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 20:28:17 2022 ] Batch(124/243) done. Loss: 0.3969 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 20:29:30 2022 ] Batch(224/243) done. Loss: 0.2840 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 20:29:43 2022 ] Eval epoch: 26 +[ Wed Sep 14 20:31:18 2022 ] Mean test loss of 796 batches: 3.380185127258301. +[ Wed Sep 14 20:31:18 2022 ] Top1: 44.60% +[ Wed Sep 14 20:31:18 2022 ] Top5: 76.95% +[ Wed Sep 14 20:31:18 2022 ] Training epoch: 27 +[ Wed Sep 14 20:32:21 2022 ] Batch(81/243) done. Loss: 0.4860 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 20:33:34 2022 ] Batch(181/243) done. Loss: 0.4455 lr:0.100000 network_time: 0.0433 +[ Wed Sep 14 20:34:19 2022 ] Eval epoch: 27 +[ Wed Sep 14 20:35:53 2022 ] Mean test loss of 796 batches: 3.682363748550415. +[ Wed Sep 14 20:35:54 2022 ] Top1: 34.85% +[ Wed Sep 14 20:35:54 2022 ] Top5: 68.94% +[ Wed Sep 14 20:35:55 2022 ] Training epoch: 28 +[ Wed Sep 14 20:36:26 2022 ] Batch(38/243) done. Loss: 0.2284 lr:0.100000 network_time: 0.0537 +[ Wed Sep 14 20:37:39 2022 ] Batch(138/243) done. Loss: 0.3881 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 20:38:52 2022 ] Batch(238/243) done. Loss: 0.4989 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 20:38:55 2022 ] Eval epoch: 28 +[ Wed Sep 14 20:40:29 2022 ] Mean test loss of 796 batches: 4.050222873687744. +[ Wed Sep 14 20:40:29 2022 ] Top1: 39.72% +[ Wed Sep 14 20:40:29 2022 ] Top5: 72.02% +[ Wed Sep 14 20:40:30 2022 ] Training epoch: 29 +[ Wed Sep 14 20:41:42 2022 ] Batch(95/243) done. Loss: 0.2913 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 20:42:55 2022 ] Batch(195/243) done. Loss: 0.4301 lr:0.100000 network_time: 0.0261 +[ Wed Sep 14 20:43:30 2022 ] Eval epoch: 29 +[ Wed Sep 14 20:45:04 2022 ] Mean test loss of 796 batches: 4.2824883460998535. 
+[ Wed Sep 14 20:45:04 2022 ] Top1: 41.94% +[ Wed Sep 14 20:45:04 2022 ] Top5: 74.24% +[ Wed Sep 14 20:45:05 2022 ] Training epoch: 30 +[ Wed Sep 14 20:45:46 2022 ] Batch(52/243) done. Loss: 0.3259 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 20:46:59 2022 ] Batch(152/243) done. Loss: 0.3045 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 20:48:05 2022 ] Eval epoch: 30 +[ Wed Sep 14 20:49:39 2022 ] Mean test loss of 796 batches: 3.3407092094421387. +[ Wed Sep 14 20:49:40 2022 ] Top1: 40.77% +[ Wed Sep 14 20:49:40 2022 ] Top5: 74.75% +[ Wed Sep 14 20:49:40 2022 ] Training epoch: 31 +[ Wed Sep 14 20:49:50 2022 ] Batch(9/243) done. Loss: 0.4007 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 20:51:03 2022 ] Batch(109/243) done. Loss: 0.2993 lr:0.100000 network_time: 0.0316 +[ Wed Sep 14 20:52:16 2022 ] Batch(209/243) done. Loss: 0.3673 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 20:52:41 2022 ] Eval epoch: 31 +[ Wed Sep 14 20:54:15 2022 ] Mean test loss of 796 batches: 3.6521658897399902. +[ Wed Sep 14 20:54:15 2022 ] Top1: 38.46% +[ Wed Sep 14 20:54:15 2022 ] Top5: 72.08% +[ Wed Sep 14 20:54:16 2022 ] Training epoch: 32 +[ Wed Sep 14 20:55:07 2022 ] Batch(66/243) done. Loss: 0.3483 lr:0.100000 network_time: 0.0256 +[ Wed Sep 14 20:56:20 2022 ] Batch(166/243) done. Loss: 0.5043 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 20:57:16 2022 ] Eval epoch: 32 +[ Wed Sep 14 20:58:49 2022 ] Mean test loss of 796 batches: 3.2773547172546387. +[ Wed Sep 14 20:58:50 2022 ] Top1: 42.23% +[ Wed Sep 14 20:58:50 2022 ] Top5: 74.82% +[ Wed Sep 14 20:58:50 2022 ] Training epoch: 33 +[ Wed Sep 14 20:59:11 2022 ] Batch(23/243) done. Loss: 0.2605 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 21:00:23 2022 ] Batch(123/243) done. Loss: 0.3847 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 21:01:36 2022 ] Batch(223/243) done. 
Loss: 0.5128 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 21:01:50 2022 ] Eval epoch: 33 +[ Wed Sep 14 21:03:25 2022 ] Mean test loss of 796 batches: 3.2684571743011475. +[ Wed Sep 14 21:03:25 2022 ] Top1: 41.48% +[ Wed Sep 14 21:03:26 2022 ] Top5: 73.69% +[ Wed Sep 14 21:03:26 2022 ] Training epoch: 34 +[ Wed Sep 14 21:04:28 2022 ] Batch(80/243) done. Loss: 0.2067 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 21:05:41 2022 ] Batch(180/243) done. Loss: 0.3515 lr:0.100000 network_time: 0.0311 +[ Wed Sep 14 21:06:26 2022 ] Eval epoch: 34 +[ Wed Sep 14 21:08:00 2022 ] Mean test loss of 796 batches: 3.645359754562378. +[ Wed Sep 14 21:08:01 2022 ] Top1: 40.37% +[ Wed Sep 14 21:08:02 2022 ] Top5: 74.26% +[ Wed Sep 14 21:08:02 2022 ] Training epoch: 35 +[ Wed Sep 14 21:08:32 2022 ] Batch(37/243) done. Loss: 0.2540 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 21:09:45 2022 ] Batch(137/243) done. Loss: 0.3441 lr:0.100000 network_time: 0.0319 +[ Wed Sep 14 21:10:58 2022 ] Batch(237/243) done. Loss: 0.4167 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 21:11:02 2022 ] Eval epoch: 35 +[ Wed Sep 14 21:12:35 2022 ] Mean test loss of 796 batches: 3.4303860664367676. +[ Wed Sep 14 21:12:36 2022 ] Top1: 40.54% +[ Wed Sep 14 21:12:36 2022 ] Top5: 74.03% +[ Wed Sep 14 21:12:36 2022 ] Training epoch: 36 +[ Wed Sep 14 21:13:48 2022 ] Batch(94/243) done. Loss: 0.3030 lr:0.100000 network_time: 0.0314 +[ Wed Sep 14 21:15:01 2022 ] Batch(194/243) done. Loss: 0.3031 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 21:15:36 2022 ] Eval epoch: 36 +[ Wed Sep 14 21:17:10 2022 ] Mean test loss of 796 batches: 3.0533454418182373. +[ Wed Sep 14 21:17:10 2022 ] Top1: 42.74% +[ Wed Sep 14 21:17:11 2022 ] Top5: 74.29% +[ Wed Sep 14 21:17:11 2022 ] Training epoch: 37 +[ Wed Sep 14 21:17:52 2022 ] Batch(51/243) done. Loss: 0.2052 lr:0.100000 network_time: 0.0289 +[ Wed Sep 14 21:19:05 2022 ] Batch(151/243) done. 
Loss: 0.3076 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 21:20:11 2022 ] Eval epoch: 37 +[ Wed Sep 14 21:21:45 2022 ] Mean test loss of 796 batches: 3.8635685443878174. +[ Wed Sep 14 21:21:46 2022 ] Top1: 38.69% +[ Wed Sep 14 21:21:46 2022 ] Top5: 73.05% +[ Wed Sep 14 21:21:46 2022 ] Training epoch: 38 +[ Wed Sep 14 21:21:56 2022 ] Batch(8/243) done. Loss: 0.3111 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 21:23:09 2022 ] Batch(108/243) done. Loss: 0.3223 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 21:24:22 2022 ] Batch(208/243) done. Loss: 0.3715 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 21:24:47 2022 ] Eval epoch: 38 +[ Wed Sep 14 21:26:20 2022 ] Mean test loss of 796 batches: 3.5092737674713135. +[ Wed Sep 14 21:26:21 2022 ] Top1: 40.92% +[ Wed Sep 14 21:26:21 2022 ] Top5: 74.36% +[ Wed Sep 14 21:26:21 2022 ] Training epoch: 39 +[ Wed Sep 14 21:27:12 2022 ] Batch(65/243) done. Loss: 0.3190 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 21:28:25 2022 ] Batch(165/243) done. Loss: 0.3411 lr:0.100000 network_time: 0.0309 +[ Wed Sep 14 21:29:22 2022 ] Eval epoch: 39 +[ Wed Sep 14 21:30:56 2022 ] Mean test loss of 796 batches: 3.9432618618011475. +[ Wed Sep 14 21:30:56 2022 ] Top1: 38.59% +[ Wed Sep 14 21:30:57 2022 ] Top5: 70.74% +[ Wed Sep 14 21:30:57 2022 ] Training epoch: 40 +[ Wed Sep 14 21:31:17 2022 ] Batch(22/243) done. Loss: 0.2824 lr:0.100000 network_time: 0.0304 +[ Wed Sep 14 21:32:30 2022 ] Batch(122/243) done. Loss: 0.2105 lr:0.100000 network_time: 0.0327 +[ Wed Sep 14 21:33:43 2022 ] Batch(222/243) done. Loss: 0.3019 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 21:33:57 2022 ] Eval epoch: 40 +[ Wed Sep 14 21:35:31 2022 ] Mean test loss of 796 batches: 3.499183177947998. +[ Wed Sep 14 21:35:32 2022 ] Top1: 40.04% +[ Wed Sep 14 21:35:32 2022 ] Top5: 72.85% +[ Wed Sep 14 21:35:32 2022 ] Training epoch: 41 +[ Wed Sep 14 21:36:33 2022 ] Batch(79/243) done. 
Loss: 0.1238 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 21:37:46 2022 ] Batch(179/243) done. Loss: 0.4174 lr:0.100000 network_time: 0.0323 +[ Wed Sep 14 21:38:32 2022 ] Eval epoch: 41 +[ Wed Sep 14 21:40:06 2022 ] Mean test loss of 796 batches: 3.4872095584869385. +[ Wed Sep 14 21:40:07 2022 ] Top1: 37.58% +[ Wed Sep 14 21:40:07 2022 ] Top5: 70.06% +[ Wed Sep 14 21:40:07 2022 ] Training epoch: 42 +[ Wed Sep 14 21:40:37 2022 ] Batch(36/243) done. Loss: 0.2036 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 21:41:50 2022 ] Batch(136/243) done. Loss: 0.3632 lr:0.100000 network_time: 0.0331 +[ Wed Sep 14 21:43:03 2022 ] Batch(236/243) done. Loss: 0.3208 lr:0.100000 network_time: 0.0316 +[ Wed Sep 14 21:43:08 2022 ] Eval epoch: 42 +[ Wed Sep 14 21:44:42 2022 ] Mean test loss of 796 batches: 3.3474175930023193. +[ Wed Sep 14 21:44:42 2022 ] Top1: 39.01% +[ Wed Sep 14 21:44:42 2022 ] Top5: 72.59% +[ Wed Sep 14 21:44:43 2022 ] Training epoch: 43 +[ Wed Sep 14 21:45:54 2022 ] Batch(93/243) done. Loss: 0.2545 lr:0.100000 network_time: 0.0256 +[ Wed Sep 14 21:47:07 2022 ] Batch(193/243) done. Loss: 0.3229 lr:0.100000 network_time: 0.0299 +[ Wed Sep 14 21:47:43 2022 ] Eval epoch: 43 +[ Wed Sep 14 21:49:17 2022 ] Mean test loss of 796 batches: 3.7657318115234375. +[ Wed Sep 14 21:49:17 2022 ] Top1: 37.50% +[ Wed Sep 14 21:49:17 2022 ] Top5: 69.66% +[ Wed Sep 14 21:49:18 2022 ] Training epoch: 44 +[ Wed Sep 14 21:49:58 2022 ] Batch(50/243) done. Loss: 0.1688 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 21:51:11 2022 ] Batch(150/243) done. Loss: 0.2368 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 21:52:18 2022 ] Eval epoch: 44 +[ Wed Sep 14 21:53:52 2022 ] Mean test loss of 796 batches: 3.6248888969421387. +[ Wed Sep 14 21:53:52 2022 ] Top1: 38.95% +[ Wed Sep 14 21:53:52 2022 ] Top5: 70.46% +[ Wed Sep 14 21:53:53 2022 ] Training epoch: 45 +[ Wed Sep 14 21:54:01 2022 ] Batch(7/243) done. 
Loss: 0.1919 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 21:55:14 2022 ] Batch(107/243) done. Loss: 0.2056 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 21:56:27 2022 ] Batch(207/243) done. Loss: 0.2504 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 21:56:52 2022 ] Eval epoch: 45 +[ Wed Sep 14 21:58:27 2022 ] Mean test loss of 796 batches: 3.2078866958618164. +[ Wed Sep 14 21:58:27 2022 ] Top1: 39.89% +[ Wed Sep 14 21:58:27 2022 ] Top5: 71.88% +[ Wed Sep 14 21:58:27 2022 ] Training epoch: 46 +[ Wed Sep 14 21:59:18 2022 ] Batch(64/243) done. Loss: 0.3002 lr:0.100000 network_time: 0.0321 +[ Wed Sep 14 22:00:31 2022 ] Batch(164/243) done. Loss: 0.2156 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 22:01:28 2022 ] Eval epoch: 46 +[ Wed Sep 14 22:03:01 2022 ] Mean test loss of 796 batches: 3.4447925090789795. +[ Wed Sep 14 22:03:01 2022 ] Top1: 43.89% +[ Wed Sep 14 22:03:02 2022 ] Top5: 75.43% +[ Wed Sep 14 22:03:02 2022 ] Training epoch: 47 +[ Wed Sep 14 22:03:21 2022 ] Batch(21/243) done. Loss: 0.1526 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 22:04:34 2022 ] Batch(121/243) done. Loss: 0.3723 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 22:05:47 2022 ] Batch(221/243) done. Loss: 0.2064 lr:0.100000 network_time: 0.0259 +[ Wed Sep 14 22:06:02 2022 ] Eval epoch: 47 +[ Wed Sep 14 22:07:36 2022 ] Mean test loss of 796 batches: 4.070017337799072. +[ Wed Sep 14 22:07:37 2022 ] Top1: 43.00% +[ Wed Sep 14 22:07:37 2022 ] Top5: 74.75% +[ Wed Sep 14 22:07:38 2022 ] Training epoch: 48 +[ Wed Sep 14 22:08:38 2022 ] Batch(78/243) done. Loss: 0.1903 lr:0.100000 network_time: 0.0329 +[ Wed Sep 14 22:09:51 2022 ] Batch(178/243) done. Loss: 0.3535 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 22:10:38 2022 ] Eval epoch: 48 +[ Wed Sep 14 22:12:12 2022 ] Mean test loss of 796 batches: 3.5496559143066406. 
+[ Wed Sep 14 22:12:12 2022 ] Top1: 39.57% +[ Wed Sep 14 22:12:12 2022 ] Top5: 71.81% +[ Wed Sep 14 22:12:13 2022 ] Training epoch: 49 +[ Wed Sep 14 22:12:42 2022 ] Batch(35/243) done. Loss: 0.3369 lr:0.100000 network_time: 0.0299 +[ Wed Sep 14 22:13:54 2022 ] Batch(135/243) done. Loss: 0.2675 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 22:15:07 2022 ] Batch(235/243) done. Loss: 0.2864 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 22:15:13 2022 ] Eval epoch: 49 +[ Wed Sep 14 22:16:46 2022 ] Mean test loss of 796 batches: 3.7140417098999023. +[ Wed Sep 14 22:16:47 2022 ] Top1: 38.81% +[ Wed Sep 14 22:16:47 2022 ] Top5: 71.16% +[ Wed Sep 14 22:16:47 2022 ] Training epoch: 50 +[ Wed Sep 14 22:17:58 2022 ] Batch(92/243) done. Loss: 0.2630 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 22:19:11 2022 ] Batch(192/243) done. Loss: 0.2089 lr:0.100000 network_time: 0.0326 +[ Wed Sep 14 22:19:47 2022 ] Eval epoch: 50 +[ Wed Sep 14 22:21:21 2022 ] Mean test loss of 796 batches: 3.919886350631714. +[ Wed Sep 14 22:21:21 2022 ] Top1: 40.21% +[ Wed Sep 14 22:21:22 2022 ] Top5: 73.40% +[ Wed Sep 14 22:21:22 2022 ] Training epoch: 51 +[ Wed Sep 14 22:22:01 2022 ] Batch(49/243) done. Loss: 0.2332 lr:0.100000 network_time: 0.0306 +[ Wed Sep 14 22:23:14 2022 ] Batch(149/243) done. Loss: 0.4120 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 22:24:22 2022 ] Eval epoch: 51 +[ Wed Sep 14 22:25:56 2022 ] Mean test loss of 796 batches: 3.310202121734619. +[ Wed Sep 14 22:25:56 2022 ] Top1: 45.68% +[ Wed Sep 14 22:25:57 2022 ] Top5: 76.46% +[ Wed Sep 14 22:25:57 2022 ] Training epoch: 52 +[ Wed Sep 14 22:26:05 2022 ] Batch(6/243) done. Loss: 0.1790 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 22:27:18 2022 ] Batch(106/243) done. Loss: 0.1615 lr:0.100000 network_time: 0.0282 +[ Wed Sep 14 22:28:31 2022 ] Batch(206/243) done. 
Loss: 0.2678 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 22:28:57 2022 ] Eval epoch: 52 +[ Wed Sep 14 22:30:31 2022 ] Mean test loss of 796 batches: 3.7315006256103516. +[ Wed Sep 14 22:30:32 2022 ] Top1: 38.94% +[ Wed Sep 14 22:30:32 2022 ] Top5: 71.32% +[ Wed Sep 14 22:30:32 2022 ] Training epoch: 53 +[ Wed Sep 14 22:31:22 2022 ] Batch(63/243) done. Loss: 0.1572 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 22:32:35 2022 ] Batch(163/243) done. Loss: 0.2304 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 22:33:32 2022 ] Eval epoch: 53 +[ Wed Sep 14 22:35:06 2022 ] Mean test loss of 796 batches: 3.6942200660705566. +[ Wed Sep 14 22:35:07 2022 ] Top1: 42.98% +[ Wed Sep 14 22:35:07 2022 ] Top5: 74.85% +[ Wed Sep 14 22:35:08 2022 ] Training epoch: 54 +[ Wed Sep 14 22:35:26 2022 ] Batch(20/243) done. Loss: 0.1527 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 22:36:39 2022 ] Batch(120/243) done. Loss: 0.1793 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 22:37:52 2022 ] Batch(220/243) done. Loss: 0.2885 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 22:38:08 2022 ] Eval epoch: 54 +[ Wed Sep 14 22:39:42 2022 ] Mean test loss of 796 batches: 3.254549026489258. +[ Wed Sep 14 22:39:42 2022 ] Top1: 41.90% +[ Wed Sep 14 22:39:43 2022 ] Top5: 72.66% +[ Wed Sep 14 22:39:43 2022 ] Training epoch: 55 +[ Wed Sep 14 22:40:43 2022 ] Batch(77/243) done. Loss: 0.1745 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 22:41:56 2022 ] Batch(177/243) done. Loss: 0.2407 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 22:42:43 2022 ] Eval epoch: 55 +[ Wed Sep 14 22:44:18 2022 ] Mean test loss of 796 batches: 3.397036075592041. +[ Wed Sep 14 22:44:19 2022 ] Top1: 46.16% +[ Wed Sep 14 22:44:19 2022 ] Top5: 77.40% +[ Wed Sep 14 22:44:19 2022 ] Training epoch: 56 +[ Wed Sep 14 22:44:48 2022 ] Batch(34/243) done. Loss: 0.1203 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 22:46:00 2022 ] Batch(134/243) done. 
Loss: 0.1849 lr:0.100000 network_time: 0.0397 +[ Wed Sep 14 22:47:13 2022 ] Batch(234/243) done. Loss: 0.1408 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 22:47:19 2022 ] Eval epoch: 56 +[ Wed Sep 14 22:48:53 2022 ] Mean test loss of 796 batches: 3.7120611667633057. +[ Wed Sep 14 22:48:54 2022 ] Top1: 39.24% +[ Wed Sep 14 22:48:54 2022 ] Top5: 71.20% +[ Wed Sep 14 22:48:54 2022 ] Training epoch: 57 +[ Wed Sep 14 22:50:04 2022 ] Batch(91/243) done. Loss: 0.2682 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 22:51:17 2022 ] Batch(191/243) done. Loss: 0.4857 lr:0.100000 network_time: 0.0313 +[ Wed Sep 14 22:51:54 2022 ] Eval epoch: 57 +[ Wed Sep 14 22:53:28 2022 ] Mean test loss of 796 batches: 4.152618408203125. +[ Wed Sep 14 22:53:28 2022 ] Top1: 40.22% +[ Wed Sep 14 22:53:29 2022 ] Top5: 73.39% +[ Wed Sep 14 22:53:29 2022 ] Training epoch: 58 +[ Wed Sep 14 22:54:08 2022 ] Batch(48/243) done. Loss: 0.2004 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 22:55:20 2022 ] Batch(148/243) done. Loss: 0.1205 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 22:56:29 2022 ] Eval epoch: 58 +[ Wed Sep 14 22:58:03 2022 ] Mean test loss of 796 batches: 3.781721353530884. +[ Wed Sep 14 22:58:04 2022 ] Top1: 36.52% +[ Wed Sep 14 22:58:04 2022 ] Top5: 69.00% +[ Wed Sep 14 22:58:04 2022 ] Training epoch: 59 +[ Wed Sep 14 22:58:12 2022 ] Batch(5/243) done. Loss: 0.3946 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 22:59:25 2022 ] Batch(105/243) done. Loss: 0.2194 lr:0.100000 network_time: 0.0314 +[ Wed Sep 14 23:00:38 2022 ] Batch(205/243) done. Loss: 0.2770 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 23:01:05 2022 ] Eval epoch: 59 +[ Wed Sep 14 23:02:39 2022 ] Mean test loss of 796 batches: 3.6728172302246094. +[ Wed Sep 14 23:02:40 2022 ] Top1: 42.16% +[ Wed Sep 14 23:02:40 2022 ] Top5: 75.15% +[ Wed Sep 14 23:02:40 2022 ] Training epoch: 60 +[ Wed Sep 14 23:03:29 2022 ] Batch(62/243) done. 
Loss: 0.2575 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 23:04:42 2022 ] Batch(162/243) done. Loss: 0.2812 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 23:05:40 2022 ] Eval epoch: 60 +[ Wed Sep 14 23:07:15 2022 ] Mean test loss of 796 batches: 3.4849016666412354. +[ Wed Sep 14 23:07:15 2022 ] Top1: 42.90% +[ Wed Sep 14 23:07:16 2022 ] Top5: 75.80% +[ Wed Sep 14 23:07:16 2022 ] Training epoch: 61 +[ Wed Sep 14 23:07:33 2022 ] Batch(19/243) done. Loss: 0.2216 lr:0.010000 network_time: 0.0273 +[ Wed Sep 14 23:08:46 2022 ] Batch(119/243) done. Loss: 0.0335 lr:0.010000 network_time: 0.0273 +[ Wed Sep 14 23:09:59 2022 ] Batch(219/243) done. Loss: 0.0458 lr:0.010000 network_time: 0.0300 +[ Wed Sep 14 23:10:16 2022 ] Eval epoch: 61 +[ Wed Sep 14 23:11:50 2022 ] Mean test loss of 796 batches: 3.100501537322998. +[ Wed Sep 14 23:11:50 2022 ] Top1: 49.91% +[ Wed Sep 14 23:11:51 2022 ] Top5: 81.10% +[ Wed Sep 14 23:11:51 2022 ] Training epoch: 62 +[ Wed Sep 14 23:12:50 2022 ] Batch(76/243) done. Loss: 0.0528 lr:0.010000 network_time: 0.0337 +[ Wed Sep 14 23:14:03 2022 ] Batch(176/243) done. Loss: 0.0395 lr:0.010000 network_time: 0.0262 +[ Wed Sep 14 23:14:51 2022 ] Eval epoch: 62 +[ Wed Sep 14 23:16:25 2022 ] Mean test loss of 796 batches: 2.988133430480957. +[ Wed Sep 14 23:16:26 2022 ] Top1: 50.26% +[ Wed Sep 14 23:16:26 2022 ] Top5: 81.17% +[ Wed Sep 14 23:16:26 2022 ] Training epoch: 63 +[ Wed Sep 14 23:16:54 2022 ] Batch(33/243) done. Loss: 0.0427 lr:0.010000 network_time: 0.0270 +[ Wed Sep 14 23:18:07 2022 ] Batch(133/243) done. Loss: 0.0722 lr:0.010000 network_time: 0.0273 +[ Wed Sep 14 23:19:20 2022 ] Batch(233/243) done. Loss: 0.0502 lr:0.010000 network_time: 0.0274 +[ Wed Sep 14 23:19:27 2022 ] Eval epoch: 63 +[ Wed Sep 14 23:21:01 2022 ] Mean test loss of 796 batches: 2.960383653640747. 
+[ Wed Sep 14 23:21:01 2022 ] Top1: 50.49% +[ Wed Sep 14 23:21:02 2022 ] Top5: 81.23% +[ Wed Sep 14 23:21:02 2022 ] Training epoch: 64 +[ Wed Sep 14 23:22:11 2022 ] Batch(90/243) done. Loss: 0.0194 lr:0.010000 network_time: 0.0266 +[ Wed Sep 14 23:23:24 2022 ] Batch(190/243) done. Loss: 0.0085 lr:0.010000 network_time: 0.0267 +[ Wed Sep 14 23:24:02 2022 ] Eval epoch: 64 +[ Wed Sep 14 23:25:37 2022 ] Mean test loss of 796 batches: 2.9251861572265625. +[ Wed Sep 14 23:25:37 2022 ] Top1: 49.86% +[ Wed Sep 14 23:25:38 2022 ] Top5: 80.80% +[ Wed Sep 14 23:25:38 2022 ] Training epoch: 65 +[ Wed Sep 14 23:26:16 2022 ] Batch(47/243) done. Loss: 0.0262 lr:0.010000 network_time: 0.0267 +[ Wed Sep 14 23:27:29 2022 ] Batch(147/243) done. Loss: 0.0334 lr:0.010000 network_time: 0.0302 +[ Wed Sep 14 23:28:38 2022 ] Eval epoch: 65 +[ Wed Sep 14 23:30:13 2022 ] Mean test loss of 796 batches: 3.311321973800659. +[ Wed Sep 14 23:30:13 2022 ] Top1: 50.82% +[ Wed Sep 14 23:30:13 2022 ] Top5: 81.65% +[ Wed Sep 14 23:30:14 2022 ] Training epoch: 66 +[ Wed Sep 14 23:30:20 2022 ] Batch(4/243) done. Loss: 0.0525 lr:0.010000 network_time: 0.0270 +[ Wed Sep 14 23:31:33 2022 ] Batch(104/243) done. Loss: 0.0223 lr:0.010000 network_time: 0.0279 +[ Wed Sep 14 23:32:46 2022 ] Batch(204/243) done. Loss: 0.0726 lr:0.010000 network_time: 0.0264 +[ Wed Sep 14 23:33:14 2022 ] Eval epoch: 66 +[ Wed Sep 14 23:34:48 2022 ] Mean test loss of 796 batches: 3.099126100540161. +[ Wed Sep 14 23:34:48 2022 ] Top1: 50.70% +[ Wed Sep 14 23:34:49 2022 ] Top5: 81.42% +[ Wed Sep 14 23:34:49 2022 ] Training epoch: 67 +[ Wed Sep 14 23:35:37 2022 ] Batch(61/243) done. Loss: 0.0213 lr:0.010000 network_time: 0.0261 +[ Wed Sep 14 23:36:50 2022 ] Batch(161/243) done. Loss: 0.0166 lr:0.010000 network_time: 0.0277 +[ Wed Sep 14 23:37:49 2022 ] Eval epoch: 67 +[ Wed Sep 14 23:39:23 2022 ] Mean test loss of 796 batches: 2.9924604892730713. 
+[ Wed Sep 14 23:39:23 2022 ] Top1: 50.13% +[ Wed Sep 14 23:39:24 2022 ] Top5: 81.19% +[ Wed Sep 14 23:39:24 2022 ] Training epoch: 68 +[ Wed Sep 14 23:39:40 2022 ] Batch(18/243) done. Loss: 0.0176 lr:0.010000 network_time: 0.0314 +[ Wed Sep 14 23:40:53 2022 ] Batch(118/243) done. Loss: 0.0206 lr:0.010000 network_time: 0.0297 +[ Wed Sep 14 23:42:06 2022 ] Batch(218/243) done. Loss: 0.0430 lr:0.010000 network_time: 0.0269 +[ Wed Sep 14 23:42:24 2022 ] Eval epoch: 68 +[ Wed Sep 14 23:43:58 2022 ] Mean test loss of 796 batches: 3.0478153228759766. +[ Wed Sep 14 23:43:58 2022 ] Top1: 49.25% +[ Wed Sep 14 23:43:58 2022 ] Top5: 80.62% +[ Wed Sep 14 23:43:58 2022 ] Training epoch: 69 +[ Wed Sep 14 23:44:57 2022 ] Batch(75/243) done. Loss: 0.0174 lr:0.010000 network_time: 0.0304 +[ Wed Sep 14 23:46:10 2022 ] Batch(175/243) done. Loss: 0.0297 lr:0.010000 network_time: 0.0259 +[ Wed Sep 14 23:46:59 2022 ] Eval epoch: 69 +[ Wed Sep 14 23:48:33 2022 ] Mean test loss of 796 batches: 2.941301107406616. +[ Wed Sep 14 23:48:33 2022 ] Top1: 50.33% +[ Wed Sep 14 23:48:33 2022 ] Top5: 81.09% +[ Wed Sep 14 23:48:34 2022 ] Training epoch: 70 +[ Wed Sep 14 23:49:00 2022 ] Batch(32/243) done. Loss: 0.0141 lr:0.010000 network_time: 0.0259 +[ Wed Sep 14 23:50:13 2022 ] Batch(132/243) done. Loss: 0.0178 lr:0.010000 network_time: 0.0269 +[ Wed Sep 14 23:51:26 2022 ] Batch(232/243) done. Loss: 0.0090 lr:0.010000 network_time: 0.0488 +[ Wed Sep 14 23:51:34 2022 ] Eval epoch: 70 +[ Wed Sep 14 23:53:08 2022 ] Mean test loss of 796 batches: 3.021989107131958. +[ Wed Sep 14 23:53:08 2022 ] Top1: 50.85% +[ Wed Sep 14 23:53:09 2022 ] Top5: 81.28% +[ Wed Sep 14 23:53:09 2022 ] Training epoch: 71 +[ Wed Sep 14 23:54:17 2022 ] Batch(89/243) done. Loss: 0.0106 lr:0.010000 network_time: 0.0276 +[ Wed Sep 14 23:55:30 2022 ] Batch(189/243) done. 
Loss: 0.0188 lr:0.010000 network_time: 0.0271 +[ Wed Sep 14 23:56:09 2022 ] Eval epoch: 71 +[ Wed Sep 14 23:57:43 2022 ] Mean test loss of 796 batches: 3.085793972015381. +[ Wed Sep 14 23:57:43 2022 ] Top1: 47.16% +[ Wed Sep 14 23:57:43 2022 ] Top5: 79.04% +[ Wed Sep 14 23:57:44 2022 ] Training epoch: 72 +[ Wed Sep 14 23:58:21 2022 ] Batch(46/243) done. Loss: 0.0133 lr:0.010000 network_time: 0.0310 +[ Wed Sep 14 23:59:34 2022 ] Batch(146/243) done. Loss: 0.0632 lr:0.010000 network_time: 0.0265 +[ Thu Sep 15 00:00:44 2022 ] Eval epoch: 72 +[ Thu Sep 15 00:02:17 2022 ] Mean test loss of 796 batches: 2.990631580352783. +[ Thu Sep 15 00:02:18 2022 ] Top1: 50.22% +[ Thu Sep 15 00:02:18 2022 ] Top5: 81.02% +[ Thu Sep 15 00:02:18 2022 ] Training epoch: 73 +[ Thu Sep 15 00:02:24 2022 ] Batch(3/243) done. Loss: 0.0106 lr:0.010000 network_time: 0.0264 +[ Thu Sep 15 00:03:37 2022 ] Batch(103/243) done. Loss: 0.0126 lr:0.010000 network_time: 0.0268 +[ Thu Sep 15 00:04:50 2022 ] Batch(203/243) done. Loss: 0.0205 lr:0.010000 network_time: 0.0325 +[ Thu Sep 15 00:05:19 2022 ] Eval epoch: 73 +[ Thu Sep 15 00:06:52 2022 ] Mean test loss of 796 batches: 3.108754873275757. +[ Thu Sep 15 00:06:53 2022 ] Top1: 50.36% +[ Thu Sep 15 00:06:53 2022 ] Top5: 81.13% +[ Thu Sep 15 00:06:54 2022 ] Training epoch: 74 +[ Thu Sep 15 00:07:41 2022 ] Batch(60/243) done. Loss: 0.0145 lr:0.010000 network_time: 0.0267 +[ Thu Sep 15 00:08:54 2022 ] Batch(160/243) done. Loss: 0.0107 lr:0.010000 network_time: 0.0300 +[ Thu Sep 15 00:09:54 2022 ] Eval epoch: 74 +[ Thu Sep 15 00:11:28 2022 ] Mean test loss of 796 batches: 3.3555028438568115. +[ Thu Sep 15 00:11:28 2022 ] Top1: 51.40% +[ Thu Sep 15 00:11:29 2022 ] Top5: 81.48% +[ Thu Sep 15 00:11:29 2022 ] Training epoch: 75 +[ Thu Sep 15 00:11:45 2022 ] Batch(17/243) done. Loss: 0.0042 lr:0.010000 network_time: 0.0271 +[ Thu Sep 15 00:12:58 2022 ] Batch(117/243) done. 
Loss: 0.0155 lr:0.010000 network_time: 0.0277 +[ Thu Sep 15 00:14:10 2022 ] Batch(217/243) done. Loss: 0.0068 lr:0.010000 network_time: 0.0319 +[ Thu Sep 15 00:14:29 2022 ] Eval epoch: 75 +[ Thu Sep 15 00:16:03 2022 ] Mean test loss of 796 batches: 3.2424283027648926. +[ Thu Sep 15 00:16:03 2022 ] Top1: 50.99% +[ Thu Sep 15 00:16:03 2022 ] Top5: 81.46% +[ Thu Sep 15 00:16:04 2022 ] Training epoch: 76 +[ Thu Sep 15 00:17:01 2022 ] Batch(74/243) done. Loss: 0.0112 lr:0.010000 network_time: 0.0265 +[ Thu Sep 15 00:18:14 2022 ] Batch(174/243) done. Loss: 0.0177 lr:0.010000 network_time: 0.0265 +[ Thu Sep 15 00:19:04 2022 ] Eval epoch: 76 +[ Thu Sep 15 00:20:38 2022 ] Mean test loss of 796 batches: 2.9780657291412354. +[ Thu Sep 15 00:20:38 2022 ] Top1: 50.22% +[ Thu Sep 15 00:20:39 2022 ] Top5: 80.99% +[ Thu Sep 15 00:20:39 2022 ] Training epoch: 77 +[ Thu Sep 15 00:21:05 2022 ] Batch(31/243) done. Loss: 0.0079 lr:0.010000 network_time: 0.0268 +[ Thu Sep 15 00:22:18 2022 ] Batch(131/243) done. Loss: 0.0068 lr:0.010000 network_time: 0.0275 +[ Thu Sep 15 00:23:31 2022 ] Batch(231/243) done. Loss: 0.0154 lr:0.010000 network_time: 0.0319 +[ Thu Sep 15 00:23:39 2022 ] Eval epoch: 77 +[ Thu Sep 15 00:25:13 2022 ] Mean test loss of 796 batches: 3.0957376956939697. +[ Thu Sep 15 00:25:13 2022 ] Top1: 49.45% +[ Thu Sep 15 00:25:14 2022 ] Top5: 80.83% +[ Thu Sep 15 00:25:14 2022 ] Training epoch: 78 +[ Thu Sep 15 00:26:22 2022 ] Batch(88/243) done. Loss: 0.0191 lr:0.010000 network_time: 0.0254 +[ Thu Sep 15 00:27:34 2022 ] Batch(188/243) done. Loss: 0.0111 lr:0.010000 network_time: 0.0311 +[ Thu Sep 15 00:28:14 2022 ] Eval epoch: 78 +[ Thu Sep 15 00:29:48 2022 ] Mean test loss of 796 batches: 3.047116994857788. +[ Thu Sep 15 00:29:49 2022 ] Top1: 50.42% +[ Thu Sep 15 00:29:49 2022 ] Top5: 81.01% +[ Thu Sep 15 00:29:49 2022 ] Training epoch: 79 +[ Thu Sep 15 00:30:26 2022 ] Batch(45/243) done. 
Loss: 0.0089 lr:0.010000 network_time: 0.0276 +[ Thu Sep 15 00:31:39 2022 ] Batch(145/243) done. Loss: 0.0070 lr:0.010000 network_time: 0.0274 +[ Thu Sep 15 00:32:49 2022 ] Eval epoch: 79 +[ Thu Sep 15 00:34:23 2022 ] Mean test loss of 796 batches: 3.4280073642730713. +[ Thu Sep 15 00:34:24 2022 ] Top1: 50.84% +[ Thu Sep 15 00:34:24 2022 ] Top5: 81.25% +[ Thu Sep 15 00:34:24 2022 ] Training epoch: 80 +[ Thu Sep 15 00:34:29 2022 ] Batch(2/243) done. Loss: 0.0102 lr:0.010000 network_time: 0.0335 +[ Thu Sep 15 00:35:42 2022 ] Batch(102/243) done. Loss: 0.0069 lr:0.010000 network_time: 0.0271 +[ Thu Sep 15 00:36:55 2022 ] Batch(202/243) done. Loss: 0.0106 lr:0.010000 network_time: 0.0268 +[ Thu Sep 15 00:37:25 2022 ] Eval epoch: 80 +[ Thu Sep 15 00:38:59 2022 ] Mean test loss of 796 batches: 3.1443278789520264. +[ Thu Sep 15 00:38:59 2022 ] Top1: 50.66% +[ Thu Sep 15 00:38:59 2022 ] Top5: 81.21% +[ Thu Sep 15 00:39:00 2022 ] Training epoch: 81 +[ Thu Sep 15 00:39:46 2022 ] Batch(59/243) done. Loss: 0.0107 lr:0.001000 network_time: 0.0325 +[ Thu Sep 15 00:40:59 2022 ] Batch(159/243) done. Loss: 0.0050 lr:0.001000 network_time: 0.0312 +[ Thu Sep 15 00:42:00 2022 ] Eval epoch: 81 +[ Thu Sep 15 00:43:33 2022 ] Mean test loss of 796 batches: 3.0960147380828857. +[ Thu Sep 15 00:43:34 2022 ] Top1: 50.55% +[ Thu Sep 15 00:43:34 2022 ] Top5: 81.34% +[ Thu Sep 15 00:43:34 2022 ] Training epoch: 82 +[ Thu Sep 15 00:43:49 2022 ] Batch(16/243) done. Loss: 0.0087 lr:0.001000 network_time: 0.0273 +[ Thu Sep 15 00:45:02 2022 ] Batch(116/243) done. Loss: 0.0053 lr:0.001000 network_time: 0.0307 +[ Thu Sep 15 00:46:15 2022 ] Batch(216/243) done. Loss: 0.0135 lr:0.001000 network_time: 0.0360 +[ Thu Sep 15 00:46:34 2022 ] Eval epoch: 82 +[ Thu Sep 15 00:48:08 2022 ] Mean test loss of 796 batches: 3.263976812362671. 
+[ Thu Sep 15 00:48:08 2022 ] Top1: 50.82% +[ Thu Sep 15 00:48:09 2022 ] Top5: 81.38% +[ Thu Sep 15 00:48:09 2022 ] Training epoch: 83 +[ Thu Sep 15 00:49:06 2022 ] Batch(73/243) done. Loss: 0.0114 lr:0.001000 network_time: 0.0270 +[ Thu Sep 15 00:50:19 2022 ] Batch(173/243) done. Loss: 0.0032 lr:0.001000 network_time: 0.0356 +[ Thu Sep 15 00:51:09 2022 ] Eval epoch: 83 +[ Thu Sep 15 00:52:43 2022 ] Mean test loss of 796 batches: 3.1671903133392334. +[ Thu Sep 15 00:52:44 2022 ] Top1: 51.15% +[ Thu Sep 15 00:52:44 2022 ] Top5: 81.39% +[ Thu Sep 15 00:52:44 2022 ] Training epoch: 84 +[ Thu Sep 15 00:53:10 2022 ] Batch(30/243) done. Loss: 0.0066 lr:0.001000 network_time: 0.0278 +[ Thu Sep 15 00:54:22 2022 ] Batch(130/243) done. Loss: 0.0092 lr:0.001000 network_time: 0.0322 +[ Thu Sep 15 00:55:35 2022 ] Batch(230/243) done. Loss: 0.0094 lr:0.001000 network_time: 0.0291 +[ Thu Sep 15 00:55:44 2022 ] Eval epoch: 84 +[ Thu Sep 15 00:57:18 2022 ] Mean test loss of 796 batches: 3.1958792209625244. +[ Thu Sep 15 00:57:19 2022 ] Top1: 51.77% +[ Thu Sep 15 00:57:19 2022 ] Top5: 81.87% +[ Thu Sep 15 00:57:19 2022 ] Training epoch: 85 +[ Thu Sep 15 00:58:26 2022 ] Batch(87/243) done. Loss: 0.0129 lr:0.001000 network_time: 0.0319 +[ Thu Sep 15 00:59:39 2022 ] Batch(187/243) done. Loss: 0.0328 lr:0.001000 network_time: 0.0282 +[ Thu Sep 15 01:00:19 2022 ] Eval epoch: 85 +[ Thu Sep 15 01:01:53 2022 ] Mean test loss of 796 batches: 3.0967118740081787. +[ Thu Sep 15 01:01:53 2022 ] Top1: 51.37% +[ Thu Sep 15 01:01:53 2022 ] Top5: 81.54% +[ Thu Sep 15 01:01:54 2022 ] Training epoch: 86 +[ Thu Sep 15 01:02:29 2022 ] Batch(44/243) done. Loss: 0.0091 lr:0.001000 network_time: 0.0277 +[ Thu Sep 15 01:03:42 2022 ] Batch(144/243) done. Loss: 0.0084 lr:0.001000 network_time: 0.0271 +[ Thu Sep 15 01:04:54 2022 ] Eval epoch: 86 +[ Thu Sep 15 01:06:28 2022 ] Mean test loss of 796 batches: 3.108445644378662. 
+[ Thu Sep 15 01:06:28 2022 ] Top1: 51.18% +[ Thu Sep 15 01:06:28 2022 ] Top5: 81.47% +[ Thu Sep 15 01:06:29 2022 ] Training epoch: 87 +[ Thu Sep 15 01:06:33 2022 ] Batch(1/243) done. Loss: 0.0121 lr:0.001000 network_time: 0.0312 +[ Thu Sep 15 01:07:46 2022 ] Batch(101/243) done. Loss: 0.0061 lr:0.001000 network_time: 0.0274 +[ Thu Sep 15 01:08:59 2022 ] Batch(201/243) done. Loss: 0.0068 lr:0.001000 network_time: 0.0262 +[ Thu Sep 15 01:09:29 2022 ] Eval epoch: 87 +[ Thu Sep 15 01:11:03 2022 ] Mean test loss of 796 batches: 3.0967729091644287. +[ Thu Sep 15 01:11:03 2022 ] Top1: 48.86% +[ Thu Sep 15 01:11:04 2022 ] Top5: 79.71% +[ Thu Sep 15 01:11:04 2022 ] Training epoch: 88 +[ Thu Sep 15 01:11:50 2022 ] Batch(58/243) done. Loss: 0.0042 lr:0.001000 network_time: 0.0277 +[ Thu Sep 15 01:13:03 2022 ] Batch(158/243) done. Loss: 0.0111 lr:0.001000 network_time: 0.0269 +[ Thu Sep 15 01:14:04 2022 ] Eval epoch: 88 +[ Thu Sep 15 01:15:38 2022 ] Mean test loss of 796 batches: 3.0789506435394287. +[ Thu Sep 15 01:15:38 2022 ] Top1: 50.65% +[ Thu Sep 15 01:15:39 2022 ] Top5: 81.25% +[ Thu Sep 15 01:15:39 2022 ] Training epoch: 89 +[ Thu Sep 15 01:15:53 2022 ] Batch(15/243) done. Loss: 0.0034 lr:0.001000 network_time: 0.0330 +[ Thu Sep 15 01:17:06 2022 ] Batch(115/243) done. Loss: 0.0077 lr:0.001000 network_time: 0.0273 +[ Thu Sep 15 01:18:19 2022 ] Batch(215/243) done. Loss: 0.0049 lr:0.001000 network_time: 0.0344 +[ Thu Sep 15 01:18:39 2022 ] Eval epoch: 89 +[ Thu Sep 15 01:20:12 2022 ] Mean test loss of 796 batches: 3.1689484119415283. +[ Thu Sep 15 01:20:13 2022 ] Top1: 50.95% +[ Thu Sep 15 01:20:13 2022 ] Top5: 81.37% +[ Thu Sep 15 01:20:13 2022 ] Training epoch: 90 +[ Thu Sep 15 01:21:09 2022 ] Batch(72/243) done. Loss: 0.0071 lr:0.001000 network_time: 0.0312 +[ Thu Sep 15 01:22:22 2022 ] Batch(172/243) done. 
Loss: 0.0092 lr:0.001000 network_time: 0.0312 +[ Thu Sep 15 01:23:14 2022 ] Eval epoch: 90 +[ Thu Sep 15 01:24:47 2022 ] Mean test loss of 796 batches: 3.1945176124572754. +[ Thu Sep 15 01:24:48 2022 ] Top1: 50.79% +[ Thu Sep 15 01:24:48 2022 ] Top5: 81.26% +[ Thu Sep 15 01:24:48 2022 ] Training epoch: 91 +[ Thu Sep 15 01:25:13 2022 ] Batch(29/243) done. Loss: 0.0064 lr:0.001000 network_time: 0.0272 +[ Thu Sep 15 01:26:26 2022 ] Batch(129/243) done. Loss: 0.0096 lr:0.001000 network_time: 0.0309 +[ Thu Sep 15 01:27:39 2022 ] Batch(229/243) done. Loss: 0.0052 lr:0.001000 network_time: 0.0268 +[ Thu Sep 15 01:27:48 2022 ] Eval epoch: 91 +[ Thu Sep 15 01:29:22 2022 ] Mean test loss of 796 batches: 3.0675299167633057. +[ Thu Sep 15 01:29:22 2022 ] Top1: 50.65% +[ Thu Sep 15 01:29:23 2022 ] Top5: 81.31% +[ Thu Sep 15 01:29:23 2022 ] Training epoch: 92 +[ Thu Sep 15 01:30:29 2022 ] Batch(86/243) done. Loss: 0.0084 lr:0.001000 network_time: 0.0284 +[ Thu Sep 15 01:31:42 2022 ] Batch(186/243) done. Loss: 0.0071 lr:0.001000 network_time: 0.0306 +[ Thu Sep 15 01:32:23 2022 ] Eval epoch: 92 +[ Thu Sep 15 01:33:57 2022 ] Mean test loss of 796 batches: 3.313861846923828. +[ Thu Sep 15 01:33:57 2022 ] Top1: 51.27% +[ Thu Sep 15 01:33:58 2022 ] Top5: 81.65% +[ Thu Sep 15 01:33:58 2022 ] Training epoch: 93 +[ Thu Sep 15 01:34:32 2022 ] Batch(43/243) done. Loss: 0.0032 lr:0.001000 network_time: 0.0267 +[ Thu Sep 15 01:35:45 2022 ] Batch(143/243) done. Loss: 0.0093 lr:0.001000 network_time: 0.0270 +[ Thu Sep 15 01:36:58 2022 ] Eval epoch: 93 +[ Thu Sep 15 01:38:31 2022 ] Mean test loss of 796 batches: 3.305858612060547. +[ Thu Sep 15 01:38:32 2022 ] Top1: 51.20% +[ Thu Sep 15 01:38:32 2022 ] Top5: 81.47% +[ Thu Sep 15 01:38:32 2022 ] Training epoch: 94 +[ Thu Sep 15 01:38:36 2022 ] Batch(0/243) done. Loss: 0.0093 lr:0.001000 network_time: 0.0681 +[ Thu Sep 15 01:39:49 2022 ] Batch(100/243) done. 
Loss: 0.0072 lr:0.001000 network_time: 0.0266 +[ Thu Sep 15 01:41:02 2022 ] Batch(200/243) done. Loss: 0.0085 lr:0.001000 network_time: 0.0262 +[ Thu Sep 15 01:41:33 2022 ] Eval epoch: 94 +[ Thu Sep 15 01:43:06 2022 ] Mean test loss of 796 batches: 3.0634562969207764. +[ Thu Sep 15 01:43:06 2022 ] Top1: 46.61% +[ Thu Sep 15 01:43:07 2022 ] Top5: 78.71% +[ Thu Sep 15 01:43:07 2022 ] Training epoch: 95 +[ Thu Sep 15 01:43:52 2022 ] Batch(57/243) done. Loss: 0.0061 lr:0.001000 network_time: 0.0297 +[ Thu Sep 15 01:45:05 2022 ] Batch(157/243) done. Loss: 0.0087 lr:0.001000 network_time: 0.0282 +[ Thu Sep 15 01:46:07 2022 ] Eval epoch: 95 +[ Thu Sep 15 01:47:41 2022 ] Mean test loss of 796 batches: 3.1093084812164307. +[ Thu Sep 15 01:47:41 2022 ] Top1: 51.30% +[ Thu Sep 15 01:47:41 2022 ] Top5: 81.50% +[ Thu Sep 15 01:47:41 2022 ] Training epoch: 96 +[ Thu Sep 15 01:47:55 2022 ] Batch(14/243) done. Loss: 0.0107 lr:0.001000 network_time: 0.0275 +[ Thu Sep 15 01:49:08 2022 ] Batch(114/243) done. Loss: 0.0090 lr:0.001000 network_time: 0.0309 +[ Thu Sep 15 01:50:21 2022 ] Batch(214/243) done. Loss: 0.0100 lr:0.001000 network_time: 0.0271 +[ Thu Sep 15 01:50:41 2022 ] Eval epoch: 96 +[ Thu Sep 15 01:52:15 2022 ] Mean test loss of 796 batches: 3.2472262382507324. +[ Thu Sep 15 01:52:15 2022 ] Top1: 50.70% +[ Thu Sep 15 01:52:15 2022 ] Top5: 81.35% +[ Thu Sep 15 01:52:16 2022 ] Training epoch: 97 +[ Thu Sep 15 01:53:11 2022 ] Batch(71/243) done. Loss: 0.0099 lr:0.001000 network_time: 0.0254 +[ Thu Sep 15 01:54:24 2022 ] Batch(171/243) done. Loss: 0.0050 lr:0.001000 network_time: 0.0284 +[ Thu Sep 15 01:55:16 2022 ] Eval epoch: 97 +[ Thu Sep 15 01:56:49 2022 ] Mean test loss of 796 batches: 3.1731739044189453. +[ Thu Sep 15 01:56:50 2022 ] Top1: 50.67% +[ Thu Sep 15 01:56:50 2022 ] Top5: 81.30% +[ Thu Sep 15 01:56:50 2022 ] Training epoch: 98 +[ Thu Sep 15 01:57:14 2022 ] Batch(28/243) done. 
Loss: 0.0051 lr:0.001000 network_time: 0.0312 +[ Thu Sep 15 01:58:27 2022 ] Batch(128/243) done. Loss: 0.0056 lr:0.001000 network_time: 0.0336 +[ Thu Sep 15 01:59:40 2022 ] Batch(228/243) done. Loss: 0.0081 lr:0.001000 network_time: 0.0267 +[ Thu Sep 15 01:59:50 2022 ] Eval epoch: 98 +[ Thu Sep 15 02:01:24 2022 ] Mean test loss of 796 batches: 3.215578556060791. +[ Thu Sep 15 02:01:25 2022 ] Top1: 51.38% +[ Thu Sep 15 02:01:25 2022 ] Top5: 81.51% +[ Thu Sep 15 02:01:26 2022 ] Training epoch: 99 +[ Thu Sep 15 02:02:31 2022 ] Batch(85/243) done. Loss: 0.0067 lr:0.001000 network_time: 0.0292 +[ Thu Sep 15 02:03:44 2022 ] Batch(185/243) done. Loss: 0.0486 lr:0.001000 network_time: 0.0273 +[ Thu Sep 15 02:04:26 2022 ] Eval epoch: 99 +[ Thu Sep 15 02:05:59 2022 ] Mean test loss of 796 batches: 3.295635223388672. +[ Thu Sep 15 02:06:00 2022 ] Top1: 50.86% +[ Thu Sep 15 02:06:00 2022 ] Top5: 81.37% +[ Thu Sep 15 02:06:00 2022 ] Training epoch: 100 +[ Thu Sep 15 02:06:35 2022 ] Batch(42/243) done. Loss: 0.0102 lr:0.001000 network_time: 0.0342 +[ Thu Sep 15 02:07:48 2022 ] Batch(142/243) done. Loss: 0.0025 lr:0.001000 network_time: 0.0255 +[ Thu Sep 15 02:09:01 2022 ] Batch(242/243) done. Loss: 0.0066 lr:0.001000 network_time: 0.0281 +[ Thu Sep 15 02:09:01 2022 ] Eval epoch: 100 +[ Thu Sep 15 02:10:35 2022 ] Mean test loss of 796 batches: 3.2508275508880615. +[ Thu Sep 15 02:10:35 2022 ] Top1: 51.27% +[ Thu Sep 15 02:10:36 2022 ] Top5: 81.60% +[ Thu Sep 15 02:10:36 2022 ] Training epoch: 101 +[ Thu Sep 15 02:11:52 2022 ] Batch(99/243) done. Loss: 0.0055 lr:0.000100 network_time: 0.0261 +[ Thu Sep 15 02:13:05 2022 ] Batch(199/243) done. Loss: 0.0052 lr:0.000100 network_time: 0.0303 +[ Thu Sep 15 02:13:36 2022 ] Eval epoch: 101 +[ Thu Sep 15 02:15:10 2022 ] Mean test loss of 796 batches: 3.3487186431884766. 
+[ Thu Sep 15 02:15:10 2022 ] Top1: 51.59% +[ Thu Sep 15 02:15:10 2022 ] Top5: 81.67% +[ Thu Sep 15 02:15:11 2022 ] Training epoch: 102 +[ Thu Sep 15 02:15:55 2022 ] Batch(56/243) done. Loss: 0.0076 lr:0.000100 network_time: 0.0260 +[ Thu Sep 15 02:17:08 2022 ] Batch(156/243) done. Loss: 0.0121 lr:0.000100 network_time: 0.0303 +[ Thu Sep 15 02:18:11 2022 ] Eval epoch: 102 +[ Thu Sep 15 02:19:44 2022 ] Mean test loss of 796 batches: 3.118297576904297. +[ Thu Sep 15 02:19:45 2022 ] Top1: 51.20% +[ Thu Sep 15 02:19:45 2022 ] Top5: 81.58% +[ Thu Sep 15 02:19:45 2022 ] Training epoch: 103 +[ Thu Sep 15 02:19:58 2022 ] Batch(13/243) done. Loss: 0.0033 lr:0.000100 network_time: 0.0313 +[ Thu Sep 15 02:21:11 2022 ] Batch(113/243) done. Loss: 0.0070 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 02:22:24 2022 ] Batch(213/243) done. Loss: 0.0042 lr:0.000100 network_time: 0.0308 +[ Thu Sep 15 02:22:45 2022 ] Eval epoch: 103 +[ Thu Sep 15 02:24:19 2022 ] Mean test loss of 796 batches: 3.057490825653076. +[ Thu Sep 15 02:24:19 2022 ] Top1: 49.14% +[ Thu Sep 15 02:24:20 2022 ] Top5: 80.00% +[ Thu Sep 15 02:24:20 2022 ] Training epoch: 104 +[ Thu Sep 15 02:25:15 2022 ] Batch(70/243) done. Loss: 0.0144 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 02:26:27 2022 ] Batch(170/243) done. Loss: 0.0054 lr:0.000100 network_time: 0.0302 +[ Thu Sep 15 02:27:20 2022 ] Eval epoch: 104 +[ Thu Sep 15 02:28:53 2022 ] Mean test loss of 796 batches: 3.3775713443756104. +[ Thu Sep 15 02:28:54 2022 ] Top1: 51.52% +[ Thu Sep 15 02:28:54 2022 ] Top5: 81.56% +[ Thu Sep 15 02:28:54 2022 ] Training epoch: 105 +[ Thu Sep 15 02:29:18 2022 ] Batch(27/243) done. Loss: 0.0240 lr:0.000100 network_time: 0.0318 +[ Thu Sep 15 02:30:30 2022 ] Batch(127/243) done. Loss: 0.0145 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 02:31:43 2022 ] Batch(227/243) done. 
Loss: 0.0197 lr:0.000100 network_time: 0.0282 +[ Thu Sep 15 02:31:54 2022 ] Eval epoch: 105 +[ Thu Sep 15 02:33:28 2022 ] Mean test loss of 796 batches: 3.1972925662994385. +[ Thu Sep 15 02:33:28 2022 ] Top1: 50.32% +[ Thu Sep 15 02:33:29 2022 ] Top5: 81.15% +[ Thu Sep 15 02:33:29 2022 ] Training epoch: 106 +[ Thu Sep 15 02:34:34 2022 ] Batch(84/243) done. Loss: 0.0108 lr:0.000100 network_time: 0.0267 +[ Thu Sep 15 02:35:47 2022 ] Batch(184/243) done. Loss: 0.0083 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 02:36:29 2022 ] Eval epoch: 106 +[ Thu Sep 15 02:38:02 2022 ] Mean test loss of 796 batches: 3.2501347064971924. +[ Thu Sep 15 02:38:03 2022 ] Top1: 51.36% +[ Thu Sep 15 02:38:03 2022 ] Top5: 81.57% +[ Thu Sep 15 02:38:03 2022 ] Training epoch: 107 +[ Thu Sep 15 02:38:36 2022 ] Batch(41/243) done. Loss: 0.0084 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 02:39:49 2022 ] Batch(141/243) done. Loss: 0.0061 lr:0.000100 network_time: 0.0265 +[ Thu Sep 15 02:41:02 2022 ] Batch(241/243) done. Loss: 0.0130 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 02:41:03 2022 ] Eval epoch: 107 +[ Thu Sep 15 02:42:37 2022 ] Mean test loss of 796 batches: 2.9681849479675293. +[ Thu Sep 15 02:42:37 2022 ] Top1: 50.28% +[ Thu Sep 15 02:42:38 2022 ] Top5: 81.15% +[ Thu Sep 15 02:42:38 2022 ] Training epoch: 108 +[ Thu Sep 15 02:43:53 2022 ] Batch(98/243) done. Loss: 0.0038 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 02:45:06 2022 ] Batch(198/243) done. Loss: 0.0093 lr:0.000100 network_time: 0.0259 +[ Thu Sep 15 02:45:38 2022 ] Eval epoch: 108 +[ Thu Sep 15 02:47:12 2022 ] Mean test loss of 796 batches: 3.1740822792053223. +[ Thu Sep 15 02:47:12 2022 ] Top1: 50.97% +[ Thu Sep 15 02:47:13 2022 ] Top5: 81.63% +[ Thu Sep 15 02:47:13 2022 ] Training epoch: 109 +[ Thu Sep 15 02:47:56 2022 ] Batch(55/243) done. Loss: 0.0059 lr:0.000100 network_time: 0.0299 +[ Thu Sep 15 02:49:09 2022 ] Batch(155/243) done. 
Loss: 0.0042 lr:0.000100 network_time: 0.0309 +[ Thu Sep 15 02:50:13 2022 ] Eval epoch: 109 +[ Thu Sep 15 02:51:47 2022 ] Mean test loss of 796 batches: 3.140623092651367. +[ Thu Sep 15 02:51:47 2022 ] Top1: 50.99% +[ Thu Sep 15 02:51:48 2022 ] Top5: 81.59% +[ Thu Sep 15 02:51:48 2022 ] Training epoch: 110 +[ Thu Sep 15 02:52:00 2022 ] Batch(12/243) done. Loss: 0.0076 lr:0.000100 network_time: 0.0300 +[ Thu Sep 15 02:53:13 2022 ] Batch(112/243) done. Loss: 0.0082 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 02:54:26 2022 ] Batch(212/243) done. Loss: 0.0047 lr:0.000100 network_time: 0.0286 +[ Thu Sep 15 02:54:48 2022 ] Eval epoch: 110 +[ Thu Sep 15 02:56:23 2022 ] Mean test loss of 796 batches: 3.0745203495025635. +[ Thu Sep 15 02:56:23 2022 ] Top1: 48.30% +[ Thu Sep 15 02:56:24 2022 ] Top5: 79.79% +[ Thu Sep 15 02:56:24 2022 ] Training epoch: 111 +[ Thu Sep 15 02:57:18 2022 ] Batch(69/243) done. Loss: 0.0094 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 02:58:30 2022 ] Batch(169/243) done. Loss: 0.0085 lr:0.000100 network_time: 0.0260 +[ Thu Sep 15 02:59:24 2022 ] Eval epoch: 111 +[ Thu Sep 15 03:00:57 2022 ] Mean test loss of 796 batches: 3.1857941150665283. +[ Thu Sep 15 03:00:58 2022 ] Top1: 51.34% +[ Thu Sep 15 03:00:58 2022 ] Top5: 81.53% +[ Thu Sep 15 03:00:58 2022 ] Training epoch: 112 +[ Thu Sep 15 03:01:21 2022 ] Batch(26/243) done. Loss: 0.0065 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 03:02:33 2022 ] Batch(126/243) done. Loss: 0.0094 lr:0.000100 network_time: 0.0304 +[ Thu Sep 15 03:03:46 2022 ] Batch(226/243) done. Loss: 0.0069 lr:0.000100 network_time: 0.0303 +[ Thu Sep 15 03:03:58 2022 ] Eval epoch: 112 +[ Thu Sep 15 03:05:32 2022 ] Mean test loss of 796 batches: 3.084721088409424. +[ Thu Sep 15 03:05:32 2022 ] Top1: 51.01% +[ Thu Sep 15 03:05:32 2022 ] Top5: 81.38% +[ Thu Sep 15 03:05:33 2022 ] Training epoch: 113 +[ Thu Sep 15 03:06:37 2022 ] Batch(83/243) done. 
Loss: 0.0072 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 03:07:50 2022 ] Batch(183/243) done. Loss: 0.0037 lr:0.000100 network_time: 0.0289 +[ Thu Sep 15 03:08:33 2022 ] Eval epoch: 113 +[ Thu Sep 15 03:10:06 2022 ] Mean test loss of 796 batches: 3.158820867538452. +[ Thu Sep 15 03:10:06 2022 ] Top1: 50.64% +[ Thu Sep 15 03:10:07 2022 ] Top5: 81.21% +[ Thu Sep 15 03:10:07 2022 ] Training epoch: 114 +[ Thu Sep 15 03:10:40 2022 ] Batch(40/243) done. Loss: 0.0080 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 03:11:53 2022 ] Batch(140/243) done. Loss: 0.0049 lr:0.000100 network_time: 0.0271 +[ Thu Sep 15 03:13:06 2022 ] Batch(240/243) done. Loss: 0.0060 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 03:13:07 2022 ] Eval epoch: 114 +[ Thu Sep 15 03:14:41 2022 ] Mean test loss of 796 batches: 3.095377206802368. +[ Thu Sep 15 03:14:41 2022 ] Top1: 51.16% +[ Thu Sep 15 03:14:42 2022 ] Top5: 81.58% +[ Thu Sep 15 03:14:42 2022 ] Training epoch: 115 +[ Thu Sep 15 03:15:56 2022 ] Batch(97/243) done. Loss: 0.0078 lr:0.000100 network_time: 0.0262 +[ Thu Sep 15 03:17:09 2022 ] Batch(197/243) done. Loss: 0.0063 lr:0.000100 network_time: 0.0259 +[ Thu Sep 15 03:17:42 2022 ] Eval epoch: 115 +[ Thu Sep 15 03:19:16 2022 ] Mean test loss of 796 batches: 3.1977145671844482. +[ Thu Sep 15 03:19:17 2022 ] Top1: 51.42% +[ Thu Sep 15 03:19:17 2022 ] Top5: 81.56% +[ Thu Sep 15 03:19:17 2022 ] Training epoch: 116 +[ Thu Sep 15 03:20:00 2022 ] Batch(54/243) done. Loss: 0.0090 lr:0.000100 network_time: 0.0260 +[ Thu Sep 15 03:21:13 2022 ] Batch(154/243) done. Loss: 0.0051 lr:0.000100 network_time: 0.0450 +[ Thu Sep 15 03:22:17 2022 ] Eval epoch: 116 +[ Thu Sep 15 03:23:51 2022 ] Mean test loss of 796 batches: 2.949658155441284. +[ Thu Sep 15 03:23:51 2022 ] Top1: 51.18% +[ Thu Sep 15 03:23:51 2022 ] Top5: 81.60% +[ Thu Sep 15 03:23:52 2022 ] Training epoch: 117 +[ Thu Sep 15 03:24:03 2022 ] Batch(11/243) done. 
Loss: 0.0100 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 03:25:16 2022 ] Batch(111/243) done. Loss: 0.0060 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 03:26:29 2022 ] Batch(211/243) done. Loss: 0.0068 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 03:26:52 2022 ] Eval epoch: 117 +[ Thu Sep 15 03:28:25 2022 ] Mean test loss of 796 batches: 3.296842575073242. +[ Thu Sep 15 03:28:26 2022 ] Top1: 51.69% +[ Thu Sep 15 03:28:26 2022 ] Top5: 81.48% +[ Thu Sep 15 03:28:27 2022 ] Training epoch: 118 +[ Thu Sep 15 03:29:20 2022 ] Batch(68/243) done. Loss: 0.0049 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 03:30:33 2022 ] Batch(168/243) done. Loss: 0.0060 lr:0.000100 network_time: 0.0320 +[ Thu Sep 15 03:31:27 2022 ] Eval epoch: 118 +[ Thu Sep 15 03:33:00 2022 ] Mean test loss of 796 batches: 3.214509963989258. +[ Thu Sep 15 03:33:01 2022 ] Top1: 49.90% +[ Thu Sep 15 03:33:01 2022 ] Top5: 80.65% +[ Thu Sep 15 03:33:01 2022 ] Training epoch: 119 +[ Thu Sep 15 03:33:23 2022 ] Batch(25/243) done. Loss: 0.0057 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 03:34:36 2022 ] Batch(125/243) done. Loss: 0.0098 lr:0.000100 network_time: 0.0308 +[ Thu Sep 15 03:35:49 2022 ] Batch(225/243) done. Loss: 0.0066 lr:0.000100 network_time: 0.0346 +[ Thu Sep 15 03:36:02 2022 ] Eval epoch: 119 +[ Thu Sep 15 03:37:36 2022 ] Mean test loss of 796 batches: 3.0812954902648926. +[ Thu Sep 15 03:37:36 2022 ] Top1: 50.24% +[ Thu Sep 15 03:37:36 2022 ] Top5: 81.20% +[ Thu Sep 15 03:37:37 2022 ] Training epoch: 120 +[ Thu Sep 15 03:38:40 2022 ] Batch(82/243) done. Loss: 0.0040 lr:0.000100 network_time: 0.0282 +[ Thu Sep 15 03:39:53 2022 ] Batch(182/243) done. Loss: 0.0082 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 03:40:37 2022 ] Eval epoch: 120 +[ Thu Sep 15 03:42:10 2022 ] Mean test loss of 796 batches: 3.254603624343872. 
+[ Thu Sep 15 03:42:10 2022 ] Top1: 51.25% +[ Thu Sep 15 03:42:11 2022 ] Top5: 81.58% +[ Thu Sep 15 03:42:11 2022 ] Training epoch: 121 +[ Thu Sep 15 03:42:43 2022 ] Batch(39/243) done. Loss: 0.0165 lr:0.000100 network_time: 0.0294 +[ Thu Sep 15 03:43:56 2022 ] Batch(139/243) done. Loss: 0.0096 lr:0.000100 network_time: 0.0262 +[ Thu Sep 15 03:45:09 2022 ] Batch(239/243) done. Loss: 0.0031 lr:0.000100 network_time: 0.0307 +[ Thu Sep 15 03:45:11 2022 ] Eval epoch: 121 +[ Thu Sep 15 03:46:44 2022 ] Mean test loss of 796 batches: 3.079324245452881. +[ Thu Sep 15 03:46:45 2022 ] Top1: 51.25% +[ Thu Sep 15 03:46:45 2022 ] Top5: 81.72% +[ Thu Sep 15 03:46:45 2022 ] Training epoch: 122 +[ Thu Sep 15 03:47:59 2022 ] Batch(96/243) done. Loss: 0.0052 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 03:49:12 2022 ] Batch(196/243) done. Loss: 0.0104 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 03:49:45 2022 ] Eval epoch: 122 +[ Thu Sep 15 03:51:19 2022 ] Mean test loss of 796 batches: 3.0649008750915527. +[ Thu Sep 15 03:51:19 2022 ] Top1: 50.67% +[ Thu Sep 15 03:51:20 2022 ] Top5: 81.14% +[ Thu Sep 15 03:51:20 2022 ] Training epoch: 123 +[ Thu Sep 15 03:52:02 2022 ] Batch(53/243) done. Loss: 0.0048 lr:0.000100 network_time: 0.0263 +[ Thu Sep 15 03:53:15 2022 ] Batch(153/243) done. Loss: 0.0069 lr:0.000100 network_time: 0.0296 +[ Thu Sep 15 03:54:20 2022 ] Eval epoch: 123 +[ Thu Sep 15 03:55:54 2022 ] Mean test loss of 796 batches: 3.03798508644104. +[ Thu Sep 15 03:55:54 2022 ] Top1: 50.79% +[ Thu Sep 15 03:55:55 2022 ] Top5: 81.41% +[ Thu Sep 15 03:55:55 2022 ] Training epoch: 124 +[ Thu Sep 15 03:56:06 2022 ] Batch(10/243) done. Loss: 0.0064 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 03:57:18 2022 ] Batch(110/243) done. Loss: 0.0061 lr:0.000100 network_time: 0.0284 +[ Thu Sep 15 03:58:31 2022 ] Batch(210/243) done. 
Loss: 0.0060 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 03:58:55 2022 ] Eval epoch: 124 +[ Thu Sep 15 04:00:29 2022 ] Mean test loss of 796 batches: 3.2211194038391113. +[ Thu Sep 15 04:00:29 2022 ] Top1: 51.45% +[ Thu Sep 15 04:00:30 2022 ] Top5: 81.65% +[ Thu Sep 15 04:00:30 2022 ] Training epoch: 125 +[ Thu Sep 15 04:01:22 2022 ] Batch(67/243) done. Loss: 0.0040 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 04:02:35 2022 ] Batch(167/243) done. Loss: 0.0201 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 04:03:30 2022 ] Eval epoch: 125 +[ Thu Sep 15 04:05:03 2022 ] Mean test loss of 796 batches: 3.167156219482422. +[ Thu Sep 15 04:05:04 2022 ] Top1: 48.77% +[ Thu Sep 15 04:05:04 2022 ] Top5: 79.97% +[ Thu Sep 15 04:05:04 2022 ] Training epoch: 126 +[ Thu Sep 15 04:05:26 2022 ] Batch(24/243) done. Loss: 0.0096 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 04:06:38 2022 ] Batch(124/243) done. Loss: 0.0086 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 04:07:51 2022 ] Batch(224/243) done. Loss: 0.0037 lr:0.000100 network_time: 0.0263 +[ Thu Sep 15 04:08:05 2022 ] Eval epoch: 126 +[ Thu Sep 15 04:09:38 2022 ] Mean test loss of 796 batches: 3.327735424041748. +[ Thu Sep 15 04:09:39 2022 ] Top1: 50.98% +[ Thu Sep 15 04:09:39 2022 ] Top5: 81.23% +[ Thu Sep 15 04:09:39 2022 ] Training epoch: 127 +[ Thu Sep 15 04:10:42 2022 ] Batch(81/243) done. Loss: 0.0064 lr:0.000100 network_time: 0.0268 +[ Thu Sep 15 04:11:55 2022 ] Batch(181/243) done. Loss: 0.0091 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 04:12:40 2022 ] Eval epoch: 127 +[ Thu Sep 15 04:14:13 2022 ] Mean test loss of 796 batches: 3.1817829608917236. +[ Thu Sep 15 04:14:14 2022 ] Top1: 50.90% +[ Thu Sep 15 04:14:14 2022 ] Top5: 81.36% +[ Thu Sep 15 04:14:14 2022 ] Training epoch: 128 +[ Thu Sep 15 04:14:46 2022 ] Batch(38/243) done. Loss: 0.0044 lr:0.000100 network_time: 0.0259 +[ Thu Sep 15 04:15:59 2022 ] Batch(138/243) done. 
Loss: 0.0055 lr:0.000100 network_time: 0.0424 +[ Thu Sep 15 04:17:12 2022 ] Batch(238/243) done. Loss: 0.0065 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 04:17:15 2022 ] Eval epoch: 128 +[ Thu Sep 15 04:18:49 2022 ] Mean test loss of 796 batches: 3.022956609725952. +[ Thu Sep 15 04:18:49 2022 ] Top1: 51.15% +[ Thu Sep 15 04:18:49 2022 ] Top5: 81.64% +[ Thu Sep 15 04:18:50 2022 ] Training epoch: 129 +[ Thu Sep 15 04:20:02 2022 ] Batch(95/243) done. Loss: 0.0065 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 04:21:15 2022 ] Batch(195/243) done. Loss: 0.0073 lr:0.000100 network_time: 0.0268 +[ Thu Sep 15 04:21:50 2022 ] Eval epoch: 129 +[ Thu Sep 15 04:23:23 2022 ] Mean test loss of 796 batches: 3.0225670337677. +[ Thu Sep 15 04:23:24 2022 ] Top1: 49.34% +[ Thu Sep 15 04:23:24 2022 ] Top5: 80.52% +[ Thu Sep 15 04:23:24 2022 ] Training epoch: 130 +[ Thu Sep 15 04:24:06 2022 ] Batch(52/243) done. Loss: 0.0038 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 04:25:19 2022 ] Batch(152/243) done. Loss: 0.0050 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 04:26:25 2022 ] Eval epoch: 130 +[ Thu Sep 15 04:27:59 2022 ] Mean test loss of 796 batches: 3.0677361488342285. +[ Thu Sep 15 04:27:59 2022 ] Top1: 51.32% +[ Thu Sep 15 04:28:00 2022 ] Top5: 81.61% +[ Thu Sep 15 04:28:00 2022 ] Training epoch: 131 +[ Thu Sep 15 04:28:10 2022 ] Batch(9/243) done. Loss: 0.0053 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 04:29:23 2022 ] Batch(109/243) done. Loss: 0.0057 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 04:30:36 2022 ] Batch(209/243) done. Loss: 0.0192 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 04:31:00 2022 ] Eval epoch: 131 +[ Thu Sep 15 04:32:34 2022 ] Mean test loss of 796 batches: 3.1217525005340576. +[ Thu Sep 15 04:32:34 2022 ] Top1: 50.86% +[ Thu Sep 15 04:32:35 2022 ] Top5: 81.28% +[ Thu Sep 15 04:32:35 2022 ] Training epoch: 132 +[ Thu Sep 15 04:33:26 2022 ] Batch(66/243) done. 
Loss: 0.0034 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 04:34:39 2022 ] Batch(166/243) done. Loss: 0.0074 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 04:35:35 2022 ] Eval epoch: 132 +[ Thu Sep 15 04:37:09 2022 ] Mean test loss of 796 batches: 2.9388933181762695. +[ Thu Sep 15 04:37:09 2022 ] Top1: 51.00% +[ Thu Sep 15 04:37:09 2022 ] Top5: 81.54% +[ Thu Sep 15 04:37:10 2022 ] Training epoch: 133 +[ Thu Sep 15 04:37:30 2022 ] Batch(23/243) done. Loss: 0.0048 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 04:38:43 2022 ] Batch(123/243) done. Loss: 0.0090 lr:0.000100 network_time: 0.0533 +[ Thu Sep 15 04:39:56 2022 ] Batch(223/243) done. Loss: 0.0128 lr:0.000100 network_time: 0.0302 +[ Thu Sep 15 04:40:10 2022 ] Eval epoch: 133 +[ Thu Sep 15 04:41:44 2022 ] Mean test loss of 796 batches: 3.247342109680176. +[ Thu Sep 15 04:41:44 2022 ] Top1: 46.12% +[ Thu Sep 15 04:41:45 2022 ] Top5: 78.11% +[ Thu Sep 15 04:41:45 2022 ] Training epoch: 134 +[ Thu Sep 15 04:42:46 2022 ] Batch(80/243) done. Loss: 0.0072 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 04:43:59 2022 ] Batch(180/243) done. Loss: 0.0043 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 04:44:45 2022 ] Eval epoch: 134 +[ Thu Sep 15 04:46:19 2022 ] Mean test loss of 796 batches: 3.0710413455963135. +[ Thu Sep 15 04:46:19 2022 ] Top1: 51.16% +[ Thu Sep 15 04:46:19 2022 ] Top5: 81.39% +[ Thu Sep 15 04:46:19 2022 ] Training epoch: 135 +[ Thu Sep 15 04:46:50 2022 ] Batch(37/243) done. Loss: 0.0047 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 04:48:03 2022 ] Batch(137/243) done. Loss: 0.0090 lr:0.000100 network_time: 0.0312 +[ Thu Sep 15 04:49:16 2022 ] Batch(237/243) done. Loss: 0.0177 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 04:49:20 2022 ] Eval epoch: 135 +[ Thu Sep 15 04:50:53 2022 ] Mean test loss of 796 batches: 3.1598618030548096. 
+[ Thu Sep 15 04:50:53 2022 ] Top1: 51.52% +[ Thu Sep 15 04:50:53 2022 ] Top5: 81.73% +[ Thu Sep 15 04:50:54 2022 ] Training epoch: 136 +[ Thu Sep 15 04:52:06 2022 ] Batch(94/243) done. Loss: 0.0199 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 04:53:19 2022 ] Batch(194/243) done. Loss: 0.0069 lr:0.000100 network_time: 0.0288 +[ Thu Sep 15 04:53:54 2022 ] Eval epoch: 136 +[ Thu Sep 15 04:55:27 2022 ] Mean test loss of 796 batches: 3.3396480083465576. +[ Thu Sep 15 04:55:27 2022 ] Top1: 51.16% +[ Thu Sep 15 04:55:28 2022 ] Top5: 81.43% +[ Thu Sep 15 04:55:28 2022 ] Training epoch: 137 +[ Thu Sep 15 04:56:09 2022 ] Batch(51/243) done. Loss: 0.0076 lr:0.000100 network_time: 0.0283 +[ Thu Sep 15 04:57:22 2022 ] Batch(151/243) done. Loss: 0.0110 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 04:58:28 2022 ] Eval epoch: 137 +[ Thu Sep 15 05:00:03 2022 ] Mean test loss of 796 batches: 3.2537786960601807. +[ Thu Sep 15 05:00:03 2022 ] Top1: 51.74% +[ Thu Sep 15 05:00:03 2022 ] Top5: 81.74% +[ Thu Sep 15 05:00:03 2022 ] Training epoch: 138 +[ Thu Sep 15 05:00:13 2022 ] Batch(8/243) done. Loss: 0.0059 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 05:01:26 2022 ] Batch(108/243) done. Loss: 0.0089 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 05:02:39 2022 ] Batch(208/243) done. Loss: 0.0198 lr:0.000100 network_time: 0.0267 +[ Thu Sep 15 05:03:04 2022 ] Eval epoch: 138 +[ Thu Sep 15 05:04:37 2022 ] Mean test loss of 796 batches: 3.079983711242676. +[ Thu Sep 15 05:04:37 2022 ] Top1: 50.78% +[ Thu Sep 15 05:04:38 2022 ] Top5: 81.26% +[ Thu Sep 15 05:04:38 2022 ] Training epoch: 139 +[ Thu Sep 15 05:05:29 2022 ] Batch(65/243) done. Loss: 0.0237 lr:0.000100 network_time: 0.0445 +[ Thu Sep 15 05:06:42 2022 ] Batch(165/243) done. Loss: 0.0038 lr:0.000100 network_time: 0.0308 +[ Thu Sep 15 05:07:38 2022 ] Eval epoch: 139 +[ Thu Sep 15 05:09:12 2022 ] Mean test loss of 796 batches: 3.1228296756744385. 
+[ Thu Sep 15 05:09:12 2022 ] Top1: 51.04% +[ Thu Sep 15 05:09:13 2022 ] Top5: 81.03% +[ Thu Sep 15 05:09:13 2022 ] Training epoch: 140 +[ Thu Sep 15 05:09:32 2022 ] Batch(22/243) done. Loss: 0.0093 lr:0.000100 network_time: 0.0322 +[ Thu Sep 15 05:10:45 2022 ] Batch(122/243) done. Loss: 0.0093 lr:0.000100 network_time: 0.0295 +[ Thu Sep 15 05:11:58 2022 ] Batch(222/243) done. Loss: 0.0060 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 05:12:13 2022 ] Eval epoch: 140 +[ Thu Sep 15 05:13:47 2022 ] Mean test loss of 796 batches: 3.206451892852783. +[ Thu Sep 15 05:13:47 2022 ] Top1: 51.22% +[ Thu Sep 15 05:13:48 2022 ] Top5: 81.53% diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_motion_xsub/shift_gcn.py b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_motion_xsub/shift_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..0731e82fdb8bd0ae2e4c4ef4f29f7aab0c458bf4 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_motion_xsub/shift_gcn.py @@ -0,0 +1,216 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math + +import sys +sys.path.append("./model/Temporal_shift/") + +from cuda.shift import Shift + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + 
bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class Shift_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(Shift_tcn, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.bn = nn.BatchNorm2d(in_channels) + self.bn2 = nn.BatchNorm2d(in_channels) + bn_init(self.bn2, 1) + self.relu = nn.ReLU(inplace=True) + self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1) + self.shift_out = Shift(channel=out_channels, stride=stride, init_scale=1) + + self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1) + nn.init.kaiming_normal(self.temporal_linear.weight, mode='fan_out') + + def forward(self, x): + x = self.bn(x) + # shift1 + x = self.shift_in(x) + x = self.temporal_linear(x) + x = self.relu(x) + # shift2 + x = self.shift_out(x) + x = self.bn2(x) + return x + + +class Shift_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3): + super(Shift_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0,math.sqrt(1.0/out_channels)) + + self.Linear_bias = nn.Parameter(torch.zeros(1,1,out_channels,requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Linear_bias, 0) + + self.Feature_Mask = nn.Parameter(torch.ones(1,25,in_channels, requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Feature_Mask, 0) + + self.bn = nn.BatchNorm1d(25*out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, 
nn.BatchNorm2d): + bn_init(m, 1) + + index_array = np.empty(25*in_channels).astype(np.int) + for i in range(25): + for j in range(in_channels): + index_array[i*in_channels + j] = (i*in_channels + j + j*in_channels)%(in_channels*25) + self.shift_in = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + index_array = np.empty(25*out_channels).astype(np.int) + for i in range(25): + for j in range(out_channels): + index_array[i*out_channels + j] = (i*out_channels + j - j*out_channels)%(out_channels*25) + self.shift_out = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + + def forward(self, x0): + n, c, t, v = x0.size() + x = x0.permute(0,2,3,1).contiguous() + + # shift1 + x = x.view(n*t,v*c) + x = torch.index_select(x, 1, self.shift_in) + x = x.view(n*t,v,c) + x = x * (torch.tanh(self.Feature_Mask)+1) + + x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous() # nt,v,c + x = x + self.Linear_bias + + # shift2 + x = x.view(n*t,-1) + x = torch.index_select(x, 1, self.shift_out) + x = self.bn(x) + x = x.view(n,t,v,self.out_channels).permute(0,3,1,2) # n,c,t,v + + x = x + self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = Shift_gcn(in_channels, out_channels, A) + self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride) + self.relu = nn.ReLU() + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + else: + self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride) + + def forward(self, x): + x = self.tcn1(self.gcn1(x)) + self.residual(x) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise 
ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A) + self.l3 = TCN_GCN_unit(64, 64, A) + self.l4 = TCN_GCN_unit(64, 64, A) + self.l5 = TCN_GCN_unit(64, 128, A, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A) + self.l7 = TCN_GCN_unit(128, 128, A) + self.l8 = TCN_GCN_unit(128, 256, A, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A) + self.l10 = TCN_GCN_unit(256, 256, A) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x) + x = self.l2(x) + x = self.l3(x) + x = self.l4(x) + x = self.l5(x) + x = self.l6(x) + x = self.l7(x) + x = self.l8(x) + x = self.l9(x) + x = self.l10(x) + + # N*M,C,T,V + c_new = x.size(1) + x = x.view(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_xsub/config.yaml b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ff7b496e983a16987de477db9b2baeb9b78ce904 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_xsub/config.yaml @@ -0,0 +1,56 @@ +Experiment_name: ntu120_joint_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/ntu120_xsub/train_joint.yaml +device: +- 4 +- 5 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 120 + num_person: 2 + num_point: 25 +model_saved_name: 
./save_models/ntu120_joint_xsub +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_joint.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_joint.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu120_joint_xsub diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_xsub/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..783b5f9a4c44d56e30a99fe969bfaea0818757c7 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7032ac5d6594258fa0c42f8bd1e2317d14fdaa2bbd0c22e11038ce33d84f6f1d +size 29946137 diff --git a/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_xsub/log.txt b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..f4d3edeed91ad7c6b8e598ec71f51c212c12cfb0 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu120_xsub/ntu120_joint_xsub/log.txt @@ -0,0 +1,1043 @@ +[ Wed Sep 14 18:31:42 2022 ] Parameters: +{'work_dir': './work_dir/ntu120_joint_xsub', 'model_saved_name': './save_models/ntu120_joint_xsub', 'Experiment_name': 'ntu120_joint_xsub', 'config': 
'./config/ntu120_xsub/train_joint.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu120/xsub/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 120, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [4, 5], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Wed Sep 14 18:31:42 2022 ] Training epoch: 1 +[ Wed Sep 14 18:33:02 2022 ] Batch(99/243) done. Loss: 3.7763 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 18:34:15 2022 ] Batch(199/243) done. Loss: 2.6228 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 18:34:46 2022 ] Eval epoch: 1 +[ Wed Sep 14 18:36:20 2022 ] Mean test loss of 796 batches: 5.046554088592529. +[ Wed Sep 14 18:36:20 2022 ] Top1: 9.24% +[ Wed Sep 14 18:36:20 2022 ] Top5: 25.14% +[ Wed Sep 14 18:36:21 2022 ] Training epoch: 2 +[ Wed Sep 14 18:37:05 2022 ] Batch(56/243) done. Loss: 2.6595 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 18:38:17 2022 ] Batch(156/243) done. 
Loss: 2.4710 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 18:39:20 2022 ] Eval epoch: 2 +[ Wed Sep 14 18:40:54 2022 ] Mean test loss of 796 batches: 3.9119958877563477. +[ Wed Sep 14 18:40:54 2022 ] Top1: 17.89% +[ Wed Sep 14 18:40:54 2022 ] Top5: 36.58% +[ Wed Sep 14 18:40:55 2022 ] Training epoch: 3 +[ Wed Sep 14 18:41:07 2022 ] Batch(13/243) done. Loss: 2.0612 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 18:42:20 2022 ] Batch(113/243) done. Loss: 1.8304 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 18:43:33 2022 ] Batch(213/243) done. Loss: 2.0863 lr:0.100000 network_time: 0.0261 +[ Wed Sep 14 18:43:54 2022 ] Eval epoch: 3 +[ Wed Sep 14 18:45:27 2022 ] Mean test loss of 796 batches: 4.149970531463623. +[ Wed Sep 14 18:45:28 2022 ] Top1: 18.78% +[ Wed Sep 14 18:45:28 2022 ] Top5: 42.45% +[ Wed Sep 14 18:45:28 2022 ] Training epoch: 4 +[ Wed Sep 14 18:46:23 2022 ] Batch(70/243) done. Loss: 1.4658 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 18:47:35 2022 ] Batch(170/243) done. Loss: 1.3747 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 18:48:28 2022 ] Eval epoch: 4 +[ Wed Sep 14 18:50:01 2022 ] Mean test loss of 796 batches: 3.2994141578674316. +[ Wed Sep 14 18:50:01 2022 ] Top1: 25.44% +[ Wed Sep 14 18:50:02 2022 ] Top5: 53.52% +[ Wed Sep 14 18:50:02 2022 ] Training epoch: 5 +[ Wed Sep 14 18:50:25 2022 ] Batch(27/243) done. Loss: 1.5292 lr:0.100000 network_time: 0.0295 +[ Wed Sep 14 18:51:38 2022 ] Batch(127/243) done. Loss: 1.5052 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 18:52:51 2022 ] Batch(227/243) done. Loss: 1.6211 lr:0.100000 network_time: 0.0309 +[ Wed Sep 14 18:53:02 2022 ] Eval epoch: 5 +[ Wed Sep 14 18:54:35 2022 ] Mean test loss of 796 batches: 3.1267082691192627. +[ Wed Sep 14 18:54:36 2022 ] Top1: 27.96% +[ Wed Sep 14 18:54:36 2022 ] Top5: 56.70% +[ Wed Sep 14 18:54:36 2022 ] Training epoch: 6 +[ Wed Sep 14 18:55:41 2022 ] Batch(84/243) done. 
Loss: 1.5147 lr:0.100000 network_time: 0.0307 +[ Wed Sep 14 18:56:53 2022 ] Batch(184/243) done. Loss: 1.0397 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 18:57:36 2022 ] Eval epoch: 6 +[ Wed Sep 14 18:59:09 2022 ] Mean test loss of 796 batches: 2.968212366104126. +[ Wed Sep 14 18:59:09 2022 ] Top1: 29.50% +[ Wed Sep 14 18:59:10 2022 ] Top5: 62.41% +[ Wed Sep 14 18:59:10 2022 ] Training epoch: 7 +[ Wed Sep 14 18:59:43 2022 ] Batch(41/243) done. Loss: 1.2200 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 19:00:56 2022 ] Batch(141/243) done. Loss: 0.8820 lr:0.100000 network_time: 0.0316 +[ Wed Sep 14 19:02:08 2022 ] Batch(241/243) done. Loss: 1.0603 lr:0.100000 network_time: 0.0315 +[ Wed Sep 14 19:02:09 2022 ] Eval epoch: 7 +[ Wed Sep 14 19:03:43 2022 ] Mean test loss of 796 batches: 2.7241861820220947. +[ Wed Sep 14 19:03:43 2022 ] Top1: 33.51% +[ Wed Sep 14 19:03:44 2022 ] Top5: 67.41% +[ Wed Sep 14 19:03:44 2022 ] Training epoch: 8 +[ Wed Sep 14 19:04:58 2022 ] Batch(98/243) done. Loss: 0.8214 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 19:06:11 2022 ] Batch(198/243) done. Loss: 0.8479 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 19:06:43 2022 ] Eval epoch: 8 +[ Wed Sep 14 19:08:17 2022 ] Mean test loss of 796 batches: 2.666290044784546. +[ Wed Sep 14 19:08:17 2022 ] Top1: 35.74% +[ Wed Sep 14 19:08:17 2022 ] Top5: 67.01% +[ Wed Sep 14 19:08:18 2022 ] Training epoch: 9 +[ Wed Sep 14 19:09:01 2022 ] Batch(55/243) done. Loss: 1.2169 lr:0.100000 network_time: 0.0288 +[ Wed Sep 14 19:10:14 2022 ] Batch(155/243) done. Loss: 1.0688 lr:0.100000 network_time: 0.0333 +[ Wed Sep 14 19:11:17 2022 ] Eval epoch: 9 +[ Wed Sep 14 19:12:51 2022 ] Mean test loss of 796 batches: 2.7376022338867188. +[ Wed Sep 14 19:12:51 2022 ] Top1: 35.14% +[ Wed Sep 14 19:12:52 2022 ] Top5: 66.76% +[ Wed Sep 14 19:12:52 2022 ] Training epoch: 10 +[ Wed Sep 14 19:13:04 2022 ] Batch(12/243) done. 
Loss: 0.9615 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 19:14:17 2022 ] Batch(112/243) done. Loss: 1.0427 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 19:15:30 2022 ] Batch(212/243) done. Loss: 1.2006 lr:0.100000 network_time: 0.0309 +[ Wed Sep 14 19:15:52 2022 ] Eval epoch: 10 +[ Wed Sep 14 19:17:26 2022 ] Mean test loss of 796 batches: 2.6587717533111572. +[ Wed Sep 14 19:17:26 2022 ] Top1: 36.70% +[ Wed Sep 14 19:17:26 2022 ] Top5: 70.12% +[ Wed Sep 14 19:17:27 2022 ] Training epoch: 11 +[ Wed Sep 14 19:18:20 2022 ] Batch(69/243) done. Loss: 0.8898 lr:0.100000 network_time: 0.0301 +[ Wed Sep 14 19:19:33 2022 ] Batch(169/243) done. Loss: 0.7702 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 19:20:26 2022 ] Eval epoch: 11 +[ Wed Sep 14 19:21:59 2022 ] Mean test loss of 796 batches: 2.6572091579437256. +[ Wed Sep 14 19:22:00 2022 ] Top1: 35.17% +[ Wed Sep 14 19:22:00 2022 ] Top5: 70.29% +[ Wed Sep 14 19:22:00 2022 ] Training epoch: 12 +[ Wed Sep 14 19:22:23 2022 ] Batch(26/243) done. Loss: 0.5439 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 19:23:36 2022 ] Batch(126/243) done. Loss: 0.9749 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 19:24:48 2022 ] Batch(226/243) done. Loss: 1.0488 lr:0.100000 network_time: 0.0316 +[ Wed Sep 14 19:25:00 2022 ] Eval epoch: 12 +[ Wed Sep 14 19:26:34 2022 ] Mean test loss of 796 batches: 2.824629783630371. +[ Wed Sep 14 19:26:34 2022 ] Top1: 35.29% +[ Wed Sep 14 19:26:35 2022 ] Top5: 67.09% +[ Wed Sep 14 19:26:35 2022 ] Training epoch: 13 +[ Wed Sep 14 19:27:39 2022 ] Batch(83/243) done. Loss: 0.7571 lr:0.100000 network_time: 0.0294 +[ Wed Sep 14 19:28:52 2022 ] Batch(183/243) done. Loss: 0.8261 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 19:29:35 2022 ] Eval epoch: 13 +[ Wed Sep 14 19:31:08 2022 ] Mean test loss of 796 batches: 2.3490281105041504. 
+[ Wed Sep 14 19:31:09 2022 ] Top1: 41.19% +[ Wed Sep 14 19:31:09 2022 ] Top5: 74.54% +[ Wed Sep 14 19:31:10 2022 ] Training epoch: 14 +[ Wed Sep 14 19:31:42 2022 ] Batch(40/243) done. Loss: 0.4956 lr:0.100000 network_time: 0.0235 +[ Wed Sep 14 19:32:55 2022 ] Batch(140/243) done. Loss: 0.9356 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 19:34:07 2022 ] Batch(240/243) done. Loss: 1.0499 lr:0.100000 network_time: 0.0434 +[ Wed Sep 14 19:34:09 2022 ] Eval epoch: 14 +[ Wed Sep 14 19:35:42 2022 ] Mean test loss of 796 batches: 2.4458417892456055. +[ Wed Sep 14 19:35:42 2022 ] Top1: 38.41% +[ Wed Sep 14 19:35:43 2022 ] Top5: 71.95% +[ Wed Sep 14 19:35:43 2022 ] Training epoch: 15 +[ Wed Sep 14 19:36:57 2022 ] Batch(97/243) done. Loss: 0.9353 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 19:38:10 2022 ] Batch(197/243) done. Loss: 0.5173 lr:0.100000 network_time: 0.0406 +[ Wed Sep 14 19:38:42 2022 ] Eval epoch: 15 +[ Wed Sep 14 19:40:16 2022 ] Mean test loss of 796 batches: 2.397752523422241. +[ Wed Sep 14 19:40:17 2022 ] Top1: 41.95% +[ Wed Sep 14 19:40:17 2022 ] Top5: 75.68% +[ Wed Sep 14 19:40:17 2022 ] Training epoch: 16 +[ Wed Sep 14 19:41:00 2022 ] Batch(54/243) done. Loss: 0.5062 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 19:42:13 2022 ] Batch(154/243) done. Loss: 0.6092 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 19:43:17 2022 ] Eval epoch: 16 +[ Wed Sep 14 19:44:50 2022 ] Mean test loss of 796 batches: 2.802043914794922. +[ Wed Sep 14 19:44:51 2022 ] Top1: 38.52% +[ Wed Sep 14 19:44:51 2022 ] Top5: 71.35% +[ Wed Sep 14 19:44:51 2022 ] Training epoch: 17 +[ Wed Sep 14 19:45:03 2022 ] Batch(11/243) done. Loss: 0.8011 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 19:46:15 2022 ] Batch(111/243) done. Loss: 0.6675 lr:0.100000 network_time: 0.0254 +[ Wed Sep 14 19:47:28 2022 ] Batch(211/243) done. 
Loss: 0.8174 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 19:47:51 2022 ] Eval epoch: 17 +[ Wed Sep 14 19:49:24 2022 ] Mean test loss of 796 batches: 2.7333076000213623. +[ Wed Sep 14 19:49:24 2022 ] Top1: 38.83% +[ Wed Sep 14 19:49:25 2022 ] Top5: 71.56% +[ Wed Sep 14 19:49:25 2022 ] Training epoch: 18 +[ Wed Sep 14 19:50:18 2022 ] Batch(68/243) done. Loss: 0.7092 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 19:51:31 2022 ] Batch(168/243) done. Loss: 0.6670 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 19:52:25 2022 ] Eval epoch: 18 +[ Wed Sep 14 19:53:58 2022 ] Mean test loss of 796 batches: 2.7909624576568604. +[ Wed Sep 14 19:53:59 2022 ] Top1: 39.14% +[ Wed Sep 14 19:54:00 2022 ] Top5: 72.63% +[ Wed Sep 14 19:54:00 2022 ] Training epoch: 19 +[ Wed Sep 14 19:54:21 2022 ] Batch(25/243) done. Loss: 0.6762 lr:0.100000 network_time: 0.0326 +[ Wed Sep 14 19:55:34 2022 ] Batch(125/243) done. Loss: 0.5623 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 19:56:47 2022 ] Batch(225/243) done. Loss: 0.7143 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 19:56:59 2022 ] Eval epoch: 19 +[ Wed Sep 14 19:58:33 2022 ] Mean test loss of 796 batches: 2.4540889263153076. +[ Wed Sep 14 19:58:33 2022 ] Top1: 42.82% +[ Wed Sep 14 19:58:34 2022 ] Top5: 75.10% +[ Wed Sep 14 19:58:34 2022 ] Training epoch: 20 +[ Wed Sep 14 19:59:37 2022 ] Batch(82/243) done. Loss: 0.5975 lr:0.100000 network_time: 0.0294 +[ Wed Sep 14 20:00:50 2022 ] Batch(182/243) done. Loss: 0.5475 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 20:01:33 2022 ] Eval epoch: 20 +[ Wed Sep 14 20:03:07 2022 ] Mean test loss of 796 batches: 2.881371259689331. +[ Wed Sep 14 20:03:07 2022 ] Top1: 41.01% +[ Wed Sep 14 20:03:07 2022 ] Top5: 71.62% +[ Wed Sep 14 20:03:08 2022 ] Training epoch: 21 +[ Wed Sep 14 20:03:40 2022 ] Batch(39/243) done. Loss: 0.3485 lr:0.100000 network_time: 0.0327 +[ Wed Sep 14 20:04:52 2022 ] Batch(139/243) done. 
Loss: 0.5871 lr:0.100000 network_time: 0.0248 +[ Wed Sep 14 20:06:05 2022 ] Batch(239/243) done. Loss: 0.4812 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 20:06:07 2022 ] Eval epoch: 21 +[ Wed Sep 14 20:07:41 2022 ] Mean test loss of 796 batches: 2.771573543548584. +[ Wed Sep 14 20:07:42 2022 ] Top1: 38.53% +[ Wed Sep 14 20:07:42 2022 ] Top5: 72.56% +[ Wed Sep 14 20:07:42 2022 ] Training epoch: 22 +[ Wed Sep 14 20:08:56 2022 ] Batch(96/243) done. Loss: 0.6043 lr:0.100000 network_time: 0.0308 +[ Wed Sep 14 20:10:08 2022 ] Batch(196/243) done. Loss: 0.6230 lr:0.100000 network_time: 0.0328 +[ Wed Sep 14 20:10:42 2022 ] Eval epoch: 22 +[ Wed Sep 14 20:12:16 2022 ] Mean test loss of 796 batches: 2.2919068336486816. +[ Wed Sep 14 20:12:16 2022 ] Top1: 46.96% +[ Wed Sep 14 20:12:17 2022 ] Top5: 78.88% +[ Wed Sep 14 20:12:17 2022 ] Training epoch: 23 +[ Wed Sep 14 20:12:59 2022 ] Batch(53/243) done. Loss: 0.3300 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 20:14:11 2022 ] Batch(153/243) done. Loss: 0.7194 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 20:15:16 2022 ] Eval epoch: 23 +[ Wed Sep 14 20:16:50 2022 ] Mean test loss of 796 batches: 2.9069929122924805. +[ Wed Sep 14 20:16:50 2022 ] Top1: 38.25% +[ Wed Sep 14 20:16:51 2022 ] Top5: 70.40% +[ Wed Sep 14 20:16:51 2022 ] Training epoch: 24 +[ Wed Sep 14 20:17:02 2022 ] Batch(10/243) done. Loss: 0.4030 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 20:18:14 2022 ] Batch(110/243) done. Loss: 0.5649 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 20:19:27 2022 ] Batch(210/243) done. Loss: 0.4009 lr:0.100000 network_time: 0.0319 +[ Wed Sep 14 20:19:50 2022 ] Eval epoch: 24 +[ Wed Sep 14 20:21:24 2022 ] Mean test loss of 796 batches: 2.52933931350708. +[ Wed Sep 14 20:21:24 2022 ] Top1: 43.88% +[ Wed Sep 14 20:21:25 2022 ] Top5: 75.25% +[ Wed Sep 14 20:21:25 2022 ] Training epoch: 25 +[ Wed Sep 14 20:22:17 2022 ] Batch(67/243) done. 
Loss: 0.4913 lr:0.100000 network_time: 0.0322 +[ Wed Sep 14 20:23:29 2022 ] Batch(167/243) done. Loss: 0.4311 lr:0.100000 network_time: 0.0260 +[ Wed Sep 14 20:24:24 2022 ] Eval epoch: 25 +[ Wed Sep 14 20:25:58 2022 ] Mean test loss of 796 batches: 2.5313878059387207. +[ Wed Sep 14 20:25:58 2022 ] Top1: 45.00% +[ Wed Sep 14 20:25:58 2022 ] Top5: 77.09% +[ Wed Sep 14 20:25:59 2022 ] Training epoch: 26 +[ Wed Sep 14 20:26:20 2022 ] Batch(24/243) done. Loss: 0.4224 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 20:27:32 2022 ] Batch(124/243) done. Loss: 0.3813 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 20:28:45 2022 ] Batch(224/243) done. Loss: 0.4928 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 20:28:58 2022 ] Eval epoch: 26 +[ Wed Sep 14 20:30:32 2022 ] Mean test loss of 796 batches: 2.687304973602295. +[ Wed Sep 14 20:30:32 2022 ] Top1: 43.42% +[ Wed Sep 14 20:30:33 2022 ] Top5: 75.80% +[ Wed Sep 14 20:30:33 2022 ] Training epoch: 27 +[ Wed Sep 14 20:31:35 2022 ] Batch(81/243) done. Loss: 0.3775 lr:0.100000 network_time: 0.0259 +[ Wed Sep 14 20:32:48 2022 ] Batch(181/243) done. Loss: 0.4857 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 20:33:33 2022 ] Eval epoch: 27 +[ Wed Sep 14 20:35:06 2022 ] Mean test loss of 796 batches: 2.4036240577697754. +[ Wed Sep 14 20:35:06 2022 ] Top1: 44.58% +[ Wed Sep 14 20:35:06 2022 ] Top5: 78.53% +[ Wed Sep 14 20:35:06 2022 ] Training epoch: 28 +[ Wed Sep 14 20:35:38 2022 ] Batch(38/243) done. Loss: 0.1812 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 20:36:50 2022 ] Batch(138/243) done. Loss: 0.4930 lr:0.100000 network_time: 0.0282 +[ Wed Sep 14 20:38:03 2022 ] Batch(238/243) done. Loss: 0.5984 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 20:38:06 2022 ] Eval epoch: 28 +[ Wed Sep 14 20:39:39 2022 ] Mean test loss of 796 batches: 2.756429433822632. 
+[ Wed Sep 14 20:39:40 2022 ] Top1: 41.86% +[ Wed Sep 14 20:39:40 2022 ] Top5: 74.64% +[ Wed Sep 14 20:39:40 2022 ] Training epoch: 29 +[ Wed Sep 14 20:40:53 2022 ] Batch(95/243) done. Loss: 0.4065 lr:0.100000 network_time: 0.0305 +[ Wed Sep 14 20:42:06 2022 ] Batch(195/243) done. Loss: 0.6086 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 20:42:40 2022 ] Eval epoch: 29 +[ Wed Sep 14 20:44:13 2022 ] Mean test loss of 796 batches: 2.7290701866149902. +[ Wed Sep 14 20:44:13 2022 ] Top1: 42.17% +[ Wed Sep 14 20:44:14 2022 ] Top5: 73.98% +[ Wed Sep 14 20:44:14 2022 ] Training epoch: 30 +[ Wed Sep 14 20:44:55 2022 ] Batch(52/243) done. Loss: 0.2625 lr:0.100000 network_time: 0.0307 +[ Wed Sep 14 20:46:08 2022 ] Batch(152/243) done. Loss: 0.4039 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 20:47:13 2022 ] Eval epoch: 30 +[ Wed Sep 14 20:48:46 2022 ] Mean test loss of 796 batches: 2.819765090942383. +[ Wed Sep 14 20:48:47 2022 ] Top1: 41.09% +[ Wed Sep 14 20:48:47 2022 ] Top5: 74.95% +[ Wed Sep 14 20:48:47 2022 ] Training epoch: 31 +[ Wed Sep 14 20:48:57 2022 ] Batch(9/243) done. Loss: 0.3648 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 20:50:10 2022 ] Batch(109/243) done. Loss: 0.2452 lr:0.100000 network_time: 0.0319 +[ Wed Sep 14 20:51:23 2022 ] Batch(209/243) done. Loss: 0.4705 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 20:51:47 2022 ] Eval epoch: 31 +[ Wed Sep 14 20:53:20 2022 ] Mean test loss of 796 batches: 2.6726372241973877. +[ Wed Sep 14 20:53:21 2022 ] Top1: 43.70% +[ Wed Sep 14 20:53:21 2022 ] Top5: 76.80% +[ Wed Sep 14 20:53:21 2022 ] Training epoch: 32 +[ Wed Sep 14 20:54:13 2022 ] Batch(66/243) done. Loss: 0.4521 lr:0.100000 network_time: 0.0319 +[ Wed Sep 14 20:55:25 2022 ] Batch(166/243) done. Loss: 0.7311 lr:0.100000 network_time: 0.0317 +[ Wed Sep 14 20:56:21 2022 ] Eval epoch: 32 +[ Wed Sep 14 20:57:55 2022 ] Mean test loss of 796 batches: 2.6101021766662598. 
+[ Wed Sep 14 20:57:55 2022 ] Top1: 44.74% +[ Wed Sep 14 20:57:55 2022 ] Top5: 75.74% +[ Wed Sep 14 20:57:56 2022 ] Training epoch: 33 +[ Wed Sep 14 20:58:16 2022 ] Batch(23/243) done. Loss: 0.4080 lr:0.100000 network_time: 0.0304 +[ Wed Sep 14 20:59:29 2022 ] Batch(123/243) done. Loss: 0.5928 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 21:00:41 2022 ] Batch(223/243) done. Loss: 0.5750 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 21:00:55 2022 ] Eval epoch: 33 +[ Wed Sep 14 21:02:29 2022 ] Mean test loss of 796 batches: 2.4076733589172363. +[ Wed Sep 14 21:02:29 2022 ] Top1: 46.05% +[ Wed Sep 14 21:02:30 2022 ] Top5: 77.02% +[ Wed Sep 14 21:02:30 2022 ] Training epoch: 34 +[ Wed Sep 14 21:03:32 2022 ] Batch(80/243) done. Loss: 0.3287 lr:0.100000 network_time: 0.0325 +[ Wed Sep 14 21:04:44 2022 ] Batch(180/243) done. Loss: 0.4008 lr:0.100000 network_time: 0.0434 +[ Wed Sep 14 21:05:30 2022 ] Eval epoch: 34 +[ Wed Sep 14 21:07:03 2022 ] Mean test loss of 796 batches: 2.933490037918091. +[ Wed Sep 14 21:07:03 2022 ] Top1: 42.85% +[ Wed Sep 14 21:07:04 2022 ] Top5: 74.90% +[ Wed Sep 14 21:07:04 2022 ] Training epoch: 35 +[ Wed Sep 14 21:07:34 2022 ] Batch(37/243) done. Loss: 0.1931 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 21:08:47 2022 ] Batch(137/243) done. Loss: 0.5033 lr:0.100000 network_time: 0.0258 +[ Wed Sep 14 21:10:00 2022 ] Batch(237/243) done. Loss: 0.7209 lr:0.100000 network_time: 0.0233 +[ Wed Sep 14 21:10:04 2022 ] Eval epoch: 35 +[ Wed Sep 14 21:11:37 2022 ] Mean test loss of 796 batches: 2.4481589794158936. +[ Wed Sep 14 21:11:37 2022 ] Top1: 46.19% +[ Wed Sep 14 21:11:37 2022 ] Top5: 77.78% +[ Wed Sep 14 21:11:38 2022 ] Training epoch: 36 +[ Wed Sep 14 21:12:49 2022 ] Batch(94/243) done. Loss: 0.3159 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 21:14:02 2022 ] Batch(194/243) done. 
Loss: 0.3204 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 21:14:37 2022 ] Eval epoch: 36 +[ Wed Sep 14 21:16:10 2022 ] Mean test loss of 796 batches: 2.854403495788574. +[ Wed Sep 14 21:16:11 2022 ] Top1: 43.88% +[ Wed Sep 14 21:16:11 2022 ] Top5: 76.34% +[ Wed Sep 14 21:16:11 2022 ] Training epoch: 37 +[ Wed Sep 14 21:16:52 2022 ] Batch(51/243) done. Loss: 0.3624 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 21:18:05 2022 ] Batch(151/243) done. Loss: 0.3338 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 21:19:11 2022 ] Eval epoch: 37 +[ Wed Sep 14 21:20:45 2022 ] Mean test loss of 796 batches: 2.4435677528381348. +[ Wed Sep 14 21:20:45 2022 ] Top1: 46.90% +[ Wed Sep 14 21:20:46 2022 ] Top5: 79.11% +[ Wed Sep 14 21:20:46 2022 ] Training epoch: 38 +[ Wed Sep 14 21:20:55 2022 ] Batch(8/243) done. Loss: 0.5167 lr:0.100000 network_time: 0.0284 +[ Wed Sep 14 21:22:08 2022 ] Batch(108/243) done. Loss: 0.4417 lr:0.100000 network_time: 0.0313 +[ Wed Sep 14 21:23:21 2022 ] Batch(208/243) done. Loss: 0.4753 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 21:23:46 2022 ] Eval epoch: 38 +[ Wed Sep 14 21:25:19 2022 ] Mean test loss of 796 batches: 2.551591396331787. +[ Wed Sep 14 21:25:20 2022 ] Top1: 46.89% +[ Wed Sep 14 21:25:20 2022 ] Top5: 77.84% +[ Wed Sep 14 21:25:20 2022 ] Training epoch: 39 +[ Wed Sep 14 21:26:11 2022 ] Batch(65/243) done. Loss: 0.3768 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 21:27:24 2022 ] Batch(165/243) done. Loss: 0.4046 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 21:28:20 2022 ] Eval epoch: 39 +[ Wed Sep 14 21:29:53 2022 ] Mean test loss of 796 batches: 2.5131995677948. +[ Wed Sep 14 21:29:54 2022 ] Top1: 45.77% +[ Wed Sep 14 21:29:54 2022 ] Top5: 78.91% +[ Wed Sep 14 21:29:54 2022 ] Training epoch: 40 +[ Wed Sep 14 21:30:14 2022 ] Batch(22/243) done. Loss: 0.3872 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 21:31:26 2022 ] Batch(122/243) done. 
Loss: 0.2603 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 21:32:39 2022 ] Batch(222/243) done. Loss: 0.2829 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 21:32:54 2022 ] Eval epoch: 40 +[ Wed Sep 14 21:34:27 2022 ] Mean test loss of 796 batches: 2.946911573410034. +[ Wed Sep 14 21:34:28 2022 ] Top1: 42.64% +[ Wed Sep 14 21:34:28 2022 ] Top5: 74.96% +[ Wed Sep 14 21:34:28 2022 ] Training epoch: 41 +[ Wed Sep 14 21:35:29 2022 ] Batch(79/243) done. Loss: 0.3274 lr:0.100000 network_time: 0.0298 +[ Wed Sep 14 21:36:42 2022 ] Batch(179/243) done. Loss: 0.2956 lr:0.100000 network_time: 0.0259 +[ Wed Sep 14 21:37:28 2022 ] Eval epoch: 41 +[ Wed Sep 14 21:39:02 2022 ] Mean test loss of 796 batches: 2.6605191230773926. +[ Wed Sep 14 21:39:02 2022 ] Top1: 43.38% +[ Wed Sep 14 21:39:03 2022 ] Top5: 75.65% +[ Wed Sep 14 21:39:03 2022 ] Training epoch: 42 +[ Wed Sep 14 21:39:33 2022 ] Batch(36/243) done. Loss: 0.2754 lr:0.100000 network_time: 0.0283 +[ Wed Sep 14 21:40:45 2022 ] Batch(136/243) done. Loss: 0.3378 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 21:41:58 2022 ] Batch(236/243) done. Loss: 0.3659 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 21:42:03 2022 ] Eval epoch: 42 +[ Wed Sep 14 21:43:36 2022 ] Mean test loss of 796 batches: 2.729982614517212. +[ Wed Sep 14 21:43:36 2022 ] Top1: 44.67% +[ Wed Sep 14 21:43:36 2022 ] Top5: 76.83% +[ Wed Sep 14 21:43:37 2022 ] Training epoch: 43 +[ Wed Sep 14 21:44:48 2022 ] Batch(93/243) done. Loss: 0.2193 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 21:46:00 2022 ] Batch(193/243) done. Loss: 0.4389 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 21:46:36 2022 ] Eval epoch: 43 +[ Wed Sep 14 21:48:10 2022 ] Mean test loss of 796 batches: 2.8778491020202637. +[ Wed Sep 14 21:48:10 2022 ] Top1: 41.51% +[ Wed Sep 14 21:48:11 2022 ] Top5: 73.55% +[ Wed Sep 14 21:48:11 2022 ] Training epoch: 44 +[ Wed Sep 14 21:48:51 2022 ] Batch(50/243) done. 
Loss: 0.3618 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 21:50:04 2022 ] Batch(150/243) done. Loss: 0.3614 lr:0.100000 network_time: 0.0284 +[ Wed Sep 14 21:51:11 2022 ] Eval epoch: 44 +[ Wed Sep 14 21:52:44 2022 ] Mean test loss of 796 batches: 2.6658835411071777. +[ Wed Sep 14 21:52:45 2022 ] Top1: 43.96% +[ Wed Sep 14 21:52:45 2022 ] Top5: 76.84% +[ Wed Sep 14 21:52:45 2022 ] Training epoch: 45 +[ Wed Sep 14 21:52:54 2022 ] Batch(7/243) done. Loss: 0.2149 lr:0.100000 network_time: 0.0261 +[ Wed Sep 14 21:54:07 2022 ] Batch(107/243) done. Loss: 0.4532 lr:0.100000 network_time: 0.0282 +[ Wed Sep 14 21:55:19 2022 ] Batch(207/243) done. Loss: 0.2903 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 21:55:45 2022 ] Eval epoch: 45 +[ Wed Sep 14 21:57:18 2022 ] Mean test loss of 796 batches: 2.703622579574585. +[ Wed Sep 14 21:57:19 2022 ] Top1: 46.68% +[ Wed Sep 14 21:57:19 2022 ] Top5: 78.64% +[ Wed Sep 14 21:57:19 2022 ] Training epoch: 46 +[ Wed Sep 14 21:58:09 2022 ] Batch(64/243) done. Loss: 0.1826 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 21:59:22 2022 ] Batch(164/243) done. Loss: 0.3657 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 22:00:19 2022 ] Eval epoch: 46 +[ Wed Sep 14 22:01:52 2022 ] Mean test loss of 796 batches: 3.0341153144836426. +[ Wed Sep 14 22:01:52 2022 ] Top1: 42.14% +[ Wed Sep 14 22:01:53 2022 ] Top5: 72.97% +[ Wed Sep 14 22:01:53 2022 ] Training epoch: 47 +[ Wed Sep 14 22:02:12 2022 ] Batch(21/243) done. Loss: 0.2316 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 22:03:25 2022 ] Batch(121/243) done. Loss: 0.3387 lr:0.100000 network_time: 0.0313 +[ Wed Sep 14 22:04:37 2022 ] Batch(221/243) done. Loss: 0.2902 lr:0.100000 network_time: 0.0281 +[ Wed Sep 14 22:04:53 2022 ] Eval epoch: 47 +[ Wed Sep 14 22:06:26 2022 ] Mean test loss of 796 batches: 2.495844602584839. 
+[ Wed Sep 14 22:06:26 2022 ] Top1: 46.27% +[ Wed Sep 14 22:06:27 2022 ] Top5: 78.13% +[ Wed Sep 14 22:06:27 2022 ] Training epoch: 48 +[ Wed Sep 14 22:07:27 2022 ] Batch(78/243) done. Loss: 0.2657 lr:0.100000 network_time: 0.0297 +[ Wed Sep 14 22:08:40 2022 ] Batch(178/243) done. Loss: 0.3470 lr:0.100000 network_time: 0.0312 +[ Wed Sep 14 22:09:27 2022 ] Eval epoch: 48 +[ Wed Sep 14 22:11:00 2022 ] Mean test loss of 796 batches: 2.7453110218048096. +[ Wed Sep 14 22:11:00 2022 ] Top1: 47.05% +[ Wed Sep 14 22:11:00 2022 ] Top5: 78.73% +[ Wed Sep 14 22:11:01 2022 ] Training epoch: 49 +[ Wed Sep 14 22:11:29 2022 ] Batch(35/243) done. Loss: 0.3071 lr:0.100000 network_time: 0.0309 +[ Wed Sep 14 22:12:42 2022 ] Batch(135/243) done. Loss: 0.3078 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 22:13:55 2022 ] Batch(235/243) done. Loss: 0.4095 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 22:14:00 2022 ] Eval epoch: 49 +[ Wed Sep 14 22:15:33 2022 ] Mean test loss of 796 batches: 2.788438081741333. +[ Wed Sep 14 22:15:34 2022 ] Top1: 45.89% +[ Wed Sep 14 22:15:34 2022 ] Top5: 76.79% +[ Wed Sep 14 22:15:34 2022 ] Training epoch: 50 +[ Wed Sep 14 22:16:45 2022 ] Batch(92/243) done. Loss: 0.3190 lr:0.100000 network_time: 0.0286 +[ Wed Sep 14 22:17:57 2022 ] Batch(192/243) done. Loss: 0.3876 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 22:18:34 2022 ] Eval epoch: 50 +[ Wed Sep 14 22:20:07 2022 ] Mean test loss of 796 batches: 2.6796646118164062. +[ Wed Sep 14 22:20:07 2022 ] Top1: 46.75% +[ Wed Sep 14 22:20:08 2022 ] Top5: 77.72% +[ Wed Sep 14 22:20:08 2022 ] Training epoch: 51 +[ Wed Sep 14 22:20:47 2022 ] Batch(49/243) done. Loss: 0.3189 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 22:22:00 2022 ] Batch(149/243) done. Loss: 0.3324 lr:0.100000 network_time: 0.0317 +[ Wed Sep 14 22:23:08 2022 ] Eval epoch: 51 +[ Wed Sep 14 22:24:41 2022 ] Mean test loss of 796 batches: 2.946209669113159. 
+[ Wed Sep 14 22:24:42 2022 ] Top1: 44.48% +[ Wed Sep 14 22:24:42 2022 ] Top5: 75.47% +[ Wed Sep 14 22:24:42 2022 ] Training epoch: 52 +[ Wed Sep 14 22:24:50 2022 ] Batch(6/243) done. Loss: 0.2887 lr:0.100000 network_time: 0.0283 +[ Wed Sep 14 22:26:03 2022 ] Batch(106/243) done. Loss: 0.1677 lr:0.100000 network_time: 0.0299 +[ Wed Sep 14 22:27:16 2022 ] Batch(206/243) done. Loss: 0.2791 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 22:27:42 2022 ] Eval epoch: 52 +[ Wed Sep 14 22:29:16 2022 ] Mean test loss of 796 batches: 2.9767680168151855. +[ Wed Sep 14 22:29:16 2022 ] Top1: 41.63% +[ Wed Sep 14 22:29:16 2022 ] Top5: 74.87% +[ Wed Sep 14 22:29:17 2022 ] Training epoch: 53 +[ Wed Sep 14 22:30:06 2022 ] Batch(63/243) done. Loss: 0.2952 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 22:31:19 2022 ] Batch(163/243) done. Loss: 0.4212 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 22:32:16 2022 ] Eval epoch: 53 +[ Wed Sep 14 22:33:50 2022 ] Mean test loss of 796 batches: 2.531792163848877. +[ Wed Sep 14 22:33:50 2022 ] Top1: 47.83% +[ Wed Sep 14 22:33:50 2022 ] Top5: 79.02% +[ Wed Sep 14 22:33:51 2022 ] Training epoch: 54 +[ Wed Sep 14 22:34:09 2022 ] Batch(20/243) done. Loss: 0.1304 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 22:35:21 2022 ] Batch(120/243) done. Loss: 0.2150 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 22:36:34 2022 ] Batch(220/243) done. Loss: 0.3651 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 22:36:50 2022 ] Eval epoch: 54 +[ Wed Sep 14 22:38:23 2022 ] Mean test loss of 796 batches: 2.5210258960723877. +[ Wed Sep 14 22:38:23 2022 ] Top1: 47.76% +[ Wed Sep 14 22:38:24 2022 ] Top5: 78.98% +[ Wed Sep 14 22:38:24 2022 ] Training epoch: 55 +[ Wed Sep 14 22:39:23 2022 ] Batch(77/243) done. Loss: 0.2751 lr:0.100000 network_time: 0.0326 +[ Wed Sep 14 22:40:36 2022 ] Batch(177/243) done. 
Loss: 0.3404 lr:0.100000 network_time: 0.0312 +[ Wed Sep 14 22:41:24 2022 ] Eval epoch: 55 +[ Wed Sep 14 22:42:57 2022 ] Mean test loss of 796 batches: 2.994967460632324. +[ Wed Sep 14 22:42:57 2022 ] Top1: 43.31% +[ Wed Sep 14 22:42:58 2022 ] Top5: 75.28% +[ Wed Sep 14 22:42:58 2022 ] Training epoch: 56 +[ Wed Sep 14 22:43:27 2022 ] Batch(34/243) done. Loss: 0.1377 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 22:44:39 2022 ] Batch(134/243) done. Loss: 0.3377 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 22:45:52 2022 ] Batch(234/243) done. Loss: 0.2269 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 22:45:58 2022 ] Eval epoch: 56 +[ Wed Sep 14 22:47:31 2022 ] Mean test loss of 796 batches: 2.9663078784942627. +[ Wed Sep 14 22:47:31 2022 ] Top1: 43.08% +[ Wed Sep 14 22:47:32 2022 ] Top5: 74.80% +[ Wed Sep 14 22:47:32 2022 ] Training epoch: 57 +[ Wed Sep 14 22:48:42 2022 ] Batch(91/243) done. Loss: 0.4191 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 22:49:55 2022 ] Batch(191/243) done. Loss: 0.2231 lr:0.100000 network_time: 0.0283 +[ Wed Sep 14 22:50:32 2022 ] Eval epoch: 57 +[ Wed Sep 14 22:52:05 2022 ] Mean test loss of 796 batches: 2.6926229000091553. +[ Wed Sep 14 22:52:05 2022 ] Top1: 47.01% +[ Wed Sep 14 22:52:06 2022 ] Top5: 77.91% +[ Wed Sep 14 22:52:06 2022 ] Training epoch: 58 +[ Wed Sep 14 22:52:44 2022 ] Batch(48/243) done. Loss: 0.3491 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 22:53:57 2022 ] Batch(148/243) done. Loss: 0.2899 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 22:55:06 2022 ] Eval epoch: 58 +[ Wed Sep 14 22:56:39 2022 ] Mean test loss of 796 batches: 2.6085290908813477. +[ Wed Sep 14 22:56:40 2022 ] Top1: 46.58% +[ Wed Sep 14 22:56:40 2022 ] Top5: 77.53% +[ Wed Sep 14 22:56:40 2022 ] Training epoch: 59 +[ Wed Sep 14 22:56:47 2022 ] Batch(5/243) done. Loss: 0.3443 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 22:58:00 2022 ] Batch(105/243) done. 
Loss: 0.2943 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 22:59:13 2022 ] Batch(205/243) done. Loss: 0.3257 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 22:59:40 2022 ] Eval epoch: 59 +[ Wed Sep 14 23:01:13 2022 ] Mean test loss of 796 batches: 2.7719128131866455. +[ Wed Sep 14 23:01:14 2022 ] Top1: 42.90% +[ Wed Sep 14 23:01:14 2022 ] Top5: 76.71% +[ Wed Sep 14 23:01:15 2022 ] Training epoch: 60 +[ Wed Sep 14 23:02:03 2022 ] Batch(62/243) done. Loss: 0.1431 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 23:03:16 2022 ] Batch(162/243) done. Loss: 0.2946 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 23:04:15 2022 ] Eval epoch: 60 +[ Wed Sep 14 23:05:48 2022 ] Mean test loss of 796 batches: 2.693972587585449. +[ Wed Sep 14 23:05:48 2022 ] Top1: 46.52% +[ Wed Sep 14 23:05:49 2022 ] Top5: 79.53% +[ Wed Sep 14 23:05:49 2022 ] Training epoch: 61 +[ Wed Sep 14 23:06:06 2022 ] Batch(19/243) done. Loss: 0.2575 lr:0.010000 network_time: 0.0274 +[ Wed Sep 14 23:07:19 2022 ] Batch(119/243) done. Loss: 0.1340 lr:0.010000 network_time: 0.0308 +[ Wed Sep 14 23:08:32 2022 ] Batch(219/243) done. Loss: 0.0472 lr:0.010000 network_time: 0.0275 +[ Wed Sep 14 23:08:48 2022 ] Eval epoch: 61 +[ Wed Sep 14 23:10:21 2022 ] Mean test loss of 796 batches: 2.258368492126465. +[ Wed Sep 14 23:10:22 2022 ] Top1: 52.89% +[ Wed Sep 14 23:10:22 2022 ] Top5: 82.95% +[ Wed Sep 14 23:10:23 2022 ] Training epoch: 62 +[ Wed Sep 14 23:11:21 2022 ] Batch(76/243) done. Loss: 0.0563 lr:0.010000 network_time: 0.0273 +[ Wed Sep 14 23:12:34 2022 ] Batch(176/243) done. Loss: 0.1186 lr:0.010000 network_time: 0.0261 +[ Wed Sep 14 23:13:22 2022 ] Eval epoch: 62 +[ Wed Sep 14 23:14:56 2022 ] Mean test loss of 796 batches: 2.279740810394287. +[ Wed Sep 14 23:14:56 2022 ] Top1: 53.11% +[ Wed Sep 14 23:14:56 2022 ] Top5: 82.97% +[ Wed Sep 14 23:14:57 2022 ] Training epoch: 63 +[ Wed Sep 14 23:15:24 2022 ] Batch(33/243) done. 
Loss: 0.1131 lr:0.010000 network_time: 0.0274 +[ Wed Sep 14 23:16:37 2022 ] Batch(133/243) done. Loss: 0.0521 lr:0.010000 network_time: 0.0284 +[ Wed Sep 14 23:17:50 2022 ] Batch(233/243) done. Loss: 0.0699 lr:0.010000 network_time: 0.0274 +[ Wed Sep 14 23:17:57 2022 ] Eval epoch: 63 +[ Wed Sep 14 23:19:30 2022 ] Mean test loss of 796 batches: 2.2916676998138428. +[ Wed Sep 14 23:19:30 2022 ] Top1: 53.36% +[ Wed Sep 14 23:19:31 2022 ] Top5: 83.03% +[ Wed Sep 14 23:19:31 2022 ] Training epoch: 64 +[ Wed Sep 14 23:20:40 2022 ] Batch(90/243) done. Loss: 0.0341 lr:0.010000 network_time: 0.0263 +[ Wed Sep 14 23:21:53 2022 ] Batch(190/243) done. Loss: 0.0185 lr:0.010000 network_time: 0.0279 +[ Wed Sep 14 23:22:31 2022 ] Eval epoch: 64 +[ Wed Sep 14 23:24:04 2022 ] Mean test loss of 796 batches: 2.2869088649749756. +[ Wed Sep 14 23:24:04 2022 ] Top1: 53.79% +[ Wed Sep 14 23:24:05 2022 ] Top5: 83.33% +[ Wed Sep 14 23:24:05 2022 ] Training epoch: 65 +[ Wed Sep 14 23:24:43 2022 ] Batch(47/243) done. Loss: 0.0168 lr:0.010000 network_time: 0.0316 +[ Wed Sep 14 23:25:56 2022 ] Batch(147/243) done. Loss: 0.1102 lr:0.010000 network_time: 0.0275 +[ Wed Sep 14 23:27:05 2022 ] Eval epoch: 65 +[ Wed Sep 14 23:28:38 2022 ] Mean test loss of 796 batches: 2.2452545166015625. +[ Wed Sep 14 23:28:38 2022 ] Top1: 54.37% +[ Wed Sep 14 23:28:39 2022 ] Top5: 83.70% +[ Wed Sep 14 23:28:39 2022 ] Training epoch: 66 +[ Wed Sep 14 23:28:45 2022 ] Batch(4/243) done. Loss: 0.0400 lr:0.010000 network_time: 0.0312 +[ Wed Sep 14 23:29:58 2022 ] Batch(104/243) done. Loss: 0.0174 lr:0.010000 network_time: 0.0328 +[ Wed Sep 14 23:31:11 2022 ] Batch(204/243) done. Loss: 0.0698 lr:0.010000 network_time: 0.0290 +[ Wed Sep 14 23:31:39 2022 ] Eval epoch: 66 +[ Wed Sep 14 23:33:11 2022 ] Mean test loss of 796 batches: 2.329601287841797. 
+[ Wed Sep 14 23:33:12 2022 ] Top1: 54.02% +[ Wed Sep 14 23:33:12 2022 ] Top5: 83.33% +[ Wed Sep 14 23:33:12 2022 ] Training epoch: 67 +[ Wed Sep 14 23:34:01 2022 ] Batch(61/243) done. Loss: 0.0428 lr:0.010000 network_time: 0.0375 +[ Wed Sep 14 23:35:13 2022 ] Batch(161/243) done. Loss: 0.0339 lr:0.010000 network_time: 0.0270 +[ Wed Sep 14 23:36:13 2022 ] Eval epoch: 67 +[ Wed Sep 14 23:37:46 2022 ] Mean test loss of 796 batches: 2.3496899604797363. +[ Wed Sep 14 23:37:46 2022 ] Top1: 53.81% +[ Wed Sep 14 23:37:46 2022 ] Top5: 83.23% +[ Wed Sep 14 23:37:47 2022 ] Training epoch: 68 +[ Wed Sep 14 23:38:04 2022 ] Batch(18/243) done. Loss: 0.0441 lr:0.010000 network_time: 0.0321 +[ Wed Sep 14 23:39:16 2022 ] Batch(118/243) done. Loss: 0.0339 lr:0.010000 network_time: 0.0282 +[ Wed Sep 14 23:40:29 2022 ] Batch(218/243) done. Loss: 0.0333 lr:0.010000 network_time: 0.0314 +[ Wed Sep 14 23:40:47 2022 ] Eval epoch: 68 +[ Wed Sep 14 23:42:20 2022 ] Mean test loss of 796 batches: 2.2746517658233643. +[ Wed Sep 14 23:42:20 2022 ] Top1: 54.56% +[ Wed Sep 14 23:42:21 2022 ] Top5: 83.64% +[ Wed Sep 14 23:42:21 2022 ] Training epoch: 69 +[ Wed Sep 14 23:43:19 2022 ] Batch(75/243) done. Loss: 0.0304 lr:0.010000 network_time: 0.0301 +[ Wed Sep 14 23:44:32 2022 ] Batch(175/243) done. Loss: 0.0474 lr:0.010000 network_time: 0.0283 +[ Wed Sep 14 23:45:21 2022 ] Eval epoch: 69 +[ Wed Sep 14 23:46:54 2022 ] Mean test loss of 796 batches: 2.3214352130889893. +[ Wed Sep 14 23:46:55 2022 ] Top1: 54.18% +[ Wed Sep 14 23:46:55 2022 ] Top5: 83.58% +[ Wed Sep 14 23:46:56 2022 ] Training epoch: 70 +[ Wed Sep 14 23:47:22 2022 ] Batch(32/243) done. Loss: 0.0169 lr:0.010000 network_time: 0.0282 +[ Wed Sep 14 23:48:35 2022 ] Batch(132/243) done. Loss: 0.0310 lr:0.010000 network_time: 0.0441 +[ Wed Sep 14 23:49:48 2022 ] Batch(232/243) done. 
Loss: 0.0086 lr:0.010000 network_time: 0.0269 +[ Wed Sep 14 23:49:55 2022 ] Eval epoch: 70 +[ Wed Sep 14 23:51:28 2022 ] Mean test loss of 796 batches: 2.321195125579834. +[ Wed Sep 14 23:51:29 2022 ] Top1: 54.32% +[ Wed Sep 14 23:51:29 2022 ] Top5: 83.51% +[ Wed Sep 14 23:51:29 2022 ] Training epoch: 71 +[ Wed Sep 14 23:52:38 2022 ] Batch(89/243) done. Loss: 0.0126 lr:0.010000 network_time: 0.0265 +[ Wed Sep 14 23:53:50 2022 ] Batch(189/243) done. Loss: 0.0313 lr:0.010000 network_time: 0.0274 +[ Wed Sep 14 23:54:29 2022 ] Eval epoch: 71 +[ Wed Sep 14 23:56:02 2022 ] Mean test loss of 796 batches: 2.401102304458618. +[ Wed Sep 14 23:56:02 2022 ] Top1: 53.23% +[ Wed Sep 14 23:56:03 2022 ] Top5: 82.83% +[ Wed Sep 14 23:56:03 2022 ] Training epoch: 72 +[ Wed Sep 14 23:56:40 2022 ] Batch(46/243) done. Loss: 0.0246 lr:0.010000 network_time: 0.0278 +[ Wed Sep 14 23:57:53 2022 ] Batch(146/243) done. Loss: 0.0394 lr:0.010000 network_time: 0.0282 +[ Wed Sep 14 23:59:03 2022 ] Eval epoch: 72 +[ Thu Sep 15 00:00:36 2022 ] Mean test loss of 796 batches: 2.392033576965332. +[ Thu Sep 15 00:00:37 2022 ] Top1: 54.03% +[ Thu Sep 15 00:00:37 2022 ] Top5: 83.16% +[ Thu Sep 15 00:00:37 2022 ] Training epoch: 73 +[ Thu Sep 15 00:00:43 2022 ] Batch(3/243) done. Loss: 0.0221 lr:0.010000 network_time: 0.0276 +[ Thu Sep 15 00:01:56 2022 ] Batch(103/243) done. Loss: 0.0159 lr:0.010000 network_time: 0.0272 +[ Thu Sep 15 00:03:08 2022 ] Batch(203/243) done. Loss: 0.0126 lr:0.010000 network_time: 0.0346 +[ Thu Sep 15 00:03:37 2022 ] Eval epoch: 73 +[ Thu Sep 15 00:05:10 2022 ] Mean test loss of 796 batches: 2.3562662601470947. +[ Thu Sep 15 00:05:10 2022 ] Top1: 54.12% +[ Thu Sep 15 00:05:11 2022 ] Top5: 83.33% +[ Thu Sep 15 00:05:11 2022 ] Training epoch: 74 +[ Thu Sep 15 00:05:58 2022 ] Batch(60/243) done. Loss: 0.0294 lr:0.010000 network_time: 0.0278 +[ Thu Sep 15 00:07:11 2022 ] Batch(160/243) done. 
Loss: 0.0119 lr:0.010000 network_time: 0.0279 +[ Thu Sep 15 00:08:11 2022 ] Eval epoch: 74 +[ Thu Sep 15 00:09:43 2022 ] Mean test loss of 796 batches: 2.4100046157836914. +[ Thu Sep 15 00:09:44 2022 ] Top1: 53.70% +[ Thu Sep 15 00:09:44 2022 ] Top5: 83.22% +[ Thu Sep 15 00:09:44 2022 ] Training epoch: 75 +[ Thu Sep 15 00:10:00 2022 ] Batch(17/243) done. Loss: 0.0336 lr:0.010000 network_time: 0.0269 +[ Thu Sep 15 00:11:13 2022 ] Batch(117/243) done. Loss: 0.0248 lr:0.010000 network_time: 0.0274 +[ Thu Sep 15 00:12:26 2022 ] Batch(217/243) done. Loss: 0.0063 lr:0.010000 network_time: 0.0320 +[ Thu Sep 15 00:12:44 2022 ] Eval epoch: 75 +[ Thu Sep 15 00:14:17 2022 ] Mean test loss of 796 batches: 2.353419065475464. +[ Thu Sep 15 00:14:17 2022 ] Top1: 54.40% +[ Thu Sep 15 00:14:18 2022 ] Top5: 83.38% +[ Thu Sep 15 00:14:18 2022 ] Training epoch: 76 +[ Thu Sep 15 00:15:15 2022 ] Batch(74/243) done. Loss: 0.0206 lr:0.010000 network_time: 0.0268 +[ Thu Sep 15 00:16:28 2022 ] Batch(174/243) done. Loss: 0.0125 lr:0.010000 network_time: 0.0327 +[ Thu Sep 15 00:17:18 2022 ] Eval epoch: 76 +[ Thu Sep 15 00:18:51 2022 ] Mean test loss of 796 batches: 2.3497185707092285. +[ Thu Sep 15 00:18:51 2022 ] Top1: 54.24% +[ Thu Sep 15 00:18:52 2022 ] Top5: 83.60% +[ Thu Sep 15 00:18:52 2022 ] Training epoch: 77 +[ Thu Sep 15 00:19:18 2022 ] Batch(31/243) done. Loss: 0.0244 lr:0.010000 network_time: 0.0286 +[ Thu Sep 15 00:20:31 2022 ] Batch(131/243) done. Loss: 0.0173 lr:0.010000 network_time: 0.0315 +[ Thu Sep 15 00:21:44 2022 ] Batch(231/243) done. Loss: 0.0246 lr:0.010000 network_time: 0.0267 +[ Thu Sep 15 00:21:52 2022 ] Eval epoch: 77 +[ Thu Sep 15 00:23:25 2022 ] Mean test loss of 796 batches: 2.3904168605804443. +[ Thu Sep 15 00:23:25 2022 ] Top1: 54.23% +[ Thu Sep 15 00:23:26 2022 ] Top5: 83.21% +[ Thu Sep 15 00:23:26 2022 ] Training epoch: 78 +[ Thu Sep 15 00:24:33 2022 ] Batch(88/243) done. 
Loss: 0.0248 lr:0.010000 network_time: 0.0272 +[ Thu Sep 15 00:25:46 2022 ] Batch(188/243) done. Loss: 0.0154 lr:0.010000 network_time: 0.0273 +[ Thu Sep 15 00:26:25 2022 ] Eval epoch: 78 +[ Thu Sep 15 00:27:58 2022 ] Mean test loss of 796 batches: 2.406836986541748. +[ Thu Sep 15 00:27:59 2022 ] Top1: 54.40% +[ Thu Sep 15 00:27:59 2022 ] Top5: 83.45% +[ Thu Sep 15 00:27:59 2022 ] Training epoch: 79 +[ Thu Sep 15 00:28:35 2022 ] Batch(45/243) done. Loss: 0.0187 lr:0.010000 network_time: 0.0270 +[ Thu Sep 15 00:29:48 2022 ] Batch(145/243) done. Loss: 0.0228 lr:0.010000 network_time: 0.0348 +[ Thu Sep 15 00:30:59 2022 ] Eval epoch: 79 +[ Thu Sep 15 00:32:32 2022 ] Mean test loss of 796 batches: 2.3902182579040527. +[ Thu Sep 15 00:32:32 2022 ] Top1: 54.54% +[ Thu Sep 15 00:32:33 2022 ] Top5: 83.39% +[ Thu Sep 15 00:32:33 2022 ] Training epoch: 80 +[ Thu Sep 15 00:32:38 2022 ] Batch(2/243) done. Loss: 0.0107 lr:0.010000 network_time: 0.0275 +[ Thu Sep 15 00:33:51 2022 ] Batch(102/243) done. Loss: 0.0209 lr:0.010000 network_time: 0.0309 +[ Thu Sep 15 00:35:04 2022 ] Batch(202/243) done. Loss: 0.0197 lr:0.010000 network_time: 0.0264 +[ Thu Sep 15 00:35:33 2022 ] Eval epoch: 80 +[ Thu Sep 15 00:37:05 2022 ] Mean test loss of 796 batches: 2.3967974185943604. +[ Thu Sep 15 00:37:06 2022 ] Top1: 54.35% +[ Thu Sep 15 00:37:06 2022 ] Top5: 83.47% +[ Thu Sep 15 00:37:06 2022 ] Training epoch: 81 +[ Thu Sep 15 00:37:53 2022 ] Batch(59/243) done. Loss: 0.0184 lr:0.001000 network_time: 0.0276 +[ Thu Sep 15 00:39:06 2022 ] Batch(159/243) done. Loss: 0.0099 lr:0.001000 network_time: 0.0268 +[ Thu Sep 15 00:40:06 2022 ] Eval epoch: 81 +[ Thu Sep 15 00:41:38 2022 ] Mean test loss of 796 batches: 2.3979151248931885. +[ Thu Sep 15 00:41:39 2022 ] Top1: 54.10% +[ Thu Sep 15 00:41:39 2022 ] Top5: 83.37% +[ Thu Sep 15 00:41:39 2022 ] Training epoch: 82 +[ Thu Sep 15 00:41:55 2022 ] Batch(16/243) done. 
Loss: 0.0092 lr:0.001000 network_time: 0.0291 +[ Thu Sep 15 00:43:08 2022 ] Batch(116/243) done. Loss: 0.0069 lr:0.001000 network_time: 0.0308 +[ Thu Sep 15 00:44:20 2022 ] Batch(216/243) done. Loss: 0.0327 lr:0.001000 network_time: 0.0267 +[ Thu Sep 15 00:44:40 2022 ] Eval epoch: 82 +[ Thu Sep 15 00:46:12 2022 ] Mean test loss of 796 batches: 2.402644395828247. +[ Thu Sep 15 00:46:12 2022 ] Top1: 54.29% +[ Thu Sep 15 00:46:13 2022 ] Top5: 83.50% +[ Thu Sep 15 00:46:13 2022 ] Training epoch: 83 +[ Thu Sep 15 00:47:09 2022 ] Batch(73/243) done. Loss: 0.0706 lr:0.001000 network_time: 0.0351 +[ Thu Sep 15 00:48:22 2022 ] Batch(173/243) done. Loss: 0.0146 lr:0.001000 network_time: 0.0319 +[ Thu Sep 15 00:49:13 2022 ] Eval epoch: 83 +[ Thu Sep 15 00:50:45 2022 ] Mean test loss of 796 batches: 2.4071590900421143. +[ Thu Sep 15 00:50:46 2022 ] Top1: 54.04% +[ Thu Sep 15 00:50:46 2022 ] Top5: 83.16% +[ Thu Sep 15 00:50:46 2022 ] Training epoch: 84 +[ Thu Sep 15 00:51:12 2022 ] Batch(30/243) done. Loss: 0.0100 lr:0.001000 network_time: 0.0293 +[ Thu Sep 15 00:52:24 2022 ] Batch(130/243) done. Loss: 0.0056 lr:0.001000 network_time: 0.0277 +[ Thu Sep 15 00:53:37 2022 ] Batch(230/243) done. Loss: 0.0132 lr:0.001000 network_time: 0.0276 +[ Thu Sep 15 00:53:46 2022 ] Eval epoch: 84 +[ Thu Sep 15 00:55:19 2022 ] Mean test loss of 796 batches: 2.37986159324646. +[ Thu Sep 15 00:55:19 2022 ] Top1: 54.50% +[ Thu Sep 15 00:55:20 2022 ] Top5: 83.56% +[ Thu Sep 15 00:55:20 2022 ] Training epoch: 85 +[ Thu Sep 15 00:56:27 2022 ] Batch(87/243) done. Loss: 0.0309 lr:0.001000 network_time: 0.0269 +[ Thu Sep 15 00:57:40 2022 ] Batch(187/243) done. Loss: 0.0386 lr:0.001000 network_time: 0.0278 +[ Thu Sep 15 00:58:20 2022 ] Eval epoch: 85 +[ Thu Sep 15 00:59:53 2022 ] Mean test loss of 796 batches: 2.370398759841919. 
+[ Thu Sep 15 00:59:53 2022 ] Top1: 54.49% +[ Thu Sep 15 00:59:54 2022 ] Top5: 83.59% +[ Thu Sep 15 00:59:54 2022 ] Training epoch: 86 +[ Thu Sep 15 01:00:30 2022 ] Batch(44/243) done. Loss: 0.0253 lr:0.001000 network_time: 0.0271 +[ Thu Sep 15 01:01:43 2022 ] Batch(144/243) done. Loss: 0.0103 lr:0.001000 network_time: 0.0267 +[ Thu Sep 15 01:02:54 2022 ] Eval epoch: 86 +[ Thu Sep 15 01:04:27 2022 ] Mean test loss of 796 batches: 2.41347599029541. +[ Thu Sep 15 01:04:27 2022 ] Top1: 54.08% +[ Thu Sep 15 01:04:28 2022 ] Top5: 83.20% +[ Thu Sep 15 01:04:28 2022 ] Training epoch: 87 +[ Thu Sep 15 01:04:32 2022 ] Batch(1/243) done. Loss: 0.0237 lr:0.001000 network_time: 0.0335 +[ Thu Sep 15 01:05:45 2022 ] Batch(101/243) done. Loss: 0.0139 lr:0.001000 network_time: 0.0310 +[ Thu Sep 15 01:06:58 2022 ] Batch(201/243) done. Loss: 0.0094 lr:0.001000 network_time: 0.0274 +[ Thu Sep 15 01:07:28 2022 ] Eval epoch: 87 +[ Thu Sep 15 01:09:01 2022 ] Mean test loss of 796 batches: 2.3874456882476807. +[ Thu Sep 15 01:09:01 2022 ] Top1: 54.32% +[ Thu Sep 15 01:09:01 2022 ] Top5: 83.40% +[ Thu Sep 15 01:09:02 2022 ] Training epoch: 88 +[ Thu Sep 15 01:09:47 2022 ] Batch(58/243) done. Loss: 0.0042 lr:0.001000 network_time: 0.0244 +[ Thu Sep 15 01:11:00 2022 ] Batch(158/243) done. Loss: 0.0253 lr:0.001000 network_time: 0.0332 +[ Thu Sep 15 01:12:01 2022 ] Eval epoch: 88 +[ Thu Sep 15 01:13:34 2022 ] Mean test loss of 796 batches: 2.40490460395813. +[ Thu Sep 15 01:13:35 2022 ] Top1: 54.15% +[ Thu Sep 15 01:13:35 2022 ] Top5: 83.23% +[ Thu Sep 15 01:13:35 2022 ] Training epoch: 89 +[ Thu Sep 15 01:13:50 2022 ] Batch(15/243) done. Loss: 0.0143 lr:0.001000 network_time: 0.0278 +[ Thu Sep 15 01:15:02 2022 ] Batch(115/243) done. Loss: 0.0147 lr:0.001000 network_time: 0.0268 +[ Thu Sep 15 01:16:15 2022 ] Batch(215/243) done. 
Loss: 0.0171 lr:0.001000 network_time: 0.0322 +[ Thu Sep 15 01:16:35 2022 ] Eval epoch: 89 +[ Thu Sep 15 01:18:08 2022 ] Mean test loss of 796 batches: 2.404038429260254. +[ Thu Sep 15 01:18:08 2022 ] Top1: 54.38% +[ Thu Sep 15 01:18:08 2022 ] Top5: 83.37% +[ Thu Sep 15 01:18:09 2022 ] Training epoch: 90 +[ Thu Sep 15 01:19:04 2022 ] Batch(72/243) done. Loss: 0.0041 lr:0.001000 network_time: 0.0265 +[ Thu Sep 15 01:20:17 2022 ] Batch(172/243) done. Loss: 0.0216 lr:0.001000 network_time: 0.0229 +[ Thu Sep 15 01:21:08 2022 ] Eval epoch: 90 +[ Thu Sep 15 01:22:42 2022 ] Mean test loss of 796 batches: 2.388312578201294. +[ Thu Sep 15 01:22:42 2022 ] Top1: 54.44% +[ Thu Sep 15 01:22:42 2022 ] Top5: 83.45% +[ Thu Sep 15 01:22:42 2022 ] Training epoch: 91 +[ Thu Sep 15 01:23:07 2022 ] Batch(29/243) done. Loss: 0.0074 lr:0.001000 network_time: 0.0278 +[ Thu Sep 15 01:24:20 2022 ] Batch(129/243) done. Loss: 0.0215 lr:0.001000 network_time: 0.0301 +[ Thu Sep 15 01:25:33 2022 ] Batch(229/243) done. Loss: 0.0123 lr:0.001000 network_time: 0.0303 +[ Thu Sep 15 01:25:42 2022 ] Eval epoch: 91 +[ Thu Sep 15 01:27:15 2022 ] Mean test loss of 796 batches: 2.387159824371338. +[ Thu Sep 15 01:27:15 2022 ] Top1: 54.49% +[ Thu Sep 15 01:27:16 2022 ] Top5: 83.50% +[ Thu Sep 15 01:27:16 2022 ] Training epoch: 92 +[ Thu Sep 15 01:28:22 2022 ] Batch(86/243) done. Loss: 0.0077 lr:0.001000 network_time: 0.0285 +[ Thu Sep 15 01:29:35 2022 ] Batch(186/243) done. Loss: 0.0153 lr:0.001000 network_time: 0.0272 +[ Thu Sep 15 01:30:16 2022 ] Eval epoch: 92 +[ Thu Sep 15 01:31:49 2022 ] Mean test loss of 796 batches: 2.400331735610962. +[ Thu Sep 15 01:31:49 2022 ] Top1: 54.54% +[ Thu Sep 15 01:31:49 2022 ] Top5: 83.64% +[ Thu Sep 15 01:31:50 2022 ] Training epoch: 93 +[ Thu Sep 15 01:32:24 2022 ] Batch(43/243) done. Loss: 0.0090 lr:0.001000 network_time: 0.0501 +[ Thu Sep 15 01:33:37 2022 ] Batch(143/243) done. 
Loss: 0.0139 lr:0.001000 network_time: 0.0314 +[ Thu Sep 15 01:34:49 2022 ] Eval epoch: 93 +[ Thu Sep 15 01:36:22 2022 ] Mean test loss of 796 batches: 2.417159080505371. +[ Thu Sep 15 01:36:22 2022 ] Top1: 54.29% +[ Thu Sep 15 01:36:23 2022 ] Top5: 83.38% +[ Thu Sep 15 01:36:23 2022 ] Training epoch: 94 +[ Thu Sep 15 01:36:27 2022 ] Batch(0/243) done. Loss: 0.0224 lr:0.001000 network_time: 0.0792 +[ Thu Sep 15 01:37:40 2022 ] Batch(100/243) done. Loss: 0.0111 lr:0.001000 network_time: 0.0296 +[ Thu Sep 15 01:38:52 2022 ] Batch(200/243) done. Loss: 0.0087 lr:0.001000 network_time: 0.0284 +[ Thu Sep 15 01:39:23 2022 ] Eval epoch: 94 +[ Thu Sep 15 01:40:56 2022 ] Mean test loss of 796 batches: 2.390749216079712. +[ Thu Sep 15 01:40:56 2022 ] Top1: 54.04% +[ Thu Sep 15 01:40:57 2022 ] Top5: 83.39% +[ Thu Sep 15 01:40:57 2022 ] Training epoch: 95 +[ Thu Sep 15 01:41:42 2022 ] Batch(57/243) done. Loss: 0.0137 lr:0.001000 network_time: 0.0269 +[ Thu Sep 15 01:42:55 2022 ] Batch(157/243) done. Loss: 0.0110 lr:0.001000 network_time: 0.0282 +[ Thu Sep 15 01:43:57 2022 ] Eval epoch: 95 +[ Thu Sep 15 01:45:29 2022 ] Mean test loss of 796 batches: 2.38088321685791. +[ Thu Sep 15 01:45:30 2022 ] Top1: 54.41% +[ Thu Sep 15 01:45:30 2022 ] Top5: 83.42% +[ Thu Sep 15 01:45:30 2022 ] Training epoch: 96 +[ Thu Sep 15 01:45:44 2022 ] Batch(14/243) done. Loss: 0.0109 lr:0.001000 network_time: 0.0302 +[ Thu Sep 15 01:46:57 2022 ] Batch(114/243) done. Loss: 0.0126 lr:0.001000 network_time: 0.0279 +[ Thu Sep 15 01:48:09 2022 ] Batch(214/243) done. Loss: 0.0165 lr:0.001000 network_time: 0.0282 +[ Thu Sep 15 01:48:30 2022 ] Eval epoch: 96 +[ Thu Sep 15 01:50:03 2022 ] Mean test loss of 796 batches: 2.4235730171203613. +[ Thu Sep 15 01:50:03 2022 ] Top1: 54.09% +[ Thu Sep 15 01:50:03 2022 ] Top5: 83.24% +[ Thu Sep 15 01:50:04 2022 ] Training epoch: 97 +[ Thu Sep 15 01:50:59 2022 ] Batch(71/243) done. 
Loss: 0.0114 lr:0.001000 network_time: 0.0276 +[ Thu Sep 15 01:52:12 2022 ] Batch(171/243) done. Loss: 0.0131 lr:0.001000 network_time: 0.0273 +[ Thu Sep 15 01:53:03 2022 ] Eval epoch: 97 +[ Thu Sep 15 01:54:36 2022 ] Mean test loss of 796 batches: 2.4135332107543945. +[ Thu Sep 15 01:54:37 2022 ] Top1: 54.24% +[ Thu Sep 15 01:54:37 2022 ] Top5: 83.38% +[ Thu Sep 15 01:54:37 2022 ] Training epoch: 98 +[ Thu Sep 15 01:55:01 2022 ] Batch(28/243) done. Loss: 0.0141 lr:0.001000 network_time: 0.0267 +[ Thu Sep 15 01:56:14 2022 ] Batch(128/243) done. Loss: 0.0163 lr:0.001000 network_time: 0.0277 +[ Thu Sep 15 01:57:27 2022 ] Batch(228/243) done. Loss: 0.0186 lr:0.001000 network_time: 0.0282 +[ Thu Sep 15 01:57:37 2022 ] Eval epoch: 98 +[ Thu Sep 15 01:59:10 2022 ] Mean test loss of 796 batches: 2.4122824668884277. +[ Thu Sep 15 01:59:10 2022 ] Top1: 54.46% +[ Thu Sep 15 01:59:10 2022 ] Top5: 83.42% +[ Thu Sep 15 01:59:11 2022 ] Training epoch: 99 +[ Thu Sep 15 02:00:16 2022 ] Batch(85/243) done. Loss: 0.0143 lr:0.001000 network_time: 0.0274 +[ Thu Sep 15 02:01:29 2022 ] Batch(185/243) done. Loss: 0.0190 lr:0.001000 network_time: 0.0283 +[ Thu Sep 15 02:02:11 2022 ] Eval epoch: 99 +[ Thu Sep 15 02:03:43 2022 ] Mean test loss of 796 batches: 2.417081117630005. +[ Thu Sep 15 02:03:44 2022 ] Top1: 54.22% +[ Thu Sep 15 02:03:44 2022 ] Top5: 83.36% +[ Thu Sep 15 02:03:44 2022 ] Training epoch: 100 +[ Thu Sep 15 02:04:18 2022 ] Batch(42/243) done. Loss: 0.0297 lr:0.001000 network_time: 0.0278 +[ Thu Sep 15 02:05:31 2022 ] Batch(142/243) done. Loss: 0.0040 lr:0.001000 network_time: 0.0267 +[ Thu Sep 15 02:06:44 2022 ] Batch(242/243) done. Loss: 0.0077 lr:0.001000 network_time: 0.0278 +[ Thu Sep 15 02:06:44 2022 ] Eval epoch: 100 +[ Thu Sep 15 02:08:16 2022 ] Mean test loss of 796 batches: 2.3927650451660156. 
+[ Thu Sep 15 02:08:17 2022 ] Top1: 54.51% +[ Thu Sep 15 02:08:17 2022 ] Top5: 83.63% +[ Thu Sep 15 02:08:17 2022 ] Training epoch: 101 +[ Thu Sep 15 02:09:33 2022 ] Batch(99/243) done. Loss: 0.0114 lr:0.000100 network_time: 0.0298 +[ Thu Sep 15 02:10:46 2022 ] Batch(199/243) done. Loss: 0.0074 lr:0.000100 network_time: 0.0264 +[ Thu Sep 15 02:11:17 2022 ] Eval epoch: 101 +[ Thu Sep 15 02:12:50 2022 ] Mean test loss of 796 batches: 2.3835721015930176. +[ Thu Sep 15 02:12:50 2022 ] Top1: 54.72% +[ Thu Sep 15 02:12:50 2022 ] Top5: 83.62% +[ Thu Sep 15 02:12:51 2022 ] Training epoch: 102 +[ Thu Sep 15 02:13:35 2022 ] Batch(56/243) done. Loss: 0.0081 lr:0.000100 network_time: 0.0329 +[ Thu Sep 15 02:14:48 2022 ] Batch(156/243) done. Loss: 0.0097 lr:0.000100 network_time: 0.0268 +[ Thu Sep 15 02:15:50 2022 ] Eval epoch: 102 +[ Thu Sep 15 02:17:23 2022 ] Mean test loss of 796 batches: 2.402935028076172. +[ Thu Sep 15 02:17:23 2022 ] Top1: 54.45% +[ Thu Sep 15 02:17:24 2022 ] Top5: 83.50% +[ Thu Sep 15 02:17:24 2022 ] Training epoch: 103 +[ Thu Sep 15 02:17:37 2022 ] Batch(13/243) done. Loss: 0.0062 lr:0.000100 network_time: 0.0318 +[ Thu Sep 15 02:18:50 2022 ] Batch(113/243) done. Loss: 0.0048 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 02:20:03 2022 ] Batch(213/243) done. Loss: 0.0063 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 02:20:24 2022 ] Eval epoch: 103 +[ Thu Sep 15 02:21:57 2022 ] Mean test loss of 796 batches: 2.4096858501434326. +[ Thu Sep 15 02:21:57 2022 ] Top1: 53.87% +[ Thu Sep 15 02:21:57 2022 ] Top5: 83.20% +[ Thu Sep 15 02:21:58 2022 ] Training epoch: 104 +[ Thu Sep 15 02:22:52 2022 ] Batch(70/243) done. Loss: 0.0091 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 02:24:05 2022 ] Batch(170/243) done. Loss: 0.0122 lr:0.000100 network_time: 0.0311 +[ Thu Sep 15 02:24:57 2022 ] Eval epoch: 104 +[ Thu Sep 15 02:26:30 2022 ] Mean test loss of 796 batches: 2.4239163398742676. 
+[ Thu Sep 15 02:26:30 2022 ] Top1: 54.40% +[ Thu Sep 15 02:26:31 2022 ] Top5: 83.51% +[ Thu Sep 15 02:26:31 2022 ] Training epoch: 105 +[ Thu Sep 15 02:26:54 2022 ] Batch(27/243) done. Loss: 0.0324 lr:0.000100 network_time: 0.0311 +[ Thu Sep 15 02:28:07 2022 ] Batch(127/243) done. Loss: 0.0078 lr:0.000100 network_time: 0.0313 +[ Thu Sep 15 02:29:20 2022 ] Batch(227/243) done. Loss: 0.0403 lr:0.000100 network_time: 0.0328 +[ Thu Sep 15 02:29:31 2022 ] Eval epoch: 105 +[ Thu Sep 15 02:31:03 2022 ] Mean test loss of 796 batches: 2.446220874786377. +[ Thu Sep 15 02:31:04 2022 ] Top1: 53.80% +[ Thu Sep 15 02:31:04 2022 ] Top5: 83.13% +[ Thu Sep 15 02:31:04 2022 ] Training epoch: 106 +[ Thu Sep 15 02:32:09 2022 ] Batch(84/243) done. Loss: 0.0101 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 02:33:22 2022 ] Batch(184/243) done. Loss: 0.0108 lr:0.000100 network_time: 0.0369 +[ Thu Sep 15 02:34:04 2022 ] Eval epoch: 106 +[ Thu Sep 15 02:35:37 2022 ] Mean test loss of 796 batches: 2.384615659713745. +[ Thu Sep 15 02:35:38 2022 ] Top1: 54.44% +[ Thu Sep 15 02:35:38 2022 ] Top5: 83.58% +[ Thu Sep 15 02:35:38 2022 ] Training epoch: 107 +[ Thu Sep 15 02:36:12 2022 ] Batch(41/243) done. Loss: 0.0059 lr:0.000100 network_time: 0.0267 +[ Thu Sep 15 02:37:24 2022 ] Batch(141/243) done. Loss: 0.0183 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 02:38:37 2022 ] Batch(241/243) done. Loss: 0.0087 lr:0.000100 network_time: 0.0357 +[ Thu Sep 15 02:38:38 2022 ] Eval epoch: 107 +[ Thu Sep 15 02:40:11 2022 ] Mean test loss of 796 batches: 2.3720390796661377. +[ Thu Sep 15 02:40:12 2022 ] Top1: 54.51% +[ Thu Sep 15 02:40:12 2022 ] Top5: 83.78% +[ Thu Sep 15 02:40:12 2022 ] Training epoch: 108 +[ Thu Sep 15 02:41:27 2022 ] Batch(98/243) done. Loss: 0.0098 lr:0.000100 network_time: 0.0312 +[ Thu Sep 15 02:42:40 2022 ] Batch(198/243) done. 
Loss: 0.0157 lr:0.000100 network_time: 0.0335 +[ Thu Sep 15 02:43:12 2022 ] Eval epoch: 108 +[ Thu Sep 15 02:44:45 2022 ] Mean test loss of 796 batches: 2.4088966846466064. +[ Thu Sep 15 02:44:45 2022 ] Top1: 54.23% +[ Thu Sep 15 02:44:46 2022 ] Top5: 83.49% +[ Thu Sep 15 02:44:46 2022 ] Training epoch: 109 +[ Thu Sep 15 02:45:29 2022 ] Batch(55/243) done. Loss: 0.0081 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 02:46:42 2022 ] Batch(155/243) done. Loss: 0.0096 lr:0.000100 network_time: 0.0290 +[ Thu Sep 15 02:47:45 2022 ] Eval epoch: 109 +[ Thu Sep 15 02:49:18 2022 ] Mean test loss of 796 batches: 2.4085729122161865. +[ Thu Sep 15 02:49:19 2022 ] Top1: 54.15% +[ Thu Sep 15 02:49:19 2022 ] Top5: 83.47% +[ Thu Sep 15 02:49:19 2022 ] Training epoch: 110 +[ Thu Sep 15 02:49:32 2022 ] Batch(12/243) done. Loss: 0.0069 lr:0.000100 network_time: 0.0312 +[ Thu Sep 15 02:50:44 2022 ] Batch(112/243) done. Loss: 0.0133 lr:0.000100 network_time: 0.0290 +[ Thu Sep 15 02:51:57 2022 ] Batch(212/243) done. Loss: 0.0086 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 02:52:19 2022 ] Eval epoch: 110 +[ Thu Sep 15 02:53:52 2022 ] Mean test loss of 796 batches: 2.4391942024230957. +[ Thu Sep 15 02:53:52 2022 ] Top1: 54.12% +[ Thu Sep 15 02:53:53 2022 ] Top5: 83.40% +[ Thu Sep 15 02:53:53 2022 ] Training epoch: 111 +[ Thu Sep 15 02:54:47 2022 ] Batch(69/243) done. Loss: 0.0309 lr:0.000100 network_time: 0.0285 +[ Thu Sep 15 02:55:59 2022 ] Batch(169/243) done. Loss: 0.0130 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 02:56:53 2022 ] Eval epoch: 111 +[ Thu Sep 15 02:58:25 2022 ] Mean test loss of 796 batches: 2.412464141845703. +[ Thu Sep 15 02:58:25 2022 ] Top1: 54.33% +[ Thu Sep 15 02:58:26 2022 ] Top5: 83.24% +[ Thu Sep 15 02:58:26 2022 ] Training epoch: 112 +[ Thu Sep 15 02:58:49 2022 ] Batch(26/243) done. Loss: 0.0087 lr:0.000100 network_time: 0.0325 +[ Thu Sep 15 03:00:01 2022 ] Batch(126/243) done. 
Loss: 0.0126 lr:0.000100 network_time: 0.0319 +[ Thu Sep 15 03:01:14 2022 ] Batch(226/243) done. Loss: 0.0311 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 03:01:26 2022 ] Eval epoch: 112 +[ Thu Sep 15 03:02:59 2022 ] Mean test loss of 796 batches: 2.444413900375366. +[ Thu Sep 15 03:02:59 2022 ] Top1: 53.70% +[ Thu Sep 15 03:02:59 2022 ] Top5: 82.84% +[ Thu Sep 15 03:03:00 2022 ] Training epoch: 113 +[ Thu Sep 15 03:04:04 2022 ] Batch(83/243) done. Loss: 0.0062 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 03:05:16 2022 ] Batch(183/243) done. Loss: 0.0063 lr:0.000100 network_time: 0.0313 +[ Thu Sep 15 03:06:00 2022 ] Eval epoch: 113 +[ Thu Sep 15 03:07:32 2022 ] Mean test loss of 796 batches: 2.4518001079559326. +[ Thu Sep 15 03:07:32 2022 ] Top1: 54.17% +[ Thu Sep 15 03:07:33 2022 ] Top5: 83.25% +[ Thu Sep 15 03:07:33 2022 ] Training epoch: 114 +[ Thu Sep 15 03:08:05 2022 ] Batch(40/243) done. Loss: 0.0074 lr:0.000100 network_time: 0.0309 +[ Thu Sep 15 03:09:18 2022 ] Batch(140/243) done. Loss: 0.0063 lr:0.000100 network_time: 0.0328 +[ Thu Sep 15 03:10:31 2022 ] Batch(240/243) done. Loss: 0.0074 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 03:10:33 2022 ] Eval epoch: 114 +[ Thu Sep 15 03:12:04 2022 ] Mean test loss of 796 batches: 2.400949478149414. +[ Thu Sep 15 03:12:05 2022 ] Top1: 54.15% +[ Thu Sep 15 03:12:05 2022 ] Top5: 83.42% +[ Thu Sep 15 03:12:05 2022 ] Training epoch: 115 +[ Thu Sep 15 03:13:19 2022 ] Batch(97/243) done. Loss: 0.0092 lr:0.000100 network_time: 0.0287 +[ Thu Sep 15 03:14:32 2022 ] Batch(197/243) done. Loss: 0.0152 lr:0.000100 network_time: 0.0226 +[ Thu Sep 15 03:15:05 2022 ] Eval epoch: 115 +[ Thu Sep 15 03:16:37 2022 ] Mean test loss of 796 batches: 2.374425172805786. +[ Thu Sep 15 03:16:37 2022 ] Top1: 54.36% +[ Thu Sep 15 03:16:37 2022 ] Top5: 83.42% +[ Thu Sep 15 03:16:38 2022 ] Training epoch: 116 +[ Thu Sep 15 03:17:20 2022 ] Batch(54/243) done. 
Loss: 0.0063 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 03:18:33 2022 ] Batch(154/243) done. Loss: 0.0058 lr:0.000100 network_time: 0.0323 +[ Thu Sep 15 03:19:37 2022 ] Eval epoch: 116 +[ Thu Sep 15 03:21:10 2022 ] Mean test loss of 796 batches: 2.3895974159240723. +[ Thu Sep 15 03:21:11 2022 ] Top1: 54.53% +[ Thu Sep 15 03:21:11 2022 ] Top5: 83.50% +[ Thu Sep 15 03:21:11 2022 ] Training epoch: 117 +[ Thu Sep 15 03:21:23 2022 ] Batch(11/243) done. Loss: 0.0143 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 03:22:35 2022 ] Batch(111/243) done. Loss: 0.0089 lr:0.000100 network_time: 0.0298 +[ Thu Sep 15 03:23:48 2022 ] Batch(211/243) done. Loss: 0.0094 lr:0.000100 network_time: 0.0317 +[ Thu Sep 15 03:24:11 2022 ] Eval epoch: 117 +[ Thu Sep 15 03:25:43 2022 ] Mean test loss of 796 batches: 2.401299476623535. +[ Thu Sep 15 03:25:44 2022 ] Top1: 54.57% +[ Thu Sep 15 03:25:44 2022 ] Top5: 83.51% +[ Thu Sep 15 03:25:44 2022 ] Training epoch: 118 +[ Thu Sep 15 03:26:37 2022 ] Batch(68/243) done. Loss: 0.0067 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 03:27:50 2022 ] Batch(168/243) done. Loss: 0.0088 lr:0.000100 network_time: 0.0352 +[ Thu Sep 15 03:28:44 2022 ] Eval epoch: 118 +[ Thu Sep 15 03:30:17 2022 ] Mean test loss of 796 batches: 2.439582586288452. +[ Thu Sep 15 03:30:17 2022 ] Top1: 53.99% +[ Thu Sep 15 03:30:17 2022 ] Top5: 83.18% +[ Thu Sep 15 03:30:18 2022 ] Training epoch: 119 +[ Thu Sep 15 03:30:39 2022 ] Batch(25/243) done. Loss: 0.0191 lr:0.000100 network_time: 0.0256 +[ Thu Sep 15 03:31:52 2022 ] Batch(125/243) done. Loss: 0.0259 lr:0.000100 network_time: 0.0326 +[ Thu Sep 15 03:33:05 2022 ] Batch(225/243) done. Loss: 0.0171 lr:0.000100 network_time: 0.0325 +[ Thu Sep 15 03:33:17 2022 ] Eval epoch: 119 +[ Thu Sep 15 03:34:50 2022 ] Mean test loss of 796 batches: 2.4420394897460938. 
+[ Thu Sep 15 03:34:51 2022 ] Top1: 53.83% +[ Thu Sep 15 03:34:51 2022 ] Top5: 83.19% +[ Thu Sep 15 03:34:51 2022 ] Training epoch: 120 +[ Thu Sep 15 03:35:54 2022 ] Batch(82/243) done. Loss: 0.0074 lr:0.000100 network_time: 0.0271 +[ Thu Sep 15 03:37:07 2022 ] Batch(182/243) done. Loss: 0.0215 lr:0.000100 network_time: 0.0474 +[ Thu Sep 15 03:37:51 2022 ] Eval epoch: 120 +[ Thu Sep 15 03:39:23 2022 ] Mean test loss of 796 batches: 2.4470932483673096. +[ Thu Sep 15 03:39:24 2022 ] Top1: 54.29% +[ Thu Sep 15 03:39:24 2022 ] Top5: 83.36% +[ Thu Sep 15 03:39:24 2022 ] Training epoch: 121 +[ Thu Sep 15 03:39:56 2022 ] Batch(39/243) done. Loss: 0.0121 lr:0.000100 network_time: 0.0261 +[ Thu Sep 15 03:41:09 2022 ] Batch(139/243) done. Loss: 0.0200 lr:0.000100 network_time: 0.0289 +[ Thu Sep 15 03:42:22 2022 ] Batch(239/243) done. Loss: 0.0115 lr:0.000100 network_time: 0.0284 +[ Thu Sep 15 03:42:24 2022 ] Eval epoch: 121 +[ Thu Sep 15 03:43:57 2022 ] Mean test loss of 796 batches: 2.3847100734710693. +[ Thu Sep 15 03:43:57 2022 ] Top1: 54.56% +[ Thu Sep 15 03:43:57 2022 ] Top5: 83.73% +[ Thu Sep 15 03:43:58 2022 ] Training epoch: 122 +[ Thu Sep 15 03:45:11 2022 ] Batch(96/243) done. Loss: 0.0116 lr:0.000100 network_time: 0.0288 +[ Thu Sep 15 03:46:24 2022 ] Batch(196/243) done. Loss: 0.0115 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 03:46:57 2022 ] Eval epoch: 122 +[ Thu Sep 15 03:48:30 2022 ] Mean test loss of 796 batches: 2.420030355453491. +[ Thu Sep 15 03:48:31 2022 ] Top1: 54.13% +[ Thu Sep 15 03:48:31 2022 ] Top5: 83.32% +[ Thu Sep 15 03:48:31 2022 ] Training epoch: 123 +[ Thu Sep 15 03:49:13 2022 ] Batch(53/243) done. Loss: 0.0080 lr:0.000100 network_time: 0.0324 +[ Thu Sep 15 03:50:26 2022 ] Batch(153/243) done. Loss: 0.0132 lr:0.000100 network_time: 0.0288 +[ Thu Sep 15 03:51:31 2022 ] Eval epoch: 123 +[ Thu Sep 15 03:53:03 2022 ] Mean test loss of 796 batches: 2.4071152210235596. 
+[ Thu Sep 15 03:53:04 2022 ] Top1: 54.40% +[ Thu Sep 15 03:53:04 2022 ] Top5: 83.43% +[ Thu Sep 15 03:53:04 2022 ] Training epoch: 124 +[ Thu Sep 15 03:53:15 2022 ] Batch(10/243) done. Loss: 0.0096 lr:0.000100 network_time: 0.0536 +[ Thu Sep 15 03:54:28 2022 ] Batch(110/243) done. Loss: 0.0061 lr:0.000100 network_time: 0.0281 +[ Thu Sep 15 03:55:41 2022 ] Batch(210/243) done. Loss: 0.0115 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 03:56:05 2022 ] Eval epoch: 124 +[ Thu Sep 15 03:57:38 2022 ] Mean test loss of 796 batches: 2.3790695667266846. +[ Thu Sep 15 03:57:38 2022 ] Top1: 54.58% +[ Thu Sep 15 03:57:38 2022 ] Top5: 83.58% +[ Thu Sep 15 03:57:38 2022 ] Training epoch: 125 +[ Thu Sep 15 03:58:31 2022 ] Batch(67/243) done. Loss: 0.0104 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 03:59:44 2022 ] Batch(167/243) done. Loss: 0.0475 lr:0.000100 network_time: 0.0301 +[ Thu Sep 15 04:00:38 2022 ] Eval epoch: 125 +[ Thu Sep 15 04:02:11 2022 ] Mean test loss of 796 batches: 2.4057438373565674. +[ Thu Sep 15 04:02:11 2022 ] Top1: 54.09% +[ Thu Sep 15 04:02:11 2022 ] Top5: 83.32% +[ Thu Sep 15 04:02:12 2022 ] Training epoch: 126 +[ Thu Sep 15 04:02:33 2022 ] Batch(24/243) done. Loss: 0.0141 lr:0.000100 network_time: 0.0558 +[ Thu Sep 15 04:03:45 2022 ] Batch(124/243) done. Loss: 0.0090 lr:0.000100 network_time: 0.0287 +[ Thu Sep 15 04:04:58 2022 ] Batch(224/243) done. Loss: 0.0155 lr:0.000100 network_time: 0.0302 +[ Thu Sep 15 04:05:11 2022 ] Eval epoch: 126 +[ Thu Sep 15 04:06:43 2022 ] Mean test loss of 796 batches: 2.4487736225128174. +[ Thu Sep 15 04:06:44 2022 ] Top1: 54.18% +[ Thu Sep 15 04:06:44 2022 ] Top5: 83.42% +[ Thu Sep 15 04:06:44 2022 ] Training epoch: 127 +[ Thu Sep 15 04:07:47 2022 ] Batch(81/243) done. Loss: 0.0202 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 04:09:00 2022 ] Batch(181/243) done. 
Loss: 0.0151 lr:0.000100 network_time: 0.0267 +[ Thu Sep 15 04:09:44 2022 ] Eval epoch: 127 +[ Thu Sep 15 04:11:16 2022 ] Mean test loss of 796 batches: 2.4265387058258057. +[ Thu Sep 15 04:11:16 2022 ] Top1: 54.10% +[ Thu Sep 15 04:11:17 2022 ] Top5: 83.26% +[ Thu Sep 15 04:11:17 2022 ] Training epoch: 128 +[ Thu Sep 15 04:11:49 2022 ] Batch(38/243) done. Loss: 0.0314 lr:0.000100 network_time: 0.0346 +[ Thu Sep 15 04:13:01 2022 ] Batch(138/243) done. Loss: 0.0061 lr:0.000100 network_time: 0.0271 +[ Thu Sep 15 04:14:14 2022 ] Batch(238/243) done. Loss: 0.0216 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 04:14:17 2022 ] Eval epoch: 128 +[ Thu Sep 15 04:15:50 2022 ] Mean test loss of 796 batches: 2.398974895477295. +[ Thu Sep 15 04:15:50 2022 ] Top1: 54.33% +[ Thu Sep 15 04:15:51 2022 ] Top5: 83.47% +[ Thu Sep 15 04:15:51 2022 ] Training epoch: 129 +[ Thu Sep 15 04:17:04 2022 ] Batch(95/243) done. Loss: 0.0208 lr:0.000100 network_time: 0.0319 +[ Thu Sep 15 04:18:17 2022 ] Batch(195/243) done. Loss: 0.0156 lr:0.000100 network_time: 0.0264 +[ Thu Sep 15 04:18:51 2022 ] Eval epoch: 129 +[ Thu Sep 15 04:20:23 2022 ] Mean test loss of 796 batches: 2.408602714538574. +[ Thu Sep 15 04:20:24 2022 ] Top1: 54.14% +[ Thu Sep 15 04:20:24 2022 ] Top5: 83.28% +[ Thu Sep 15 04:20:24 2022 ] Training epoch: 130 +[ Thu Sep 15 04:21:06 2022 ] Batch(52/243) done. Loss: 0.0093 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 04:22:18 2022 ] Batch(152/243) done. Loss: 0.0065 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 04:23:24 2022 ] Eval epoch: 130 +[ Thu Sep 15 04:24:57 2022 ] Mean test loss of 796 batches: 2.363591194152832. +[ Thu Sep 15 04:24:58 2022 ] Top1: 54.93% +[ Thu Sep 15 04:24:58 2022 ] Top5: 83.73% +[ Thu Sep 15 04:24:58 2022 ] Training epoch: 131 +[ Thu Sep 15 04:25:08 2022 ] Batch(9/243) done. Loss: 0.0161 lr:0.000100 network_time: 0.0317 +[ Thu Sep 15 04:26:21 2022 ] Batch(109/243) done. 
Loss: 0.0075 lr:0.000100 network_time: 0.0283 +[ Thu Sep 15 04:27:34 2022 ] Batch(209/243) done. Loss: 0.0127 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 04:27:58 2022 ] Eval epoch: 131 +[ Thu Sep 15 04:29:31 2022 ] Mean test loss of 796 batches: 2.4064884185791016. +[ Thu Sep 15 04:29:31 2022 ] Top1: 54.13% +[ Thu Sep 15 04:29:32 2022 ] Top5: 83.39% +[ Thu Sep 15 04:29:32 2022 ] Training epoch: 132 +[ Thu Sep 15 04:30:23 2022 ] Batch(66/243) done. Loss: 0.0080 lr:0.000100 network_time: 0.0281 +[ Thu Sep 15 04:31:36 2022 ] Batch(166/243) done. Loss: 0.0110 lr:0.000100 network_time: 0.0267 +[ Thu Sep 15 04:32:32 2022 ] Eval epoch: 132 +[ Thu Sep 15 04:34:05 2022 ] Mean test loss of 796 batches: 2.361076593399048. +[ Thu Sep 15 04:34:06 2022 ] Top1: 54.92% +[ Thu Sep 15 04:34:06 2022 ] Top5: 83.69% +[ Thu Sep 15 04:34:06 2022 ] Training epoch: 133 +[ Thu Sep 15 04:34:27 2022 ] Batch(23/243) done. Loss: 0.0093 lr:0.000100 network_time: 0.0285 +[ Thu Sep 15 04:35:40 2022 ] Batch(123/243) done. Loss: 0.0074 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 04:36:52 2022 ] Batch(223/243) done. Loss: 0.0105 lr:0.000100 network_time: 0.0310 +[ Thu Sep 15 04:37:06 2022 ] Eval epoch: 133 +[ Thu Sep 15 04:38:40 2022 ] Mean test loss of 796 batches: 2.408871650695801. +[ Thu Sep 15 04:38:40 2022 ] Top1: 54.18% +[ Thu Sep 15 04:38:40 2022 ] Top5: 83.23% +[ Thu Sep 15 04:38:41 2022 ] Training epoch: 134 +[ Thu Sep 15 04:39:42 2022 ] Batch(80/243) done. Loss: 0.0139 lr:0.000100 network_time: 0.0264 +[ Thu Sep 15 04:40:55 2022 ] Batch(180/243) done. Loss: 0.0066 lr:0.000100 network_time: 0.0264 +[ Thu Sep 15 04:41:40 2022 ] Eval epoch: 134 +[ Thu Sep 15 04:43:13 2022 ] Mean test loss of 796 batches: 2.3809244632720947. +[ Thu Sep 15 04:43:13 2022 ] Top1: 54.55% +[ Thu Sep 15 04:43:13 2022 ] Top5: 83.58% +[ Thu Sep 15 04:43:13 2022 ] Training epoch: 135 +[ Thu Sep 15 04:43:44 2022 ] Batch(37/243) done. 
Loss: 0.0068 lr:0.000100 network_time: 0.0288 +[ Thu Sep 15 04:44:57 2022 ] Batch(137/243) done. Loss: 0.0115 lr:0.000100 network_time: 0.0331 +[ Thu Sep 15 04:46:10 2022 ] Batch(237/243) done. Loss: 0.0098 lr:0.000100 network_time: 0.0309 +[ Thu Sep 15 04:46:14 2022 ] Eval epoch: 135 +[ Thu Sep 15 04:47:46 2022 ] Mean test loss of 796 batches: 2.4107375144958496. +[ Thu Sep 15 04:47:46 2022 ] Top1: 54.19% +[ Thu Sep 15 04:47:47 2022 ] Top5: 83.28% +[ Thu Sep 15 04:47:47 2022 ] Training epoch: 136 +[ Thu Sep 15 04:48:58 2022 ] Batch(94/243) done. Loss: 0.0069 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 04:50:11 2022 ] Batch(194/243) done. Loss: 0.0253 lr:0.000100 network_time: 0.0259 +[ Thu Sep 15 04:50:46 2022 ] Eval epoch: 136 +[ Thu Sep 15 04:52:19 2022 ] Mean test loss of 796 batches: 2.412553310394287. +[ Thu Sep 15 04:52:19 2022 ] Top1: 54.30% +[ Thu Sep 15 04:52:19 2022 ] Top5: 83.34% +[ Thu Sep 15 04:52:20 2022 ] Training epoch: 137 +[ Thu Sep 15 04:53:00 2022 ] Batch(51/243) done. Loss: 0.0092 lr:0.000100 network_time: 0.0282 +[ Thu Sep 15 04:54:13 2022 ] Batch(151/243) done. Loss: 0.0118 lr:0.000100 network_time: 0.0291 +[ Thu Sep 15 04:55:19 2022 ] Eval epoch: 137 +[ Thu Sep 15 04:56:52 2022 ] Mean test loss of 796 batches: 2.393608331680298. +[ Thu Sep 15 04:56:52 2022 ] Top1: 54.69% +[ Thu Sep 15 04:56:53 2022 ] Top5: 83.61% +[ Thu Sep 15 04:56:53 2022 ] Training epoch: 138 +[ Thu Sep 15 04:57:02 2022 ] Batch(8/243) done. Loss: 0.0195 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 04:58:15 2022 ] Batch(108/243) done. Loss: 0.0269 lr:0.000100 network_time: 0.0290 +[ Thu Sep 15 04:59:28 2022 ] Batch(208/243) done. Loss: 0.0116 lr:0.000100 network_time: 0.0282 +[ Thu Sep 15 04:59:53 2022 ] Eval epoch: 138 +[ Thu Sep 15 05:01:26 2022 ] Mean test loss of 796 batches: 2.412797212600708. 
+[ Thu Sep 15 05:01:26 2022 ] Top1: 53.92% +[ Thu Sep 15 05:01:26 2022 ] Top5: 83.29% +[ Thu Sep 15 05:01:27 2022 ] Training epoch: 139 +[ Thu Sep 15 05:02:18 2022 ] Batch(65/243) done. Loss: 0.0478 lr:0.000100 network_time: 0.0294 +[ Thu Sep 15 05:03:30 2022 ] Batch(165/243) done. Loss: 0.0116 lr:0.000100 network_time: 0.0262 +[ Thu Sep 15 05:04:27 2022 ] Eval epoch: 139 +[ Thu Sep 15 05:05:59 2022 ] Mean test loss of 796 batches: 2.435910701751709. +[ Thu Sep 15 05:06:00 2022 ] Top1: 54.09% +[ Thu Sep 15 05:06:00 2022 ] Top5: 83.17% +[ Thu Sep 15 05:06:00 2022 ] Training epoch: 140 +[ Thu Sep 15 05:06:20 2022 ] Batch(22/243) done. Loss: 0.0075 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 05:07:33 2022 ] Batch(122/243) done. Loss: 0.0352 lr:0.000100 network_time: 0.0317 +[ Thu Sep 15 05:08:45 2022 ] Batch(222/243) done. Loss: 0.0075 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 05:09:00 2022 ] Eval epoch: 140 +[ Thu Sep 15 05:10:33 2022 ] Mean test loss of 796 batches: 2.3889408111572266. 
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import math

import sys
sys.path.append("./model/Temporal_shift/")

# Project-local CUDA temporal-shift op; requires the compiled extension above.
from cuda.shift import Shift


def import_class(name):
    """Import and return a class from a dotted path like 'graph.ntu_rgb_d.Graph'."""
    components = name.split('.')
    mod = __import__(components[0])
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod


def conv_init(conv):
    """Kaiming-initialize a conv layer's weight and zero its bias.

    Uses the in-place ``_`` initializers; the bare ``nn.init.kaiming_normal`` /
    ``nn.init.constant`` spellings were deprecated and later removed from PyTorch.
    """
    nn.init.kaiming_normal_(conv.weight, mode='fan_out')
    nn.init.constant_(conv.bias, 0)


def bn_init(bn, scale):
    """Initialize a batch-norm layer: weight to ``scale``, bias to zero."""
    nn.init.constant_(bn.weight, scale)
    nn.init.constant_(bn.bias, 0)


class tcn(nn.Module):
    """Plain temporal convolution (kernel over time only) + batch norm.

    Used as the strided residual projection inside :class:`TCN_GCN_unit`.
    Note: ``self.relu`` is created but intentionally not applied in ``forward``
    (the surrounding unit applies the nonlinearity).
    """

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(tcn, self).__init__()
        pad = int((kernel_size - 1) / 2)  # "same" padding along time
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0),
                              stride=(stride, 1))

        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()
        conv_init(self.conv)
        bn_init(self.bn, 1)

    def forward(self, x):
        # (N, C, T, V) -> (N, C', T/stride, V); no activation here by design.
        x = self.bn(self.conv(x))
        return x


class Shift_tcn(nn.Module):
    """Temporal shift module: shift -> 1x1 conv -> ReLU -> shift -> BN."""

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(Shift_tcn, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.bn = nn.BatchNorm2d(in_channels)
        self.bn2 = nn.BatchNorm2d(in_channels)
        bn_init(self.bn2, 1)
        self.relu = nn.ReLU(inplace=True)
        # CUDA shift ops: shift_in is stride-1, shift_out carries the temporal stride.
        self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1)
        self.shift_out = Shift(channel=out_channels, stride=stride, init_scale=1)

        # Pointwise (1x1) conv mixing channels between the two shifts.
        self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1)
        nn.init.kaiming_normal_(self.temporal_linear.weight, mode='fan_out')

    def forward(self, x):
        x = self.bn(x)
        # shift1
        x = self.shift_in(x)
        x = self.temporal_linear(x)
        x = self.relu(x)
        # shift2
        x = self.shift_out(x)
        x = self.bn2(x)
        return x


class Shift_gcn(nn.Module):
    """Spatial shift-GCN layer over the 25 skeleton joints.

    Replaces graph convolution with fixed channel-rotation "shifts" across
    joints plus a learned pointwise linear map and a learnable feature mask.
    The adjacency ``A`` is accepted for interface compatibility but unused.
    """

    def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3):
        super(Shift_gcn, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        if in_channels != out_channels:
            self.down = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.down = lambda x: x

        # Learned pointwise linear map applied per (frame, joint).
        self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True)
        nn.init.normal_(self.Linear_weight, 0, math.sqrt(1.0 / out_channels))

        self.Linear_bias = nn.Parameter(torch.zeros(1, 1, out_channels, requires_grad=True, device='cuda'), requires_grad=True)
        nn.init.constant_(self.Linear_bias, 0)

        # Mask passed through tanh(.)+1 in forward, so init 0 => multiplier of 1.
        self.Feature_Mask = nn.Parameter(torch.ones(1, 25, in_channels, requires_grad=True, device='cuda'), requires_grad=True)
        nn.init.constant_(self.Feature_Mask, 0)

        self.bn = nn.BatchNorm1d(25 * out_channels)
        self.relu = nn.ReLU()

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)

        # Precomputed permutation indices implementing the joint-wise channel
        # rotation.  np.int64 (not the removed np.int alias) also guarantees a
        # LongTensor for torch.index_select.
        index_array = np.empty(25 * in_channels).astype(np.int64)
        for i in range(25):
            for j in range(in_channels):
                index_array[i * in_channels + j] = (i * in_channels + j + j * in_channels) % (in_channels * 25)
        self.shift_in = nn.Parameter(torch.from_numpy(index_array), requires_grad=False)

        index_array = np.empty(25 * out_channels).astype(np.int64)
        for i in range(25):
            for j in range(out_channels):
                index_array[i * out_channels + j] = (i * out_channels + j - j * out_channels) % (out_channels * 25)
        self.shift_out = nn.Parameter(torch.from_numpy(index_array), requires_grad=False)

    def forward(self, x0):
        n, c, t, v = x0.size()
        x = x0.permute(0, 2, 3, 1).contiguous()

        # shift1: flatten joints*channels, apply the fixed rotation, mask.
        x = x.view(n * t, v * c)
        x = torch.index_select(x, 1, self.shift_in)
        x = x.view(n * t, v, c)
        x = x * (torch.tanh(self.Feature_Mask) + 1)

        # Pointwise linear map: (nt, v, c) x (c, d) -> (nt, v, d).
        x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous()
        x = x + self.Linear_bias

        # shift2 + batch norm over flattened (joint, channel).
        x = x.view(n * t, -1)
        x = torch.index_select(x, 1, self.shift_out)
        x = self.bn(x)
        x = x.view(n, t, v, self.out_channels).permute(0, 3, 1, 2)  # n,c,t,v

        x = x + self.down(x0)  # residual from the layer input
        x = self.relu(x)
        return x


class TCN_GCN_unit(nn.Module):
    """One Shift-GCN block: spatial shift-gcn followed by temporal shift-tcn,
    with an optional residual connection from the block input."""

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = Shift_gcn(in_channels, out_channels, A)
        self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU()

        if not residual:
            self.residual = lambda x: 0
        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x
        else:
            # Strided 1x1 projection to match shape for the residual path.
            self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        x = self.tcn1(self.gcn1(x)) + self.residual(x)
        return self.relu(x)


class Model(nn.Module):
    """Shift-GCN skeleton action-recognition backbone (10 units + classifier).

    Args:
        num_class: number of output classes.
        num_point: joints per skeleton (25 for NTU RGB+D).
        num_person: max persons per sample.
        graph: dotted path to the graph class (required).
        graph_args: kwargs forwarded to the graph class.
        in_channels: channels per joint coordinate (default 3 = x, y, z).
    """

    def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3):
        super(Model, self).__init__()

        if graph is None:
            raise ValueError()
        else:
            Graph = import_class(graph)
            self.graph = Graph(**graph_args)

        A = self.graph.A
        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)

        # Bug fix: the first unit previously hard-coded 3 input channels,
        # silently ignoring ``in_channels``.  Passing it through is
        # backward-compatible (default is still 3).
        self.l1 = TCN_GCN_unit(in_channels, 64, A, residual=False)
        self.l2 = TCN_GCN_unit(64, 64, A)
        self.l3 = TCN_GCN_unit(64, 64, A)
        self.l4 = TCN_GCN_unit(64, 64, A)
        self.l5 = TCN_GCN_unit(64, 128, A, stride=2)
        self.l6 = TCN_GCN_unit(128, 128, A)
        self.l7 = TCN_GCN_unit(128, 128, A)
        self.l8 = TCN_GCN_unit(128, 256, A, stride=2)
        self.l9 = TCN_GCN_unit(256, 256, A)
        self.l10 = TCN_GCN_unit(256, 256, A)

        self.fc = nn.Linear(256, num_class)
        nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
        bn_init(self.data_bn, 1)

    def forward(self, x):
        # x: (N, C, T, V, M) = (batch, channel, time, joint, person)
        N, C, T, V, M = x.size()

        # Normalize per (person, joint, channel), then fold persons into batch.
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)

        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = self.l5(x)
        x = self.l6(x)
        x = self.l7(x)
        x = self.l8(x)
        x = self.l9(x)
        x = self.l10(x)

        # N*M,C,T,V -> global average over space-time, then mean over persons.
        c_new = x.size(1)
        x = x.view(N, M, c_new, -1)
        x = x.mean(3).mean(1)

        return self.fc(x)
/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_bone_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_ShiftGCN_bone_motion_xsub diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_motion_xsub/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_motion_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..9881ce7b96a140cc84035f99b093bdfc9f810f83 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_motion_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8d96c42be1b3bb086a94b5fc3ff17cda1a170098b51a4909889e7109b7b5f8c +size 4979902 diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_motion_xsub/log.txt b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_motion_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..8d138097e67e2a2590db9271f93499886ad7285a --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_motion_xsub/log.txt @@ -0,0 +1,875 @@ +[ Thu Sep 15 17:47:56 2022 ] Parameters: +{'work_dir': './work_dir/ntu_ShiftGCN_bone_motion_xsub', 'model_saved_name': './save_models/ntu_ShiftGCN_bone_motion_xsub', 'Experiment_name': 'ntu_ShiftGCN_bone_motion_xsub', 'config': './config/nturgbd-cross-subject/train_bone_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': 
'/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [4, 5, 6, 7], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Thu Sep 15 17:47:56 2022 ] Training epoch: 1 +[ Thu Sep 15 17:48:44 2022 ] Batch(99/123) done. Loss: 2.3221 lr:0.100000 network_time: 0.0504 +[ Thu Sep 15 17:48:52 2022 ] Eval epoch: 1 +[ Thu Sep 15 17:49:15 2022 ] Mean test loss of 258 batches: 5.581438064575195. +[ Thu Sep 15 17:49:15 2022 ] Top1: 11.46% +[ Thu Sep 15 17:49:15 2022 ] Top5: 35.39% +[ Thu Sep 15 17:49:15 2022 ] Training epoch: 2 +[ Thu Sep 15 17:49:47 2022 ] Batch(76/123) done. Loss: 1.9718 lr:0.100000 network_time: 0.0507 +[ Thu Sep 15 17:50:05 2022 ] Eval epoch: 2 +[ Thu Sep 15 17:50:27 2022 ] Mean test loss of 258 batches: 4.6655802726745605. +[ Thu Sep 15 17:50:28 2022 ] Top1: 22.79% +[ Thu Sep 15 17:50:28 2022 ] Top5: 53.54% +[ Thu Sep 15 17:50:28 2022 ] Training epoch: 3 +[ Thu Sep 15 17:50:51 2022 ] Batch(53/123) done. Loss: 1.8279 lr:0.100000 network_time: 0.0490 +[ Thu Sep 15 17:51:17 2022 ] Eval epoch: 3 +[ Thu Sep 15 17:51:40 2022 ] Mean test loss of 258 batches: 4.853720188140869. 
+[ Thu Sep 15 17:51:40 2022 ] Top1: 23.39% +[ Thu Sep 15 17:51:40 2022 ] Top5: 53.48% +[ Thu Sep 15 17:51:41 2022 ] Training epoch: 4 +[ Thu Sep 15 17:51:56 2022 ] Batch(30/123) done. Loss: 1.7001 lr:0.100000 network_time: 0.0515 +[ Thu Sep 15 17:52:30 2022 ] Eval epoch: 4 +[ Thu Sep 15 17:52:53 2022 ] Mean test loss of 258 batches: 4.209709644317627. +[ Thu Sep 15 17:52:53 2022 ] Top1: 28.01% +[ Thu Sep 15 17:52:53 2022 ] Top5: 59.43% +[ Thu Sep 15 17:52:53 2022 ] Training epoch: 5 +[ Thu Sep 15 17:53:00 2022 ] Batch(7/123) done. Loss: 1.2025 lr:0.100000 network_time: 0.0503 +[ Thu Sep 15 17:53:37 2022 ] Batch(107/123) done. Loss: 1.0203 lr:0.100000 network_time: 0.0518 +[ Thu Sep 15 17:53:43 2022 ] Eval epoch: 5 +[ Thu Sep 15 17:54:05 2022 ] Mean test loss of 258 batches: 4.861428260803223. +[ Thu Sep 15 17:54:05 2022 ] Top1: 19.81% +[ Thu Sep 15 17:54:05 2022 ] Top5: 47.30% +[ Thu Sep 15 17:54:05 2022 ] Training epoch: 6 +[ Thu Sep 15 17:54:40 2022 ] Batch(84/123) done. Loss: 0.9977 lr:0.100000 network_time: 0.0506 +[ Thu Sep 15 17:54:55 2022 ] Eval epoch: 6 +[ Thu Sep 15 17:55:17 2022 ] Mean test loss of 258 batches: 3.8953726291656494. +[ Thu Sep 15 17:55:17 2022 ] Top1: 30.00% +[ Thu Sep 15 17:55:17 2022 ] Top5: 62.61% +[ Thu Sep 15 17:55:18 2022 ] Training epoch: 7 +[ Thu Sep 15 17:55:44 2022 ] Batch(61/123) done. Loss: 0.9246 lr:0.100000 network_time: 0.0473 +[ Thu Sep 15 17:56:06 2022 ] Eval epoch: 7 +[ Thu Sep 15 17:56:29 2022 ] Mean test loss of 258 batches: 4.290779113769531. +[ Thu Sep 15 17:56:29 2022 ] Top1: 23.05% +[ Thu Sep 15 17:56:29 2022 ] Top5: 54.76% +[ Thu Sep 15 17:56:29 2022 ] Training epoch: 8 +[ Thu Sep 15 17:56:48 2022 ] Batch(38/123) done. Loss: 0.7861 lr:0.100000 network_time: 0.0654 +[ Thu Sep 15 17:57:19 2022 ] Eval epoch: 8 +[ Thu Sep 15 17:57:41 2022 ] Mean test loss of 258 batches: 4.356398582458496. 
+[ Thu Sep 15 17:57:42 2022 ] Top1: 30.18% +[ Thu Sep 15 17:57:42 2022 ] Top5: 60.22% +[ Thu Sep 15 17:57:42 2022 ] Training epoch: 9 +[ Thu Sep 15 17:57:52 2022 ] Batch(15/123) done. Loss: 0.7096 lr:0.100000 network_time: 0.0512 +[ Thu Sep 15 17:58:28 2022 ] Batch(115/123) done. Loss: 0.8358 lr:0.100000 network_time: 0.0508 +[ Thu Sep 15 17:58:31 2022 ] Eval epoch: 9 +[ Thu Sep 15 17:58:54 2022 ] Mean test loss of 258 batches: 3.7923290729522705. +[ Thu Sep 15 17:58:55 2022 ] Top1: 36.03% +[ Thu Sep 15 17:58:55 2022 ] Top5: 67.22% +[ Thu Sep 15 17:58:55 2022 ] Training epoch: 10 +[ Thu Sep 15 17:59:33 2022 ] Batch(92/123) done. Loss: 0.6539 lr:0.100000 network_time: 0.0534 +[ Thu Sep 15 17:59:44 2022 ] Eval epoch: 10 +[ Thu Sep 15 18:00:07 2022 ] Mean test loss of 258 batches: 2.477128744125366. +[ Thu Sep 15 18:00:07 2022 ] Top1: 43.65% +[ Thu Sep 15 18:00:07 2022 ] Top5: 77.15% +[ Thu Sep 15 18:00:07 2022 ] Training epoch: 11 +[ Thu Sep 15 18:00:37 2022 ] Batch(69/123) done. Loss: 0.6608 lr:0.100000 network_time: 0.0509 +[ Thu Sep 15 18:00:56 2022 ] Eval epoch: 11 +[ Thu Sep 15 18:01:19 2022 ] Mean test loss of 258 batches: 2.566040515899658. +[ Thu Sep 15 18:01:19 2022 ] Top1: 45.59% +[ Thu Sep 15 18:01:19 2022 ] Top5: 80.48% +[ Thu Sep 15 18:01:19 2022 ] Training epoch: 12 +[ Thu Sep 15 18:01:41 2022 ] Batch(46/123) done. Loss: 0.8870 lr:0.100000 network_time: 0.0499 +[ Thu Sep 15 18:02:09 2022 ] Eval epoch: 12 +[ Thu Sep 15 18:02:32 2022 ] Mean test loss of 258 batches: 2.7983829975128174. +[ Thu Sep 15 18:02:32 2022 ] Top1: 46.18% +[ Thu Sep 15 18:02:32 2022 ] Top5: 81.09% +[ Thu Sep 15 18:02:32 2022 ] Training epoch: 13 +[ Thu Sep 15 18:02:45 2022 ] Batch(23/123) done. Loss: 0.5512 lr:0.100000 network_time: 0.0504 +[ Thu Sep 15 18:03:21 2022 ] Eval epoch: 13 +[ Thu Sep 15 18:03:44 2022 ] Mean test loss of 258 batches: 3.2849953174591064. 
+[ Thu Sep 15 18:03:44 2022 ] Top1: 40.53% +[ Thu Sep 15 18:03:44 2022 ] Top5: 75.00% +[ Thu Sep 15 18:03:44 2022 ] Training epoch: 14 +[ Thu Sep 15 18:03:48 2022 ] Batch(0/123) done. Loss: 0.5177 lr:0.100000 network_time: 0.0880 +[ Thu Sep 15 18:04:25 2022 ] Batch(100/123) done. Loss: 0.6363 lr:0.100000 network_time: 0.0537 +[ Thu Sep 15 18:04:33 2022 ] Eval epoch: 14 +[ Thu Sep 15 18:04:56 2022 ] Mean test loss of 258 batches: 2.9385926723480225. +[ Thu Sep 15 18:04:56 2022 ] Top1: 43.08% +[ Thu Sep 15 18:04:56 2022 ] Top5: 77.56% +[ Thu Sep 15 18:04:56 2022 ] Training epoch: 15 +[ Thu Sep 15 18:05:29 2022 ] Batch(77/123) done. Loss: 0.6975 lr:0.100000 network_time: 0.0499 +[ Thu Sep 15 18:05:46 2022 ] Eval epoch: 15 +[ Thu Sep 15 18:06:09 2022 ] Mean test loss of 258 batches: 3.6805732250213623. +[ Thu Sep 15 18:06:09 2022 ] Top1: 37.67% +[ Thu Sep 15 18:06:09 2022 ] Top5: 71.69% +[ Thu Sep 15 18:06:09 2022 ] Training epoch: 16 +[ Thu Sep 15 18:06:33 2022 ] Batch(54/123) done. Loss: 0.5238 lr:0.100000 network_time: 0.0507 +[ Thu Sep 15 18:06:58 2022 ] Eval epoch: 16 +[ Thu Sep 15 18:07:21 2022 ] Mean test loss of 258 batches: 3.097450017929077. +[ Thu Sep 15 18:07:21 2022 ] Top1: 37.42% +[ Thu Sep 15 18:07:21 2022 ] Top5: 76.38% +[ Thu Sep 15 18:07:21 2022 ] Training epoch: 17 +[ Thu Sep 15 18:07:37 2022 ] Batch(31/123) done. Loss: 0.6358 lr:0.100000 network_time: 0.0481 +[ Thu Sep 15 18:08:11 2022 ] Eval epoch: 17 +[ Thu Sep 15 18:08:33 2022 ] Mean test loss of 258 batches: 3.424067497253418. +[ Thu Sep 15 18:08:33 2022 ] Top1: 42.69% +[ Thu Sep 15 18:08:34 2022 ] Top5: 77.68% +[ Thu Sep 15 18:08:34 2022 ] Training epoch: 18 +[ Thu Sep 15 18:08:41 2022 ] Batch(8/123) done. Loss: 0.3318 lr:0.100000 network_time: 0.0513 +[ Thu Sep 15 18:09:18 2022 ] Batch(108/123) done. Loss: 0.4070 lr:0.100000 network_time: 0.0516 +[ Thu Sep 15 18:09:23 2022 ] Eval epoch: 18 +[ Thu Sep 15 18:09:46 2022 ] Mean test loss of 258 batches: 2.758646011352539. 
+[ Thu Sep 15 18:09:46 2022 ] Top1: 45.64% +[ Thu Sep 15 18:09:46 2022 ] Top5: 77.97% +[ Thu Sep 15 18:09:46 2022 ] Training epoch: 19 +[ Thu Sep 15 18:10:22 2022 ] Batch(85/123) done. Loss: 0.5093 lr:0.100000 network_time: 0.0535 +[ Thu Sep 15 18:10:36 2022 ] Eval epoch: 19 +[ Thu Sep 15 18:10:59 2022 ] Mean test loss of 258 batches: 2.7793309688568115. +[ Thu Sep 15 18:10:59 2022 ] Top1: 46.62% +[ Thu Sep 15 18:10:59 2022 ] Top5: 79.77% +[ Thu Sep 15 18:10:59 2022 ] Training epoch: 20 +[ Thu Sep 15 18:11:26 2022 ] Batch(62/123) done. Loss: 0.3249 lr:0.100000 network_time: 0.0485 +[ Thu Sep 15 18:11:48 2022 ] Eval epoch: 20 +[ Thu Sep 15 18:12:11 2022 ] Mean test loss of 258 batches: 3.609997034072876. +[ Thu Sep 15 18:12:11 2022 ] Top1: 39.23% +[ Thu Sep 15 18:12:11 2022 ] Top5: 69.32% +[ Thu Sep 15 18:12:11 2022 ] Training epoch: 21 +[ Thu Sep 15 18:12:30 2022 ] Batch(39/123) done. Loss: 0.4764 lr:0.100000 network_time: 0.0517 +[ Thu Sep 15 18:13:01 2022 ] Eval epoch: 21 +[ Thu Sep 15 18:13:24 2022 ] Mean test loss of 258 batches: 2.8876986503601074. +[ Thu Sep 15 18:13:24 2022 ] Top1: 46.73% +[ Thu Sep 15 18:13:24 2022 ] Top5: 80.28% +[ Thu Sep 15 18:13:24 2022 ] Training epoch: 22 +[ Thu Sep 15 18:13:34 2022 ] Batch(16/123) done. Loss: 0.3911 lr:0.100000 network_time: 0.0489 +[ Thu Sep 15 18:14:11 2022 ] Batch(116/123) done. Loss: 0.3523 lr:0.100000 network_time: 0.0512 +[ Thu Sep 15 18:14:13 2022 ] Eval epoch: 22 +[ Thu Sep 15 18:14:36 2022 ] Mean test loss of 258 batches: 3.206068992614746. +[ Thu Sep 15 18:14:36 2022 ] Top1: 40.12% +[ Thu Sep 15 18:14:36 2022 ] Top5: 75.93% +[ Thu Sep 15 18:14:36 2022 ] Training epoch: 23 +[ Thu Sep 15 18:15:15 2022 ] Batch(93/123) done. Loss: 0.6690 lr:0.100000 network_time: 0.0495 +[ Thu Sep 15 18:15:26 2022 ] Eval epoch: 23 +[ Thu Sep 15 18:15:49 2022 ] Mean test loss of 258 batches: 2.487531900405884. 
+[ Thu Sep 15 18:15:49 2022 ] Top1: 50.01% +[ Thu Sep 15 18:15:49 2022 ] Top5: 81.78% +[ Thu Sep 15 18:15:49 2022 ] Training epoch: 24 +[ Thu Sep 15 18:16:19 2022 ] Batch(70/123) done. Loss: 0.3341 lr:0.100000 network_time: 0.0536 +[ Thu Sep 15 18:16:38 2022 ] Eval epoch: 24 +[ Thu Sep 15 18:17:00 2022 ] Mean test loss of 258 batches: 3.2020022869110107. +[ Thu Sep 15 18:17:00 2022 ] Top1: 46.64% +[ Thu Sep 15 18:17:00 2022 ] Top5: 81.62% +[ Thu Sep 15 18:17:00 2022 ] Training epoch: 25 +[ Thu Sep 15 18:17:22 2022 ] Batch(47/123) done. Loss: 0.2572 lr:0.100000 network_time: 0.0522 +[ Thu Sep 15 18:17:50 2022 ] Eval epoch: 25 +[ Thu Sep 15 18:18:13 2022 ] Mean test loss of 258 batches: 2.5256783962249756. +[ Thu Sep 15 18:18:13 2022 ] Top1: 53.64% +[ Thu Sep 15 18:18:13 2022 ] Top5: 85.21% +[ Thu Sep 15 18:18:13 2022 ] Training epoch: 26 +[ Thu Sep 15 18:18:27 2022 ] Batch(24/123) done. Loss: 0.4852 lr:0.100000 network_time: 0.0493 +[ Thu Sep 15 18:19:03 2022 ] Eval epoch: 26 +[ Thu Sep 15 18:19:26 2022 ] Mean test loss of 258 batches: 2.4896974563598633. +[ Thu Sep 15 18:19:26 2022 ] Top1: 49.52% +[ Thu Sep 15 18:19:26 2022 ] Top5: 81.65% +[ Thu Sep 15 18:19:26 2022 ] Training epoch: 27 +[ Thu Sep 15 18:19:30 2022 ] Batch(1/123) done. Loss: 0.1621 lr:0.100000 network_time: 0.0512 +[ Thu Sep 15 18:20:07 2022 ] Batch(101/123) done. Loss: 0.4268 lr:0.100000 network_time: 0.0532 +[ Thu Sep 15 18:20:15 2022 ] Eval epoch: 27 +[ Thu Sep 15 18:20:38 2022 ] Mean test loss of 258 batches: 2.5876340866088867. +[ Thu Sep 15 18:20:38 2022 ] Top1: 50.75% +[ Thu Sep 15 18:20:38 2022 ] Top5: 83.54% +[ Thu Sep 15 18:20:38 2022 ] Training epoch: 28 +[ Thu Sep 15 18:21:11 2022 ] Batch(78/123) done. Loss: 0.6055 lr:0.100000 network_time: 0.0523 +[ Thu Sep 15 18:21:27 2022 ] Eval epoch: 28 +[ Thu Sep 15 18:21:50 2022 ] Mean test loss of 258 batches: 2.8039703369140625. 
+[ Thu Sep 15 18:21:50 2022 ] Top1: 49.68% +[ Thu Sep 15 18:21:50 2022 ] Top5: 83.48% +[ Thu Sep 15 18:21:50 2022 ] Training epoch: 29 +[ Thu Sep 15 18:22:15 2022 ] Batch(55/123) done. Loss: 0.2838 lr:0.100000 network_time: 0.0543 +[ Thu Sep 15 18:22:40 2022 ] Eval epoch: 29 +[ Thu Sep 15 18:23:03 2022 ] Mean test loss of 258 batches: 2.4521985054016113. +[ Thu Sep 15 18:23:03 2022 ] Top1: 51.04% +[ Thu Sep 15 18:23:03 2022 ] Top5: 84.35% +[ Thu Sep 15 18:23:03 2022 ] Training epoch: 30 +[ Thu Sep 15 18:23:19 2022 ] Batch(32/123) done. Loss: 0.3162 lr:0.100000 network_time: 0.0495 +[ Thu Sep 15 18:23:52 2022 ] Eval epoch: 30 +[ Thu Sep 15 18:24:15 2022 ] Mean test loss of 258 batches: 2.2596545219421387. +[ Thu Sep 15 18:24:15 2022 ] Top1: 54.98% +[ Thu Sep 15 18:24:15 2022 ] Top5: 84.12% +[ Thu Sep 15 18:24:15 2022 ] Training epoch: 31 +[ Thu Sep 15 18:24:23 2022 ] Batch(9/123) done. Loss: 0.2001 lr:0.100000 network_time: 0.0504 +[ Thu Sep 15 18:24:59 2022 ] Batch(109/123) done. Loss: 0.2150 lr:0.100000 network_time: 0.0578 +[ Thu Sep 15 18:25:05 2022 ] Eval epoch: 31 +[ Thu Sep 15 18:25:27 2022 ] Mean test loss of 258 batches: 2.3336315155029297. +[ Thu Sep 15 18:25:27 2022 ] Top1: 53.68% +[ Thu Sep 15 18:25:27 2022 ] Top5: 83.42% +[ Thu Sep 15 18:25:27 2022 ] Training epoch: 32 +[ Thu Sep 15 18:26:03 2022 ] Batch(86/123) done. Loss: 0.2091 lr:0.100000 network_time: 0.0493 +[ Thu Sep 15 18:26:17 2022 ] Eval epoch: 32 +[ Thu Sep 15 18:26:39 2022 ] Mean test loss of 258 batches: 3.5852153301239014. +[ Thu Sep 15 18:26:39 2022 ] Top1: 39.52% +[ Thu Sep 15 18:26:39 2022 ] Top5: 75.45% +[ Thu Sep 15 18:26:39 2022 ] Training epoch: 33 +[ Thu Sep 15 18:27:07 2022 ] Batch(63/123) done. Loss: 0.1526 lr:0.100000 network_time: 0.0513 +[ Thu Sep 15 18:27:29 2022 ] Eval epoch: 33 +[ Thu Sep 15 18:27:52 2022 ] Mean test loss of 258 batches: 2.2551028728485107. 
+[ Thu Sep 15 18:27:52 2022 ] Top1: 55.06% +[ Thu Sep 15 18:27:52 2022 ] Top5: 85.82% +[ Thu Sep 15 18:27:52 2022 ] Training epoch: 34 +[ Thu Sep 15 18:28:11 2022 ] Batch(40/123) done. Loss: 0.3211 lr:0.100000 network_time: 0.0549 +[ Thu Sep 15 18:28:42 2022 ] Eval epoch: 34 +[ Thu Sep 15 18:29:05 2022 ] Mean test loss of 258 batches: 2.2728934288024902. +[ Thu Sep 15 18:29:05 2022 ] Top1: 52.71% +[ Thu Sep 15 18:29:05 2022 ] Top5: 85.59% +[ Thu Sep 15 18:29:05 2022 ] Training epoch: 35 +[ Thu Sep 15 18:29:16 2022 ] Batch(17/123) done. Loss: 0.1437 lr:0.100000 network_time: 0.0499 +[ Thu Sep 15 18:29:52 2022 ] Batch(117/123) done. Loss: 0.2225 lr:0.100000 network_time: 0.0519 +[ Thu Sep 15 18:29:54 2022 ] Eval epoch: 35 +[ Thu Sep 15 18:30:17 2022 ] Mean test loss of 258 batches: 2.5030672550201416. +[ Thu Sep 15 18:30:17 2022 ] Top1: 49.17% +[ Thu Sep 15 18:30:17 2022 ] Top5: 82.40% +[ Thu Sep 15 18:30:17 2022 ] Training epoch: 36 +[ Thu Sep 15 18:30:56 2022 ] Batch(94/123) done. Loss: 0.1613 lr:0.100000 network_time: 0.0522 +[ Thu Sep 15 18:31:06 2022 ] Eval epoch: 36 +[ Thu Sep 15 18:31:29 2022 ] Mean test loss of 258 batches: 3.4875588417053223. +[ Thu Sep 15 18:31:29 2022 ] Top1: 38.61% +[ Thu Sep 15 18:31:29 2022 ] Top5: 70.11% +[ Thu Sep 15 18:31:29 2022 ] Training epoch: 37 +[ Thu Sep 15 18:31:59 2022 ] Batch(71/123) done. Loss: 0.3926 lr:0.100000 network_time: 0.0515 +[ Thu Sep 15 18:32:18 2022 ] Eval epoch: 37 +[ Thu Sep 15 18:32:41 2022 ] Mean test loss of 258 batches: 2.094217300415039. +[ Thu Sep 15 18:32:41 2022 ] Top1: 56.10% +[ Thu Sep 15 18:32:41 2022 ] Top5: 86.88% +[ Thu Sep 15 18:32:41 2022 ] Training epoch: 38 +[ Thu Sep 15 18:33:03 2022 ] Batch(48/123) done. Loss: 0.1567 lr:0.100000 network_time: 0.0518 +[ Thu Sep 15 18:33:31 2022 ] Eval epoch: 38 +[ Thu Sep 15 18:33:53 2022 ] Mean test loss of 258 batches: 3.110715866088867. 
+[ Thu Sep 15 18:33:53 2022 ] Top1: 43.77% +[ Thu Sep 15 18:33:53 2022 ] Top5: 78.10% +[ Thu Sep 15 18:33:53 2022 ] Training epoch: 39 +[ Thu Sep 15 18:34:07 2022 ] Batch(25/123) done. Loss: 0.3969 lr:0.100000 network_time: 0.0491 +[ Thu Sep 15 18:34:43 2022 ] Eval epoch: 39 +[ Thu Sep 15 18:35:06 2022 ] Mean test loss of 258 batches: 2.423691987991333. +[ Thu Sep 15 18:35:06 2022 ] Top1: 50.49% +[ Thu Sep 15 18:35:06 2022 ] Top5: 85.05% +[ Thu Sep 15 18:35:06 2022 ] Training epoch: 40 +[ Thu Sep 15 18:35:11 2022 ] Batch(2/123) done. Loss: 0.3346 lr:0.100000 network_time: 0.0495 +[ Thu Sep 15 18:35:47 2022 ] Batch(102/123) done. Loss: 0.2849 lr:0.100000 network_time: 0.0472 +[ Thu Sep 15 18:35:55 2022 ] Eval epoch: 40 +[ Thu Sep 15 18:36:18 2022 ] Mean test loss of 258 batches: 1.968823790550232. +[ Thu Sep 15 18:36:18 2022 ] Top1: 56.49% +[ Thu Sep 15 18:36:18 2022 ] Top5: 86.26% +[ Thu Sep 15 18:36:18 2022 ] Training epoch: 41 +[ Thu Sep 15 18:36:52 2022 ] Batch(79/123) done. Loss: 0.2500 lr:0.100000 network_time: 0.0508 +[ Thu Sep 15 18:37:08 2022 ] Eval epoch: 41 +[ Thu Sep 15 18:37:30 2022 ] Mean test loss of 258 batches: 3.1059272289276123. +[ Thu Sep 15 18:37:30 2022 ] Top1: 46.44% +[ Thu Sep 15 18:37:30 2022 ] Top5: 79.26% +[ Thu Sep 15 18:37:30 2022 ] Training epoch: 42 +[ Thu Sep 15 18:37:56 2022 ] Batch(56/123) done. Loss: 0.2638 lr:0.100000 network_time: 0.0531 +[ Thu Sep 15 18:38:20 2022 ] Eval epoch: 42 +[ Thu Sep 15 18:38:43 2022 ] Mean test loss of 258 batches: 3.350965976715088. +[ Thu Sep 15 18:38:43 2022 ] Top1: 46.13% +[ Thu Sep 15 18:38:43 2022 ] Top5: 79.26% +[ Thu Sep 15 18:38:43 2022 ] Training epoch: 43 +[ Thu Sep 15 18:39:00 2022 ] Batch(33/123) done. Loss: 0.1191 lr:0.100000 network_time: 0.0537 +[ Thu Sep 15 18:39:33 2022 ] Eval epoch: 43 +[ Thu Sep 15 18:39:56 2022 ] Mean test loss of 258 batches: 2.9140000343322754. 
+[ Thu Sep 15 18:39:56 2022 ] Top1: 49.75% +[ Thu Sep 15 18:39:56 2022 ] Top5: 83.55% +[ Thu Sep 15 18:39:56 2022 ] Training epoch: 44 +[ Thu Sep 15 18:40:04 2022 ] Batch(10/123) done. Loss: 0.1089 lr:0.100000 network_time: 0.0501 +[ Thu Sep 15 18:40:41 2022 ] Batch(110/123) done. Loss: 0.1441 lr:0.100000 network_time: 0.0509 +[ Thu Sep 15 18:40:45 2022 ] Eval epoch: 44 +[ Thu Sep 15 18:41:08 2022 ] Mean test loss of 258 batches: 2.5323615074157715. +[ Thu Sep 15 18:41:08 2022 ] Top1: 49.69% +[ Thu Sep 15 18:41:08 2022 ] Top5: 81.96% +[ Thu Sep 15 18:41:08 2022 ] Training epoch: 45 +[ Thu Sep 15 18:41:44 2022 ] Batch(87/123) done. Loss: 0.0759 lr:0.100000 network_time: 0.0525 +[ Thu Sep 15 18:41:57 2022 ] Eval epoch: 45 +[ Thu Sep 15 18:42:20 2022 ] Mean test loss of 258 batches: 2.6945436000823975. +[ Thu Sep 15 18:42:20 2022 ] Top1: 48.29% +[ Thu Sep 15 18:42:20 2022 ] Top5: 80.38% +[ Thu Sep 15 18:42:20 2022 ] Training epoch: 46 +[ Thu Sep 15 18:42:48 2022 ] Batch(64/123) done. Loss: 0.1732 lr:0.100000 network_time: 0.0540 +[ Thu Sep 15 18:43:10 2022 ] Eval epoch: 46 +[ Thu Sep 15 18:43:33 2022 ] Mean test loss of 258 batches: 3.0958104133605957. +[ Thu Sep 15 18:43:33 2022 ] Top1: 48.04% +[ Thu Sep 15 18:43:33 2022 ] Top5: 81.45% +[ Thu Sep 15 18:43:33 2022 ] Training epoch: 47 +[ Thu Sep 15 18:43:53 2022 ] Batch(41/123) done. Loss: 0.3669 lr:0.100000 network_time: 0.0520 +[ Thu Sep 15 18:44:23 2022 ] Eval epoch: 47 +[ Thu Sep 15 18:44:46 2022 ] Mean test loss of 258 batches: 3.1610655784606934. +[ Thu Sep 15 18:44:46 2022 ] Top1: 48.95% +[ Thu Sep 15 18:44:46 2022 ] Top5: 82.23% +[ Thu Sep 15 18:44:46 2022 ] Training epoch: 48 +[ Thu Sep 15 18:44:57 2022 ] Batch(18/123) done. Loss: 0.2774 lr:0.100000 network_time: 0.0512 +[ Thu Sep 15 18:45:33 2022 ] Batch(118/123) done. Loss: 0.1066 lr:0.100000 network_time: 0.0502 +[ Thu Sep 15 18:45:35 2022 ] Eval epoch: 48 +[ Thu Sep 15 18:45:58 2022 ] Mean test loss of 258 batches: 2.4199440479278564. 
+[ Thu Sep 15 18:45:58 2022 ] Top1: 53.91% +[ Thu Sep 15 18:45:58 2022 ] Top5: 84.36% +[ Thu Sep 15 18:45:58 2022 ] Training epoch: 49 +[ Thu Sep 15 18:46:37 2022 ] Batch(95/123) done. Loss: 0.1835 lr:0.100000 network_time: 0.0520 +[ Thu Sep 15 18:46:47 2022 ] Eval epoch: 49 +[ Thu Sep 15 18:47:10 2022 ] Mean test loss of 258 batches: 2.5721118450164795. +[ Thu Sep 15 18:47:10 2022 ] Top1: 53.58% +[ Thu Sep 15 18:47:10 2022 ] Top5: 84.75% +[ Thu Sep 15 18:47:10 2022 ] Training epoch: 50 +[ Thu Sep 15 18:47:41 2022 ] Batch(72/123) done. Loss: 0.1424 lr:0.100000 network_time: 0.0493 +[ Thu Sep 15 18:47:59 2022 ] Eval epoch: 50 +[ Thu Sep 15 18:48:22 2022 ] Mean test loss of 258 batches: 2.8164403438568115. +[ Thu Sep 15 18:48:22 2022 ] Top1: 49.89% +[ Thu Sep 15 18:48:22 2022 ] Top5: 83.20% +[ Thu Sep 15 18:48:22 2022 ] Training epoch: 51 +[ Thu Sep 15 18:48:45 2022 ] Batch(49/123) done. Loss: 0.2530 lr:0.100000 network_time: 0.0525 +[ Thu Sep 15 18:49:11 2022 ] Eval epoch: 51 +[ Thu Sep 15 18:49:34 2022 ] Mean test loss of 258 batches: 3.0064849853515625. +[ Thu Sep 15 18:49:34 2022 ] Top1: 48.81% +[ Thu Sep 15 18:49:34 2022 ] Top5: 81.22% +[ Thu Sep 15 18:49:34 2022 ] Training epoch: 52 +[ Thu Sep 15 18:49:48 2022 ] Batch(26/123) done. Loss: 0.2124 lr:0.100000 network_time: 0.0564 +[ Thu Sep 15 18:50:23 2022 ] Eval epoch: 52 +[ Thu Sep 15 18:50:46 2022 ] Mean test loss of 258 batches: 2.5954577922821045. +[ Thu Sep 15 18:50:47 2022 ] Top1: 51.74% +[ Thu Sep 15 18:50:47 2022 ] Top5: 84.36% +[ Thu Sep 15 18:50:47 2022 ] Training epoch: 53 +[ Thu Sep 15 18:50:52 2022 ] Batch(3/123) done. Loss: 0.1510 lr:0.100000 network_time: 0.0513 +[ Thu Sep 15 18:51:29 2022 ] Batch(103/123) done. Loss: 0.1510 lr:0.100000 network_time: 0.0507 +[ Thu Sep 15 18:51:36 2022 ] Eval epoch: 53 +[ Thu Sep 15 18:51:59 2022 ] Mean test loss of 258 batches: 2.450814723968506. 
+[ Thu Sep 15 18:51:59 2022 ] Top1: 52.99% +[ Thu Sep 15 18:51:59 2022 ] Top5: 84.93% +[ Thu Sep 15 18:51:59 2022 ] Training epoch: 54 +[ Thu Sep 15 18:52:33 2022 ] Batch(80/123) done. Loss: 0.4379 lr:0.100000 network_time: 0.0502 +[ Thu Sep 15 18:52:49 2022 ] Eval epoch: 54 +[ Thu Sep 15 18:53:11 2022 ] Mean test loss of 258 batches: 2.9808332920074463. +[ Thu Sep 15 18:53:12 2022 ] Top1: 49.61% +[ Thu Sep 15 18:53:12 2022 ] Top5: 82.16% +[ Thu Sep 15 18:53:12 2022 ] Training epoch: 55 +[ Thu Sep 15 18:53:37 2022 ] Batch(57/123) done. Loss: 0.1202 lr:0.100000 network_time: 0.0512 +[ Thu Sep 15 18:54:01 2022 ] Eval epoch: 55 +[ Thu Sep 15 18:54:24 2022 ] Mean test loss of 258 batches: 2.49838924407959. +[ Thu Sep 15 18:54:24 2022 ] Top1: 51.74% +[ Thu Sep 15 18:54:24 2022 ] Top5: 84.11% +[ Thu Sep 15 18:54:24 2022 ] Training epoch: 56 +[ Thu Sep 15 18:54:41 2022 ] Batch(34/123) done. Loss: 0.1587 lr:0.100000 network_time: 0.0521 +[ Thu Sep 15 18:55:13 2022 ] Eval epoch: 56 +[ Thu Sep 15 18:55:36 2022 ] Mean test loss of 258 batches: 2.4359495639801025. +[ Thu Sep 15 18:55:36 2022 ] Top1: 53.91% +[ Thu Sep 15 18:55:36 2022 ] Top5: 84.18% +[ Thu Sep 15 18:55:36 2022 ] Training epoch: 57 +[ Thu Sep 15 18:55:45 2022 ] Batch(11/123) done. Loss: 0.1714 lr:0.100000 network_time: 0.0544 +[ Thu Sep 15 18:56:21 2022 ] Batch(111/123) done. Loss: 0.1316 lr:0.100000 network_time: 0.0676 +[ Thu Sep 15 18:56:26 2022 ] Eval epoch: 57 +[ Thu Sep 15 18:56:49 2022 ] Mean test loss of 258 batches: 2.2723708152770996. +[ Thu Sep 15 18:56:49 2022 ] Top1: 55.72% +[ Thu Sep 15 18:56:49 2022 ] Top5: 86.43% +[ Thu Sep 15 18:56:49 2022 ] Training epoch: 58 +[ Thu Sep 15 18:57:26 2022 ] Batch(88/123) done. Loss: 0.0955 lr:0.100000 network_time: 0.0541 +[ Thu Sep 15 18:57:38 2022 ] Eval epoch: 58 +[ Thu Sep 15 18:58:01 2022 ] Mean test loss of 258 batches: 3.134904623031616. 
+[ Thu Sep 15 18:58:01 2022 ] Top1: 47.00% +[ Thu Sep 15 18:58:01 2022 ] Top5: 79.91% +[ Thu Sep 15 18:58:01 2022 ] Training epoch: 59 +[ Thu Sep 15 18:58:30 2022 ] Batch(65/123) done. Loss: 0.1218 lr:0.100000 network_time: 0.0520 +[ Thu Sep 15 18:58:51 2022 ] Eval epoch: 59 +[ Thu Sep 15 18:59:14 2022 ] Mean test loss of 258 batches: 2.147733449935913. +[ Thu Sep 15 18:59:14 2022 ] Top1: 57.50% +[ Thu Sep 15 18:59:14 2022 ] Top5: 88.17% +[ Thu Sep 15 18:59:14 2022 ] Training epoch: 60 +[ Thu Sep 15 18:59:34 2022 ] Batch(42/123) done. Loss: 0.0591 lr:0.100000 network_time: 0.0594 +[ Thu Sep 15 19:00:03 2022 ] Eval epoch: 60 +[ Thu Sep 15 19:00:26 2022 ] Mean test loss of 258 batches: 2.7868356704711914. +[ Thu Sep 15 19:00:26 2022 ] Top1: 52.62% +[ Thu Sep 15 19:00:26 2022 ] Top5: 84.46% +[ Thu Sep 15 19:00:26 2022 ] Training epoch: 61 +[ Thu Sep 15 19:00:38 2022 ] Batch(19/123) done. Loss: 0.1423 lr:0.010000 network_time: 0.0548 +[ Thu Sep 15 19:01:14 2022 ] Batch(119/123) done. Loss: 0.0520 lr:0.010000 network_time: 0.0510 +[ Thu Sep 15 19:01:16 2022 ] Eval epoch: 61 +[ Thu Sep 15 19:01:39 2022 ] Mean test loss of 258 batches: 1.9882783889770508. +[ Thu Sep 15 19:01:39 2022 ] Top1: 61.74% +[ Thu Sep 15 19:01:39 2022 ] Top5: 90.26% +[ Thu Sep 15 19:01:39 2022 ] Training epoch: 62 +[ Thu Sep 15 19:02:18 2022 ] Batch(96/123) done. Loss: 0.0392 lr:0.010000 network_time: 0.0556 +[ Thu Sep 15 19:02:28 2022 ] Eval epoch: 62 +[ Thu Sep 15 19:02:51 2022 ] Mean test loss of 258 batches: 1.8952713012695312. +[ Thu Sep 15 19:02:51 2022 ] Top1: 62.84% +[ Thu Sep 15 19:02:51 2022 ] Top5: 90.74% +[ Thu Sep 15 19:02:51 2022 ] Training epoch: 63 +[ Thu Sep 15 19:03:23 2022 ] Batch(73/123) done. Loss: 0.0216 lr:0.010000 network_time: 0.0502 +[ Thu Sep 15 19:03:41 2022 ] Eval epoch: 63 +[ Thu Sep 15 19:04:04 2022 ] Mean test loss of 258 batches: 2.003861665725708. 
+[ Thu Sep 15 19:04:04 2022 ] Top1: 62.39% +[ Thu Sep 15 19:04:04 2022 ] Top5: 90.72% +[ Thu Sep 15 19:04:04 2022 ] Training epoch: 64 +[ Thu Sep 15 19:04:27 2022 ] Batch(50/123) done. Loss: 0.0158 lr:0.010000 network_time: 0.0504 +[ Thu Sep 15 19:04:53 2022 ] Eval epoch: 64 +[ Thu Sep 15 19:05:17 2022 ] Mean test loss of 258 batches: 1.8763132095336914. +[ Thu Sep 15 19:05:17 2022 ] Top1: 63.09% +[ Thu Sep 15 19:05:17 2022 ] Top5: 90.86% +[ Thu Sep 15 19:05:17 2022 ] Training epoch: 65 +[ Thu Sep 15 19:05:31 2022 ] Batch(27/123) done. Loss: 0.0109 lr:0.010000 network_time: 0.0502 +[ Thu Sep 15 19:06:06 2022 ] Eval epoch: 65 +[ Thu Sep 15 19:06:29 2022 ] Mean test loss of 258 batches: 1.8442339897155762. +[ Thu Sep 15 19:06:29 2022 ] Top1: 63.47% +[ Thu Sep 15 19:06:29 2022 ] Top5: 91.11% +[ Thu Sep 15 19:06:29 2022 ] Training epoch: 66 +[ Thu Sep 15 19:06:35 2022 ] Batch(4/123) done. Loss: 0.0147 lr:0.010000 network_time: 0.0531 +[ Thu Sep 15 19:07:12 2022 ] Batch(104/123) done. Loss: 0.0279 lr:0.010000 network_time: 0.0553 +[ Thu Sep 15 19:07:18 2022 ] Eval epoch: 66 +[ Thu Sep 15 19:07:41 2022 ] Mean test loss of 258 batches: 1.8917242288589478. +[ Thu Sep 15 19:07:41 2022 ] Top1: 63.57% +[ Thu Sep 15 19:07:41 2022 ] Top5: 90.88% +[ Thu Sep 15 19:07:41 2022 ] Training epoch: 67 +[ Thu Sep 15 19:08:16 2022 ] Batch(81/123) done. Loss: 0.0362 lr:0.010000 network_time: 0.0500 +[ Thu Sep 15 19:08:31 2022 ] Eval epoch: 67 +[ Thu Sep 15 19:08:54 2022 ] Mean test loss of 258 batches: 2.1126983165740967. +[ Thu Sep 15 19:08:54 2022 ] Top1: 59.51% +[ Thu Sep 15 19:08:54 2022 ] Top5: 89.03% +[ Thu Sep 15 19:08:54 2022 ] Training epoch: 68 +[ Thu Sep 15 19:09:21 2022 ] Batch(58/123) done. Loss: 0.0134 lr:0.010000 network_time: 0.0545 +[ Thu Sep 15 19:09:44 2022 ] Eval epoch: 68 +[ Thu Sep 15 19:10:07 2022 ] Mean test loss of 258 batches: 1.7624281644821167. 
+[ Thu Sep 15 19:10:07 2022 ] Top1: 63.74% +[ Thu Sep 15 19:10:07 2022 ] Top5: 91.11% +[ Thu Sep 15 19:10:07 2022 ] Training epoch: 69 +[ Thu Sep 15 19:10:25 2022 ] Batch(35/123) done. Loss: 0.0205 lr:0.010000 network_time: 0.0537 +[ Thu Sep 15 19:10:57 2022 ] Eval epoch: 69 +[ Thu Sep 15 19:11:20 2022 ] Mean test loss of 258 batches: 1.856307864189148. +[ Thu Sep 15 19:11:20 2022 ] Top1: 63.09% +[ Thu Sep 15 19:11:20 2022 ] Top5: 90.88% +[ Thu Sep 15 19:11:20 2022 ] Training epoch: 70 +[ Thu Sep 15 19:11:29 2022 ] Batch(12/123) done. Loss: 0.0041 lr:0.010000 network_time: 0.0510 +[ Thu Sep 15 19:12:05 2022 ] Batch(112/123) done. Loss: 0.0076 lr:0.010000 network_time: 0.0499 +[ Thu Sep 15 19:12:09 2022 ] Eval epoch: 70 +[ Thu Sep 15 19:12:32 2022 ] Mean test loss of 258 batches: 1.915732502937317. +[ Thu Sep 15 19:12:32 2022 ] Top1: 63.19% +[ Thu Sep 15 19:12:32 2022 ] Top5: 90.90% +[ Thu Sep 15 19:12:32 2022 ] Training epoch: 71 +[ Thu Sep 15 19:13:09 2022 ] Batch(89/123) done. Loss: 0.0098 lr:0.010000 network_time: 0.0484 +[ Thu Sep 15 19:13:22 2022 ] Eval epoch: 71 +[ Thu Sep 15 19:13:45 2022 ] Mean test loss of 258 batches: 1.8895395994186401. +[ Thu Sep 15 19:13:45 2022 ] Top1: 63.75% +[ Thu Sep 15 19:13:45 2022 ] Top5: 91.01% +[ Thu Sep 15 19:13:45 2022 ] Training epoch: 72 +[ Thu Sep 15 19:14:13 2022 ] Batch(66/123) done. Loss: 0.0059 lr:0.010000 network_time: 0.0547 +[ Thu Sep 15 19:14:34 2022 ] Eval epoch: 72 +[ Thu Sep 15 19:14:57 2022 ] Mean test loss of 258 batches: 1.7950252294540405. +[ Thu Sep 15 19:14:58 2022 ] Top1: 64.19% +[ Thu Sep 15 19:14:58 2022 ] Top5: 91.25% +[ Thu Sep 15 19:14:58 2022 ] Training epoch: 73 +[ Thu Sep 15 19:15:17 2022 ] Batch(43/123) done. Loss: 0.0060 lr:0.010000 network_time: 0.0522 +[ Thu Sep 15 19:15:47 2022 ] Eval epoch: 73 +[ Thu Sep 15 19:16:09 2022 ] Mean test loss of 258 batches: 1.905718445777893. 
+[ Thu Sep 15 19:16:09 2022 ] Top1: 62.58% +[ Thu Sep 15 19:16:09 2022 ] Top5: 90.58% +[ Thu Sep 15 19:16:10 2022 ] Training epoch: 74 +[ Thu Sep 15 19:16:21 2022 ] Batch(20/123) done. Loss: 0.0056 lr:0.010000 network_time: 0.0519 +[ Thu Sep 15 19:16:58 2022 ] Batch(120/123) done. Loss: 0.0050 lr:0.010000 network_time: 0.0489 +[ Thu Sep 15 19:16:59 2022 ] Eval epoch: 74 +[ Thu Sep 15 19:17:21 2022 ] Mean test loss of 258 batches: 2.057525873184204. +[ Thu Sep 15 19:17:21 2022 ] Top1: 62.66% +[ Thu Sep 15 19:17:21 2022 ] Top5: 90.36% +[ Thu Sep 15 19:17:21 2022 ] Training epoch: 75 +[ Thu Sep 15 19:18:01 2022 ] Batch(97/123) done. Loss: 0.0047 lr:0.010000 network_time: 0.0507 +[ Thu Sep 15 19:18:11 2022 ] Eval epoch: 75 +[ Thu Sep 15 19:18:34 2022 ] Mean test loss of 258 batches: 1.8387532234191895. +[ Thu Sep 15 19:18:34 2022 ] Top1: 63.74% +[ Thu Sep 15 19:18:34 2022 ] Top5: 91.15% +[ Thu Sep 15 19:18:34 2022 ] Training epoch: 76 +[ Thu Sep 15 19:19:05 2022 ] Batch(74/123) done. Loss: 0.0057 lr:0.010000 network_time: 0.0508 +[ Thu Sep 15 19:19:23 2022 ] Eval epoch: 76 +[ Thu Sep 15 19:19:46 2022 ] Mean test loss of 258 batches: 1.8769611120224. +[ Thu Sep 15 19:19:46 2022 ] Top1: 62.75% +[ Thu Sep 15 19:19:46 2022 ] Top5: 90.65% +[ Thu Sep 15 19:19:46 2022 ] Training epoch: 77 +[ Thu Sep 15 19:20:09 2022 ] Batch(51/123) done. Loss: 0.0035 lr:0.010000 network_time: 0.0497 +[ Thu Sep 15 19:20:35 2022 ] Eval epoch: 77 +[ Thu Sep 15 19:20:58 2022 ] Mean test loss of 258 batches: 2.2678627967834473. +[ Thu Sep 15 19:20:58 2022 ] Top1: 57.30% +[ Thu Sep 15 19:20:58 2022 ] Top5: 87.72% +[ Thu Sep 15 19:20:59 2022 ] Training epoch: 78 +[ Thu Sep 15 19:21:13 2022 ] Batch(28/123) done. Loss: 0.0167 lr:0.010000 network_time: 0.0475 +[ Thu Sep 15 19:21:47 2022 ] Eval epoch: 78 +[ Thu Sep 15 19:22:10 2022 ] Mean test loss of 258 batches: 1.913560152053833. 
+[ Thu Sep 15 19:22:10 2022 ] Top1: 63.26% +[ Thu Sep 15 19:22:10 2022 ] Top5: 90.94% +[ Thu Sep 15 19:22:11 2022 ] Training epoch: 79 +[ Thu Sep 15 19:22:17 2022 ] Batch(5/123) done. Loss: 0.0013 lr:0.010000 network_time: 0.0480 +[ Thu Sep 15 19:22:53 2022 ] Batch(105/123) done. Loss: 0.0101 lr:0.010000 network_time: 0.0585 +[ Thu Sep 15 19:23:00 2022 ] Eval epoch: 79 +[ Thu Sep 15 19:23:23 2022 ] Mean test loss of 258 batches: 2.011691093444824. +[ Thu Sep 15 19:23:23 2022 ] Top1: 60.81% +[ Thu Sep 15 19:23:23 2022 ] Top5: 89.62% +[ Thu Sep 15 19:23:23 2022 ] Training epoch: 80 +[ Thu Sep 15 19:23:57 2022 ] Batch(82/123) done. Loss: 0.0062 lr:0.010000 network_time: 0.0535 +[ Thu Sep 15 19:24:12 2022 ] Eval epoch: 80 +[ Thu Sep 15 19:24:36 2022 ] Mean test loss of 258 batches: 1.7865822315216064. +[ Thu Sep 15 19:24:36 2022 ] Top1: 64.81% +[ Thu Sep 15 19:24:36 2022 ] Top5: 91.50% +[ Thu Sep 15 19:24:36 2022 ] Training epoch: 81 +[ Thu Sep 15 19:25:02 2022 ] Batch(59/123) done. Loss: 0.0120 lr:0.001000 network_time: 0.0486 +[ Thu Sep 15 19:25:25 2022 ] Eval epoch: 81 +[ Thu Sep 15 19:25:47 2022 ] Mean test loss of 258 batches: 1.9796819686889648. +[ Thu Sep 15 19:25:47 2022 ] Top1: 63.44% +[ Thu Sep 15 19:25:48 2022 ] Top5: 90.97% +[ Thu Sep 15 19:25:48 2022 ] Training epoch: 82 +[ Thu Sep 15 19:26:05 2022 ] Batch(36/123) done. Loss: 0.0259 lr:0.001000 network_time: 0.0502 +[ Thu Sep 15 19:26:37 2022 ] Eval epoch: 82 +[ Thu Sep 15 19:27:00 2022 ] Mean test loss of 258 batches: 1.9062645435333252. +[ Thu Sep 15 19:27:00 2022 ] Top1: 63.77% +[ Thu Sep 15 19:27:00 2022 ] Top5: 90.79% +[ Thu Sep 15 19:27:00 2022 ] Training epoch: 83 +[ Thu Sep 15 19:27:10 2022 ] Batch(13/123) done. Loss: 0.0035 lr:0.001000 network_time: 0.0514 +[ Thu Sep 15 19:27:46 2022 ] Batch(113/123) done. Loss: 0.0037 lr:0.001000 network_time: 0.0515 +[ Thu Sep 15 19:27:50 2022 ] Eval epoch: 83 +[ Thu Sep 15 19:28:13 2022 ] Mean test loss of 258 batches: 1.767254114151001. 
+[ Thu Sep 15 19:28:13 2022 ] Top1: 64.40% +[ Thu Sep 15 19:28:13 2022 ] Top5: 91.42% +[ Thu Sep 15 19:28:13 2022 ] Training epoch: 84 +[ Thu Sep 15 19:28:50 2022 ] Batch(90/123) done. Loss: 0.0036 lr:0.001000 network_time: 0.0538 +[ Thu Sep 15 19:29:02 2022 ] Eval epoch: 84 +[ Thu Sep 15 19:29:24 2022 ] Mean test loss of 258 batches: 1.8931705951690674. +[ Thu Sep 15 19:29:25 2022 ] Top1: 63.83% +[ Thu Sep 15 19:29:25 2022 ] Top5: 91.12% +[ Thu Sep 15 19:29:25 2022 ] Training epoch: 85 +[ Thu Sep 15 19:29:53 2022 ] Batch(67/123) done. Loss: 0.0086 lr:0.001000 network_time: 0.0503 +[ Thu Sep 15 19:30:14 2022 ] Eval epoch: 85 +[ Thu Sep 15 19:30:37 2022 ] Mean test loss of 258 batches: 1.8183932304382324. +[ Thu Sep 15 19:30:37 2022 ] Top1: 64.40% +[ Thu Sep 15 19:30:37 2022 ] Top5: 91.35% +[ Thu Sep 15 19:30:37 2022 ] Training epoch: 86 +[ Thu Sep 15 19:30:57 2022 ] Batch(44/123) done. Loss: 0.0045 lr:0.001000 network_time: 0.0499 +[ Thu Sep 15 19:31:26 2022 ] Eval epoch: 86 +[ Thu Sep 15 19:31:49 2022 ] Mean test loss of 258 batches: 1.9683622121810913. +[ Thu Sep 15 19:31:49 2022 ] Top1: 62.00% +[ Thu Sep 15 19:31:49 2022 ] Top5: 89.91% +[ Thu Sep 15 19:31:49 2022 ] Training epoch: 87 +[ Thu Sep 15 19:32:01 2022 ] Batch(21/123) done. Loss: 0.0036 lr:0.001000 network_time: 0.0562 +[ Thu Sep 15 19:32:38 2022 ] Batch(121/123) done. Loss: 0.0027 lr:0.001000 network_time: 0.0530 +[ Thu Sep 15 19:32:38 2022 ] Eval epoch: 87 +[ Thu Sep 15 19:33:01 2022 ] Mean test loss of 258 batches: 1.8199169635772705. +[ Thu Sep 15 19:33:01 2022 ] Top1: 64.49% +[ Thu Sep 15 19:33:01 2022 ] Top5: 91.22% +[ Thu Sep 15 19:33:01 2022 ] Training epoch: 88 +[ Thu Sep 15 19:33:42 2022 ] Batch(98/123) done. Loss: 0.0041 lr:0.001000 network_time: 0.0566 +[ Thu Sep 15 19:33:51 2022 ] Eval epoch: 88 +[ Thu Sep 15 19:34:14 2022 ] Mean test loss of 258 batches: 1.8484878540039062. 
+[ Thu Sep 15 19:34:14 2022 ] Top1: 63.99% +[ Thu Sep 15 19:34:14 2022 ] Top5: 91.19% +[ Thu Sep 15 19:34:14 2022 ] Training epoch: 89 +[ Thu Sep 15 19:34:45 2022 ] Batch(75/123) done. Loss: 0.0067 lr:0.001000 network_time: 0.0500 +[ Thu Sep 15 19:35:03 2022 ] Eval epoch: 89 +[ Thu Sep 15 19:35:26 2022 ] Mean test loss of 258 batches: 1.8942618370056152. +[ Thu Sep 15 19:35:26 2022 ] Top1: 62.35% +[ Thu Sep 15 19:35:26 2022 ] Top5: 90.16% +[ Thu Sep 15 19:35:26 2022 ] Training epoch: 90 +[ Thu Sep 15 19:35:50 2022 ] Batch(52/123) done. Loss: 0.0045 lr:0.001000 network_time: 0.0503 +[ Thu Sep 15 19:36:16 2022 ] Eval epoch: 90 +[ Thu Sep 15 19:36:38 2022 ] Mean test loss of 258 batches: 1.8252038955688477. +[ Thu Sep 15 19:36:38 2022 ] Top1: 64.66% +[ Thu Sep 15 19:36:39 2022 ] Top5: 91.42% +[ Thu Sep 15 19:36:39 2022 ] Training epoch: 91 +[ Thu Sep 15 19:36:54 2022 ] Batch(29/123) done. Loss: 0.0031 lr:0.001000 network_time: 0.0505 +[ Thu Sep 15 19:37:28 2022 ] Eval epoch: 91 +[ Thu Sep 15 19:37:51 2022 ] Mean test loss of 258 batches: 1.8925552368164062. +[ Thu Sep 15 19:37:51 2022 ] Top1: 63.90% +[ Thu Sep 15 19:37:52 2022 ] Top5: 90.99% +[ Thu Sep 15 19:37:52 2022 ] Training epoch: 92 +[ Thu Sep 15 19:37:58 2022 ] Batch(6/123) done. Loss: 0.0046 lr:0.001000 network_time: 0.0621 +[ Thu Sep 15 19:38:35 2022 ] Batch(106/123) done. Loss: 0.0084 lr:0.001000 network_time: 0.0526 +[ Thu Sep 15 19:38:41 2022 ] Eval epoch: 92 +[ Thu Sep 15 19:39:04 2022 ] Mean test loss of 258 batches: 1.8438127040863037. +[ Thu Sep 15 19:39:04 2022 ] Top1: 64.34% +[ Thu Sep 15 19:39:04 2022 ] Top5: 91.21% +[ Thu Sep 15 19:39:04 2022 ] Training epoch: 93 +[ Thu Sep 15 19:39:39 2022 ] Batch(83/123) done. Loss: 0.0102 lr:0.001000 network_time: 0.0537 +[ Thu Sep 15 19:39:54 2022 ] Eval epoch: 93 +[ Thu Sep 15 19:40:16 2022 ] Mean test loss of 258 batches: 1.8425955772399902. 
+[ Thu Sep 15 19:40:16 2022 ] Top1: 64.31% +[ Thu Sep 15 19:40:16 2022 ] Top5: 91.17% +[ Thu Sep 15 19:40:17 2022 ] Training epoch: 94 +[ Thu Sep 15 19:40:43 2022 ] Batch(60/123) done. Loss: 0.0067 lr:0.001000 network_time: 0.0549 +[ Thu Sep 15 19:41:06 2022 ] Eval epoch: 94 +[ Thu Sep 15 19:41:29 2022 ] Mean test loss of 258 batches: 1.8022571802139282. +[ Thu Sep 15 19:41:29 2022 ] Top1: 64.40% +[ Thu Sep 15 19:41:29 2022 ] Top5: 91.19% +[ Thu Sep 15 19:41:29 2022 ] Training epoch: 95 +[ Thu Sep 15 19:41:47 2022 ] Batch(37/123) done. Loss: 0.0038 lr:0.001000 network_time: 0.0516 +[ Thu Sep 15 19:42:18 2022 ] Eval epoch: 95 +[ Thu Sep 15 19:42:41 2022 ] Mean test loss of 258 batches: 1.8983581066131592. +[ Thu Sep 15 19:42:41 2022 ] Top1: 63.29% +[ Thu Sep 15 19:42:41 2022 ] Top5: 90.73% +[ Thu Sep 15 19:42:41 2022 ] Training epoch: 96 +[ Thu Sep 15 19:42:51 2022 ] Batch(14/123) done. Loss: 0.0238 lr:0.001000 network_time: 0.0616 +[ Thu Sep 15 19:43:27 2022 ] Batch(114/123) done. Loss: 0.0055 lr:0.001000 network_time: 0.0515 +[ Thu Sep 15 19:43:30 2022 ] Eval epoch: 96 +[ Thu Sep 15 19:43:53 2022 ] Mean test loss of 258 batches: 1.8857086896896362. +[ Thu Sep 15 19:43:53 2022 ] Top1: 63.26% +[ Thu Sep 15 19:43:53 2022 ] Top5: 90.91% +[ Thu Sep 15 19:43:53 2022 ] Training epoch: 97 +[ Thu Sep 15 19:44:31 2022 ] Batch(91/123) done. Loss: 0.0085 lr:0.001000 network_time: 0.0533 +[ Thu Sep 15 19:44:43 2022 ] Eval epoch: 97 +[ Thu Sep 15 19:45:06 2022 ] Mean test loss of 258 batches: 1.876707911491394. +[ Thu Sep 15 19:45:06 2022 ] Top1: 64.09% +[ Thu Sep 15 19:45:06 2022 ] Top5: 91.19% +[ Thu Sep 15 19:45:06 2022 ] Training epoch: 98 +[ Thu Sep 15 19:45:35 2022 ] Batch(68/123) done. Loss: 0.0062 lr:0.001000 network_time: 0.0534 +[ Thu Sep 15 19:45:55 2022 ] Eval epoch: 98 +[ Thu Sep 15 19:46:18 2022 ] Mean test loss of 258 batches: 1.9330939054489136. 
+[ Thu Sep 15 19:46:18 2022 ] Top1: 63.33% +[ Thu Sep 15 19:46:18 2022 ] Top5: 90.91% +[ Thu Sep 15 19:46:18 2022 ] Training epoch: 99 +[ Thu Sep 15 19:46:39 2022 ] Batch(45/123) done. Loss: 0.0026 lr:0.001000 network_time: 0.0587 +[ Thu Sep 15 19:47:08 2022 ] Eval epoch: 99 +[ Thu Sep 15 19:47:30 2022 ] Mean test loss of 258 batches: 1.8569103479385376. +[ Thu Sep 15 19:47:30 2022 ] Top1: 64.52% +[ Thu Sep 15 19:47:30 2022 ] Top5: 91.11% +[ Thu Sep 15 19:47:30 2022 ] Training epoch: 100 +[ Thu Sep 15 19:47:43 2022 ] Batch(22/123) done. Loss: 0.0122 lr:0.001000 network_time: 0.0563 +[ Thu Sep 15 19:48:19 2022 ] Batch(122/123) done. Loss: 0.0033 lr:0.001000 network_time: 0.0531 +[ Thu Sep 15 19:48:20 2022 ] Eval epoch: 100 +[ Thu Sep 15 19:48:43 2022 ] Mean test loss of 258 batches: 1.8815741539001465. +[ Thu Sep 15 19:48:43 2022 ] Top1: 63.68% +[ Thu Sep 15 19:48:43 2022 ] Top5: 90.88% +[ Thu Sep 15 19:48:43 2022 ] Training epoch: 101 +[ Thu Sep 15 19:49:24 2022 ] Batch(99/123) done. Loss: 0.0063 lr:0.000100 network_time: 0.0554 +[ Thu Sep 15 19:49:32 2022 ] Eval epoch: 101 +[ Thu Sep 15 19:49:55 2022 ] Mean test loss of 258 batches: 1.9553409814834595. +[ Thu Sep 15 19:49:55 2022 ] Top1: 63.68% +[ Thu Sep 15 19:49:55 2022 ] Top5: 90.88% +[ Thu Sep 15 19:49:56 2022 ] Training epoch: 102 +[ Thu Sep 15 19:50:28 2022 ] Batch(76/123) done. Loss: 0.0070 lr:0.000100 network_time: 0.0523 +[ Thu Sep 15 19:50:45 2022 ] Eval epoch: 102 +[ Thu Sep 15 19:51:08 2022 ] Mean test loss of 258 batches: 2.0229227542877197. +[ Thu Sep 15 19:51:08 2022 ] Top1: 61.29% +[ Thu Sep 15 19:51:08 2022 ] Top5: 89.88% +[ Thu Sep 15 19:51:08 2022 ] Training epoch: 103 +[ Thu Sep 15 19:51:31 2022 ] Batch(53/123) done. Loss: 0.0044 lr:0.000100 network_time: 0.0533 +[ Thu Sep 15 19:51:57 2022 ] Eval epoch: 103 +[ Thu Sep 15 19:52:20 2022 ] Mean test loss of 258 batches: 1.82876455783844. 
+[ Thu Sep 15 19:52:20 2022 ] Top1: 64.74% +[ Thu Sep 15 19:52:20 2022 ] Top5: 91.20% +[ Thu Sep 15 19:52:20 2022 ] Training epoch: 104 +[ Thu Sep 15 19:52:36 2022 ] Batch(30/123) done. Loss: 0.0089 lr:0.000100 network_time: 0.0563 +[ Thu Sep 15 19:53:10 2022 ] Eval epoch: 104 +[ Thu Sep 15 19:53:32 2022 ] Mean test loss of 258 batches: 1.8227325677871704. +[ Thu Sep 15 19:53:32 2022 ] Top1: 64.23% +[ Thu Sep 15 19:53:32 2022 ] Top5: 91.13% +[ Thu Sep 15 19:53:32 2022 ] Training epoch: 105 +[ Thu Sep 15 19:53:39 2022 ] Batch(7/123) done. Loss: 0.0048 lr:0.000100 network_time: 0.0492 +[ Thu Sep 15 19:54:16 2022 ] Batch(107/123) done. Loss: 0.0035 lr:0.000100 network_time: 0.0532 +[ Thu Sep 15 19:54:22 2022 ] Eval epoch: 105 +[ Thu Sep 15 19:54:44 2022 ] Mean test loss of 258 batches: 1.8803633451461792. +[ Thu Sep 15 19:54:44 2022 ] Top1: 63.32% +[ Thu Sep 15 19:54:44 2022 ] Top5: 90.86% +[ Thu Sep 15 19:54:44 2022 ] Training epoch: 106 +[ Thu Sep 15 19:55:19 2022 ] Batch(84/123) done. Loss: 0.0057 lr:0.000100 network_time: 0.0537 +[ Thu Sep 15 19:55:34 2022 ] Eval epoch: 106 +[ Thu Sep 15 19:55:57 2022 ] Mean test loss of 258 batches: 2.0569188594818115. +[ Thu Sep 15 19:55:57 2022 ] Top1: 61.10% +[ Thu Sep 15 19:55:57 2022 ] Top5: 89.51% +[ Thu Sep 15 19:55:57 2022 ] Training epoch: 107 +[ Thu Sep 15 19:56:24 2022 ] Batch(61/123) done. Loss: 0.0156 lr:0.000100 network_time: 0.0561 +[ Thu Sep 15 19:56:46 2022 ] Eval epoch: 107 +[ Thu Sep 15 19:57:09 2022 ] Mean test loss of 258 batches: 1.8778842687606812. +[ Thu Sep 15 19:57:09 2022 ] Top1: 63.09% +[ Thu Sep 15 19:57:09 2022 ] Top5: 90.79% +[ Thu Sep 15 19:57:09 2022 ] Training epoch: 108 +[ Thu Sep 15 19:57:28 2022 ] Batch(38/123) done. Loss: 0.0017 lr:0.000100 network_time: 0.0534 +[ Thu Sep 15 19:57:59 2022 ] Eval epoch: 108 +[ Thu Sep 15 19:58:22 2022 ] Mean test loss of 258 batches: 1.8048086166381836. 
+[ Thu Sep 15 19:58:22 2022 ] Top1: 64.37% +[ Thu Sep 15 19:58:22 2022 ] Top5: 91.36% +[ Thu Sep 15 19:58:22 2022 ] Training epoch: 109 +[ Thu Sep 15 19:58:32 2022 ] Batch(15/123) done. Loss: 0.0029 lr:0.000100 network_time: 0.0545 +[ Thu Sep 15 19:59:08 2022 ] Batch(115/123) done. Loss: 0.0072 lr:0.000100 network_time: 0.0506 +[ Thu Sep 15 19:59:11 2022 ] Eval epoch: 109 +[ Thu Sep 15 19:59:34 2022 ] Mean test loss of 258 batches: 1.9837099313735962. +[ Thu Sep 15 19:59:34 2022 ] Top1: 62.79% +[ Thu Sep 15 19:59:34 2022 ] Top5: 90.65% +[ Thu Sep 15 19:59:34 2022 ] Training epoch: 110 +[ Thu Sep 15 20:00:12 2022 ] Batch(92/123) done. Loss: 0.0081 lr:0.000100 network_time: 0.0531 +[ Thu Sep 15 20:00:23 2022 ] Eval epoch: 110 +[ Thu Sep 15 20:00:46 2022 ] Mean test loss of 258 batches: 1.920461893081665. +[ Thu Sep 15 20:00:46 2022 ] Top1: 63.84% +[ Thu Sep 15 20:00:46 2022 ] Top5: 90.85% +[ Thu Sep 15 20:00:46 2022 ] Training epoch: 111 +[ Thu Sep 15 20:01:16 2022 ] Batch(69/123) done. Loss: 0.0067 lr:0.000100 network_time: 0.0588 +[ Thu Sep 15 20:01:36 2022 ] Eval epoch: 111 +[ Thu Sep 15 20:01:58 2022 ] Mean test loss of 258 batches: 1.7909228801727295. +[ Thu Sep 15 20:01:58 2022 ] Top1: 64.79% +[ Thu Sep 15 20:01:58 2022 ] Top5: 91.53% +[ Thu Sep 15 20:01:58 2022 ] Training epoch: 112 +[ Thu Sep 15 20:02:19 2022 ] Batch(46/123) done. Loss: 0.0023 lr:0.000100 network_time: 0.0538 +[ Thu Sep 15 20:02:48 2022 ] Eval epoch: 112 +[ Thu Sep 15 20:03:11 2022 ] Mean test loss of 258 batches: 1.8119471073150635. +[ Thu Sep 15 20:03:11 2022 ] Top1: 64.38% +[ Thu Sep 15 20:03:11 2022 ] Top5: 91.19% +[ Thu Sep 15 20:03:11 2022 ] Training epoch: 113 +[ Thu Sep 15 20:03:24 2022 ] Batch(23/123) done. Loss: 0.0052 lr:0.000100 network_time: 0.0573 +[ Thu Sep 15 20:04:00 2022 ] Eval epoch: 113 +[ Thu Sep 15 20:04:23 2022 ] Mean test loss of 258 batches: 1.8072237968444824. 
+[ Thu Sep 15 20:04:23 2022 ] Top1: 64.49% +[ Thu Sep 15 20:04:23 2022 ] Top5: 91.28% +[ Thu Sep 15 20:04:23 2022 ] Training epoch: 114 +[ Thu Sep 15 20:04:28 2022 ] Batch(0/123) done. Loss: 0.0142 lr:0.000100 network_time: 0.0936 +[ Thu Sep 15 20:05:04 2022 ] Batch(100/123) done. Loss: 0.0016 lr:0.000100 network_time: 0.0529 +[ Thu Sep 15 20:05:13 2022 ] Eval epoch: 114 +[ Thu Sep 15 20:05:35 2022 ] Mean test loss of 258 batches: 2.0189363956451416. +[ Thu Sep 15 20:05:36 2022 ] Top1: 61.37% +[ Thu Sep 15 20:05:36 2022 ] Top5: 89.63% +[ Thu Sep 15 20:05:36 2022 ] Training epoch: 115 +[ Thu Sep 15 20:06:08 2022 ] Batch(77/123) done. Loss: 0.0031 lr:0.000100 network_time: 0.0503 +[ Thu Sep 15 20:06:25 2022 ] Eval epoch: 115 +[ Thu Sep 15 20:06:48 2022 ] Mean test loss of 258 batches: 1.9490886926651. +[ Thu Sep 15 20:06:48 2022 ] Top1: 62.44% +[ Thu Sep 15 20:06:48 2022 ] Top5: 90.64% +[ Thu Sep 15 20:06:48 2022 ] Training epoch: 116 +[ Thu Sep 15 20:07:13 2022 ] Batch(54/123) done. Loss: 0.0228 lr:0.000100 network_time: 0.0544 +[ Thu Sep 15 20:07:38 2022 ] Eval epoch: 116 +[ Thu Sep 15 20:08:01 2022 ] Mean test loss of 258 batches: 1.801637887954712. +[ Thu Sep 15 20:08:01 2022 ] Top1: 64.53% +[ Thu Sep 15 20:08:01 2022 ] Top5: 91.27% +[ Thu Sep 15 20:08:01 2022 ] Training epoch: 117 +[ Thu Sep 15 20:08:17 2022 ] Batch(31/123) done. Loss: 0.0068 lr:0.000100 network_time: 0.0506 +[ Thu Sep 15 20:08:50 2022 ] Eval epoch: 117 +[ Thu Sep 15 20:09:13 2022 ] Mean test loss of 258 batches: 1.8541256189346313. +[ Thu Sep 15 20:09:13 2022 ] Top1: 63.92% +[ Thu Sep 15 20:09:13 2022 ] Top5: 91.17% +[ Thu Sep 15 20:09:13 2022 ] Training epoch: 118 +[ Thu Sep 15 20:09:21 2022 ] Batch(8/123) done. Loss: 0.0072 lr:0.000100 network_time: 0.0568 +[ Thu Sep 15 20:09:58 2022 ] Batch(108/123) done. Loss: 0.0101 lr:0.000100 network_time: 0.0597 +[ Thu Sep 15 20:10:03 2022 ] Eval epoch: 118 +[ Thu Sep 15 20:10:26 2022 ] Mean test loss of 258 batches: 2.009375810623169. 
+[ Thu Sep 15 20:10:26 2022 ] Top1: 62.33% +[ Thu Sep 15 20:10:26 2022 ] Top5: 90.37% +[ Thu Sep 15 20:10:26 2022 ] Training epoch: 119 +[ Thu Sep 15 20:11:02 2022 ] Batch(85/123) done. Loss: 0.0018 lr:0.000100 network_time: 0.0489 +[ Thu Sep 15 20:11:15 2022 ] Eval epoch: 119 +[ Thu Sep 15 20:11:38 2022 ] Mean test loss of 258 batches: 1.7464431524276733. +[ Thu Sep 15 20:11:38 2022 ] Top1: 64.95% +[ Thu Sep 15 20:11:38 2022 ] Top5: 91.34% +[ Thu Sep 15 20:11:38 2022 ] Training epoch: 120 +[ Thu Sep 15 20:12:05 2022 ] Batch(62/123) done. Loss: 0.0046 lr:0.000100 network_time: 0.0513 +[ Thu Sep 15 20:12:28 2022 ] Eval epoch: 120 +[ Thu Sep 15 20:12:51 2022 ] Mean test loss of 258 batches: 1.892992377281189. +[ Thu Sep 15 20:12:51 2022 ] Top1: 63.81% +[ Thu Sep 15 20:12:51 2022 ] Top5: 90.92% +[ Thu Sep 15 20:12:51 2022 ] Training epoch: 121 +[ Thu Sep 15 20:13:09 2022 ] Batch(39/123) done. Loss: 0.0030 lr:0.000100 network_time: 0.0722 +[ Thu Sep 15 20:13:40 2022 ] Eval epoch: 121 +[ Thu Sep 15 20:14:03 2022 ] Mean test loss of 258 batches: 1.9506547451019287. +[ Thu Sep 15 20:14:03 2022 ] Top1: 63.22% +[ Thu Sep 15 20:14:03 2022 ] Top5: 90.80% +[ Thu Sep 15 20:14:03 2022 ] Training epoch: 122 +[ Thu Sep 15 20:14:14 2022 ] Batch(16/123) done. Loss: 0.0073 lr:0.000100 network_time: 0.0579 +[ Thu Sep 15 20:14:50 2022 ] Batch(116/123) done. Loss: 0.0041 lr:0.000100 network_time: 0.0492 +[ Thu Sep 15 20:14:53 2022 ] Eval epoch: 122 +[ Thu Sep 15 20:15:15 2022 ] Mean test loss of 258 batches: 1.9415020942687988. +[ Thu Sep 15 20:15:16 2022 ] Top1: 62.73% +[ Thu Sep 15 20:15:16 2022 ] Top5: 90.48% +[ Thu Sep 15 20:15:16 2022 ] Training epoch: 123 +[ Thu Sep 15 20:15:54 2022 ] Batch(93/123) done. Loss: 0.0050 lr:0.000100 network_time: 0.0531 +[ Thu Sep 15 20:16:05 2022 ] Eval epoch: 123 +[ Thu Sep 15 20:16:28 2022 ] Mean test loss of 258 batches: 1.7792094945907593. 
+[ Thu Sep 15 20:16:28 2022 ] Top1: 64.38% +[ Thu Sep 15 20:16:28 2022 ] Top5: 91.42% +[ Thu Sep 15 20:16:28 2022 ] Training epoch: 124 +[ Thu Sep 15 20:16:58 2022 ] Batch(70/123) done. Loss: 0.0022 lr:0.000100 network_time: 0.0531 +[ Thu Sep 15 20:17:18 2022 ] Eval epoch: 124 +[ Thu Sep 15 20:17:41 2022 ] Mean test loss of 258 batches: 1.924527645111084. +[ Thu Sep 15 20:17:41 2022 ] Top1: 63.71% +[ Thu Sep 15 20:17:41 2022 ] Top5: 90.88% +[ Thu Sep 15 20:17:41 2022 ] Training epoch: 125 +[ Thu Sep 15 20:18:03 2022 ] Batch(47/123) done. Loss: 0.0032 lr:0.000100 network_time: 0.0517 +[ Thu Sep 15 20:18:30 2022 ] Eval epoch: 125 +[ Thu Sep 15 20:18:53 2022 ] Mean test loss of 258 batches: 1.797371745109558. +[ Thu Sep 15 20:18:53 2022 ] Top1: 64.73% +[ Thu Sep 15 20:18:53 2022 ] Top5: 91.55% +[ Thu Sep 15 20:18:53 2022 ] Training epoch: 126 +[ Thu Sep 15 20:19:07 2022 ] Batch(24/123) done. Loss: 0.0054 lr:0.000100 network_time: 0.0543 +[ Thu Sep 15 20:19:42 2022 ] Eval epoch: 126 +[ Thu Sep 15 20:20:06 2022 ] Mean test loss of 258 batches: 1.8714008331298828. +[ Thu Sep 15 20:20:06 2022 ] Top1: 63.84% +[ Thu Sep 15 20:20:06 2022 ] Top5: 91.07% +[ Thu Sep 15 20:20:06 2022 ] Training epoch: 127 +[ Thu Sep 15 20:20:11 2022 ] Batch(1/123) done. Loss: 0.0051 lr:0.000100 network_time: 0.0559 +[ Thu Sep 15 20:20:47 2022 ] Batch(101/123) done. Loss: 0.0022 lr:0.000100 network_time: 0.0472 +[ Thu Sep 15 20:20:55 2022 ] Eval epoch: 127 +[ Thu Sep 15 20:21:18 2022 ] Mean test loss of 258 batches: 1.848433494567871. +[ Thu Sep 15 20:21:18 2022 ] Top1: 64.39% +[ Thu Sep 15 20:21:18 2022 ] Top5: 91.31% +[ Thu Sep 15 20:21:18 2022 ] Training epoch: 128 +[ Thu Sep 15 20:21:52 2022 ] Batch(78/123) done. Loss: 0.0029 lr:0.000100 network_time: 0.0501 +[ Thu Sep 15 20:22:08 2022 ] Eval epoch: 128 +[ Thu Sep 15 20:22:31 2022 ] Mean test loss of 258 batches: 1.849387288093567. 
+[ Thu Sep 15 20:22:31 2022 ] Top1: 64.44% +[ Thu Sep 15 20:22:31 2022 ] Top5: 91.35% +[ Thu Sep 15 20:22:31 2022 ] Training epoch: 129 +[ Thu Sep 15 20:22:56 2022 ] Batch(55/123) done. Loss: 0.0056 lr:0.000100 network_time: 0.0462 +[ Thu Sep 15 20:23:20 2022 ] Eval epoch: 129 +[ Thu Sep 15 20:23:43 2022 ] Mean test loss of 258 batches: 1.9221100807189941. +[ Thu Sep 15 20:23:43 2022 ] Top1: 63.66% +[ Thu Sep 15 20:23:43 2022 ] Top5: 90.82% +[ Thu Sep 15 20:23:43 2022 ] Training epoch: 130 +[ Thu Sep 15 20:24:00 2022 ] Batch(32/123) done. Loss: 0.0082 lr:0.000100 network_time: 0.0517 +[ Thu Sep 15 20:24:33 2022 ] Eval epoch: 130 +[ Thu Sep 15 20:24:56 2022 ] Mean test loss of 258 batches: 1.9553499221801758. +[ Thu Sep 15 20:24:56 2022 ] Top1: 63.26% +[ Thu Sep 15 20:24:56 2022 ] Top5: 90.82% +[ Thu Sep 15 20:24:56 2022 ] Training epoch: 131 +[ Thu Sep 15 20:25:04 2022 ] Batch(9/123) done. Loss: 0.0023 lr:0.000100 network_time: 0.0526 +[ Thu Sep 15 20:25:40 2022 ] Batch(109/123) done. Loss: 0.0263 lr:0.000100 network_time: 0.0494 +[ Thu Sep 15 20:25:45 2022 ] Eval epoch: 131 +[ Thu Sep 15 20:26:08 2022 ] Mean test loss of 258 batches: 1.9092321395874023. +[ Thu Sep 15 20:26:08 2022 ] Top1: 63.81% +[ Thu Sep 15 20:26:08 2022 ] Top5: 91.06% +[ Thu Sep 15 20:26:08 2022 ] Training epoch: 132 +[ Thu Sep 15 20:26:44 2022 ] Batch(86/123) done. Loss: 0.0040 lr:0.000100 network_time: 0.0515 +[ Thu Sep 15 20:26:57 2022 ] Eval epoch: 132 +[ Thu Sep 15 20:27:20 2022 ] Mean test loss of 258 batches: 1.8571966886520386. +[ Thu Sep 15 20:27:21 2022 ] Top1: 63.51% +[ Thu Sep 15 20:27:21 2022 ] Top5: 91.11% +[ Thu Sep 15 20:27:21 2022 ] Training epoch: 133 +[ Thu Sep 15 20:27:48 2022 ] Batch(63/123) done. Loss: 0.0079 lr:0.000100 network_time: 0.0532 +[ Thu Sep 15 20:28:10 2022 ] Eval epoch: 133 +[ Thu Sep 15 20:28:33 2022 ] Mean test loss of 258 batches: 2.009004831314087. 
+[ Thu Sep 15 20:28:33 2022 ] Top1: 62.72% +[ Thu Sep 15 20:28:33 2022 ] Top5: 90.47% +[ Thu Sep 15 20:28:33 2022 ] Training epoch: 134 +[ Thu Sep 15 20:28:52 2022 ] Batch(40/123) done. Loss: 0.0086 lr:0.000100 network_time: 0.0507 +[ Thu Sep 15 20:29:23 2022 ] Eval epoch: 134 +[ Thu Sep 15 20:29:46 2022 ] Mean test loss of 258 batches: 1.814934253692627. +[ Thu Sep 15 20:29:46 2022 ] Top1: 64.21% +[ Thu Sep 15 20:29:46 2022 ] Top5: 91.35% +[ Thu Sep 15 20:29:46 2022 ] Training epoch: 135 +[ Thu Sep 15 20:29:57 2022 ] Batch(17/123) done. Loss: 0.0034 lr:0.000100 network_time: 0.0575 +[ Thu Sep 15 20:30:34 2022 ] Batch(117/123) done. Loss: 0.0049 lr:0.000100 network_time: 0.0484 +[ Thu Sep 15 20:30:36 2022 ] Eval epoch: 135 +[ Thu Sep 15 20:30:59 2022 ] Mean test loss of 258 batches: 1.8043664693832397. +[ Thu Sep 15 20:31:00 2022 ] Top1: 63.80% +[ Thu Sep 15 20:31:00 2022 ] Top5: 91.04% +[ Thu Sep 15 20:31:00 2022 ] Training epoch: 136 +[ Thu Sep 15 20:31:39 2022 ] Batch(94/123) done. Loss: 0.0053 lr:0.000100 network_time: 0.0498 +[ Thu Sep 15 20:31:49 2022 ] Eval epoch: 136 +[ Thu Sep 15 20:32:12 2022 ] Mean test loss of 258 batches: 1.9689311981201172. +[ Thu Sep 15 20:32:12 2022 ] Top1: 63.80% +[ Thu Sep 15 20:32:12 2022 ] Top5: 90.93% +[ Thu Sep 15 20:32:12 2022 ] Training epoch: 137 +[ Thu Sep 15 20:32:42 2022 ] Batch(71/123) done. Loss: 0.0085 lr:0.000100 network_time: 0.0516 +[ Thu Sep 15 20:33:01 2022 ] Eval epoch: 137 +[ Thu Sep 15 20:33:25 2022 ] Mean test loss of 258 batches: 1.8842922449111938. +[ Thu Sep 15 20:33:25 2022 ] Top1: 63.98% +[ Thu Sep 15 20:33:25 2022 ] Top5: 91.14% +[ Thu Sep 15 20:33:25 2022 ] Training epoch: 138 +[ Thu Sep 15 20:33:47 2022 ] Batch(48/123) done. Loss: 0.0178 lr:0.000100 network_time: 0.0504 +[ Thu Sep 15 20:34:14 2022 ] Eval epoch: 138 +[ Thu Sep 15 20:34:37 2022 ] Mean test loss of 258 batches: 1.9904453754425049. 
# shift_gcn.py — Shift-GCN backbone (Cheng et al., CVPR 2020) for skeleton
# action recognition.  Structure: 10 stacked TCN_GCN_units (spatial shift-GCN
# followed by temporal shift-TCN) over NTU 25-joint skeletons, then global
# average pooling and a linear classifier.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import math

import sys
sys.path.append("./model/Temporal_shift/")

# Custom CUDA temporal-shift op compiled under model/Temporal_shift/.
from cuda.shift import Shift


def import_class(name):
    """Resolve a dotted path such as 'graph.ntu_rgb_d.Graph' to the object."""
    components = name.split('.')
    mod = __import__(components[0])
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod


def conv_init(conv):
    """Kaiming-initialise a conv layer's weight and zero its bias.

    FIX: the underscore-less nn.init.kaiming_normal / nn.init.constant were
    deprecated long ago and removed from modern PyTorch; the in-place `_`
    variants are numerically identical and are the supported API.
    """
    nn.init.kaiming_normal_(conv.weight, mode='fan_out')
    nn.init.constant_(conv.bias, 0)


def bn_init(bn, scale):
    """Initialise a batch-norm layer: weight = scale, bias = 0."""
    nn.init.constant_(bn.weight, scale)
    nn.init.constant_(bn.bias, 0)


class tcn(nn.Module):
    """Plain (kernel_size x 1) temporal convolution + BN.

    Used only as the residual-path projection/downsampler in TCN_GCN_unit
    (kernel_size=1 there), so no activation is applied here.
    """

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(tcn, self).__init__()
        pad = int((kernel_size - 1) / 2)
        self.conv = nn.Conv2d(in_channels, out_channels,
                              kernel_size=(kernel_size, 1),
                              padding=(pad, 0),
                              stride=(stride, 1))

        self.bn = nn.BatchNorm2d(out_channels)
        # NOTE: self.relu is registered but intentionally unused in forward();
        # the enclosing TCN_GCN_unit applies ReLU after summing the residual.
        # Kept so checkpoint state/module layout stays identical.
        self.relu = nn.ReLU()
        conv_init(self.conv)
        bn_init(self.bn, 1)

    def forward(self, x):
        x = self.bn(self.conv(x))
        return x


class Shift_tcn(nn.Module):
    """Temporal shift block: BN -> shift -> 1x1 conv -> ReLU -> shift -> BN."""

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(Shift_tcn, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.bn = nn.BatchNorm2d(in_channels)
        # NOTE(review): bn2 is sized with in_channels but normalises the
        # output of temporal_linear (out_channels); this only works because
        # every call site uses in_channels == out_channels — confirm before
        # reusing with differing channel counts.
        self.bn2 = nn.BatchNorm2d(in_channels)
        bn_init(self.bn2, 1)
        self.relu = nn.ReLU(inplace=True)
        self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1)
        self.shift_out = Shift(channel=out_channels, stride=stride, init_scale=1)

        # Pointwise conv that mixes channels between the two temporal shifts.
        self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1)
        # FIX: kaiming_normal_ replaces the removed nn.init.kaiming_normal.
        nn.init.kaiming_normal_(self.temporal_linear.weight, mode='fan_out')

    def forward(self, x):
        x = self.bn(x)
        # shift1
        x = self.shift_in(x)
        x = self.temporal_linear(x)
        x = self.relu(x)
        # shift2
        x = self.shift_out(x)
        x = self.bn2(x)
        return x


class Shift_gcn(nn.Module):
    """Spatial shift-GCN: channel-rotation "shift" across the 25 joints,
    a learned per-joint feature mask, and a shared linear projection.

    NOTE(review): the joint count 25 and device='cuda' are hardcoded, so this
    layer ignores num_point from Model and requires a GPU at construction
    time — kept as-is to preserve behaviour/checkpoint compatibility.
    """

    def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3):
        super(Shift_gcn, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        if in_channels != out_channels:
            # 1x1 conv projection so the residual addition is shape-compatible.
            self.down = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.down = lambda x: x

        self.Linear_weight = nn.Parameter(
            torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'),
            requires_grad=True)
        nn.init.normal_(self.Linear_weight, 0, math.sqrt(1.0 / out_channels))

        self.Linear_bias = nn.Parameter(
            torch.zeros(1, 1, out_channels, requires_grad=True, device='cuda'),
            requires_grad=True)
        # FIX: constant_ replaces the removed nn.init.constant (here and below).
        nn.init.constant_(self.Linear_bias, 0)

        # Mask is zero-initialised; forward() uses tanh(mask)+1, i.e. starts
        # as an identity gate.
        self.Feature_Mask = nn.Parameter(
            torch.ones(1, 25, in_channels, requires_grad=True, device='cuda'),
            requires_grad=True)
        nn.init.constant_(self.Feature_Mask, 0)

        self.bn = nn.BatchNorm1d(25 * out_channels)
        self.relu = nn.ReLU()

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)

        # Precomputed index permutations implementing the channel shift as a
        # gather over the flattened (joint, channel) axis.
        # FIX: np.int was removed in NumPy 1.24; np.int64 is what it resolved
        # to here and yields the LongTensor index_select requires.
        index_array = np.empty(25 * in_channels).astype(np.int64)
        for i in range(25):
            for j in range(in_channels):
                index_array[i * in_channels + j] = \
                    (i * in_channels + j + j * in_channels) % (in_channels * 25)
        self.shift_in = nn.Parameter(torch.from_numpy(index_array), requires_grad=False)

        index_array = np.empty(25 * out_channels).astype(np.int64)
        for i in range(25):
            for j in range(out_channels):
                index_array[i * out_channels + j] = \
                    (i * out_channels + j - j * out_channels) % (out_channels * 25)
        self.shift_out = nn.Parameter(torch.from_numpy(index_array), requires_grad=False)

    def forward(self, x0):
        n, c, t, v = x0.size()
        x = x0.permute(0, 2, 3, 1).contiguous()

        # shift1: gather-based channel rotation, then gated by the mask.
        x = x.view(n * t, v * c)
        x = torch.index_select(x, 1, self.shift_in)
        x = x.view(n * t, v, c)
        x = x * (torch.tanh(self.Feature_Mask) + 1)

        # Shared linear projection over channels: (nt, v, c) @ (c, d).
        x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous()
        x = x + self.Linear_bias

        # shift2 (inverse rotation) + BN over the flattened joint-channel axis.
        x = x.view(n * t, -1)
        x = torch.index_select(x, 1, self.shift_out)
        x = self.bn(x)
        x = x.view(n, t, v, self.out_channels).permute(0, 3, 1, 2)  # n,c,t,v

        x = x + self.down(x0)
        x = self.relu(x)
        return x


class TCN_GCN_unit(nn.Module):
    """One Shift-GCN layer: spatial shift-GCN -> temporal shift-TCN,
    with a residual connection and a final ReLU."""

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = Shift_gcn(in_channels, out_channels, A)
        self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU()

        if not residual:
            self.residual = lambda x: 0
        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x
        else:
            # Shapes differ: project/downsample the residual with a 1x1 tcn.
            self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        x = self.tcn1(self.gcn1(x)) + self.residual(x)
        return self.relu(x)


class Model(nn.Module):
    """Full Shift-GCN classifier.

    Input:  (N, C, T, V, M) — batch, channels, frames, joints, persons.
    Output: (N, num_class) logits, averaged over persons and space-time.
    """

    def __init__(self, num_class=60, num_point=25, num_person=2, graph=None,
                 graph_args=dict(), in_channels=3):
        super(Model, self).__init__()

        if graph is None:
            raise ValueError()
        else:
            Graph = import_class(graph)
            self.graph = Graph(**graph_args)

        A = self.graph.A
        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)

        # FIX: the first layer previously hardcoded 3 input channels,
        # silently ignoring the in_channels parameter (default is still 3,
        # so existing configs/checkpoints are unaffected).
        self.l1 = TCN_GCN_unit(in_channels, 64, A, residual=False)
        self.l2 = TCN_GCN_unit(64, 64, A)
        self.l3 = TCN_GCN_unit(64, 64, A)
        self.l4 = TCN_GCN_unit(64, 64, A)
        self.l5 = TCN_GCN_unit(64, 128, A, stride=2)
        self.l6 = TCN_GCN_unit(128, 128, A)
        self.l7 = TCN_GCN_unit(128, 128, A)
        self.l8 = TCN_GCN_unit(128, 256, A, stride=2)
        self.l9 = TCN_GCN_unit(256, 256, A)
        self.l10 = TCN_GCN_unit(256, 256, A)

        self.fc = nn.Linear(256, num_class)
        # FIX: normal_ replaces the removed nn.init.normal.
        nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
        bn_init(self.data_bn, 1)

    def forward(self, x):
        N, C, T, V, M = x.size()

        # Fold persons/joints into the channel axis for BN over the skeleton.
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)

        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = self.l5(x)
        x = self.l6(x)
        x = self.l7(x)
        x = self.l8(x)
        x = self.l9(x)
        x = self.l10(x)

        # (N*M, C, T, V) -> pool over space-time, then average over persons.
        c_new = x.size(1)
        x = x.view(N, M, c_new, -1)
        x = x.mean(3).mean(1)

        return self.fc(x)
./config/nturgbd-cross-subject/train_bone.yaml +device: +- 0 +- 1 +- 2 +- 3 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_ShiftGCN_bone_xsub +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_bone.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_bone.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_ShiftGCN_bone_xsub diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_xsub/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e7ea33de36917b24ac3deb854ff7113f2c0492e7 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d5bce80cdeecce4cce3300b402719cd25c45d5d8530e964406d10576c3a0f35 +size 4979902 diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_xsub/log.txt b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_xsub/log.txt new file mode 100644 index 
0000000000000000000000000000000000000000..ed99bd8ca9f9e34497bcb1ce7c2acd1ef19ece3b --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_xsub/log.txt @@ -0,0 +1,875 @@ +[ Thu Sep 15 17:47:53 2022 ] Parameters: +{'work_dir': './work_dir/ntu_ShiftGCN_bone_xsub', 'model_saved_name': './save_models/ntu_ShiftGCN_bone_xsub', 'Experiment_name': 'ntu_ShiftGCN_bone_xsub', 'config': './config/nturgbd-cross-subject/train_bone.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [0, 1, 2, 3], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Thu Sep 15 17:47:53 2022 ] Training epoch: 1 +[ Thu Sep 15 17:48:40 2022 ] Batch(99/123) done. Loss: 2.3813 lr:0.100000 network_time: 0.0478 +[ Thu Sep 15 17:48:49 2022 ] Eval epoch: 1 +[ Thu Sep 15 17:49:11 2022 ] Mean test loss of 258 batches: 5.7082133293151855. 
+[ Thu Sep 15 17:49:11 2022 ] Top1: 10.85% +[ Thu Sep 15 17:49:11 2022 ] Top5: 36.14% +[ Thu Sep 15 17:49:11 2022 ] Training epoch: 2 +[ Thu Sep 15 17:49:43 2022 ] Batch(76/123) done. Loss: 2.1921 lr:0.100000 network_time: 0.0540 +[ Thu Sep 15 17:50:01 2022 ] Eval epoch: 2 +[ Thu Sep 15 17:50:23 2022 ] Mean test loss of 258 batches: 6.640047550201416. +[ Thu Sep 15 17:50:23 2022 ] Top1: 21.72% +[ Thu Sep 15 17:50:23 2022 ] Top5: 51.43% +[ Thu Sep 15 17:50:23 2022 ] Training epoch: 3 +[ Thu Sep 15 17:50:46 2022 ] Batch(53/123) done. Loss: 1.6533 lr:0.100000 network_time: 0.0488 +[ Thu Sep 15 17:51:12 2022 ] Eval epoch: 3 +[ Thu Sep 15 17:51:34 2022 ] Mean test loss of 258 batches: 3.587632656097412. +[ Thu Sep 15 17:51:34 2022 ] Top1: 26.24% +[ Thu Sep 15 17:51:34 2022 ] Top5: 60.65% +[ Thu Sep 15 17:51:34 2022 ] Training epoch: 4 +[ Thu Sep 15 17:51:49 2022 ] Batch(30/123) done. Loss: 1.6216 lr:0.100000 network_time: 0.0511 +[ Thu Sep 15 17:52:23 2022 ] Eval epoch: 4 +[ Thu Sep 15 17:52:45 2022 ] Mean test loss of 258 batches: 2.7592902183532715. +[ Thu Sep 15 17:52:45 2022 ] Top1: 34.18% +[ Thu Sep 15 17:52:46 2022 ] Top5: 68.90% +[ Thu Sep 15 17:52:46 2022 ] Training epoch: 5 +[ Thu Sep 15 17:52:52 2022 ] Batch(7/123) done. Loss: 1.1457 lr:0.100000 network_time: 0.0478 +[ Thu Sep 15 17:53:29 2022 ] Batch(107/123) done. Loss: 1.0360 lr:0.100000 network_time: 0.0486 +[ Thu Sep 15 17:53:35 2022 ] Eval epoch: 5 +[ Thu Sep 15 17:53:56 2022 ] Mean test loss of 258 batches: 3.083068370819092. +[ Thu Sep 15 17:53:57 2022 ] Top1: 35.23% +[ Thu Sep 15 17:53:57 2022 ] Top5: 65.32% +[ Thu Sep 15 17:53:57 2022 ] Training epoch: 6 +[ Thu Sep 15 17:54:32 2022 ] Batch(84/123) done. Loss: 1.1396 lr:0.100000 network_time: 0.0504 +[ Thu Sep 15 17:54:46 2022 ] Eval epoch: 6 +[ Thu Sep 15 17:55:08 2022 ] Mean test loss of 258 batches: 3.323880434036255. 
+[ Thu Sep 15 17:55:08 2022 ] Top1: 35.82% +[ Thu Sep 15 17:55:08 2022 ] Top5: 64.31% +[ Thu Sep 15 17:55:08 2022 ] Training epoch: 7 +[ Thu Sep 15 17:55:35 2022 ] Batch(61/123) done. Loss: 0.8685 lr:0.100000 network_time: 0.0488 +[ Thu Sep 15 17:55:58 2022 ] Eval epoch: 7 +[ Thu Sep 15 17:56:20 2022 ] Mean test loss of 258 batches: 3.480191230773926. +[ Thu Sep 15 17:56:20 2022 ] Top1: 32.70% +[ Thu Sep 15 17:56:20 2022 ] Top5: 62.88% +[ Thu Sep 15 17:56:20 2022 ] Training epoch: 8 +[ Thu Sep 15 17:56:38 2022 ] Batch(38/123) done. Loss: 1.0288 lr:0.100000 network_time: 0.0512 +[ Thu Sep 15 17:57:09 2022 ] Eval epoch: 8 +[ Thu Sep 15 17:57:32 2022 ] Mean test loss of 258 batches: 2.307069778442383. +[ Thu Sep 15 17:57:32 2022 ] Top1: 40.67% +[ Thu Sep 15 17:57:32 2022 ] Top5: 77.36% +[ Thu Sep 15 17:57:32 2022 ] Training epoch: 9 +[ Thu Sep 15 17:57:42 2022 ] Batch(15/123) done. Loss: 0.9701 lr:0.100000 network_time: 0.0513 +[ Thu Sep 15 17:58:18 2022 ] Batch(115/123) done. Loss: 1.0193 lr:0.100000 network_time: 0.0488 +[ Thu Sep 15 17:58:21 2022 ] Eval epoch: 9 +[ Thu Sep 15 17:58:43 2022 ] Mean test loss of 258 batches: 2.7951412200927734. +[ Thu Sep 15 17:58:43 2022 ] Top1: 37.25% +[ Thu Sep 15 17:58:43 2022 ] Top5: 70.56% +[ Thu Sep 15 17:58:43 2022 ] Training epoch: 10 +[ Thu Sep 15 17:59:21 2022 ] Batch(92/123) done. Loss: 1.0893 lr:0.100000 network_time: 0.0515 +[ Thu Sep 15 17:59:32 2022 ] Eval epoch: 10 +[ Thu Sep 15 17:59:55 2022 ] Mean test loss of 258 batches: 2.059424638748169. +[ Thu Sep 15 17:59:55 2022 ] Top1: 47.10% +[ Thu Sep 15 17:59:55 2022 ] Top5: 80.37% +[ Thu Sep 15 17:59:55 2022 ] Training epoch: 11 +[ Thu Sep 15 18:00:24 2022 ] Batch(69/123) done. Loss: 0.7402 lr:0.100000 network_time: 0.0520 +[ Thu Sep 15 18:00:44 2022 ] Eval epoch: 11 +[ Thu Sep 15 18:01:06 2022 ] Mean test loss of 258 batches: 2.2376978397369385. 
+[ Thu Sep 15 18:01:06 2022 ] Top1: 45.21% +[ Thu Sep 15 18:01:07 2022 ] Top5: 79.05% +[ Thu Sep 15 18:01:07 2022 ] Training epoch: 12 +[ Thu Sep 15 18:01:28 2022 ] Batch(46/123) done. Loss: 0.5997 lr:0.100000 network_time: 0.0502 +[ Thu Sep 15 18:01:56 2022 ] Eval epoch: 12 +[ Thu Sep 15 18:02:18 2022 ] Mean test loss of 258 batches: 2.544092893600464. +[ Thu Sep 15 18:02:18 2022 ] Top1: 44.84% +[ Thu Sep 15 18:02:18 2022 ] Top5: 79.83% +[ Thu Sep 15 18:02:18 2022 ] Training epoch: 13 +[ Thu Sep 15 18:02:31 2022 ] Batch(23/123) done. Loss: 0.9503 lr:0.100000 network_time: 0.0497 +[ Thu Sep 15 18:03:07 2022 ] Eval epoch: 13 +[ Thu Sep 15 18:03:29 2022 ] Mean test loss of 258 batches: 1.9147826433181763. +[ Thu Sep 15 18:03:29 2022 ] Top1: 51.48% +[ Thu Sep 15 18:03:29 2022 ] Top5: 83.05% +[ Thu Sep 15 18:03:29 2022 ] Training epoch: 14 +[ Thu Sep 15 18:03:34 2022 ] Batch(0/123) done. Loss: 0.5457 lr:0.100000 network_time: 0.1016 +[ Thu Sep 15 18:04:10 2022 ] Batch(100/123) done. Loss: 0.5821 lr:0.100000 network_time: 0.0494 +[ Thu Sep 15 18:04:19 2022 ] Eval epoch: 14 +[ Thu Sep 15 18:04:41 2022 ] Mean test loss of 258 batches: 3.2059364318847656. +[ Thu Sep 15 18:04:41 2022 ] Top1: 40.05% +[ Thu Sep 15 18:04:41 2022 ] Top5: 73.72% +[ Thu Sep 15 18:04:42 2022 ] Training epoch: 15 +[ Thu Sep 15 18:05:14 2022 ] Batch(77/123) done. Loss: 0.7532 lr:0.100000 network_time: 0.0495 +[ Thu Sep 15 18:05:31 2022 ] Eval epoch: 15 +[ Thu Sep 15 18:05:52 2022 ] Mean test loss of 258 batches: 2.295565605163574. +[ Thu Sep 15 18:05:52 2022 ] Top1: 44.93% +[ Thu Sep 15 18:05:53 2022 ] Top5: 81.57% +[ Thu Sep 15 18:05:53 2022 ] Training epoch: 16 +[ Thu Sep 15 18:06:17 2022 ] Batch(54/123) done. Loss: 0.7355 lr:0.100000 network_time: 0.0494 +[ Thu Sep 15 18:06:42 2022 ] Eval epoch: 16 +[ Thu Sep 15 18:07:04 2022 ] Mean test loss of 258 batches: 2.1258904933929443. 
+[ Thu Sep 15 18:07:04 2022 ] Top1: 49.40% +[ Thu Sep 15 18:07:04 2022 ] Top5: 83.77% +[ Thu Sep 15 18:07:04 2022 ] Training epoch: 17 +[ Thu Sep 15 18:07:19 2022 ] Batch(31/123) done. Loss: 0.6526 lr:0.100000 network_time: 0.0500 +[ Thu Sep 15 18:07:53 2022 ] Eval epoch: 17 +[ Thu Sep 15 18:08:15 2022 ] Mean test loss of 258 batches: 2.0040574073791504. +[ Thu Sep 15 18:08:15 2022 ] Top1: 50.99% +[ Thu Sep 15 18:08:15 2022 ] Top5: 84.38% +[ Thu Sep 15 18:08:15 2022 ] Training epoch: 18 +[ Thu Sep 15 18:08:22 2022 ] Batch(8/123) done. Loss: 0.4405 lr:0.100000 network_time: 0.0521 +[ Thu Sep 15 18:08:59 2022 ] Batch(108/123) done. Loss: 0.3498 lr:0.100000 network_time: 0.0497 +[ Thu Sep 15 18:09:05 2022 ] Eval epoch: 18 +[ Thu Sep 15 18:09:26 2022 ] Mean test loss of 258 batches: 2.170915365219116. +[ Thu Sep 15 18:09:26 2022 ] Top1: 51.73% +[ Thu Sep 15 18:09:26 2022 ] Top5: 83.59% +[ Thu Sep 15 18:09:26 2022 ] Training epoch: 19 +[ Thu Sep 15 18:10:02 2022 ] Batch(85/123) done. Loss: 0.4577 lr:0.100000 network_time: 0.0501 +[ Thu Sep 15 18:10:16 2022 ] Eval epoch: 19 +[ Thu Sep 15 18:10:37 2022 ] Mean test loss of 258 batches: 2.4165236949920654. +[ Thu Sep 15 18:10:38 2022 ] Top1: 48.97% +[ Thu Sep 15 18:10:38 2022 ] Top5: 81.39% +[ Thu Sep 15 18:10:38 2022 ] Training epoch: 20 +[ Thu Sep 15 18:11:04 2022 ] Batch(62/123) done. Loss: 0.4567 lr:0.100000 network_time: 0.0480 +[ Thu Sep 15 18:11:27 2022 ] Eval epoch: 20 +[ Thu Sep 15 18:11:49 2022 ] Mean test loss of 258 batches: 2.2029035091400146. +[ Thu Sep 15 18:11:49 2022 ] Top1: 48.95% +[ Thu Sep 15 18:11:49 2022 ] Top5: 83.41% +[ Thu Sep 15 18:11:49 2022 ] Training epoch: 21 +[ Thu Sep 15 18:12:07 2022 ] Batch(39/123) done. Loss: 0.5271 lr:0.100000 network_time: 0.0502 +[ Thu Sep 15 18:12:38 2022 ] Eval epoch: 21 +[ Thu Sep 15 18:13:00 2022 ] Mean test loss of 258 batches: 2.533735990524292. 
+[ Thu Sep 15 18:13:00 2022 ] Top1: 45.28% +[ Thu Sep 15 18:13:01 2022 ] Top5: 79.87% +[ Thu Sep 15 18:13:01 2022 ] Training epoch: 22 +[ Thu Sep 15 18:13:11 2022 ] Batch(16/123) done. Loss: 0.3980 lr:0.100000 network_time: 0.0473 +[ Thu Sep 15 18:13:48 2022 ] Batch(116/123) done. Loss: 0.5138 lr:0.100000 network_time: 0.0502 +[ Thu Sep 15 18:13:50 2022 ] Eval epoch: 22 +[ Thu Sep 15 18:14:12 2022 ] Mean test loss of 258 batches: 2.2566044330596924. +[ Thu Sep 15 18:14:12 2022 ] Top1: 49.99% +[ Thu Sep 15 18:14:12 2022 ] Top5: 83.68% +[ Thu Sep 15 18:14:12 2022 ] Training epoch: 23 +[ Thu Sep 15 18:14:50 2022 ] Batch(93/123) done. Loss: 0.8055 lr:0.100000 network_time: 0.0482 +[ Thu Sep 15 18:15:01 2022 ] Eval epoch: 23 +[ Thu Sep 15 18:15:22 2022 ] Mean test loss of 258 batches: 2.1785130500793457. +[ Thu Sep 15 18:15:22 2022 ] Top1: 51.98% +[ Thu Sep 15 18:15:23 2022 ] Top5: 83.66% +[ Thu Sep 15 18:15:23 2022 ] Training epoch: 24 +[ Thu Sep 15 18:15:52 2022 ] Batch(70/123) done. Loss: 0.3882 lr:0.100000 network_time: 0.0521 +[ Thu Sep 15 18:16:12 2022 ] Eval epoch: 24 +[ Thu Sep 15 18:16:34 2022 ] Mean test loss of 258 batches: 2.931326389312744. +[ Thu Sep 15 18:16:34 2022 ] Top1: 44.62% +[ Thu Sep 15 18:16:34 2022 ] Top5: 76.93% +[ Thu Sep 15 18:16:34 2022 ] Training epoch: 25 +[ Thu Sep 15 18:16:55 2022 ] Batch(47/123) done. Loss: 0.4080 lr:0.100000 network_time: 0.0516 +[ Thu Sep 15 18:17:23 2022 ] Eval epoch: 25 +[ Thu Sep 15 18:17:45 2022 ] Mean test loss of 258 batches: 1.9534658193588257. +[ Thu Sep 15 18:17:45 2022 ] Top1: 54.25% +[ Thu Sep 15 18:17:45 2022 ] Top5: 84.85% +[ Thu Sep 15 18:17:45 2022 ] Training epoch: 26 +[ Thu Sep 15 18:17:58 2022 ] Batch(24/123) done. Loss: 0.4425 lr:0.100000 network_time: 0.0469 +[ Thu Sep 15 18:18:34 2022 ] Eval epoch: 26 +[ Thu Sep 15 18:18:56 2022 ] Mean test loss of 258 batches: 2.3278818130493164. 
+[ Thu Sep 15 18:18:56 2022 ] Top1: 50.68% +[ Thu Sep 15 18:18:57 2022 ] Top5: 83.35% +[ Thu Sep 15 18:18:57 2022 ] Training epoch: 27 +[ Thu Sep 15 18:19:01 2022 ] Batch(1/123) done. Loss: 0.2602 lr:0.100000 network_time: 0.0477 +[ Thu Sep 15 18:19:38 2022 ] Batch(101/123) done. Loss: 0.4489 lr:0.100000 network_time: 0.0506 +[ Thu Sep 15 18:19:46 2022 ] Eval epoch: 27 +[ Thu Sep 15 18:20:08 2022 ] Mean test loss of 258 batches: 1.912603735923767. +[ Thu Sep 15 18:20:08 2022 ] Top1: 54.53% +[ Thu Sep 15 18:20:08 2022 ] Top5: 85.32% +[ Thu Sep 15 18:20:08 2022 ] Training epoch: 28 +[ Thu Sep 15 18:20:41 2022 ] Batch(78/123) done. Loss: 0.5833 lr:0.100000 network_time: 0.0513 +[ Thu Sep 15 18:20:57 2022 ] Eval epoch: 28 +[ Thu Sep 15 18:21:19 2022 ] Mean test loss of 258 batches: 2.167754650115967. +[ Thu Sep 15 18:21:19 2022 ] Top1: 55.58% +[ Thu Sep 15 18:21:19 2022 ] Top5: 86.30% +[ Thu Sep 15 18:21:19 2022 ] Training epoch: 29 +[ Thu Sep 15 18:21:44 2022 ] Batch(55/123) done. Loss: 0.3940 lr:0.100000 network_time: 0.0516 +[ Thu Sep 15 18:22:09 2022 ] Eval epoch: 29 +[ Thu Sep 15 18:22:30 2022 ] Mean test loss of 258 batches: 2.0642311573028564. +[ Thu Sep 15 18:22:31 2022 ] Top1: 55.52% +[ Thu Sep 15 18:22:31 2022 ] Top5: 86.89% +[ Thu Sep 15 18:22:31 2022 ] Training epoch: 30 +[ Thu Sep 15 18:22:47 2022 ] Batch(32/123) done. Loss: 0.4275 lr:0.100000 network_time: 0.0494 +[ Thu Sep 15 18:23:20 2022 ] Eval epoch: 30 +[ Thu Sep 15 18:23:42 2022 ] Mean test loss of 258 batches: 1.9236493110656738. +[ Thu Sep 15 18:23:42 2022 ] Top1: 55.09% +[ Thu Sep 15 18:23:42 2022 ] Top5: 84.92% +[ Thu Sep 15 18:23:42 2022 ] Training epoch: 31 +[ Thu Sep 15 18:23:50 2022 ] Batch(9/123) done. Loss: 0.2980 lr:0.100000 network_time: 0.0496 +[ Thu Sep 15 18:24:27 2022 ] Batch(109/123) done. Loss: 0.3614 lr:0.100000 network_time: 0.0495 +[ Thu Sep 15 18:24:32 2022 ] Eval epoch: 31 +[ Thu Sep 15 18:24:54 2022 ] Mean test loss of 258 batches: 2.2241408824920654. 
+[ Thu Sep 15 18:24:54 2022 ] Top1: 52.15% +[ Thu Sep 15 18:24:54 2022 ] Top5: 85.18% +[ Thu Sep 15 18:24:54 2022 ] Training epoch: 32 +[ Thu Sep 15 18:25:30 2022 ] Batch(86/123) done. Loss: 0.4021 lr:0.100000 network_time: 0.0503 +[ Thu Sep 15 18:25:43 2022 ] Eval epoch: 32 +[ Thu Sep 15 18:26:05 2022 ] Mean test loss of 258 batches: 1.9539873600006104. +[ Thu Sep 15 18:26:05 2022 ] Top1: 51.09% +[ Thu Sep 15 18:26:05 2022 ] Top5: 84.44% +[ Thu Sep 15 18:26:05 2022 ] Training epoch: 33 +[ Thu Sep 15 18:26:32 2022 ] Batch(63/123) done. Loss: 0.3629 lr:0.100000 network_time: 0.0688 +[ Thu Sep 15 18:26:54 2022 ] Eval epoch: 33 +[ Thu Sep 15 18:27:16 2022 ] Mean test loss of 258 batches: 2.223938465118408. +[ Thu Sep 15 18:27:16 2022 ] Top1: 53.82% +[ Thu Sep 15 18:27:17 2022 ] Top5: 84.55% +[ Thu Sep 15 18:27:17 2022 ] Training epoch: 34 +[ Thu Sep 15 18:27:35 2022 ] Batch(40/123) done. Loss: 0.3019 lr:0.100000 network_time: 0.0506 +[ Thu Sep 15 18:28:06 2022 ] Eval epoch: 34 +[ Thu Sep 15 18:28:28 2022 ] Mean test loss of 258 batches: 2.304352283477783. +[ Thu Sep 15 18:28:28 2022 ] Top1: 52.24% +[ Thu Sep 15 18:28:28 2022 ] Top5: 83.38% +[ Thu Sep 15 18:28:28 2022 ] Training epoch: 35 +[ Thu Sep 15 18:28:38 2022 ] Batch(17/123) done. Loss: 0.2968 lr:0.100000 network_time: 0.0554 +[ Thu Sep 15 18:29:15 2022 ] Batch(117/123) done. Loss: 0.3346 lr:0.100000 network_time: 0.0500 +[ Thu Sep 15 18:29:17 2022 ] Eval epoch: 35 +[ Thu Sep 15 18:29:39 2022 ] Mean test loss of 258 batches: 2.392214298248291. +[ Thu Sep 15 18:29:40 2022 ] Top1: 52.00% +[ Thu Sep 15 18:29:40 2022 ] Top5: 82.93% +[ Thu Sep 15 18:29:40 2022 ] Training epoch: 36 +[ Thu Sep 15 18:30:18 2022 ] Batch(94/123) done. Loss: 0.2313 lr:0.100000 network_time: 0.0524 +[ Thu Sep 15 18:30:29 2022 ] Eval epoch: 36 +[ Thu Sep 15 18:30:51 2022 ] Mean test loss of 258 batches: 2.248171806335449. 
+[ Thu Sep 15 18:30:51 2022 ] Top1: 52.39% +[ Thu Sep 15 18:30:51 2022 ] Top5: 84.59% +[ Thu Sep 15 18:30:51 2022 ] Training epoch: 37 +[ Thu Sep 15 18:31:21 2022 ] Batch(71/123) done. Loss: 0.2396 lr:0.100000 network_time: 0.0512 +[ Thu Sep 15 18:31:40 2022 ] Eval epoch: 37 +[ Thu Sep 15 18:32:02 2022 ] Mean test loss of 258 batches: 2.4701759815216064. +[ Thu Sep 15 18:32:02 2022 ] Top1: 52.17% +[ Thu Sep 15 18:32:02 2022 ] Top5: 85.15% +[ Thu Sep 15 18:32:02 2022 ] Training epoch: 38 +[ Thu Sep 15 18:32:24 2022 ] Batch(48/123) done. Loss: 0.1601 lr:0.100000 network_time: 0.0497 +[ Thu Sep 15 18:32:52 2022 ] Eval epoch: 38 +[ Thu Sep 15 18:33:13 2022 ] Mean test loss of 258 batches: 2.625194549560547. +[ Thu Sep 15 18:33:13 2022 ] Top1: 49.11% +[ Thu Sep 15 18:33:13 2022 ] Top5: 82.32% +[ Thu Sep 15 18:33:13 2022 ] Training epoch: 39 +[ Thu Sep 15 18:33:26 2022 ] Batch(25/123) done. Loss: 0.2385 lr:0.100000 network_time: 0.0491 +[ Thu Sep 15 18:34:02 2022 ] Eval epoch: 39 +[ Thu Sep 15 18:34:24 2022 ] Mean test loss of 258 batches: 2.2160768508911133. +[ Thu Sep 15 18:34:24 2022 ] Top1: 52.51% +[ Thu Sep 15 18:34:24 2022 ] Top5: 85.21% +[ Thu Sep 15 18:34:24 2022 ] Training epoch: 40 +[ Thu Sep 15 18:34:29 2022 ] Batch(2/123) done. Loss: 0.2694 lr:0.100000 network_time: 0.0741 +[ Thu Sep 15 18:35:05 2022 ] Batch(102/123) done. Loss: 0.4774 lr:0.100000 network_time: 0.0531 +[ Thu Sep 15 18:35:13 2022 ] Eval epoch: 40 +[ Thu Sep 15 18:35:35 2022 ] Mean test loss of 258 batches: 2.8346052169799805. +[ Thu Sep 15 18:35:35 2022 ] Top1: 45.68% +[ Thu Sep 15 18:35:35 2022 ] Top5: 77.05% +[ Thu Sep 15 18:35:35 2022 ] Training epoch: 41 +[ Thu Sep 15 18:36:08 2022 ] Batch(79/123) done. Loss: 0.2311 lr:0.100000 network_time: 0.0525 +[ Thu Sep 15 18:36:24 2022 ] Eval epoch: 41 +[ Thu Sep 15 18:36:47 2022 ] Mean test loss of 258 batches: 2.6487550735473633. 
+[ Thu Sep 15 18:36:47 2022 ] Top1: 48.83% +[ Thu Sep 15 18:36:47 2022 ] Top5: 81.41% +[ Thu Sep 15 18:36:47 2022 ] Training epoch: 42 +[ Thu Sep 15 18:37:11 2022 ] Batch(56/123) done. Loss: 0.1981 lr:0.100000 network_time: 0.0571 +[ Thu Sep 15 18:37:36 2022 ] Eval epoch: 42 +[ Thu Sep 15 18:37:58 2022 ] Mean test loss of 258 batches: 2.2918591499328613. +[ Thu Sep 15 18:37:58 2022 ] Top1: 51.79% +[ Thu Sep 15 18:37:58 2022 ] Top5: 82.48% +[ Thu Sep 15 18:37:58 2022 ] Training epoch: 43 +[ Thu Sep 15 18:38:14 2022 ] Batch(33/123) done. Loss: 0.2031 lr:0.100000 network_time: 0.0490 +[ Thu Sep 15 18:38:47 2022 ] Eval epoch: 43 +[ Thu Sep 15 18:39:09 2022 ] Mean test loss of 258 batches: 2.716893196105957. +[ Thu Sep 15 18:39:09 2022 ] Top1: 49.04% +[ Thu Sep 15 18:39:09 2022 ] Top5: 82.11% +[ Thu Sep 15 18:39:09 2022 ] Training epoch: 44 +[ Thu Sep 15 18:39:16 2022 ] Batch(10/123) done. Loss: 0.2014 lr:0.100000 network_time: 0.0465 +[ Thu Sep 15 18:39:53 2022 ] Batch(110/123) done. Loss: 0.2128 lr:0.100000 network_time: 0.0488 +[ Thu Sep 15 18:39:58 2022 ] Eval epoch: 44 +[ Thu Sep 15 18:40:20 2022 ] Mean test loss of 258 batches: 2.1684532165527344. +[ Thu Sep 15 18:40:20 2022 ] Top1: 55.17% +[ Thu Sep 15 18:40:20 2022 ] Top5: 86.61% +[ Thu Sep 15 18:40:20 2022 ] Training epoch: 45 +[ Thu Sep 15 18:40:55 2022 ] Batch(87/123) done. Loss: 0.1993 lr:0.100000 network_time: 0.0519 +[ Thu Sep 15 18:41:09 2022 ] Eval epoch: 45 +[ Thu Sep 15 18:41:31 2022 ] Mean test loss of 258 batches: 2.9825515747070312. +[ Thu Sep 15 18:41:31 2022 ] Top1: 47.93% +[ Thu Sep 15 18:41:31 2022 ] Top5: 80.66% +[ Thu Sep 15 18:41:31 2022 ] Training epoch: 46 +[ Thu Sep 15 18:41:58 2022 ] Batch(64/123) done. Loss: 0.2144 lr:0.100000 network_time: 0.0504 +[ Thu Sep 15 18:42:20 2022 ] Eval epoch: 46 +[ Thu Sep 15 18:42:42 2022 ] Mean test loss of 258 batches: 2.1467137336730957. 
+[ Thu Sep 15 18:42:42 2022 ] Top1: 54.75% +[ Thu Sep 15 18:42:42 2022 ] Top5: 85.50% +[ Thu Sep 15 18:42:42 2022 ] Training epoch: 47 +[ Thu Sep 15 18:43:01 2022 ] Batch(41/123) done. Loss: 0.3797 lr:0.100000 network_time: 0.0513 +[ Thu Sep 15 18:43:31 2022 ] Eval epoch: 47 +[ Thu Sep 15 18:43:53 2022 ] Mean test loss of 258 batches: 2.668498992919922. +[ Thu Sep 15 18:43:53 2022 ] Top1: 49.12% +[ Thu Sep 15 18:43:53 2022 ] Top5: 82.49% +[ Thu Sep 15 18:43:54 2022 ] Training epoch: 48 +[ Thu Sep 15 18:44:04 2022 ] Batch(18/123) done. Loss: 0.1254 lr:0.100000 network_time: 0.0476 +[ Thu Sep 15 18:44:41 2022 ] Batch(118/123) done. Loss: 0.2008 lr:0.100000 network_time: 0.0497 +[ Thu Sep 15 18:44:42 2022 ] Eval epoch: 48 +[ Thu Sep 15 18:45:04 2022 ] Mean test loss of 258 batches: 2.8411271572113037. +[ Thu Sep 15 18:45:04 2022 ] Top1: 48.40% +[ Thu Sep 15 18:45:05 2022 ] Top5: 79.99% +[ Thu Sep 15 18:45:05 2022 ] Training epoch: 49 +[ Thu Sep 15 18:45:43 2022 ] Batch(95/123) done. Loss: 0.3122 lr:0.100000 network_time: 0.0526 +[ Thu Sep 15 18:45:53 2022 ] Eval epoch: 49 +[ Thu Sep 15 18:46:16 2022 ] Mean test loss of 258 batches: 2.817328929901123. +[ Thu Sep 15 18:46:16 2022 ] Top1: 49.04% +[ Thu Sep 15 18:46:16 2022 ] Top5: 82.11% +[ Thu Sep 15 18:46:16 2022 ] Training epoch: 50 +[ Thu Sep 15 18:46:47 2022 ] Batch(72/123) done. Loss: 0.2220 lr:0.100000 network_time: 0.0502 +[ Thu Sep 15 18:47:05 2022 ] Eval epoch: 50 +[ Thu Sep 15 18:47:28 2022 ] Mean test loss of 258 batches: 2.002220869064331. +[ Thu Sep 15 18:47:28 2022 ] Top1: 56.75% +[ Thu Sep 15 18:47:28 2022 ] Top5: 88.09% +[ Thu Sep 15 18:47:28 2022 ] Training epoch: 51 +[ Thu Sep 15 18:47:50 2022 ] Batch(49/123) done. Loss: 0.2155 lr:0.100000 network_time: 0.0476 +[ Thu Sep 15 18:48:17 2022 ] Eval epoch: 51 +[ Thu Sep 15 18:48:40 2022 ] Mean test loss of 258 batches: 2.272024154663086. 
+[ Thu Sep 15 18:48:40 2022 ] Top1: 53.65% +[ Thu Sep 15 18:48:40 2022 ] Top5: 83.74% +[ Thu Sep 15 18:48:40 2022 ] Training epoch: 52 +[ Thu Sep 15 18:48:54 2022 ] Batch(26/123) done. Loss: 0.2241 lr:0.100000 network_time: 0.0483 +[ Thu Sep 15 18:49:29 2022 ] Eval epoch: 52 +[ Thu Sep 15 18:49:52 2022 ] Mean test loss of 258 batches: 2.388519763946533. +[ Thu Sep 15 18:49:52 2022 ] Top1: 54.24% +[ Thu Sep 15 18:49:52 2022 ] Top5: 84.73% +[ Thu Sep 15 18:49:52 2022 ] Training epoch: 53 +[ Thu Sep 15 18:49:57 2022 ] Batch(3/123) done. Loss: 0.2244 lr:0.100000 network_time: 0.0548 +[ Thu Sep 15 18:50:34 2022 ] Batch(103/123) done. Loss: 0.3865 lr:0.100000 network_time: 0.0536 +[ Thu Sep 15 18:50:41 2022 ] Eval epoch: 53 +[ Thu Sep 15 18:51:03 2022 ] Mean test loss of 258 batches: 2.6002821922302246. +[ Thu Sep 15 18:51:03 2022 ] Top1: 51.43% +[ Thu Sep 15 18:51:03 2022 ] Top5: 81.96% +[ Thu Sep 15 18:51:03 2022 ] Training epoch: 54 +[ Thu Sep 15 18:51:37 2022 ] Batch(80/123) done. Loss: 0.3344 lr:0.100000 network_time: 0.0506 +[ Thu Sep 15 18:51:52 2022 ] Eval epoch: 54 +[ Thu Sep 15 18:52:14 2022 ] Mean test loss of 258 batches: 2.0189566612243652. +[ Thu Sep 15 18:52:15 2022 ] Top1: 57.08% +[ Thu Sep 15 18:52:15 2022 ] Top5: 87.01% +[ Thu Sep 15 18:52:15 2022 ] Training epoch: 55 +[ Thu Sep 15 18:52:40 2022 ] Batch(57/123) done. Loss: 0.2622 lr:0.100000 network_time: 0.0501 +[ Thu Sep 15 18:53:04 2022 ] Eval epoch: 55 +[ Thu Sep 15 18:53:26 2022 ] Mean test loss of 258 batches: 2.4324235916137695. +[ Thu Sep 15 18:53:26 2022 ] Top1: 52.70% +[ Thu Sep 15 18:53:26 2022 ] Top5: 84.04% +[ Thu Sep 15 18:53:26 2022 ] Training epoch: 56 +[ Thu Sep 15 18:53:43 2022 ] Batch(34/123) done. Loss: 0.1660 lr:0.100000 network_time: 0.0509 +[ Thu Sep 15 18:54:15 2022 ] Eval epoch: 56 +[ Thu Sep 15 18:54:38 2022 ] Mean test loss of 258 batches: 2.0325913429260254. 
+[ Thu Sep 15 18:54:38 2022 ] Top1: 55.20% +[ Thu Sep 15 18:54:38 2022 ] Top5: 85.80% +[ Thu Sep 15 18:54:38 2022 ] Training epoch: 57 +[ Thu Sep 15 18:54:46 2022 ] Batch(11/123) done. Loss: 0.1341 lr:0.100000 network_time: 0.0484 +[ Thu Sep 15 18:55:22 2022 ] Batch(111/123) done. Loss: 0.1991 lr:0.100000 network_time: 0.0486 +[ Thu Sep 15 18:55:27 2022 ] Eval epoch: 57 +[ Thu Sep 15 18:55:49 2022 ] Mean test loss of 258 batches: 2.4592440128326416. +[ Thu Sep 15 18:55:49 2022 ] Top1: 52.16% +[ Thu Sep 15 18:55:49 2022 ] Top5: 85.21% +[ Thu Sep 15 18:55:49 2022 ] Training epoch: 58 +[ Thu Sep 15 18:56:26 2022 ] Batch(88/123) done. Loss: 0.2833 lr:0.100000 network_time: 0.0509 +[ Thu Sep 15 18:56:39 2022 ] Eval epoch: 58 +[ Thu Sep 15 18:57:01 2022 ] Mean test loss of 258 batches: 2.467912197113037. +[ Thu Sep 15 18:57:01 2022 ] Top1: 51.36% +[ Thu Sep 15 18:57:01 2022 ] Top5: 83.87% +[ Thu Sep 15 18:57:01 2022 ] Training epoch: 59 +[ Thu Sep 15 18:57:29 2022 ] Batch(65/123) done. Loss: 0.1076 lr:0.100000 network_time: 0.0481 +[ Thu Sep 15 18:57:50 2022 ] Eval epoch: 59 +[ Thu Sep 15 18:58:12 2022 ] Mean test loss of 258 batches: 2.4239609241485596. +[ Thu Sep 15 18:58:12 2022 ] Top1: 53.31% +[ Thu Sep 15 18:58:12 2022 ] Top5: 84.36% +[ Thu Sep 15 18:58:12 2022 ] Training epoch: 60 +[ Thu Sep 15 18:58:31 2022 ] Batch(42/123) done. Loss: 0.1055 lr:0.100000 network_time: 0.0495 +[ Thu Sep 15 18:59:01 2022 ] Eval epoch: 60 +[ Thu Sep 15 18:59:23 2022 ] Mean test loss of 258 batches: 3.575775146484375. +[ Thu Sep 15 18:59:23 2022 ] Top1: 43.85% +[ Thu Sep 15 18:59:23 2022 ] Top5: 77.35% +[ Thu Sep 15 18:59:23 2022 ] Training epoch: 61 +[ Thu Sep 15 18:59:34 2022 ] Batch(19/123) done. Loss: 0.0923 lr:0.010000 network_time: 0.0500 +[ Thu Sep 15 19:00:11 2022 ] Batch(119/123) done. Loss: 0.2071 lr:0.010000 network_time: 0.0513 +[ Thu Sep 15 19:00:13 2022 ] Eval epoch: 61 +[ Thu Sep 15 19:00:35 2022 ] Mean test loss of 258 batches: 1.877470850944519. 
+[ Thu Sep 15 19:00:35 2022 ] Top1: 60.21% +[ Thu Sep 15 19:00:35 2022 ] Top5: 88.57% +[ Thu Sep 15 19:00:35 2022 ] Training epoch: 62 +[ Thu Sep 15 19:01:14 2022 ] Batch(96/123) done. Loss: 0.0809 lr:0.010000 network_time: 0.0479 +[ Thu Sep 15 19:01:24 2022 ] Eval epoch: 62 +[ Thu Sep 15 19:01:46 2022 ] Mean test loss of 258 batches: 1.7835830450057983. +[ Thu Sep 15 19:01:46 2022 ] Top1: 61.78% +[ Thu Sep 15 19:01:46 2022 ] Top5: 89.26% +[ Thu Sep 15 19:01:46 2022 ] Training epoch: 63 +[ Thu Sep 15 19:02:17 2022 ] Batch(73/123) done. Loss: 0.0213 lr:0.010000 network_time: 0.0486 +[ Thu Sep 15 19:02:35 2022 ] Eval epoch: 63 +[ Thu Sep 15 19:02:57 2022 ] Mean test loss of 258 batches: 1.9798433780670166. +[ Thu Sep 15 19:02:57 2022 ] Top1: 60.31% +[ Thu Sep 15 19:02:57 2022 ] Top5: 88.11% +[ Thu Sep 15 19:02:57 2022 ] Training epoch: 64 +[ Thu Sep 15 19:03:19 2022 ] Batch(50/123) done. Loss: 0.0367 lr:0.010000 network_time: 0.0499 +[ Thu Sep 15 19:03:46 2022 ] Eval epoch: 64 +[ Thu Sep 15 19:04:08 2022 ] Mean test loss of 258 batches: 1.7843399047851562. +[ Thu Sep 15 19:04:08 2022 ] Top1: 63.04% +[ Thu Sep 15 19:04:08 2022 ] Top5: 89.64% +[ Thu Sep 15 19:04:08 2022 ] Training epoch: 65 +[ Thu Sep 15 19:04:22 2022 ] Batch(27/123) done. Loss: 0.0191 lr:0.010000 network_time: 0.0488 +[ Thu Sep 15 19:04:57 2022 ] Eval epoch: 65 +[ Thu Sep 15 19:05:19 2022 ] Mean test loss of 258 batches: 1.744349718093872. +[ Thu Sep 15 19:05:19 2022 ] Top1: 63.47% +[ Thu Sep 15 19:05:19 2022 ] Top5: 89.96% +[ Thu Sep 15 19:05:19 2022 ] Training epoch: 66 +[ Thu Sep 15 19:05:24 2022 ] Batch(4/123) done. Loss: 0.0174 lr:0.010000 network_time: 0.0475 +[ Thu Sep 15 19:06:01 2022 ] Batch(104/123) done. Loss: 0.0277 lr:0.010000 network_time: 0.0517 +[ Thu Sep 15 19:06:08 2022 ] Eval epoch: 66 +[ Thu Sep 15 19:06:30 2022 ] Mean test loss of 258 batches: 1.8125786781311035. 
+[ Thu Sep 15 19:06:31 2022 ] Top1: 62.75% +[ Thu Sep 15 19:06:31 2022 ] Top5: 89.50% +[ Thu Sep 15 19:06:31 2022 ] Training epoch: 67 +[ Thu Sep 15 19:07:05 2022 ] Batch(81/123) done. Loss: 0.0325 lr:0.010000 network_time: 0.0499 +[ Thu Sep 15 19:07:20 2022 ] Eval epoch: 67 +[ Thu Sep 15 19:07:42 2022 ] Mean test loss of 258 batches: 1.9440727233886719. +[ Thu Sep 15 19:07:42 2022 ] Top1: 60.93% +[ Thu Sep 15 19:07:42 2022 ] Top5: 88.65% +[ Thu Sep 15 19:07:43 2022 ] Training epoch: 68 +[ Thu Sep 15 19:08:08 2022 ] Batch(58/123) done. Loss: 0.0422 lr:0.010000 network_time: 0.0535 +[ Thu Sep 15 19:08:32 2022 ] Eval epoch: 68 +[ Thu Sep 15 19:08:55 2022 ] Mean test loss of 258 batches: 1.741814136505127. +[ Thu Sep 15 19:08:55 2022 ] Top1: 63.69% +[ Thu Sep 15 19:08:55 2022 ] Top5: 89.97% +[ Thu Sep 15 19:08:55 2022 ] Training epoch: 69 +[ Thu Sep 15 19:09:12 2022 ] Batch(35/123) done. Loss: 0.0118 lr:0.010000 network_time: 0.0488 +[ Thu Sep 15 19:09:44 2022 ] Eval epoch: 69 +[ Thu Sep 15 19:10:06 2022 ] Mean test loss of 258 batches: 1.7398862838745117. +[ Thu Sep 15 19:10:06 2022 ] Top1: 63.49% +[ Thu Sep 15 19:10:06 2022 ] Top5: 90.01% +[ Thu Sep 15 19:10:06 2022 ] Training epoch: 70 +[ Thu Sep 15 19:10:15 2022 ] Batch(12/123) done. Loss: 0.0105 lr:0.010000 network_time: 0.0503 +[ Thu Sep 15 19:10:52 2022 ] Batch(112/123) done. Loss: 0.0089 lr:0.010000 network_time: 0.0495 +[ Thu Sep 15 19:10:56 2022 ] Eval epoch: 70 +[ Thu Sep 15 19:11:18 2022 ] Mean test loss of 258 batches: 1.9365839958190918. +[ Thu Sep 15 19:11:18 2022 ] Top1: 60.96% +[ Thu Sep 15 19:11:18 2022 ] Top5: 88.48% +[ Thu Sep 15 19:11:18 2022 ] Training epoch: 71 +[ Thu Sep 15 19:11:55 2022 ] Batch(89/123) done. Loss: 0.0112 lr:0.010000 network_time: 0.0486 +[ Thu Sep 15 19:12:07 2022 ] Eval epoch: 71 +[ Thu Sep 15 19:12:29 2022 ] Mean test loss of 258 batches: 1.9486991167068481. 
+[ Thu Sep 15 19:12:29 2022 ] Top1: 61.18% +[ Thu Sep 15 19:12:29 2022 ] Top5: 88.75% +[ Thu Sep 15 19:12:30 2022 ] Training epoch: 72 +[ Thu Sep 15 19:12:57 2022 ] Batch(66/123) done. Loss: 0.0345 lr:0.010000 network_time: 0.0501 +[ Thu Sep 15 19:13:18 2022 ] Eval epoch: 72 +[ Thu Sep 15 19:13:40 2022 ] Mean test loss of 258 batches: 1.7845265865325928. +[ Thu Sep 15 19:13:41 2022 ] Top1: 63.67% +[ Thu Sep 15 19:13:41 2022 ] Top5: 89.93% +[ Thu Sep 15 19:13:41 2022 ] Training epoch: 73 +[ Thu Sep 15 19:14:01 2022 ] Batch(43/123) done. Loss: 0.0063 lr:0.010000 network_time: 0.0545 +[ Thu Sep 15 19:14:30 2022 ] Eval epoch: 73 +[ Thu Sep 15 19:14:52 2022 ] Mean test loss of 258 batches: 1.789925217628479. +[ Thu Sep 15 19:14:52 2022 ] Top1: 63.49% +[ Thu Sep 15 19:14:52 2022 ] Top5: 89.93% +[ Thu Sep 15 19:14:52 2022 ] Training epoch: 74 +[ Thu Sep 15 19:15:04 2022 ] Batch(20/123) done. Loss: 0.0044 lr:0.010000 network_time: 0.0546 +[ Thu Sep 15 19:15:41 2022 ] Batch(120/123) done. Loss: 0.0185 lr:0.010000 network_time: 0.0510 +[ Thu Sep 15 19:15:42 2022 ] Eval epoch: 74 +[ Thu Sep 15 19:16:04 2022 ] Mean test loss of 258 batches: 2.539682388305664. +[ Thu Sep 15 19:16:04 2022 ] Top1: 55.62% +[ Thu Sep 15 19:16:04 2022 ] Top5: 83.94% +[ Thu Sep 15 19:16:04 2022 ] Training epoch: 75 +[ Thu Sep 15 19:16:44 2022 ] Batch(97/123) done. Loss: 0.0110 lr:0.010000 network_time: 0.0490 +[ Thu Sep 15 19:16:53 2022 ] Eval epoch: 75 +[ Thu Sep 15 19:17:16 2022 ] Mean test loss of 258 batches: 1.8047730922698975. +[ Thu Sep 15 19:17:16 2022 ] Top1: 63.53% +[ Thu Sep 15 19:17:16 2022 ] Top5: 90.11% +[ Thu Sep 15 19:17:16 2022 ] Training epoch: 76 +[ Thu Sep 15 19:17:47 2022 ] Batch(74/123) done. Loss: 0.0177 lr:0.010000 network_time: 0.0472 +[ Thu Sep 15 19:18:05 2022 ] Eval epoch: 76 +[ Thu Sep 15 19:18:27 2022 ] Mean test loss of 258 batches: 1.815438985824585. 
+[ Thu Sep 15 19:18:27 2022 ] Top1: 63.13% +[ Thu Sep 15 19:18:27 2022 ] Top5: 89.87% +[ Thu Sep 15 19:18:27 2022 ] Training epoch: 77 +[ Thu Sep 15 19:18:50 2022 ] Batch(51/123) done. Loss: 0.0060 lr:0.010000 network_time: 0.0566 +[ Thu Sep 15 19:19:16 2022 ] Eval epoch: 77 +[ Thu Sep 15 19:19:38 2022 ] Mean test loss of 258 batches: 1.7667587995529175. +[ Thu Sep 15 19:19:38 2022 ] Top1: 63.53% +[ Thu Sep 15 19:19:38 2022 ] Top5: 90.14% +[ Thu Sep 15 19:19:38 2022 ] Training epoch: 78 +[ Thu Sep 15 19:19:53 2022 ] Batch(28/123) done. Loss: 0.0132 lr:0.010000 network_time: 0.0503 +[ Thu Sep 15 19:20:28 2022 ] Eval epoch: 78 +[ Thu Sep 15 19:20:50 2022 ] Mean test loss of 258 batches: 1.8042775392532349. +[ Thu Sep 15 19:20:50 2022 ] Top1: 63.43% +[ Thu Sep 15 19:20:50 2022 ] Top5: 90.12% +[ Thu Sep 15 19:20:50 2022 ] Training epoch: 79 +[ Thu Sep 15 19:20:56 2022 ] Batch(5/123) done. Loss: 0.0055 lr:0.010000 network_time: 0.0501 +[ Thu Sep 15 19:21:33 2022 ] Batch(105/123) done. Loss: 0.0050 lr:0.010000 network_time: 0.0499 +[ Thu Sep 15 19:21:39 2022 ] Eval epoch: 79 +[ Thu Sep 15 19:22:02 2022 ] Mean test loss of 258 batches: 1.7913448810577393. +[ Thu Sep 15 19:22:02 2022 ] Top1: 63.30% +[ Thu Sep 15 19:22:02 2022 ] Top5: 89.98% +[ Thu Sep 15 19:22:02 2022 ] Training epoch: 80 +[ Thu Sep 15 19:22:36 2022 ] Batch(82/123) done. Loss: 0.0145 lr:0.010000 network_time: 0.0501 +[ Thu Sep 15 19:22:51 2022 ] Eval epoch: 80 +[ Thu Sep 15 19:23:13 2022 ] Mean test loss of 258 batches: 1.7956622838974. +[ Thu Sep 15 19:23:13 2022 ] Top1: 63.67% +[ Thu Sep 15 19:23:13 2022 ] Top5: 90.21% +[ Thu Sep 15 19:23:13 2022 ] Training epoch: 81 +[ Thu Sep 15 19:23:39 2022 ] Batch(59/123) done. Loss: 0.0074 lr:0.001000 network_time: 0.0484 +[ Thu Sep 15 19:24:03 2022 ] Eval epoch: 81 +[ Thu Sep 15 19:24:24 2022 ] Mean test loss of 258 batches: 1.8657962083816528. 
+[ Thu Sep 15 19:24:24 2022 ] Top1: 62.61% +[ Thu Sep 15 19:24:24 2022 ] Top5: 89.59% +[ Thu Sep 15 19:24:24 2022 ] Training epoch: 82 +[ Thu Sep 15 19:24:42 2022 ] Batch(36/123) done. Loss: 0.0165 lr:0.001000 network_time: 0.0491 +[ Thu Sep 15 19:25:14 2022 ] Eval epoch: 82 +[ Thu Sep 15 19:25:36 2022 ] Mean test loss of 258 batches: 1.816205620765686. +[ Thu Sep 15 19:25:36 2022 ] Top1: 63.49% +[ Thu Sep 15 19:25:36 2022 ] Top5: 89.88% +[ Thu Sep 15 19:25:36 2022 ] Training epoch: 83 +[ Thu Sep 15 19:25:45 2022 ] Batch(13/123) done. Loss: 0.0259 lr:0.001000 network_time: 0.0535 +[ Thu Sep 15 19:26:21 2022 ] Batch(113/123) done. Loss: 0.0102 lr:0.001000 network_time: 0.0512 +[ Thu Sep 15 19:26:25 2022 ] Eval epoch: 83 +[ Thu Sep 15 19:26:47 2022 ] Mean test loss of 258 batches: 1.8096046447753906. +[ Thu Sep 15 19:26:47 2022 ] Top1: 63.41% +[ Thu Sep 15 19:26:47 2022 ] Top5: 90.00% +[ Thu Sep 15 19:26:47 2022 ] Training epoch: 84 +[ Thu Sep 15 19:27:24 2022 ] Batch(90/123) done. Loss: 0.0099 lr:0.001000 network_time: 0.0497 +[ Thu Sep 15 19:27:36 2022 ] Eval epoch: 84 +[ Thu Sep 15 19:27:58 2022 ] Mean test loss of 258 batches: 1.8829015493392944. +[ Thu Sep 15 19:27:58 2022 ] Top1: 62.32% +[ Thu Sep 15 19:27:58 2022 ] Top5: 89.57% +[ Thu Sep 15 19:27:59 2022 ] Training epoch: 85 +[ Thu Sep 15 19:28:27 2022 ] Batch(67/123) done. Loss: 0.0425 lr:0.001000 network_time: 0.0476 +[ Thu Sep 15 19:28:48 2022 ] Eval epoch: 85 +[ Thu Sep 15 19:29:09 2022 ] Mean test loss of 258 batches: 1.8470373153686523. +[ Thu Sep 15 19:29:10 2022 ] Top1: 63.13% +[ Thu Sep 15 19:29:10 2022 ] Top5: 89.90% +[ Thu Sep 15 19:29:10 2022 ] Training epoch: 86 +[ Thu Sep 15 19:29:30 2022 ] Batch(44/123) done. Loss: 0.0200 lr:0.001000 network_time: 0.0479 +[ Thu Sep 15 19:29:59 2022 ] Eval epoch: 86 +[ Thu Sep 15 19:30:21 2022 ] Mean test loss of 258 batches: 1.75849187374115. 
+[ Thu Sep 15 19:30:21 2022 ] Top1: 64.07% +[ Thu Sep 15 19:30:21 2022 ] Top5: 90.30% +[ Thu Sep 15 19:30:21 2022 ] Training epoch: 87 +[ Thu Sep 15 19:30:33 2022 ] Batch(21/123) done. Loss: 0.0124 lr:0.001000 network_time: 0.0509 +[ Thu Sep 15 19:31:10 2022 ] Batch(121/123) done. Loss: 0.0037 lr:0.001000 network_time: 0.0508 +[ Thu Sep 15 19:31:10 2022 ] Eval epoch: 87 +[ Thu Sep 15 19:31:32 2022 ] Mean test loss of 258 batches: 1.8853404521942139. +[ Thu Sep 15 19:31:32 2022 ] Top1: 62.67% +[ Thu Sep 15 19:31:32 2022 ] Top5: 89.62% +[ Thu Sep 15 19:31:33 2022 ] Training epoch: 88 +[ Thu Sep 15 19:32:13 2022 ] Batch(98/123) done. Loss: 0.0058 lr:0.001000 network_time: 0.0507 +[ Thu Sep 15 19:32:22 2022 ] Eval epoch: 88 +[ Thu Sep 15 19:32:44 2022 ] Mean test loss of 258 batches: 1.8328523635864258. +[ Thu Sep 15 19:32:44 2022 ] Top1: 63.30% +[ Thu Sep 15 19:32:44 2022 ] Top5: 89.90% +[ Thu Sep 15 19:32:44 2022 ] Training epoch: 89 +[ Thu Sep 15 19:33:16 2022 ] Batch(75/123) done. Loss: 0.0281 lr:0.001000 network_time: 0.0507 +[ Thu Sep 15 19:33:33 2022 ] Eval epoch: 89 +[ Thu Sep 15 19:33:55 2022 ] Mean test loss of 258 batches: 1.786879539489746. +[ Thu Sep 15 19:33:55 2022 ] Top1: 63.64% +[ Thu Sep 15 19:33:55 2022 ] Top5: 90.19% +[ Thu Sep 15 19:33:55 2022 ] Training epoch: 90 +[ Thu Sep 15 19:34:18 2022 ] Batch(52/123) done. Loss: 0.0072 lr:0.001000 network_time: 0.0499 +[ Thu Sep 15 19:34:44 2022 ] Eval epoch: 90 +[ Thu Sep 15 19:35:06 2022 ] Mean test loss of 258 batches: 1.8092989921569824. +[ Thu Sep 15 19:35:06 2022 ] Top1: 63.61% +[ Thu Sep 15 19:35:06 2022 ] Top5: 89.99% +[ Thu Sep 15 19:35:07 2022 ] Training epoch: 91 +[ Thu Sep 15 19:35:21 2022 ] Batch(29/123) done. Loss: 0.0100 lr:0.001000 network_time: 0.0501 +[ Thu Sep 15 19:35:56 2022 ] Eval epoch: 91 +[ Thu Sep 15 19:36:18 2022 ] Mean test loss of 258 batches: 1.7964448928833008. 
+[ Thu Sep 15 19:36:18 2022 ] Top1: 63.43% +[ Thu Sep 15 19:36:18 2022 ] Top5: 90.09% +[ Thu Sep 15 19:36:18 2022 ] Training epoch: 92 +[ Thu Sep 15 19:36:24 2022 ] Batch(6/123) done. Loss: 0.0078 lr:0.001000 network_time: 0.0484 +[ Thu Sep 15 19:37:01 2022 ] Batch(106/123) done. Loss: 0.0191 lr:0.001000 network_time: 0.0503 +[ Thu Sep 15 19:37:07 2022 ] Eval epoch: 92 +[ Thu Sep 15 19:37:29 2022 ] Mean test loss of 258 batches: 1.8964364528656006. +[ Thu Sep 15 19:37:29 2022 ] Top1: 62.33% +[ Thu Sep 15 19:37:29 2022 ] Top5: 89.44% +[ Thu Sep 15 19:37:29 2022 ] Training epoch: 93 +[ Thu Sep 15 19:38:04 2022 ] Batch(83/123) done. Loss: 0.0090 lr:0.001000 network_time: 0.0504 +[ Thu Sep 15 19:38:18 2022 ] Eval epoch: 93 +[ Thu Sep 15 19:38:40 2022 ] Mean test loss of 258 batches: 1.8207327127456665. +[ Thu Sep 15 19:38:40 2022 ] Top1: 63.50% +[ Thu Sep 15 19:38:40 2022 ] Top5: 90.06% +[ Thu Sep 15 19:38:40 2022 ] Training epoch: 94 +[ Thu Sep 15 19:39:07 2022 ] Batch(60/123) done. Loss: 0.0136 lr:0.001000 network_time: 0.0513 +[ Thu Sep 15 19:39:30 2022 ] Eval epoch: 94 +[ Thu Sep 15 19:39:51 2022 ] Mean test loss of 258 batches: 1.8205301761627197. +[ Thu Sep 15 19:39:52 2022 ] Top1: 63.17% +[ Thu Sep 15 19:39:52 2022 ] Top5: 89.72% +[ Thu Sep 15 19:39:52 2022 ] Training epoch: 95 +[ Thu Sep 15 19:40:09 2022 ] Batch(37/123) done. Loss: 0.0293 lr:0.001000 network_time: 0.0525 +[ Thu Sep 15 19:40:41 2022 ] Eval epoch: 95 +[ Thu Sep 15 19:41:03 2022 ] Mean test loss of 258 batches: 1.797655701637268. +[ Thu Sep 15 19:41:03 2022 ] Top1: 63.70% +[ Thu Sep 15 19:41:03 2022 ] Top5: 89.80% +[ Thu Sep 15 19:41:03 2022 ] Training epoch: 96 +[ Thu Sep 15 19:41:12 2022 ] Batch(14/123) done. Loss: 0.0108 lr:0.001000 network_time: 0.0514 +[ Thu Sep 15 19:41:49 2022 ] Batch(114/123) done. Loss: 0.0097 lr:0.001000 network_time: 0.0488 +[ Thu Sep 15 19:41:52 2022 ] Eval epoch: 96 +[ Thu Sep 15 19:42:14 2022 ] Mean test loss of 258 batches: 1.812503695487976. 
+[ Thu Sep 15 19:42:14 2022 ] Top1: 63.56% +[ Thu Sep 15 19:42:14 2022 ] Top5: 90.00% +[ Thu Sep 15 19:42:14 2022 ] Training epoch: 97 +[ Thu Sep 15 19:42:52 2022 ] Batch(91/123) done. Loss: 0.0153 lr:0.001000 network_time: 0.0531 +[ Thu Sep 15 19:43:03 2022 ] Eval epoch: 97 +[ Thu Sep 15 19:43:25 2022 ] Mean test loss of 258 batches: 2.090479850769043. +[ Thu Sep 15 19:43:25 2022 ] Top1: 59.76% +[ Thu Sep 15 19:43:25 2022 ] Top5: 87.82% +[ Thu Sep 15 19:43:26 2022 ] Training epoch: 98 +[ Thu Sep 15 19:43:54 2022 ] Batch(68/123) done. Loss: 0.0160 lr:0.001000 network_time: 0.0525 +[ Thu Sep 15 19:44:15 2022 ] Eval epoch: 98 +[ Thu Sep 15 19:44:36 2022 ] Mean test loss of 258 batches: 1.7830455303192139. +[ Thu Sep 15 19:44:36 2022 ] Top1: 63.97% +[ Thu Sep 15 19:44:37 2022 ] Top5: 90.38% +[ Thu Sep 15 19:44:37 2022 ] Training epoch: 99 +[ Thu Sep 15 19:44:57 2022 ] Batch(45/123) done. Loss: 0.0050 lr:0.001000 network_time: 0.0537 +[ Thu Sep 15 19:45:25 2022 ] Eval epoch: 99 +[ Thu Sep 15 19:45:47 2022 ] Mean test loss of 258 batches: 1.8485912084579468. +[ Thu Sep 15 19:45:47 2022 ] Top1: 63.38% +[ Thu Sep 15 19:45:47 2022 ] Top5: 89.60% +[ Thu Sep 15 19:45:47 2022 ] Training epoch: 100 +[ Thu Sep 15 19:46:00 2022 ] Batch(22/123) done. Loss: 0.0056 lr:0.001000 network_time: 0.0519 +[ Thu Sep 15 19:46:36 2022 ] Batch(122/123) done. Loss: 0.0069 lr:0.001000 network_time: 0.0516 +[ Thu Sep 15 19:46:37 2022 ] Eval epoch: 100 +[ Thu Sep 15 19:46:59 2022 ] Mean test loss of 258 batches: 1.8104979991912842. +[ Thu Sep 15 19:46:59 2022 ] Top1: 63.95% +[ Thu Sep 15 19:46:59 2022 ] Top5: 90.19% +[ Thu Sep 15 19:46:59 2022 ] Training epoch: 101 +[ Thu Sep 15 19:47:39 2022 ] Batch(99/123) done. Loss: 0.0266 lr:0.000100 network_time: 0.0471 +[ Thu Sep 15 19:47:48 2022 ] Eval epoch: 101 +[ Thu Sep 15 19:48:10 2022 ] Mean test loss of 258 batches: 2.072659730911255. 
+[ Thu Sep 15 19:48:10 2022 ] Top1: 60.56% +[ Thu Sep 15 19:48:10 2022 ] Top5: 88.27% +[ Thu Sep 15 19:48:10 2022 ] Training epoch: 102 +[ Thu Sep 15 19:48:42 2022 ] Batch(76/123) done. Loss: 0.0156 lr:0.000100 network_time: 0.0511 +[ Thu Sep 15 19:48:59 2022 ] Eval epoch: 102 +[ Thu Sep 15 19:49:21 2022 ] Mean test loss of 258 batches: 1.7830250263214111. +[ Thu Sep 15 19:49:21 2022 ] Top1: 63.90% +[ Thu Sep 15 19:49:21 2022 ] Top5: 90.25% +[ Thu Sep 15 19:49:21 2022 ] Training epoch: 103 +[ Thu Sep 15 19:49:45 2022 ] Batch(53/123) done. Loss: 0.0099 lr:0.000100 network_time: 0.0513 +[ Thu Sep 15 19:50:11 2022 ] Eval epoch: 103 +[ Thu Sep 15 19:50:33 2022 ] Mean test loss of 258 batches: 1.8229682445526123. +[ Thu Sep 15 19:50:33 2022 ] Top1: 63.18% +[ Thu Sep 15 19:50:33 2022 ] Top5: 89.80% +[ Thu Sep 15 19:50:33 2022 ] Training epoch: 104 +[ Thu Sep 15 19:50:48 2022 ] Batch(30/123) done. Loss: 0.0051 lr:0.000100 network_time: 0.0522 +[ Thu Sep 15 19:51:22 2022 ] Eval epoch: 104 +[ Thu Sep 15 19:51:43 2022 ] Mean test loss of 258 batches: 1.7732776403427124. +[ Thu Sep 15 19:51:43 2022 ] Top1: 63.84% +[ Thu Sep 15 19:51:43 2022 ] Top5: 90.27% +[ Thu Sep 15 19:51:44 2022 ] Training epoch: 105 +[ Thu Sep 15 19:51:50 2022 ] Batch(7/123) done. Loss: 0.0063 lr:0.000100 network_time: 0.0545 +[ Thu Sep 15 19:52:27 2022 ] Batch(107/123) done. Loss: 0.0102 lr:0.000100 network_time: 0.0489 +[ Thu Sep 15 19:52:33 2022 ] Eval epoch: 105 +[ Thu Sep 15 19:52:55 2022 ] Mean test loss of 258 batches: 1.8950495719909668. +[ Thu Sep 15 19:52:55 2022 ] Top1: 63.23% +[ Thu Sep 15 19:52:55 2022 ] Top5: 89.54% +[ Thu Sep 15 19:52:55 2022 ] Training epoch: 106 +[ Thu Sep 15 19:53:30 2022 ] Batch(84/123) done. Loss: 0.0049 lr:0.000100 network_time: 0.0528 +[ Thu Sep 15 19:53:44 2022 ] Eval epoch: 106 +[ Thu Sep 15 19:54:06 2022 ] Mean test loss of 258 batches: 1.841883659362793. 
+[ Thu Sep 15 19:54:06 2022 ] Top1: 62.84% +[ Thu Sep 15 19:54:06 2022 ] Top5: 89.73% +[ Thu Sep 15 19:54:06 2022 ] Training epoch: 107 +[ Thu Sep 15 19:54:33 2022 ] Batch(61/123) done. Loss: 0.0210 lr:0.000100 network_time: 0.0536 +[ Thu Sep 15 19:54:55 2022 ] Eval epoch: 107 +[ Thu Sep 15 19:55:17 2022 ] Mean test loss of 258 batches: 1.8099277019500732. +[ Thu Sep 15 19:55:17 2022 ] Top1: 63.50% +[ Thu Sep 15 19:55:18 2022 ] Top5: 90.20% +[ Thu Sep 15 19:55:18 2022 ] Training epoch: 108 +[ Thu Sep 15 19:55:36 2022 ] Batch(38/123) done. Loss: 0.0105 lr:0.000100 network_time: 0.0517 +[ Thu Sep 15 19:56:07 2022 ] Eval epoch: 108 +[ Thu Sep 15 19:56:29 2022 ] Mean test loss of 258 batches: 1.7447724342346191. +[ Thu Sep 15 19:56:29 2022 ] Top1: 64.22% +[ Thu Sep 15 19:56:29 2022 ] Top5: 90.49% +[ Thu Sep 15 19:56:29 2022 ] Training epoch: 109 +[ Thu Sep 15 19:56:39 2022 ] Batch(15/123) done. Loss: 0.0234 lr:0.000100 network_time: 0.0460 +[ Thu Sep 15 19:57:15 2022 ] Batch(115/123) done. Loss: 0.0054 lr:0.000100 network_time: 0.0537 +[ Thu Sep 15 19:57:18 2022 ] Eval epoch: 109 +[ Thu Sep 15 19:57:40 2022 ] Mean test loss of 258 batches: 1.908057451248169. +[ Thu Sep 15 19:57:40 2022 ] Top1: 62.76% +[ Thu Sep 15 19:57:40 2022 ] Top5: 89.36% +[ Thu Sep 15 19:57:40 2022 ] Training epoch: 110 +[ Thu Sep 15 19:58:18 2022 ] Batch(92/123) done. Loss: 0.0211 lr:0.000100 network_time: 0.0511 +[ Thu Sep 15 19:58:29 2022 ] Eval epoch: 110 +[ Thu Sep 15 19:58:51 2022 ] Mean test loss of 258 batches: 1.8803648948669434. +[ Thu Sep 15 19:58:51 2022 ] Top1: 62.55% +[ Thu Sep 15 19:58:51 2022 ] Top5: 89.43% +[ Thu Sep 15 19:58:51 2022 ] Training epoch: 111 +[ Thu Sep 15 19:59:21 2022 ] Batch(69/123) done. Loss: 0.0044 lr:0.000100 network_time: 0.0523 +[ Thu Sep 15 19:59:41 2022 ] Eval epoch: 111 +[ Thu Sep 15 20:00:02 2022 ] Mean test loss of 258 batches: 1.7714176177978516. 
+[ Thu Sep 15 20:00:02 2022 ] Top1: 64.00% +[ Thu Sep 15 20:00:02 2022 ] Top5: 90.25% +[ Thu Sep 15 20:00:03 2022 ] Training epoch: 112 +[ Thu Sep 15 20:00:23 2022 ] Batch(46/123) done. Loss: 0.0086 lr:0.000100 network_time: 0.0493 +[ Thu Sep 15 20:00:52 2022 ] Eval epoch: 112 +[ Thu Sep 15 20:01:13 2022 ] Mean test loss of 258 batches: 1.7842161655426025. +[ Thu Sep 15 20:01:14 2022 ] Top1: 63.52% +[ Thu Sep 15 20:01:14 2022 ] Top5: 90.15% +[ Thu Sep 15 20:01:14 2022 ] Training epoch: 113 +[ Thu Sep 15 20:01:26 2022 ] Batch(23/123) done. Loss: 0.0056 lr:0.000100 network_time: 0.0471 +[ Thu Sep 15 20:02:03 2022 ] Eval epoch: 113 +[ Thu Sep 15 20:02:25 2022 ] Mean test loss of 258 batches: 1.7943865060806274. +[ Thu Sep 15 20:02:25 2022 ] Top1: 63.93% +[ Thu Sep 15 20:02:25 2022 ] Top5: 90.07% +[ Thu Sep 15 20:02:25 2022 ] Training epoch: 114 +[ Thu Sep 15 20:02:29 2022 ] Batch(0/123) done. Loss: 0.0093 lr:0.000100 network_time: 0.0932 +[ Thu Sep 15 20:03:06 2022 ] Batch(100/123) done. Loss: 0.0032 lr:0.000100 network_time: 0.0536 +[ Thu Sep 15 20:03:14 2022 ] Eval epoch: 114 +[ Thu Sep 15 20:03:36 2022 ] Mean test loss of 258 batches: 1.8065061569213867. +[ Thu Sep 15 20:03:36 2022 ] Top1: 63.38% +[ Thu Sep 15 20:03:36 2022 ] Top5: 89.91% +[ Thu Sep 15 20:03:36 2022 ] Training epoch: 115 +[ Thu Sep 15 20:04:08 2022 ] Batch(77/123) done. Loss: 0.0055 lr:0.000100 network_time: 0.0531 +[ Thu Sep 15 20:04:25 2022 ] Eval epoch: 115 +[ Thu Sep 15 20:04:47 2022 ] Mean test loss of 258 batches: 1.8034496307373047. +[ Thu Sep 15 20:04:47 2022 ] Top1: 63.55% +[ Thu Sep 15 20:04:47 2022 ] Top5: 90.01% +[ Thu Sep 15 20:04:47 2022 ] Training epoch: 116 +[ Thu Sep 15 20:05:11 2022 ] Batch(54/123) done. Loss: 0.0097 lr:0.000100 network_time: 0.0483 +[ Thu Sep 15 20:05:36 2022 ] Eval epoch: 116 +[ Thu Sep 15 20:05:58 2022 ] Mean test loss of 258 batches: 1.792580008506775. 
+[ Thu Sep 15 20:05:59 2022 ] Top1: 63.86% +[ Thu Sep 15 20:05:59 2022 ] Top5: 90.25% +[ Thu Sep 15 20:05:59 2022 ] Training epoch: 117 +[ Thu Sep 15 20:06:14 2022 ] Batch(31/123) done. Loss: 0.0100 lr:0.000100 network_time: 0.0495 +[ Thu Sep 15 20:06:48 2022 ] Eval epoch: 117 +[ Thu Sep 15 20:07:11 2022 ] Mean test loss of 258 batches: 1.8426971435546875. +[ Thu Sep 15 20:07:11 2022 ] Top1: 63.03% +[ Thu Sep 15 20:07:11 2022 ] Top5: 89.89% +[ Thu Sep 15 20:07:11 2022 ] Training epoch: 118 +[ Thu Sep 15 20:07:18 2022 ] Batch(8/123) done. Loss: 0.0055 lr:0.000100 network_time: 0.0495 +[ Thu Sep 15 20:07:55 2022 ] Batch(108/123) done. Loss: 0.0075 lr:0.000100 network_time: 0.0490 +[ Thu Sep 15 20:08:00 2022 ] Eval epoch: 118 +[ Thu Sep 15 20:08:22 2022 ] Mean test loss of 258 batches: 1.9181476831436157. +[ Thu Sep 15 20:08:23 2022 ] Top1: 61.90% +[ Thu Sep 15 20:08:23 2022 ] Top5: 89.36% +[ Thu Sep 15 20:08:23 2022 ] Training epoch: 119 +[ Thu Sep 15 20:08:58 2022 ] Batch(85/123) done. Loss: 0.0073 lr:0.000100 network_time: 0.0516 +[ Thu Sep 15 20:09:12 2022 ] Eval epoch: 119 +[ Thu Sep 15 20:09:34 2022 ] Mean test loss of 258 batches: 1.851119875907898. +[ Thu Sep 15 20:09:34 2022 ] Top1: 63.08% +[ Thu Sep 15 20:09:34 2022 ] Top5: 89.56% +[ Thu Sep 15 20:09:34 2022 ] Training epoch: 120 +[ Thu Sep 15 20:10:01 2022 ] Batch(62/123) done. Loss: 0.0060 lr:0.000100 network_time: 0.0499 +[ Thu Sep 15 20:10:24 2022 ] Eval epoch: 120 +[ Thu Sep 15 20:10:46 2022 ] Mean test loss of 258 batches: 1.8299061059951782. +[ Thu Sep 15 20:10:46 2022 ] Top1: 63.41% +[ Thu Sep 15 20:10:46 2022 ] Top5: 89.90% +[ Thu Sep 15 20:10:46 2022 ] Training epoch: 121 +[ Thu Sep 15 20:11:04 2022 ] Batch(39/123) done. Loss: 0.0065 lr:0.000100 network_time: 0.0485 +[ Thu Sep 15 20:11:35 2022 ] Eval epoch: 121 +[ Thu Sep 15 20:11:57 2022 ] Mean test loss of 258 batches: 1.870434284210205. 
+[ Thu Sep 15 20:11:57 2022 ] Top1: 62.78% +[ Thu Sep 15 20:11:57 2022 ] Top5: 89.54% +[ Thu Sep 15 20:11:57 2022 ] Training epoch: 122 +[ Thu Sep 15 20:12:07 2022 ] Batch(16/123) done. Loss: 0.0136 lr:0.000100 network_time: 0.0467 +[ Thu Sep 15 20:12:44 2022 ] Batch(116/123) done. Loss: 0.0066 lr:0.000100 network_time: 0.0548 +[ Thu Sep 15 20:12:46 2022 ] Eval epoch: 122 +[ Thu Sep 15 20:13:08 2022 ] Mean test loss of 258 batches: 1.8513699769973755. +[ Thu Sep 15 20:13:08 2022 ] Top1: 63.33% +[ Thu Sep 15 20:13:08 2022 ] Top5: 89.77% +[ Thu Sep 15 20:13:08 2022 ] Training epoch: 123 +[ Thu Sep 15 20:13:47 2022 ] Batch(93/123) done. Loss: 0.0063 lr:0.000100 network_time: 0.0520 +[ Thu Sep 15 20:13:58 2022 ] Eval epoch: 123 +[ Thu Sep 15 20:14:20 2022 ] Mean test loss of 258 batches: 1.815142035484314. +[ Thu Sep 15 20:14:20 2022 ] Top1: 63.26% +[ Thu Sep 15 20:14:20 2022 ] Top5: 89.87% +[ Thu Sep 15 20:14:20 2022 ] Training epoch: 124 +[ Thu Sep 15 20:14:50 2022 ] Batch(70/123) done. Loss: 0.0084 lr:0.000100 network_time: 0.0505 +[ Thu Sep 15 20:15:09 2022 ] Eval epoch: 124 +[ Thu Sep 15 20:15:32 2022 ] Mean test loss of 258 batches: 1.8830796480178833. +[ Thu Sep 15 20:15:32 2022 ] Top1: 62.94% +[ Thu Sep 15 20:15:32 2022 ] Top5: 89.45% +[ Thu Sep 15 20:15:32 2022 ] Training epoch: 125 +[ Thu Sep 15 20:15:53 2022 ] Batch(47/123) done. Loss: 0.0085 lr:0.000100 network_time: 0.0505 +[ Thu Sep 15 20:16:21 2022 ] Eval epoch: 125 +[ Thu Sep 15 20:16:44 2022 ] Mean test loss of 258 batches: 1.7838399410247803. +[ Thu Sep 15 20:16:44 2022 ] Top1: 64.08% +[ Thu Sep 15 20:16:44 2022 ] Top5: 90.30% +[ Thu Sep 15 20:16:44 2022 ] Training epoch: 126 +[ Thu Sep 15 20:16:56 2022 ] Batch(24/123) done. Loss: 0.0177 lr:0.000100 network_time: 0.0495 +[ Thu Sep 15 20:17:33 2022 ] Eval epoch: 126 +[ Thu Sep 15 20:17:55 2022 ] Mean test loss of 258 batches: 1.8182704448699951. 
+[ Thu Sep 15 20:17:55 2022 ] Top1: 63.30% +[ Thu Sep 15 20:17:55 2022 ] Top5: 90.08% +[ Thu Sep 15 20:17:55 2022 ] Training epoch: 127 +[ Thu Sep 15 20:17:59 2022 ] Batch(1/123) done. Loss: 0.0223 lr:0.000100 network_time: 0.0490 +[ Thu Sep 15 20:18:36 2022 ] Batch(101/123) done. Loss: 0.0044 lr:0.000100 network_time: 0.0500 +[ Thu Sep 15 20:18:44 2022 ] Eval epoch: 127 +[ Thu Sep 15 20:19:06 2022 ] Mean test loss of 258 batches: 1.8703221082687378. +[ Thu Sep 15 20:19:06 2022 ] Top1: 62.72% +[ Thu Sep 15 20:19:06 2022 ] Top5: 89.54% +[ Thu Sep 15 20:19:06 2022 ] Training epoch: 128 +[ Thu Sep 15 20:19:39 2022 ] Batch(78/123) done. Loss: 0.0040 lr:0.000100 network_time: 0.0493 +[ Thu Sep 15 20:19:55 2022 ] Eval epoch: 128 +[ Thu Sep 15 20:20:17 2022 ] Mean test loss of 258 batches: 1.8690118789672852. +[ Thu Sep 15 20:20:18 2022 ] Top1: 62.58% +[ Thu Sep 15 20:20:18 2022 ] Top5: 89.51% +[ Thu Sep 15 20:20:18 2022 ] Training epoch: 129 +[ Thu Sep 15 20:20:42 2022 ] Batch(55/123) done. Loss: 0.0102 lr:0.000100 network_time: 0.0490 +[ Thu Sep 15 20:21:07 2022 ] Eval epoch: 129 +[ Thu Sep 15 20:21:29 2022 ] Mean test loss of 258 batches: 1.8351080417633057. +[ Thu Sep 15 20:21:29 2022 ] Top1: 63.52% +[ Thu Sep 15 20:21:29 2022 ] Top5: 89.91% +[ Thu Sep 15 20:21:29 2022 ] Training epoch: 130 +[ Thu Sep 15 20:21:45 2022 ] Batch(32/123) done. Loss: 0.0063 lr:0.000100 network_time: 0.0524 +[ Thu Sep 15 20:22:18 2022 ] Eval epoch: 130 +[ Thu Sep 15 20:22:40 2022 ] Mean test loss of 258 batches: 1.9370732307434082. +[ Thu Sep 15 20:22:41 2022 ] Top1: 62.17% +[ Thu Sep 15 20:22:41 2022 ] Top5: 89.35% +[ Thu Sep 15 20:22:41 2022 ] Training epoch: 131 +[ Thu Sep 15 20:22:48 2022 ] Batch(9/123) done. Loss: 0.0051 lr:0.000100 network_time: 0.0560 +[ Thu Sep 15 20:23:25 2022 ] Batch(109/123) done. Loss: 0.0128 lr:0.000100 network_time: 0.0560 +[ Thu Sep 15 20:23:30 2022 ] Eval epoch: 131 +[ Thu Sep 15 20:23:53 2022 ] Mean test loss of 258 batches: 1.851426124572754. 
+[ Thu Sep 15 20:23:53 2022 ] Top1: 63.35% +[ Thu Sep 15 20:23:53 2022 ] Top5: 89.80% +[ Thu Sep 15 20:23:53 2022 ] Training epoch: 132 +[ Thu Sep 15 20:24:28 2022 ] Batch(86/123) done. Loss: 0.0055 lr:0.000100 network_time: 0.0470 +[ Thu Sep 15 20:24:42 2022 ] Eval epoch: 132 +[ Thu Sep 15 20:25:04 2022 ] Mean test loss of 258 batches: 1.8273863792419434. +[ Thu Sep 15 20:25:04 2022 ] Top1: 63.27% +[ Thu Sep 15 20:25:04 2022 ] Top5: 89.86% +[ Thu Sep 15 20:25:05 2022 ] Training epoch: 133 +[ Thu Sep 15 20:25:32 2022 ] Batch(63/123) done. Loss: 0.0160 lr:0.000100 network_time: 0.0491 +[ Thu Sep 15 20:25:53 2022 ] Eval epoch: 133 +[ Thu Sep 15 20:26:16 2022 ] Mean test loss of 258 batches: 1.9667322635650635. +[ Thu Sep 15 20:26:16 2022 ] Top1: 61.92% +[ Thu Sep 15 20:26:16 2022 ] Top5: 88.80% +[ Thu Sep 15 20:26:16 2022 ] Training epoch: 134 +[ Thu Sep 15 20:26:35 2022 ] Batch(40/123) done. Loss: 0.0103 lr:0.000100 network_time: 0.0457 +[ Thu Sep 15 20:27:06 2022 ] Eval epoch: 134 +[ Thu Sep 15 20:27:28 2022 ] Mean test loss of 258 batches: 1.7689836025238037. +[ Thu Sep 15 20:27:28 2022 ] Top1: 63.86% +[ Thu Sep 15 20:27:28 2022 ] Top5: 90.31% +[ Thu Sep 15 20:27:28 2022 ] Training epoch: 135 +[ Thu Sep 15 20:27:38 2022 ] Batch(17/123) done. Loss: 0.0054 lr:0.000100 network_time: 0.0543 +[ Thu Sep 15 20:28:15 2022 ] Batch(117/123) done. Loss: 0.0125 lr:0.000100 network_time: 0.0534 +[ Thu Sep 15 20:28:17 2022 ] Eval epoch: 135 +[ Thu Sep 15 20:28:39 2022 ] Mean test loss of 258 batches: 1.8875691890716553. +[ Thu Sep 15 20:28:39 2022 ] Top1: 62.49% +[ Thu Sep 15 20:28:39 2022 ] Top5: 89.44% +[ Thu Sep 15 20:28:39 2022 ] Training epoch: 136 +[ Thu Sep 15 20:29:18 2022 ] Batch(94/123) done. Loss: 0.0068 lr:0.000100 network_time: 0.0499 +[ Thu Sep 15 20:29:29 2022 ] Eval epoch: 136 +[ Thu Sep 15 20:29:51 2022 ] Mean test loss of 258 batches: 2.03627610206604. 
+[ Thu Sep 15 20:29:51 2022 ] Top1: 60.84% +[ Thu Sep 15 20:29:51 2022 ] Top5: 88.41% +[ Thu Sep 15 20:29:51 2022 ] Training epoch: 137 +[ Thu Sep 15 20:30:21 2022 ] Batch(71/123) done. Loss: 0.0097 lr:0.000100 network_time: 0.0516 +[ Thu Sep 15 20:30:41 2022 ] Eval epoch: 137 +[ Thu Sep 15 20:31:03 2022 ] Mean test loss of 258 batches: 1.8615801334381104. +[ Thu Sep 15 20:31:03 2022 ] Top1: 62.60% +[ Thu Sep 15 20:31:03 2022 ] Top5: 89.79% +[ Thu Sep 15 20:31:03 2022 ] Training epoch: 138 +[ Thu Sep 15 20:31:24 2022 ] Batch(48/123) done. Loss: 0.0286 lr:0.000100 network_time: 0.0510 +[ Thu Sep 15 20:31:52 2022 ] Eval epoch: 138 +[ Thu Sep 15 20:32:14 2022 ] Mean test loss of 258 batches: 1.9517872333526611. +[ Thu Sep 15 20:32:15 2022 ] Top1: 61.99% +[ Thu Sep 15 20:32:15 2022 ] Top5: 89.02% +[ Thu Sep 15 20:32:15 2022 ] Training epoch: 139 +[ Thu Sep 15 20:32:28 2022 ] Batch(25/123) done. Loss: 0.0040 lr:0.000100 network_time: 0.0492 +[ Thu Sep 15 20:33:04 2022 ] Eval epoch: 139 +[ Thu Sep 15 20:33:26 2022 ] Mean test loss of 258 batches: 1.9645498991012573. +[ Thu Sep 15 20:33:26 2022 ] Top1: 62.20% +[ Thu Sep 15 20:33:26 2022 ] Top5: 88.99% +[ Thu Sep 15 20:33:26 2022 ] Training epoch: 140 +[ Thu Sep 15 20:33:31 2022 ] Batch(2/123) done. Loss: 0.0057 lr:0.000100 network_time: 0.0496 +[ Thu Sep 15 20:34:07 2022 ] Batch(102/123) done. Loss: 0.0084 lr:0.000100 network_time: 0.0479 +[ Thu Sep 15 20:34:15 2022 ] Eval epoch: 140 +[ Thu Sep 15 20:34:37 2022 ] Mean test loss of 258 batches: 1.9595528841018677. 
+[ Thu Sep 15 20:34:37 2022 ] Top1: 61.40% +[ Thu Sep 15 20:34:37 2022 ] Top5: 88.99% diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_xsub/shift_gcn.py b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_xsub/shift_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..0731e82fdb8bd0ae2e4c4ef4f29f7aab0c458bf4 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_bone_xsub/shift_gcn.py @@ -0,0 +1,216 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math + +import sys +sys.path.append("./model/Temporal_shift/") + +from cuda.shift import Shift + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class Shift_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(Shift_tcn, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.bn = nn.BatchNorm2d(in_channels) + self.bn2 = nn.BatchNorm2d(in_channels) + bn_init(self.bn2, 1) + self.relu = nn.ReLU(inplace=True) + self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1) + self.shift_out = Shift(channel=out_channels, 
stride=stride, init_scale=1) + + self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1) + nn.init.kaiming_normal(self.temporal_linear.weight, mode='fan_out') + + def forward(self, x): + x = self.bn(x) + # shift1 + x = self.shift_in(x) + x = self.temporal_linear(x) + x = self.relu(x) + # shift2 + x = self.shift_out(x) + x = self.bn2(x) + return x + + +class Shift_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3): + super(Shift_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0,math.sqrt(1.0/out_channels)) + + self.Linear_bias = nn.Parameter(torch.zeros(1,1,out_channels,requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Linear_bias, 0) + + self.Feature_Mask = nn.Parameter(torch.ones(1,25,in_channels, requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Feature_Mask, 0) + + self.bn = nn.BatchNorm1d(25*out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + + index_array = np.empty(25*in_channels).astype(np.int) + for i in range(25): + for j in range(in_channels): + index_array[i*in_channels + j] = (i*in_channels + j + j*in_channels)%(in_channels*25) + self.shift_in = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + index_array = np.empty(25*out_channels).astype(np.int) + for i in range(25): + for j in range(out_channels): + index_array[i*out_channels + j] = (i*out_channels + j - j*out_channels)%(out_channels*25) + self.shift_out = 
nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + + def forward(self, x0): + n, c, t, v = x0.size() + x = x0.permute(0,2,3,1).contiguous() + + # shift1 + x = x.view(n*t,v*c) + x = torch.index_select(x, 1, self.shift_in) + x = x.view(n*t,v,c) + x = x * (torch.tanh(self.Feature_Mask)+1) + + x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous() # nt,v,c + x = x + self.Linear_bias + + # shift2 + x = x.view(n*t,-1) + x = torch.index_select(x, 1, self.shift_out) + x = self.bn(x) + x = x.view(n,t,v,self.out_channels).permute(0,3,1,2) # n,c,t,v + + x = x + self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = Shift_gcn(in_channels, out_channels, A) + self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride) + self.relu = nn.ReLU() + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + else: + self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride) + + def forward(self, x): + x = self.tcn1(self.gcn1(x)) + self.residual(x) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A) + self.l3 = TCN_GCN_unit(64, 64, A) + self.l4 = TCN_GCN_unit(64, 64, A) + self.l5 = TCN_GCN_unit(64, 128, A, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A) + self.l7 = TCN_GCN_unit(128, 128, A) + self.l8 = TCN_GCN_unit(128, 256, A, stride=2) + self.l9 = TCN_GCN_unit(256, 256, 
A) + self.l10 = TCN_GCN_unit(256, 256, A) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x) + x = self.l2(x) + x = self.l3(x) + x = self.l4(x) + x = self.l5(x) + x = self.l6(x) + x = self.l7(x) + x = self.l8(x) + x = self.l9(x) + x = self.l10(x) + + # N*M,C,T,V + c_new = x.size(1) + x = x.view(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_motion_xsub/config.yaml b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_motion_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bf86d19c981debc8d0f6d41c1411aedeea860755 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_motion_xsub/config.yaml @@ -0,0 +1,56 @@ +Experiment_name: ntu_ShiftGCN_joint_motion_xsub +base_lr: 0.1 +batch_size: 64 +config: ./config/nturgbd-cross-subject/train_joint_motion.yaml +device: +- 6 +- 7 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_ShiftGCN_joint_motion_xsub +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_joint_motion.npy + label_path: 
/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_joint_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_ShiftGCN_joint_motion_xsub diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_motion_xsub/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_motion_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d24fcd6af2ddaed0b9b69519dd5c55a60f6a948f --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_motion_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b69d62a09c755f915c0f11a163f96d8e0008156465c096689f02d91214028ee0 +size 4979902 diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_motion_xsub/log.txt b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_motion_xsub/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..3a308c3425a8295d8338dcc031787c1e15d796cd --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_motion_xsub/log.txt @@ -0,0 +1,875 @@ +[ Wed Sep 14 13:30:21 2022 ] Parameters: +{'work_dir': './work_dir/ntu_ShiftGCN_joint_motion_xsub', 'model_saved_name': './save_models/ntu_ShiftGCN_joint_motion_xsub', 'Experiment_name': 'ntu_ShiftGCN_joint_motion_xsub', 'config': './config/nturgbd-cross-subject/train_joint_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': 
'/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [6, 7], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Wed Sep 14 13:30:21 2022 ] Training epoch: 1 +[ Wed Sep 14 13:31:39 2022 ] Batch(99/123) done. Loss: 1.8322 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 13:31:56 2022 ] Eval epoch: 1 +[ Wed Sep 14 13:32:29 2022 ] Mean test loss of 258 batches: 5.421380996704102. +[ Wed Sep 14 13:32:29 2022 ] Top1: 16.31% +[ Wed Sep 14 13:32:29 2022 ] Top5: 41.85% +[ Wed Sep 14 13:32:29 2022 ] Training epoch: 2 +[ Wed Sep 14 13:33:28 2022 ] Batch(76/123) done. Loss: 1.7521 lr:0.100000 network_time: 0.0322 +[ Wed Sep 14 13:34:02 2022 ] Eval epoch: 2 +[ Wed Sep 14 13:34:35 2022 ] Mean test loss of 258 batches: 3.908324956893921. +[ Wed Sep 14 13:34:35 2022 ] Top1: 26.00% +[ Wed Sep 14 13:34:35 2022 ] Top5: 55.09% +[ Wed Sep 14 13:34:35 2022 ] Training epoch: 3 +[ Wed Sep 14 13:35:17 2022 ] Batch(53/123) done. Loss: 1.3974 lr:0.100000 network_time: 0.0291 +[ Wed Sep 14 13:36:08 2022 ] Eval epoch: 3 +[ Wed Sep 14 13:36:41 2022 ] Mean test loss of 258 batches: 3.634312391281128. 
+[ Wed Sep 14 13:36:41 2022 ] Top1: 31.05% +[ Wed Sep 14 13:36:41 2022 ] Top5: 64.15% +[ Wed Sep 14 13:36:41 2022 ] Training epoch: 4 +[ Wed Sep 14 13:37:07 2022 ] Batch(30/123) done. Loss: 1.4544 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 13:38:14 2022 ] Eval epoch: 4 +[ Wed Sep 14 13:38:47 2022 ] Mean test loss of 258 batches: 3.0245800018310547. +[ Wed Sep 14 13:38:47 2022 ] Top1: 37.07% +[ Wed Sep 14 13:38:47 2022 ] Top5: 71.13% +[ Wed Sep 14 13:38:47 2022 ] Training epoch: 5 +[ Wed Sep 14 13:38:56 2022 ] Batch(7/123) done. Loss: 1.3769 lr:0.100000 network_time: 0.0284 +[ Wed Sep 14 13:40:09 2022 ] Batch(107/123) done. Loss: 0.9981 lr:0.100000 network_time: 0.0323 +[ Wed Sep 14 13:40:20 2022 ] Eval epoch: 5 +[ Wed Sep 14 13:40:53 2022 ] Mean test loss of 258 batches: 3.6688332557678223. +[ Wed Sep 14 13:40:53 2022 ] Top1: 37.91% +[ Wed Sep 14 13:40:53 2022 ] Top5: 72.09% +[ Wed Sep 14 13:40:53 2022 ] Training epoch: 6 +[ Wed Sep 14 13:41:58 2022 ] Batch(84/123) done. Loss: 1.0236 lr:0.100000 network_time: 0.0286 +[ Wed Sep 14 13:42:26 2022 ] Eval epoch: 6 +[ Wed Sep 14 13:42:59 2022 ] Mean test loss of 258 batches: 3.80401349067688. +[ Wed Sep 14 13:42:59 2022 ] Top1: 35.28% +[ Wed Sep 14 13:42:59 2022 ] Top5: 69.93% +[ Wed Sep 14 13:42:59 2022 ] Training epoch: 7 +[ Wed Sep 14 13:43:48 2022 ] Batch(61/123) done. Loss: 0.8047 lr:0.100000 network_time: 0.0289 +[ Wed Sep 14 13:44:32 2022 ] Eval epoch: 7 +[ Wed Sep 14 13:45:06 2022 ] Mean test loss of 258 batches: 2.357377290725708. +[ Wed Sep 14 13:45:06 2022 ] Top1: 47.07% +[ Wed Sep 14 13:45:06 2022 ] Top5: 79.77% +[ Wed Sep 14 13:45:06 2022 ] Training epoch: 8 +[ Wed Sep 14 13:45:37 2022 ] Batch(38/123) done. Loss: 0.9309 lr:0.100000 network_time: 0.0315 +[ Wed Sep 14 13:46:39 2022 ] Eval epoch: 8 +[ Wed Sep 14 13:47:12 2022 ] Mean test loss of 258 batches: 2.9091107845306396. 
+[ Wed Sep 14 13:47:12 2022 ] Top1: 44.64% +[ Wed Sep 14 13:47:12 2022 ] Top5: 77.13% +[ Wed Sep 14 13:47:13 2022 ] Training epoch: 9 +[ Wed Sep 14 13:47:28 2022 ] Batch(15/123) done. Loss: 0.7382 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 13:48:40 2022 ] Batch(115/123) done. Loss: 0.6233 lr:0.100000 network_time: 0.0314 +[ Wed Sep 14 13:48:46 2022 ] Eval epoch: 9 +[ Wed Sep 14 13:49:19 2022 ] Mean test loss of 258 batches: 2.300452947616577. +[ Wed Sep 14 13:49:19 2022 ] Top1: 49.20% +[ Wed Sep 14 13:49:19 2022 ] Top5: 83.79% +[ Wed Sep 14 13:49:19 2022 ] Training epoch: 10 +[ Wed Sep 14 13:50:30 2022 ] Batch(92/123) done. Loss: 0.8082 lr:0.100000 network_time: 0.0325 +[ Wed Sep 14 13:50:52 2022 ] Eval epoch: 10 +[ Wed Sep 14 13:51:25 2022 ] Mean test loss of 258 batches: 2.6167848110198975. +[ Wed Sep 14 13:51:25 2022 ] Top1: 40.61% +[ Wed Sep 14 13:51:25 2022 ] Top5: 75.19% +[ Wed Sep 14 13:51:25 2022 ] Training epoch: 11 +[ Wed Sep 14 13:52:19 2022 ] Batch(69/123) done. Loss: 0.7017 lr:0.100000 network_time: 0.0365 +[ Wed Sep 14 13:52:58 2022 ] Eval epoch: 11 +[ Wed Sep 14 13:53:31 2022 ] Mean test loss of 258 batches: 2.6783909797668457. +[ Wed Sep 14 13:53:31 2022 ] Top1: 44.78% +[ Wed Sep 14 13:53:31 2022 ] Top5: 79.88% +[ Wed Sep 14 13:53:31 2022 ] Training epoch: 12 +[ Wed Sep 14 13:54:09 2022 ] Batch(46/123) done. Loss: 0.6095 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 13:55:04 2022 ] Eval epoch: 12 +[ Wed Sep 14 13:55:38 2022 ] Mean test loss of 258 batches: 3.100949764251709. +[ Wed Sep 14 13:55:38 2022 ] Top1: 46.73% +[ Wed Sep 14 13:55:38 2022 ] Top5: 81.48% +[ Wed Sep 14 13:55:38 2022 ] Training epoch: 13 +[ Wed Sep 14 13:55:59 2022 ] Batch(23/123) done. Loss: 0.3895 lr:0.100000 network_time: 0.0285 +[ Wed Sep 14 13:57:11 2022 ] Eval epoch: 13 +[ Wed Sep 14 13:57:44 2022 ] Mean test loss of 258 batches: 2.872067451477051. 
+[ Wed Sep 14 13:57:44 2022 ] Top1: 42.58% +[ Wed Sep 14 13:57:44 2022 ] Top5: 77.13% +[ Wed Sep 14 13:57:44 2022 ] Training epoch: 14 +[ Wed Sep 14 13:57:48 2022 ] Batch(0/123) done. Loss: 0.3612 lr:0.100000 network_time: 0.0457 +[ Wed Sep 14 13:59:01 2022 ] Batch(100/123) done. Loss: 0.4800 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 13:59:17 2022 ] Eval epoch: 14 +[ Wed Sep 14 13:59:49 2022 ] Mean test loss of 258 batches: 3.2104737758636475. +[ Wed Sep 14 13:59:50 2022 ] Top1: 52.74% +[ Wed Sep 14 13:59:50 2022 ] Top5: 83.59% +[ Wed Sep 14 13:59:50 2022 ] Training epoch: 15 +[ Wed Sep 14 14:00:49 2022 ] Batch(77/123) done. Loss: 0.5065 lr:0.100000 network_time: 0.0308 +[ Wed Sep 14 14:01:22 2022 ] Eval epoch: 15 +[ Wed Sep 14 14:01:55 2022 ] Mean test loss of 258 batches: 2.190570116043091. +[ Wed Sep 14 14:01:55 2022 ] Top1: 53.07% +[ Wed Sep 14 14:01:55 2022 ] Top5: 85.18% +[ Wed Sep 14 14:01:55 2022 ] Training epoch: 16 +[ Wed Sep 14 14:02:39 2022 ] Batch(54/123) done. Loss: 0.4220 lr:0.100000 network_time: 0.0281 +[ Wed Sep 14 14:03:28 2022 ] Eval epoch: 16 +[ Wed Sep 14 14:04:02 2022 ] Mean test loss of 258 batches: 2.1734108924865723. +[ Wed Sep 14 14:04:02 2022 ] Top1: 47.49% +[ Wed Sep 14 14:04:02 2022 ] Top5: 81.16% +[ Wed Sep 14 14:04:02 2022 ] Training epoch: 17 +[ Wed Sep 14 14:04:28 2022 ] Batch(31/123) done. Loss: 0.5759 lr:0.100000 network_time: 0.0318 +[ Wed Sep 14 14:05:35 2022 ] Eval epoch: 17 +[ Wed Sep 14 14:06:08 2022 ] Mean test loss of 258 batches: 3.361882209777832. +[ Wed Sep 14 14:06:08 2022 ] Top1: 44.20% +[ Wed Sep 14 14:06:08 2022 ] Top5: 79.01% +[ Wed Sep 14 14:06:08 2022 ] Training epoch: 18 +[ Wed Sep 14 14:06:18 2022 ] Batch(8/123) done. Loss: 0.2408 lr:0.100000 network_time: 0.0305 +[ Wed Sep 14 14:07:30 2022 ] Batch(108/123) done. Loss: 0.3474 lr:0.100000 network_time: 0.0312 +[ Wed Sep 14 14:07:41 2022 ] Eval epoch: 18 +[ Wed Sep 14 14:08:14 2022 ] Mean test loss of 258 batches: 1.9666603803634644. 
+[ Wed Sep 14 14:08:14 2022 ] Top1: 53.97% +[ Wed Sep 14 14:08:14 2022 ] Top5: 85.68% +[ Wed Sep 14 14:08:14 2022 ] Training epoch: 19 +[ Wed Sep 14 14:09:20 2022 ] Batch(85/123) done. Loss: 0.3814 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 14:09:47 2022 ] Eval epoch: 19 +[ Wed Sep 14 14:10:20 2022 ] Mean test loss of 258 batches: 2.295517921447754. +[ Wed Sep 14 14:10:20 2022 ] Top1: 53.63% +[ Wed Sep 14 14:10:20 2022 ] Top5: 84.36% +[ Wed Sep 14 14:10:20 2022 ] Training epoch: 20 +[ Wed Sep 14 14:11:09 2022 ] Batch(62/123) done. Loss: 0.2456 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 14:11:52 2022 ] Eval epoch: 20 +[ Wed Sep 14 14:12:25 2022 ] Mean test loss of 258 batches: 2.4664697647094727. +[ Wed Sep 14 14:12:25 2022 ] Top1: 47.50% +[ Wed Sep 14 14:12:25 2022 ] Top5: 82.88% +[ Wed Sep 14 14:12:25 2022 ] Training epoch: 21 +[ Wed Sep 14 14:12:58 2022 ] Batch(39/123) done. Loss: 0.4303 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 14:13:58 2022 ] Eval epoch: 21 +[ Wed Sep 14 14:14:31 2022 ] Mean test loss of 258 batches: 2.3439109325408936. +[ Wed Sep 14 14:14:31 2022 ] Top1: 52.30% +[ Wed Sep 14 14:14:32 2022 ] Top5: 86.02% +[ Wed Sep 14 14:14:32 2022 ] Training epoch: 22 +[ Wed Sep 14 14:14:47 2022 ] Batch(16/123) done. Loss: 0.2752 lr:0.100000 network_time: 0.0420 +[ Wed Sep 14 14:16:00 2022 ] Batch(116/123) done. Loss: 0.2960 lr:0.100000 network_time: 0.0303 +[ Wed Sep 14 14:16:04 2022 ] Eval epoch: 22 +[ Wed Sep 14 14:16:37 2022 ] Mean test loss of 258 batches: 2.3402199745178223. +[ Wed Sep 14 14:16:38 2022 ] Top1: 49.31% +[ Wed Sep 14 14:16:38 2022 ] Top5: 82.39% +[ Wed Sep 14 14:16:38 2022 ] Training epoch: 23 +[ Wed Sep 14 14:17:49 2022 ] Batch(93/123) done. Loss: 0.5902 lr:0.100000 network_time: 0.0383 +[ Wed Sep 14 14:18:11 2022 ] Eval epoch: 23 +[ Wed Sep 14 14:18:44 2022 ] Mean test loss of 258 batches: 3.4021964073181152. 
+[ Wed Sep 14 14:18:44 2022 ] Top1: 44.00% +[ Wed Sep 14 14:18:44 2022 ] Top5: 78.38% +[ Wed Sep 14 14:18:44 2022 ] Training epoch: 24 +[ Wed Sep 14 14:19:39 2022 ] Batch(70/123) done. Loss: 0.2657 lr:0.100000 network_time: 0.0348 +[ Wed Sep 14 14:20:17 2022 ] Eval epoch: 24 +[ Wed Sep 14 14:20:50 2022 ] Mean test loss of 258 batches: 2.8668456077575684. +[ Wed Sep 14 14:20:50 2022 ] Top1: 48.26% +[ Wed Sep 14 14:20:50 2022 ] Top5: 82.93% +[ Wed Sep 14 14:20:50 2022 ] Training epoch: 25 +[ Wed Sep 14 14:21:28 2022 ] Batch(47/123) done. Loss: 0.3772 lr:0.100000 network_time: 0.0304 +[ Wed Sep 14 14:22:23 2022 ] Eval epoch: 25 +[ Wed Sep 14 14:22:56 2022 ] Mean test loss of 258 batches: 3.23077130317688. +[ Wed Sep 14 14:22:56 2022 ] Top1: 46.24% +[ Wed Sep 14 14:22:56 2022 ] Top5: 78.16% +[ Wed Sep 14 14:22:56 2022 ] Training epoch: 26 +[ Wed Sep 14 14:23:17 2022 ] Batch(24/123) done. Loss: 0.3657 lr:0.100000 network_time: 0.0288 +[ Wed Sep 14 14:24:29 2022 ] Eval epoch: 26 +[ Wed Sep 14 14:25:02 2022 ] Mean test loss of 258 batches: 2.7416818141937256. +[ Wed Sep 14 14:25:02 2022 ] Top1: 51.59% +[ Wed Sep 14 14:25:03 2022 ] Top5: 82.94% +[ Wed Sep 14 14:25:03 2022 ] Training epoch: 27 +[ Wed Sep 14 14:25:07 2022 ] Batch(1/123) done. Loss: 0.1882 lr:0.100000 network_time: 0.0299 +[ Wed Sep 14 14:26:20 2022 ] Batch(101/123) done. Loss: 0.2472 lr:0.100000 network_time: 0.0290 +[ Wed Sep 14 14:26:36 2022 ] Eval epoch: 27 +[ Wed Sep 14 14:27:08 2022 ] Mean test loss of 258 batches: 2.7824652194976807. +[ Wed Sep 14 14:27:09 2022 ] Top1: 46.99% +[ Wed Sep 14 14:27:09 2022 ] Top5: 80.48% +[ Wed Sep 14 14:27:09 2022 ] Training epoch: 28 +[ Wed Sep 14 14:28:09 2022 ] Batch(78/123) done. Loss: 0.4206 lr:0.100000 network_time: 0.0333 +[ Wed Sep 14 14:28:41 2022 ] Eval epoch: 28 +[ Wed Sep 14 14:29:14 2022 ] Mean test loss of 258 batches: 2.5910394191741943. 
+[ Wed Sep 14 14:29:14 2022 ] Top1: 52.99% +[ Wed Sep 14 14:29:14 2022 ] Top5: 84.35% +[ Wed Sep 14 14:29:14 2022 ] Training epoch: 29 +[ Wed Sep 14 14:29:58 2022 ] Batch(55/123) done. Loss: 0.4513 lr:0.100000 network_time: 0.0272 +[ Wed Sep 14 14:30:47 2022 ] Eval epoch: 29 +[ Wed Sep 14 14:31:19 2022 ] Mean test loss of 258 batches: 3.8707644939422607. +[ Wed Sep 14 14:31:19 2022 ] Top1: 42.49% +[ Wed Sep 14 14:31:19 2022 ] Top5: 75.29% +[ Wed Sep 14 14:31:19 2022 ] Training epoch: 30 +[ Wed Sep 14 14:31:46 2022 ] Batch(32/123) done. Loss: 0.2088 lr:0.100000 network_time: 0.0335 +[ Wed Sep 14 14:32:52 2022 ] Eval epoch: 30 +[ Wed Sep 14 14:33:25 2022 ] Mean test loss of 258 batches: 2.0434672832489014. +[ Wed Sep 14 14:33:25 2022 ] Top1: 53.36% +[ Wed Sep 14 14:33:25 2022 ] Top5: 85.15% +[ Wed Sep 14 14:33:25 2022 ] Training epoch: 31 +[ Wed Sep 14 14:33:36 2022 ] Batch(9/123) done. Loss: 0.1743 lr:0.100000 network_time: 0.0293 +[ Wed Sep 14 14:34:48 2022 ] Batch(109/123) done. Loss: 0.1601 lr:0.100000 network_time: 0.0328 +[ Wed Sep 14 14:34:58 2022 ] Eval epoch: 31 +[ Wed Sep 14 14:35:30 2022 ] Mean test loss of 258 batches: 2.6095051765441895. +[ Wed Sep 14 14:35:31 2022 ] Top1: 52.76% +[ Wed Sep 14 14:35:31 2022 ] Top5: 83.31% +[ Wed Sep 14 14:35:31 2022 ] Training epoch: 32 +[ Wed Sep 14 14:36:37 2022 ] Batch(86/123) done. Loss: 0.2414 lr:0.100000 network_time: 0.0278 +[ Wed Sep 14 14:37:03 2022 ] Eval epoch: 32 +[ Wed Sep 14 14:37:36 2022 ] Mean test loss of 258 batches: 2.538578510284424. +[ Wed Sep 14 14:37:36 2022 ] Top1: 53.28% +[ Wed Sep 14 14:37:36 2022 ] Top5: 85.77% +[ Wed Sep 14 14:37:36 2022 ] Training epoch: 33 +[ Wed Sep 14 14:38:26 2022 ] Batch(63/123) done. Loss: 0.3211 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 14:39:09 2022 ] Eval epoch: 33 +[ Wed Sep 14 14:39:41 2022 ] Mean test loss of 258 batches: 2.6285548210144043. 
+[ Wed Sep 14 14:39:41 2022 ] Top1: 52.92% +[ Wed Sep 14 14:39:42 2022 ] Top5: 84.96% +[ Wed Sep 14 14:39:42 2022 ] Training epoch: 34 +[ Wed Sep 14 14:40:15 2022 ] Batch(40/123) done. Loss: 0.1880 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 14:41:14 2022 ] Eval epoch: 34 +[ Wed Sep 14 14:41:47 2022 ] Mean test loss of 258 batches: 2.0833334922790527. +[ Wed Sep 14 14:41:47 2022 ] Top1: 53.60% +[ Wed Sep 14 14:41:47 2022 ] Top5: 86.80% +[ Wed Sep 14 14:41:47 2022 ] Training epoch: 35 +[ Wed Sep 14 14:42:03 2022 ] Batch(17/123) done. Loss: 0.1468 lr:0.100000 network_time: 0.0350 +[ Wed Sep 14 14:43:16 2022 ] Batch(117/123) done. Loss: 0.2022 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 14:43:20 2022 ] Eval epoch: 35 +[ Wed Sep 14 14:43:52 2022 ] Mean test loss of 258 batches: 2.6050968170166016. +[ Wed Sep 14 14:43:52 2022 ] Top1: 49.00% +[ Wed Sep 14 14:43:53 2022 ] Top5: 81.38% +[ Wed Sep 14 14:43:53 2022 ] Training epoch: 36 +[ Wed Sep 14 14:45:05 2022 ] Batch(94/123) done. Loss: 0.3228 lr:0.100000 network_time: 0.0292 +[ Wed Sep 14 14:45:25 2022 ] Eval epoch: 36 +[ Wed Sep 14 14:45:58 2022 ] Mean test loss of 258 batches: 2.1508259773254395. +[ Wed Sep 14 14:45:58 2022 ] Top1: 56.06% +[ Wed Sep 14 14:45:58 2022 ] Top5: 84.73% +[ Wed Sep 14 14:45:58 2022 ] Training epoch: 37 +[ Wed Sep 14 14:46:54 2022 ] Batch(71/123) done. Loss: 0.5055 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 14:47:31 2022 ] Eval epoch: 37 +[ Wed Sep 14 14:48:04 2022 ] Mean test loss of 258 batches: 2.3189916610717773. +[ Wed Sep 14 14:48:04 2022 ] Top1: 55.84% +[ Wed Sep 14 14:48:04 2022 ] Top5: 84.73% +[ Wed Sep 14 14:48:04 2022 ] Training epoch: 38 +[ Wed Sep 14 14:48:43 2022 ] Batch(48/123) done. Loss: 0.2248 lr:0.100000 network_time: 0.0281 +[ Wed Sep 14 14:49:37 2022 ] Eval epoch: 38 +[ Wed Sep 14 14:50:10 2022 ] Mean test loss of 258 batches: 3.328526496887207. 
+[ Wed Sep 14 14:50:10 2022 ] Top1: 40.93% +[ Wed Sep 14 14:50:10 2022 ] Top5: 77.16% +[ Wed Sep 14 14:50:10 2022 ] Training epoch: 39 +[ Wed Sep 14 14:50:32 2022 ] Batch(25/123) done. Loss: 0.1325 lr:0.100000 network_time: 0.0321 +[ Wed Sep 14 14:51:42 2022 ] Eval epoch: 39 +[ Wed Sep 14 14:52:15 2022 ] Mean test loss of 258 batches: 3.4714365005493164. +[ Wed Sep 14 14:52:15 2022 ] Top1: 44.80% +[ Wed Sep 14 14:52:15 2022 ] Top5: 79.50% +[ Wed Sep 14 14:52:15 2022 ] Training epoch: 40 +[ Wed Sep 14 14:52:20 2022 ] Batch(2/123) done. Loss: 0.2373 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 14:53:33 2022 ] Batch(102/123) done. Loss: 0.2601 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 14:53:48 2022 ] Eval epoch: 40 +[ Wed Sep 14 14:54:20 2022 ] Mean test loss of 258 batches: 2.6973369121551514. +[ Wed Sep 14 14:54:20 2022 ] Top1: 50.18% +[ Wed Sep 14 14:54:21 2022 ] Top5: 82.68% +[ Wed Sep 14 14:54:21 2022 ] Training epoch: 41 +[ Wed Sep 14 14:55:22 2022 ] Batch(79/123) done. Loss: 0.3010 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 14:55:53 2022 ] Eval epoch: 41 +[ Wed Sep 14 14:56:26 2022 ] Mean test loss of 258 batches: 2.5908446311950684. +[ Wed Sep 14 14:56:26 2022 ] Top1: 51.38% +[ Wed Sep 14 14:56:26 2022 ] Top5: 84.08% +[ Wed Sep 14 14:56:26 2022 ] Training epoch: 42 +[ Wed Sep 14 14:57:11 2022 ] Batch(56/123) done. Loss: 0.3432 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 14:57:59 2022 ] Eval epoch: 42 +[ Wed Sep 14 14:58:32 2022 ] Mean test loss of 258 batches: 2.8294875621795654. +[ Wed Sep 14 14:58:32 2022 ] Top1: 52.04% +[ Wed Sep 14 14:58:32 2022 ] Top5: 83.27% +[ Wed Sep 14 14:58:32 2022 ] Training epoch: 43 +[ Wed Sep 14 14:59:00 2022 ] Batch(33/123) done. Loss: 0.0792 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 15:00:04 2022 ] Eval epoch: 43 +[ Wed Sep 14 15:00:37 2022 ] Mean test loss of 258 batches: 2.162405252456665. 
+[ Wed Sep 14 15:00:37 2022 ] Top1: 55.95% +[ Wed Sep 14 15:00:37 2022 ] Top5: 86.15% +[ Wed Sep 14 15:00:37 2022 ] Training epoch: 44 +[ Wed Sep 14 15:00:48 2022 ] Batch(10/123) done. Loss: 0.1217 lr:0.100000 network_time: 0.0304 +[ Wed Sep 14 15:02:01 2022 ] Batch(110/123) done. Loss: 0.1810 lr:0.100000 network_time: 0.0331 +[ Wed Sep 14 15:02:10 2022 ] Eval epoch: 44 +[ Wed Sep 14 15:02:42 2022 ] Mean test loss of 258 batches: 2.7251181602478027. +[ Wed Sep 14 15:02:43 2022 ] Top1: 51.24% +[ Wed Sep 14 15:02:43 2022 ] Top5: 83.48% +[ Wed Sep 14 15:02:43 2022 ] Training epoch: 45 +[ Wed Sep 14 15:03:49 2022 ] Batch(87/123) done. Loss: 0.1741 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 15:04:15 2022 ] Eval epoch: 45 +[ Wed Sep 14 15:04:48 2022 ] Mean test loss of 258 batches: 2.7098324298858643. +[ Wed Sep 14 15:04:48 2022 ] Top1: 51.33% +[ Wed Sep 14 15:04:48 2022 ] Top5: 82.89% +[ Wed Sep 14 15:04:48 2022 ] Training epoch: 46 +[ Wed Sep 14 15:05:38 2022 ] Batch(64/123) done. Loss: 0.1355 lr:0.100000 network_time: 0.0331 +[ Wed Sep 14 15:06:20 2022 ] Eval epoch: 46 +[ Wed Sep 14 15:06:53 2022 ] Mean test loss of 258 batches: 2.519627332687378. +[ Wed Sep 14 15:06:53 2022 ] Top1: 54.52% +[ Wed Sep 14 15:06:53 2022 ] Top5: 84.55% +[ Wed Sep 14 15:06:53 2022 ] Training epoch: 47 +[ Wed Sep 14 15:07:27 2022 ] Batch(41/123) done. Loss: 0.3258 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 15:08:26 2022 ] Eval epoch: 47 +[ Wed Sep 14 15:08:59 2022 ] Mean test loss of 258 batches: 2.6793875694274902. +[ Wed Sep 14 15:08:59 2022 ] Top1: 49.95% +[ Wed Sep 14 15:08:59 2022 ] Top5: 81.57% +[ Wed Sep 14 15:08:59 2022 ] Training epoch: 48 +[ Wed Sep 14 15:09:16 2022 ] Batch(18/123) done. Loss: 0.1776 lr:0.100000 network_time: 0.0327 +[ Wed Sep 14 15:10:28 2022 ] Batch(118/123) done. Loss: 0.1763 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 15:10:31 2022 ] Eval epoch: 48 +[ Wed Sep 14 15:11:04 2022 ] Mean test loss of 258 batches: 2.500056505203247. 
+[ Wed Sep 14 15:11:04 2022 ] Top1: 53.82% +[ Wed Sep 14 15:11:04 2022 ] Top5: 79.20% +[ Wed Sep 14 15:11:05 2022 ] Training epoch: 49 +[ Wed Sep 14 15:12:17 2022 ] Batch(95/123) done. Loss: 0.1675 lr:0.100000 network_time: 0.0305 +[ Wed Sep 14 15:12:37 2022 ] Eval epoch: 49 +[ Wed Sep 14 15:13:10 2022 ] Mean test loss of 258 batches: 2.3841023445129395. +[ Wed Sep 14 15:13:10 2022 ] Top1: 55.69% +[ Wed Sep 14 15:13:10 2022 ] Top5: 85.04% +[ Wed Sep 14 15:13:10 2022 ] Training epoch: 50 +[ Wed Sep 14 15:14:06 2022 ] Batch(72/123) done. Loss: 0.2073 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 15:14:43 2022 ] Eval epoch: 50 +[ Wed Sep 14 15:15:16 2022 ] Mean test loss of 258 batches: 2.200166702270508. +[ Wed Sep 14 15:15:16 2022 ] Top1: 53.15% +[ Wed Sep 14 15:15:16 2022 ] Top5: 85.08% +[ Wed Sep 14 15:15:16 2022 ] Training epoch: 51 +[ Wed Sep 14 15:15:55 2022 ] Batch(49/123) done. Loss: 0.2201 lr:0.100000 network_time: 0.0314 +[ Wed Sep 14 15:16:48 2022 ] Eval epoch: 51 +[ Wed Sep 14 15:17:21 2022 ] Mean test loss of 258 batches: 2.511787176132202. +[ Wed Sep 14 15:17:21 2022 ] Top1: 54.19% +[ Wed Sep 14 15:17:21 2022 ] Top5: 86.07% +[ Wed Sep 14 15:17:21 2022 ] Training epoch: 52 +[ Wed Sep 14 15:17:44 2022 ] Batch(26/123) done. Loss: 0.2281 lr:0.100000 network_time: 0.0261 +[ Wed Sep 14 15:18:54 2022 ] Eval epoch: 52 +[ Wed Sep 14 15:19:26 2022 ] Mean test loss of 258 batches: 2.6677470207214355. +[ Wed Sep 14 15:19:26 2022 ] Top1: 52.61% +[ Wed Sep 14 15:19:26 2022 ] Top5: 83.11% +[ Wed Sep 14 15:19:26 2022 ] Training epoch: 53 +[ Wed Sep 14 15:19:32 2022 ] Batch(3/123) done. Loss: 0.0533 lr:0.100000 network_time: 0.0331 +[ Wed Sep 14 15:20:45 2022 ] Batch(103/123) done. Loss: 0.1253 lr:0.100000 network_time: 0.0315 +[ Wed Sep 14 15:20:59 2022 ] Eval epoch: 53 +[ Wed Sep 14 15:21:31 2022 ] Mean test loss of 258 batches: 2.311555862426758. 
+[ Wed Sep 14 15:21:31 2022 ] Top1: 51.09% +[ Wed Sep 14 15:21:31 2022 ] Top5: 84.42% +[ Wed Sep 14 15:21:32 2022 ] Training epoch: 54 +[ Wed Sep 14 15:22:33 2022 ] Batch(80/123) done. Loss: 0.3264 lr:0.100000 network_time: 0.0266 +[ Wed Sep 14 15:23:04 2022 ] Eval epoch: 54 +[ Wed Sep 14 15:23:37 2022 ] Mean test loss of 258 batches: 1.9971171617507935. +[ Wed Sep 14 15:23:37 2022 ] Top1: 58.15% +[ Wed Sep 14 15:23:37 2022 ] Top5: 87.86% +[ Wed Sep 14 15:23:37 2022 ] Training epoch: 55 +[ Wed Sep 14 15:24:23 2022 ] Batch(57/123) done. Loss: 0.1479 lr:0.100000 network_time: 0.0307 +[ Wed Sep 14 15:25:10 2022 ] Eval epoch: 55 +[ Wed Sep 14 15:25:43 2022 ] Mean test loss of 258 batches: 2.5352351665496826. +[ Wed Sep 14 15:25:43 2022 ] Top1: 51.66% +[ Wed Sep 14 15:25:43 2022 ] Top5: 83.76% +[ Wed Sep 14 15:25:43 2022 ] Training epoch: 56 +[ Wed Sep 14 15:26:12 2022 ] Batch(34/123) done. Loss: 0.0831 lr:0.100000 network_time: 0.0305 +[ Wed Sep 14 15:27:16 2022 ] Eval epoch: 56 +[ Wed Sep 14 15:27:48 2022 ] Mean test loss of 258 batches: 3.105311632156372. +[ Wed Sep 14 15:27:48 2022 ] Top1: 46.59% +[ Wed Sep 14 15:27:48 2022 ] Top5: 78.11% +[ Wed Sep 14 15:27:48 2022 ] Training epoch: 57 +[ Wed Sep 14 15:28:00 2022 ] Batch(11/123) done. Loss: 0.2058 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 15:29:13 2022 ] Batch(111/123) done. Loss: 0.0805 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 15:29:21 2022 ] Eval epoch: 57 +[ Wed Sep 14 15:29:53 2022 ] Mean test loss of 258 batches: 2.3883368968963623. +[ Wed Sep 14 15:29:53 2022 ] Top1: 55.04% +[ Wed Sep 14 15:29:53 2022 ] Top5: 84.45% +[ Wed Sep 14 15:29:53 2022 ] Training epoch: 58 +[ Wed Sep 14 15:31:01 2022 ] Batch(88/123) done. Loss: 0.1361 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 15:31:26 2022 ] Eval epoch: 58 +[ Wed Sep 14 15:31:59 2022 ] Mean test loss of 258 batches: 2.499546527862549. 
+[ Wed Sep 14 15:31:59 2022 ] Top1: 50.06% +[ Wed Sep 14 15:31:59 2022 ] Top5: 81.28% +[ Wed Sep 14 15:31:59 2022 ] Training epoch: 59 +[ Wed Sep 14 15:32:50 2022 ] Batch(65/123) done. Loss: 0.1671 lr:0.100000 network_time: 0.0313 +[ Wed Sep 14 15:33:32 2022 ] Eval epoch: 59 +[ Wed Sep 14 15:34:04 2022 ] Mean test loss of 258 batches: 2.3240156173706055. +[ Wed Sep 14 15:34:04 2022 ] Top1: 55.70% +[ Wed Sep 14 15:34:05 2022 ] Top5: 86.14% +[ Wed Sep 14 15:34:05 2022 ] Training epoch: 60 +[ Wed Sep 14 15:34:39 2022 ] Batch(42/123) done. Loss: 0.1858 lr:0.100000 network_time: 0.0314 +[ Wed Sep 14 15:35:37 2022 ] Eval epoch: 60 +[ Wed Sep 14 15:36:10 2022 ] Mean test loss of 258 batches: 3.158168315887451. +[ Wed Sep 14 15:36:10 2022 ] Top1: 45.92% +[ Wed Sep 14 15:36:10 2022 ] Top5: 78.06% +[ Wed Sep 14 15:36:10 2022 ] Training epoch: 61 +[ Wed Sep 14 15:36:28 2022 ] Batch(19/123) done. Loss: 0.0107 lr:0.010000 network_time: 0.0306 +[ Wed Sep 14 15:37:40 2022 ] Batch(119/123) done. Loss: 0.0232 lr:0.010000 network_time: 0.0257 +[ Wed Sep 14 15:37:43 2022 ] Eval epoch: 61 +[ Wed Sep 14 15:38:15 2022 ] Mean test loss of 258 batches: 1.911889910697937. +[ Wed Sep 14 15:38:15 2022 ] Top1: 62.90% +[ Wed Sep 14 15:38:15 2022 ] Top5: 89.40% +[ Wed Sep 14 15:38:15 2022 ] Training epoch: 62 +[ Wed Sep 14 15:39:29 2022 ] Batch(96/123) done. Loss: 0.0245 lr:0.010000 network_time: 0.0267 +[ Wed Sep 14 15:39:48 2022 ] Eval epoch: 62 +[ Wed Sep 14 15:40:20 2022 ] Mean test loss of 258 batches: 2.0928752422332764. +[ Wed Sep 14 15:40:20 2022 ] Top1: 62.90% +[ Wed Sep 14 15:40:21 2022 ] Top5: 89.33% +[ Wed Sep 14 15:40:21 2022 ] Training epoch: 63 +[ Wed Sep 14 15:41:17 2022 ] Batch(73/123) done. Loss: 0.0077 lr:0.010000 network_time: 0.0316 +[ Wed Sep 14 15:41:53 2022 ] Eval epoch: 63 +[ Wed Sep 14 15:42:27 2022 ] Mean test loss of 258 batches: 2.0350732803344727. 
+[ Wed Sep 14 15:42:27 2022 ] Top1: 63.79% +[ Wed Sep 14 15:42:27 2022 ] Top5: 89.59% +[ Wed Sep 14 15:42:27 2022 ] Training epoch: 64 +[ Wed Sep 14 15:43:07 2022 ] Batch(50/123) done. Loss: 0.0201 lr:0.010000 network_time: 0.0264 +[ Wed Sep 14 15:43:59 2022 ] Eval epoch: 64 +[ Wed Sep 14 15:44:32 2022 ] Mean test loss of 258 batches: 1.8492361307144165. +[ Wed Sep 14 15:44:32 2022 ] Top1: 64.04% +[ Wed Sep 14 15:44:32 2022 ] Top5: 90.05% +[ Wed Sep 14 15:44:32 2022 ] Training epoch: 65 +[ Wed Sep 14 15:44:55 2022 ] Batch(27/123) done. Loss: 0.0059 lr:0.010000 network_time: 0.0258 +[ Wed Sep 14 15:46:04 2022 ] Eval epoch: 65 +[ Wed Sep 14 15:46:37 2022 ] Mean test loss of 258 batches: 1.795392632484436. +[ Wed Sep 14 15:46:37 2022 ] Top1: 63.41% +[ Wed Sep 14 15:46:37 2022 ] Top5: 90.03% +[ Wed Sep 14 15:46:37 2022 ] Training epoch: 66 +[ Wed Sep 14 15:46:44 2022 ] Batch(4/123) done. Loss: 0.0055 lr:0.010000 network_time: 0.0255 +[ Wed Sep 14 15:47:56 2022 ] Batch(104/123) done. Loss: 0.0576 lr:0.010000 network_time: 0.0271 +[ Wed Sep 14 15:48:10 2022 ] Eval epoch: 66 +[ Wed Sep 14 15:48:42 2022 ] Mean test loss of 258 batches: 1.9341166019439697. +[ Wed Sep 14 15:48:42 2022 ] Top1: 64.14% +[ Wed Sep 14 15:48:42 2022 ] Top5: 89.90% +[ Wed Sep 14 15:48:42 2022 ] Training epoch: 67 +[ Wed Sep 14 15:49:45 2022 ] Batch(81/123) done. Loss: 0.0215 lr:0.010000 network_time: 0.0269 +[ Wed Sep 14 15:50:15 2022 ] Eval epoch: 67 +[ Wed Sep 14 15:50:48 2022 ] Mean test loss of 258 batches: 2.1112568378448486. +[ Wed Sep 14 15:50:48 2022 ] Top1: 61.59% +[ Wed Sep 14 15:50:48 2022 ] Top5: 88.41% +[ Wed Sep 14 15:50:48 2022 ] Training epoch: 68 +[ Wed Sep 14 15:51:34 2022 ] Batch(58/123) done. Loss: 0.0064 lr:0.010000 network_time: 0.0294 +[ Wed Sep 14 15:52:20 2022 ] Eval epoch: 68 +[ Wed Sep 14 15:52:53 2022 ] Mean test loss of 258 batches: 1.7816262245178223. 
+[ Wed Sep 14 15:52:53 2022 ] Top1: 64.25% +[ Wed Sep 14 15:52:53 2022 ] Top5: 90.30% +[ Wed Sep 14 15:52:53 2022 ] Training epoch: 69 +[ Wed Sep 14 15:53:22 2022 ] Batch(35/123) done. Loss: 0.0043 lr:0.010000 network_time: 0.0313 +[ Wed Sep 14 15:54:26 2022 ] Eval epoch: 69 +[ Wed Sep 14 15:54:58 2022 ] Mean test loss of 258 batches: 1.813872218132019. +[ Wed Sep 14 15:54:58 2022 ] Top1: 62.75% +[ Wed Sep 14 15:54:58 2022 ] Top5: 89.63% +[ Wed Sep 14 15:54:59 2022 ] Training epoch: 70 +[ Wed Sep 14 15:55:11 2022 ] Batch(12/123) done. Loss: 0.0021 lr:0.010000 network_time: 0.0276 +[ Wed Sep 14 15:56:24 2022 ] Batch(112/123) done. Loss: 0.0146 lr:0.010000 network_time: 0.0307 +[ Wed Sep 14 15:56:31 2022 ] Eval epoch: 70 +[ Wed Sep 14 15:57:04 2022 ] Mean test loss of 258 batches: 1.836743712425232. +[ Wed Sep 14 15:57:04 2022 ] Top1: 64.22% +[ Wed Sep 14 15:57:04 2022 ] Top5: 90.13% +[ Wed Sep 14 15:57:04 2022 ] Training epoch: 71 +[ Wed Sep 14 15:58:12 2022 ] Batch(89/123) done. Loss: 0.0099 lr:0.010000 network_time: 0.0314 +[ Wed Sep 14 15:58:37 2022 ] Eval epoch: 71 +[ Wed Sep 14 15:59:09 2022 ] Mean test loss of 258 batches: 1.9741806983947754. +[ Wed Sep 14 15:59:09 2022 ] Top1: 64.31% +[ Wed Sep 14 15:59:10 2022 ] Top5: 89.92% +[ Wed Sep 14 15:59:10 2022 ] Training epoch: 72 +[ Wed Sep 14 16:00:01 2022 ] Batch(66/123) done. Loss: 0.0041 lr:0.010000 network_time: 0.0271 +[ Wed Sep 14 16:00:42 2022 ] Eval epoch: 72 +[ Wed Sep 14 16:01:15 2022 ] Mean test loss of 258 batches: 1.8784939050674438. +[ Wed Sep 14 16:01:15 2022 ] Top1: 62.79% +[ Wed Sep 14 16:01:15 2022 ] Top5: 89.43% +[ Wed Sep 14 16:01:15 2022 ] Training epoch: 73 +[ Wed Sep 14 16:01:50 2022 ] Batch(43/123) done. Loss: 0.0081 lr:0.010000 network_time: 0.0291 +[ Wed Sep 14 16:02:48 2022 ] Eval epoch: 73 +[ Wed Sep 14 16:03:21 2022 ] Mean test loss of 258 batches: 1.9898936748504639. 
+[ Wed Sep 14 16:03:21 2022 ] Top1: 60.02% +[ Wed Sep 14 16:03:21 2022 ] Top5: 88.44% +[ Wed Sep 14 16:03:21 2022 ] Training epoch: 74 +[ Wed Sep 14 16:03:39 2022 ] Batch(20/123) done. Loss: 0.0023 lr:0.010000 network_time: 0.0536 +[ Wed Sep 14 16:04:52 2022 ] Batch(120/123) done. Loss: 0.0129 lr:0.010000 network_time: 0.0269 +[ Wed Sep 14 16:04:54 2022 ] Eval epoch: 74 +[ Wed Sep 14 16:05:26 2022 ] Mean test loss of 258 batches: 1.978050947189331. +[ Wed Sep 14 16:05:26 2022 ] Top1: 63.94% +[ Wed Sep 14 16:05:26 2022 ] Top5: 89.74% +[ Wed Sep 14 16:05:27 2022 ] Training epoch: 75 +[ Wed Sep 14 16:06:41 2022 ] Batch(97/123) done. Loss: 0.0034 lr:0.010000 network_time: 0.0278 +[ Wed Sep 14 16:06:59 2022 ] Eval epoch: 75 +[ Wed Sep 14 16:07:32 2022 ] Mean test loss of 258 batches: 1.8710790872573853. +[ Wed Sep 14 16:07:32 2022 ] Top1: 64.35% +[ Wed Sep 14 16:07:32 2022 ] Top5: 90.26% +[ Wed Sep 14 16:07:32 2022 ] Training epoch: 76 +[ Wed Sep 14 16:08:30 2022 ] Batch(74/123) done. Loss: 0.0054 lr:0.010000 network_time: 0.0272 +[ Wed Sep 14 16:09:05 2022 ] Eval epoch: 76 +[ Wed Sep 14 16:09:37 2022 ] Mean test loss of 258 batches: 1.8576815128326416. +[ Wed Sep 14 16:09:37 2022 ] Top1: 63.73% +[ Wed Sep 14 16:09:37 2022 ] Top5: 89.97% +[ Wed Sep 14 16:09:37 2022 ] Training epoch: 77 +[ Wed Sep 14 16:10:18 2022 ] Batch(51/123) done. Loss: 0.0042 lr:0.010000 network_time: 0.0267 +[ Wed Sep 14 16:11:09 2022 ] Eval epoch: 77 +[ Wed Sep 14 16:11:42 2022 ] Mean test loss of 258 batches: 2.003507375717163. +[ Wed Sep 14 16:11:42 2022 ] Top1: 59.48% +[ Wed Sep 14 16:11:43 2022 ] Top5: 88.16% +[ Wed Sep 14 16:11:43 2022 ] Training epoch: 78 +[ Wed Sep 14 16:12:07 2022 ] Batch(28/123) done. Loss: 0.0060 lr:0.010000 network_time: 0.0274 +[ Wed Sep 14 16:13:15 2022 ] Eval epoch: 78 +[ Wed Sep 14 16:13:48 2022 ] Mean test loss of 258 batches: 1.820636510848999. 
+[ Wed Sep 14 16:13:48 2022 ] Top1: 64.49% +[ Wed Sep 14 16:13:48 2022 ] Top5: 90.28% +[ Wed Sep 14 16:13:48 2022 ] Training epoch: 79 +[ Wed Sep 14 16:13:56 2022 ] Batch(5/123) done. Loss: 0.0029 lr:0.010000 network_time: 0.0282 +[ Wed Sep 14 16:15:08 2022 ] Batch(105/123) done. Loss: 0.0084 lr:0.010000 network_time: 0.0319 +[ Wed Sep 14 16:15:21 2022 ] Eval epoch: 79 +[ Wed Sep 14 16:15:54 2022 ] Mean test loss of 258 batches: 1.90288507938385. +[ Wed Sep 14 16:15:54 2022 ] Top1: 62.17% +[ Wed Sep 14 16:15:54 2022 ] Top5: 89.42% +[ Wed Sep 14 16:15:54 2022 ] Training epoch: 80 +[ Wed Sep 14 16:16:57 2022 ] Batch(82/123) done. Loss: 0.0056 lr:0.010000 network_time: 0.0267 +[ Wed Sep 14 16:17:26 2022 ] Eval epoch: 80 +[ Wed Sep 14 16:17:59 2022 ] Mean test loss of 258 batches: 1.7706035375595093. +[ Wed Sep 14 16:17:59 2022 ] Top1: 64.63% +[ Wed Sep 14 16:17:59 2022 ] Top5: 90.46% +[ Wed Sep 14 16:17:59 2022 ] Training epoch: 81 +[ Wed Sep 14 16:18:46 2022 ] Batch(59/123) done. Loss: 0.0040 lr:0.001000 network_time: 0.0296 +[ Wed Sep 14 16:19:32 2022 ] Eval epoch: 81 +[ Wed Sep 14 16:20:04 2022 ] Mean test loss of 258 batches: 1.8429886102676392. +[ Wed Sep 14 16:20:04 2022 ] Top1: 64.60% +[ Wed Sep 14 16:20:04 2022 ] Top5: 90.33% +[ Wed Sep 14 16:20:04 2022 ] Training epoch: 82 +[ Wed Sep 14 16:20:34 2022 ] Batch(36/123) done. Loss: 0.0052 lr:0.001000 network_time: 0.0295 +[ Wed Sep 14 16:21:37 2022 ] Eval epoch: 82 +[ Wed Sep 14 16:22:10 2022 ] Mean test loss of 258 batches: 1.933942198753357. +[ Wed Sep 14 16:22:10 2022 ] Top1: 63.77% +[ Wed Sep 14 16:22:10 2022 ] Top5: 89.74% +[ Wed Sep 14 16:22:10 2022 ] Training epoch: 83 +[ Wed Sep 14 16:22:23 2022 ] Batch(13/123) done. Loss: 0.0026 lr:0.001000 network_time: 0.0325 +[ Wed Sep 14 16:23:36 2022 ] Batch(113/123) done. Loss: 0.0026 lr:0.001000 network_time: 0.0279 +[ Wed Sep 14 16:23:43 2022 ] Eval epoch: 83 +[ Wed Sep 14 16:24:15 2022 ] Mean test loss of 258 batches: 1.7652544975280762. 
+[ Wed Sep 14 16:24:15 2022 ] Top1: 64.11% +[ Wed Sep 14 16:24:15 2022 ] Top5: 90.29% +[ Wed Sep 14 16:24:16 2022 ] Training epoch: 84 +[ Wed Sep 14 16:25:25 2022 ] Batch(90/123) done. Loss: 0.0038 lr:0.001000 network_time: 0.0314 +[ Wed Sep 14 16:25:48 2022 ] Eval epoch: 84 +[ Wed Sep 14 16:26:21 2022 ] Mean test loss of 258 batches: 1.8277084827423096. +[ Wed Sep 14 16:26:21 2022 ] Top1: 63.18% +[ Wed Sep 14 16:26:21 2022 ] Top5: 89.96% +[ Wed Sep 14 16:26:21 2022 ] Training epoch: 85 +[ Wed Sep 14 16:27:13 2022 ] Batch(67/123) done. Loss: 0.0028 lr:0.001000 network_time: 0.0271 +[ Wed Sep 14 16:27:54 2022 ] Eval epoch: 85 +[ Wed Sep 14 16:28:26 2022 ] Mean test loss of 258 batches: 1.7809817790985107. +[ Wed Sep 14 16:28:26 2022 ] Top1: 64.47% +[ Wed Sep 14 16:28:27 2022 ] Top5: 90.45% +[ Wed Sep 14 16:28:27 2022 ] Training epoch: 86 +[ Wed Sep 14 16:29:02 2022 ] Batch(44/123) done. Loss: 0.0091 lr:0.001000 network_time: 0.0278 +[ Wed Sep 14 16:29:59 2022 ] Eval epoch: 86 +[ Wed Sep 14 16:30:32 2022 ] Mean test loss of 258 batches: 1.8707036972045898. +[ Wed Sep 14 16:30:32 2022 ] Top1: 62.34% +[ Wed Sep 14 16:30:32 2022 ] Top5: 89.38% +[ Wed Sep 14 16:30:32 2022 ] Training epoch: 87 +[ Wed Sep 14 16:30:52 2022 ] Batch(21/123) done. Loss: 0.0035 lr:0.001000 network_time: 0.0260 +[ Wed Sep 14 16:32:04 2022 ] Batch(121/123) done. Loss: 0.0062 lr:0.001000 network_time: 0.0307 +[ Wed Sep 14 16:32:05 2022 ] Eval epoch: 87 +[ Wed Sep 14 16:32:38 2022 ] Mean test loss of 258 batches: 1.8078993558883667. +[ Wed Sep 14 16:32:38 2022 ] Top1: 64.64% +[ Wed Sep 14 16:32:38 2022 ] Top5: 90.34% +[ Wed Sep 14 16:32:38 2022 ] Training epoch: 88 +[ Wed Sep 14 16:33:53 2022 ] Batch(98/123) done. Loss: 0.0088 lr:0.001000 network_time: 0.0265 +[ Wed Sep 14 16:34:11 2022 ] Eval epoch: 88 +[ Wed Sep 14 16:34:44 2022 ] Mean test loss of 258 batches: 1.7731508016586304. 
+[ Wed Sep 14 16:34:44 2022 ] Top1: 64.29% +[ Wed Sep 14 16:34:44 2022 ] Top5: 90.33% +[ Wed Sep 14 16:34:44 2022 ] Training epoch: 89 +[ Wed Sep 14 16:35:42 2022 ] Batch(75/123) done. Loss: 0.0014 lr:0.001000 network_time: 0.0288 +[ Wed Sep 14 16:36:16 2022 ] Eval epoch: 89 +[ Wed Sep 14 16:36:49 2022 ] Mean test loss of 258 batches: 1.8627841472625732. +[ Wed Sep 14 16:36:49 2022 ] Top1: 62.02% +[ Wed Sep 14 16:36:49 2022 ] Top5: 89.46% +[ Wed Sep 14 16:36:49 2022 ] Training epoch: 90 +[ Wed Sep 14 16:37:31 2022 ] Batch(52/123) done. Loss: 0.0049 lr:0.001000 network_time: 0.0316 +[ Wed Sep 14 16:38:22 2022 ] Eval epoch: 90 +[ Wed Sep 14 16:38:55 2022 ] Mean test loss of 258 batches: 1.824079990386963. +[ Wed Sep 14 16:38:55 2022 ] Top1: 64.66% +[ Wed Sep 14 16:38:55 2022 ] Top5: 90.42% +[ Wed Sep 14 16:38:55 2022 ] Training epoch: 91 +[ Wed Sep 14 16:39:20 2022 ] Batch(29/123) done. Loss: 0.0035 lr:0.001000 network_time: 0.0309 +[ Wed Sep 14 16:40:28 2022 ] Eval epoch: 91 +[ Wed Sep 14 16:41:01 2022 ] Mean test loss of 258 batches: 1.771101474761963. +[ Wed Sep 14 16:41:01 2022 ] Top1: 64.53% +[ Wed Sep 14 16:41:01 2022 ] Top5: 90.39% +[ Wed Sep 14 16:41:01 2022 ] Training epoch: 92 +[ Wed Sep 14 16:41:09 2022 ] Batch(6/123) done. Loss: 0.0039 lr:0.001000 network_time: 0.0326 +[ Wed Sep 14 16:42:22 2022 ] Batch(106/123) done. Loss: 0.0112 lr:0.001000 network_time: 0.0315 +[ Wed Sep 14 16:42:33 2022 ] Eval epoch: 92 +[ Wed Sep 14 16:43:06 2022 ] Mean test loss of 258 batches: 1.9300997257232666. +[ Wed Sep 14 16:43:06 2022 ] Top1: 64.31% +[ Wed Sep 14 16:43:06 2022 ] Top5: 90.14% +[ Wed Sep 14 16:43:06 2022 ] Training epoch: 93 +[ Wed Sep 14 16:44:10 2022 ] Batch(83/123) done. Loss: 0.0053 lr:0.001000 network_time: 0.0368 +[ Wed Sep 14 16:44:39 2022 ] Eval epoch: 93 +[ Wed Sep 14 16:45:12 2022 ] Mean test loss of 258 batches: 1.7791281938552856. 
+[ Wed Sep 14 16:45:12 2022 ] Top1: 64.49% +[ Wed Sep 14 16:45:12 2022 ] Top5: 90.49% +[ Wed Sep 14 16:45:12 2022 ] Training epoch: 94 +[ Wed Sep 14 16:45:59 2022 ] Batch(60/123) done. Loss: 0.0029 lr:0.001000 network_time: 0.0300 +[ Wed Sep 14 16:46:45 2022 ] Eval epoch: 94 +[ Wed Sep 14 16:47:17 2022 ] Mean test loss of 258 batches: 1.7987879514694214. +[ Wed Sep 14 16:47:17 2022 ] Top1: 64.46% +[ Wed Sep 14 16:47:17 2022 ] Top5: 90.42% +[ Wed Sep 14 16:47:17 2022 ] Training epoch: 95 +[ Wed Sep 14 16:47:48 2022 ] Batch(37/123) done. Loss: 0.0069 lr:0.001000 network_time: 0.0321 +[ Wed Sep 14 16:48:50 2022 ] Eval epoch: 95 +[ Wed Sep 14 16:49:22 2022 ] Mean test loss of 258 batches: 1.9317775964736938. +[ Wed Sep 14 16:49:22 2022 ] Top1: 64.55% +[ Wed Sep 14 16:49:23 2022 ] Top5: 90.23% +[ Wed Sep 14 16:49:23 2022 ] Training epoch: 96 +[ Wed Sep 14 16:49:37 2022 ] Batch(14/123) done. Loss: 0.0553 lr:0.001000 network_time: 0.0329 +[ Wed Sep 14 16:50:49 2022 ] Batch(114/123) done. Loss: 0.0015 lr:0.001000 network_time: 0.0284 +[ Wed Sep 14 16:50:55 2022 ] Eval epoch: 96 +[ Wed Sep 14 16:51:28 2022 ] Mean test loss of 258 batches: 1.7748775482177734. +[ Wed Sep 14 16:51:28 2022 ] Top1: 63.97% +[ Wed Sep 14 16:51:28 2022 ] Top5: 90.09% +[ Wed Sep 14 16:51:28 2022 ] Training epoch: 97 +[ Wed Sep 14 16:52:38 2022 ] Batch(91/123) done. Loss: 0.0035 lr:0.001000 network_time: 0.0269 +[ Wed Sep 14 16:53:01 2022 ] Eval epoch: 97 +[ Wed Sep 14 16:53:34 2022 ] Mean test loss of 258 batches: 2.001574754714966. +[ Wed Sep 14 16:53:34 2022 ] Top1: 59.91% +[ Wed Sep 14 16:53:34 2022 ] Top5: 88.32% +[ Wed Sep 14 16:53:34 2022 ] Training epoch: 98 +[ Wed Sep 14 16:54:27 2022 ] Batch(68/123) done. Loss: 0.0029 lr:0.001000 network_time: 0.0311 +[ Wed Sep 14 16:55:07 2022 ] Eval epoch: 98 +[ Wed Sep 14 16:55:40 2022 ] Mean test loss of 258 batches: 1.9462146759033203. 
+[ Wed Sep 14 16:55:40 2022 ] Top1: 62.26% +[ Wed Sep 14 16:55:40 2022 ] Top5: 89.31% +[ Wed Sep 14 16:55:40 2022 ] Training epoch: 99 +[ Wed Sep 14 16:56:16 2022 ] Batch(45/123) done. Loss: 0.0050 lr:0.001000 network_time: 0.0275 +[ Wed Sep 14 16:57:12 2022 ] Eval epoch: 99 +[ Wed Sep 14 16:57:45 2022 ] Mean test loss of 258 batches: 1.8766233921051025. +[ Wed Sep 14 16:57:45 2022 ] Top1: 64.34% +[ Wed Sep 14 16:57:45 2022 ] Top5: 90.11% +[ Wed Sep 14 16:57:45 2022 ] Training epoch: 100 +[ Wed Sep 14 16:58:05 2022 ] Batch(22/123) done. Loss: 0.0029 lr:0.001000 network_time: 0.0298 +[ Wed Sep 14 16:59:17 2022 ] Batch(122/123) done. Loss: 0.0024 lr:0.001000 network_time: 0.0273 +[ Wed Sep 14 16:59:18 2022 ] Eval epoch: 100 +[ Wed Sep 14 16:59:50 2022 ] Mean test loss of 258 batches: 2.055025339126587. +[ Wed Sep 14 16:59:50 2022 ] Top1: 63.57% +[ Wed Sep 14 16:59:50 2022 ] Top5: 89.82% +[ Wed Sep 14 16:59:51 2022 ] Training epoch: 101 +[ Wed Sep 14 17:01:06 2022 ] Batch(99/123) done. Loss: 0.0027 lr:0.000100 network_time: 0.0278 +[ Wed Sep 14 17:01:23 2022 ] Eval epoch: 101 +[ Wed Sep 14 17:01:56 2022 ] Mean test loss of 258 batches: 2.1624438762664795. +[ Wed Sep 14 17:01:56 2022 ] Top1: 63.18% +[ Wed Sep 14 17:01:56 2022 ] Top5: 89.18% +[ Wed Sep 14 17:01:56 2022 ] Training epoch: 102 +[ Wed Sep 14 17:02:55 2022 ] Batch(76/123) done. Loss: 0.0037 lr:0.000100 network_time: 0.0286 +[ Wed Sep 14 17:03:29 2022 ] Eval epoch: 102 +[ Wed Sep 14 17:04:01 2022 ] Mean test loss of 258 batches: 1.8533306121826172. +[ Wed Sep 14 17:04:01 2022 ] Top1: 62.90% +[ Wed Sep 14 17:04:02 2022 ] Top5: 89.73% +[ Wed Sep 14 17:04:02 2022 ] Training epoch: 103 +[ Wed Sep 14 17:04:44 2022 ] Batch(53/123) done. Loss: 0.0062 lr:0.000100 network_time: 0.0278 +[ Wed Sep 14 17:05:34 2022 ] Eval epoch: 103 +[ Wed Sep 14 17:06:07 2022 ] Mean test loss of 258 batches: 1.783291220664978. 
+[ Wed Sep 14 17:06:07 2022 ] Top1: 64.95% +[ Wed Sep 14 17:06:07 2022 ] Top5: 90.61% +[ Wed Sep 14 17:06:07 2022 ] Training epoch: 104 +[ Wed Sep 14 17:06:33 2022 ] Batch(30/123) done. Loss: 0.0126 lr:0.000100 network_time: 0.0259 +[ Wed Sep 14 17:07:40 2022 ] Eval epoch: 104 +[ Wed Sep 14 17:08:12 2022 ] Mean test loss of 258 batches: 2.0738165378570557. +[ Wed Sep 14 17:08:12 2022 ] Top1: 63.87% +[ Wed Sep 14 17:08:12 2022 ] Top5: 89.65% +[ Wed Sep 14 17:08:12 2022 ] Training epoch: 105 +[ Wed Sep 14 17:08:21 2022 ] Batch(7/123) done. Loss: 0.0053 lr:0.000100 network_time: 0.0263 +[ Wed Sep 14 17:09:33 2022 ] Batch(107/123) done. Loss: 0.0022 lr:0.000100 network_time: 0.0276 +[ Wed Sep 14 17:09:45 2022 ] Eval epoch: 105 +[ Wed Sep 14 17:10:18 2022 ] Mean test loss of 258 batches: 1.8350716829299927. +[ Wed Sep 14 17:10:18 2022 ] Top1: 63.67% +[ Wed Sep 14 17:10:18 2022 ] Top5: 90.12% +[ Wed Sep 14 17:10:18 2022 ] Training epoch: 106 +[ Wed Sep 14 17:11:22 2022 ] Batch(84/123) done. Loss: 0.0068 lr:0.000100 network_time: 0.0269 +[ Wed Sep 14 17:11:50 2022 ] Eval epoch: 106 +[ Wed Sep 14 17:12:23 2022 ] Mean test loss of 258 batches: 1.885536551475525. +[ Wed Sep 14 17:12:23 2022 ] Top1: 63.25% +[ Wed Sep 14 17:12:23 2022 ] Top5: 89.85% +[ Wed Sep 14 17:12:23 2022 ] Training epoch: 107 +[ Wed Sep 14 17:13:11 2022 ] Batch(61/123) done. Loss: 0.0039 lr:0.000100 network_time: 0.0272 +[ Wed Sep 14 17:13:56 2022 ] Eval epoch: 107 +[ Wed Sep 14 17:14:28 2022 ] Mean test loss of 258 batches: 1.8537285327911377. +[ Wed Sep 14 17:14:28 2022 ] Top1: 64.15% +[ Wed Sep 14 17:14:28 2022 ] Top5: 90.17% +[ Wed Sep 14 17:14:28 2022 ] Training epoch: 108 +[ Wed Sep 14 17:15:00 2022 ] Batch(38/123) done. Loss: 0.0113 lr:0.000100 network_time: 0.0306 +[ Wed Sep 14 17:16:01 2022 ] Eval epoch: 108 +[ Wed Sep 14 17:16:33 2022 ] Mean test loss of 258 batches: 1.797559380531311. 
+[ Wed Sep 14 17:16:34 2022 ] Top1: 64.61% +[ Wed Sep 14 17:16:34 2022 ] Top5: 90.61% +[ Wed Sep 14 17:16:34 2022 ] Training epoch: 109 +[ Wed Sep 14 17:16:48 2022 ] Batch(15/123) done. Loss: 0.0047 lr:0.000100 network_time: 0.0307 +[ Wed Sep 14 17:18:01 2022 ] Batch(115/123) done. Loss: 0.0027 lr:0.000100 network_time: 0.0278 +[ Wed Sep 14 17:18:06 2022 ] Eval epoch: 109 +[ Wed Sep 14 17:18:39 2022 ] Mean test loss of 258 batches: 1.8364958763122559. +[ Wed Sep 14 17:18:39 2022 ] Top1: 64.34% +[ Wed Sep 14 17:18:39 2022 ] Top5: 90.31% +[ Wed Sep 14 17:18:39 2022 ] Training epoch: 110 +[ Wed Sep 14 17:19:50 2022 ] Batch(92/123) done. Loss: 0.0052 lr:0.000100 network_time: 0.0274 +[ Wed Sep 14 17:20:12 2022 ] Eval epoch: 110 +[ Wed Sep 14 17:20:45 2022 ] Mean test loss of 258 batches: 1.7939398288726807. +[ Wed Sep 14 17:20:45 2022 ] Top1: 64.52% +[ Wed Sep 14 17:20:45 2022 ] Top5: 90.28% +[ Wed Sep 14 17:20:45 2022 ] Training epoch: 111 +[ Wed Sep 14 17:21:39 2022 ] Batch(69/123) done. Loss: 0.0013 lr:0.000100 network_time: 0.0265 +[ Wed Sep 14 17:22:17 2022 ] Eval epoch: 111 +[ Wed Sep 14 17:22:50 2022 ] Mean test loss of 258 batches: 1.9241000413894653. +[ Wed Sep 14 17:22:50 2022 ] Top1: 64.59% +[ Wed Sep 14 17:22:50 2022 ] Top5: 90.22% +[ Wed Sep 14 17:22:50 2022 ] Training epoch: 112 +[ Wed Sep 14 17:23:27 2022 ] Batch(46/123) done. Loss: 0.0044 lr:0.000100 network_time: 0.0266 +[ Wed Sep 14 17:24:23 2022 ] Eval epoch: 112 +[ Wed Sep 14 17:24:55 2022 ] Mean test loss of 258 batches: 1.7735662460327148. +[ Wed Sep 14 17:24:55 2022 ] Top1: 64.43% +[ Wed Sep 14 17:24:55 2022 ] Top5: 90.43% +[ Wed Sep 14 17:24:55 2022 ] Training epoch: 113 +[ Wed Sep 14 17:25:16 2022 ] Batch(23/123) done. Loss: 0.0043 lr:0.000100 network_time: 0.0265 +[ Wed Sep 14 17:26:28 2022 ] Eval epoch: 113 +[ Wed Sep 14 17:27:01 2022 ] Mean test loss of 258 batches: 1.8969343900680542. 
+[ Wed Sep 14 17:27:01 2022 ] Top1: 64.16% +[ Wed Sep 14 17:27:01 2022 ] Top5: 90.10% +[ Wed Sep 14 17:27:01 2022 ] Training epoch: 114 +[ Wed Sep 14 17:27:05 2022 ] Batch(0/123) done. Loss: 0.0043 lr:0.000100 network_time: 0.0598 +[ Wed Sep 14 17:28:17 2022 ] Batch(100/123) done. Loss: 0.0062 lr:0.000100 network_time: 0.0330 +[ Wed Sep 14 17:28:34 2022 ] Eval epoch: 114 +[ Wed Sep 14 17:29:07 2022 ] Mean test loss of 258 batches: 1.8805485963821411. +[ Wed Sep 14 17:29:07 2022 ] Top1: 63.18% +[ Wed Sep 14 17:29:07 2022 ] Top5: 89.80% +[ Wed Sep 14 17:29:07 2022 ] Training epoch: 115 +[ Wed Sep 14 17:30:07 2022 ] Batch(77/123) done. Loss: 0.0041 lr:0.000100 network_time: 0.0271 +[ Wed Sep 14 17:30:39 2022 ] Eval epoch: 115 +[ Wed Sep 14 17:31:12 2022 ] Mean test loss of 258 batches: 1.844014286994934. +[ Wed Sep 14 17:31:12 2022 ] Top1: 64.09% +[ Wed Sep 14 17:31:12 2022 ] Top5: 90.31% +[ Wed Sep 14 17:31:12 2022 ] Training epoch: 116 +[ Wed Sep 14 17:31:55 2022 ] Batch(54/123) done. Loss: 0.0065 lr:0.000100 network_time: 0.0270 +[ Wed Sep 14 17:32:45 2022 ] Eval epoch: 116 +[ Wed Sep 14 17:33:18 2022 ] Mean test loss of 258 batches: 1.9180779457092285. +[ Wed Sep 14 17:33:18 2022 ] Top1: 63.95% +[ Wed Sep 14 17:33:18 2022 ] Top5: 90.06% +[ Wed Sep 14 17:33:18 2022 ] Training epoch: 117 +[ Wed Sep 14 17:33:44 2022 ] Batch(31/123) done. Loss: 0.0030 lr:0.000100 network_time: 0.0264 +[ Wed Sep 14 17:34:51 2022 ] Eval epoch: 117 +[ Wed Sep 14 17:35:23 2022 ] Mean test loss of 258 batches: 1.81834077835083. +[ Wed Sep 14 17:35:23 2022 ] Top1: 64.39% +[ Wed Sep 14 17:35:23 2022 ] Top5: 90.51% +[ Wed Sep 14 17:35:23 2022 ] Training epoch: 118 +[ Wed Sep 14 17:35:33 2022 ] Batch(8/123) done. Loss: 0.0031 lr:0.000100 network_time: 0.0276 +[ Wed Sep 14 17:36:46 2022 ] Batch(108/123) done. Loss: 0.0026 lr:0.000100 network_time: 0.0264 +[ Wed Sep 14 17:36:56 2022 ] Eval epoch: 118 +[ Wed Sep 14 17:37:29 2022 ] Mean test loss of 258 batches: 1.917968511581421. 
+[ Wed Sep 14 17:37:29 2022 ] Top1: 63.95% +[ Wed Sep 14 17:37:29 2022 ] Top5: 90.01% +[ Wed Sep 14 17:37:29 2022 ] Training epoch: 119 +[ Wed Sep 14 17:38:35 2022 ] Batch(85/123) done. Loss: 0.0034 lr:0.000100 network_time: 0.0285 +[ Wed Sep 14 17:39:02 2022 ] Eval epoch: 119 +[ Wed Sep 14 17:39:35 2022 ] Mean test loss of 258 batches: 1.7381958961486816. +[ Wed Sep 14 17:39:35 2022 ] Top1: 64.74% +[ Wed Sep 14 17:39:35 2022 ] Top5: 90.64% +[ Wed Sep 14 17:39:35 2022 ] Training epoch: 120 +[ Wed Sep 14 17:40:24 2022 ] Batch(62/123) done. Loss: 0.0068 lr:0.000100 network_time: 0.0260 +[ Wed Sep 14 17:41:08 2022 ] Eval epoch: 120 +[ Wed Sep 14 17:41:41 2022 ] Mean test loss of 258 batches: 1.9656460285186768. +[ Wed Sep 14 17:41:41 2022 ] Top1: 63.85% +[ Wed Sep 14 17:41:41 2022 ] Top5: 89.90% +[ Wed Sep 14 17:41:41 2022 ] Training epoch: 121 +[ Wed Sep 14 17:42:13 2022 ] Batch(39/123) done. Loss: 0.0029 lr:0.000100 network_time: 0.0317 +[ Wed Sep 14 17:43:13 2022 ] Eval epoch: 121 +[ Wed Sep 14 17:43:46 2022 ] Mean test loss of 258 batches: 1.8274791240692139. +[ Wed Sep 14 17:43:46 2022 ] Top1: 64.00% +[ Wed Sep 14 17:43:46 2022 ] Top5: 90.22% +[ Wed Sep 14 17:43:46 2022 ] Training epoch: 122 +[ Wed Sep 14 17:44:02 2022 ] Batch(16/123) done. Loss: 0.0178 lr:0.000100 network_time: 0.0337 +[ Wed Sep 14 17:45:15 2022 ] Batch(116/123) done. Loss: 0.0044 lr:0.000100 network_time: 0.0264 +[ Wed Sep 14 17:45:19 2022 ] Eval epoch: 122 +[ Wed Sep 14 17:45:52 2022 ] Mean test loss of 258 batches: 1.8402843475341797. +[ Wed Sep 14 17:45:52 2022 ] Top1: 63.36% +[ Wed Sep 14 17:45:53 2022 ] Top5: 90.08% +[ Wed Sep 14 17:45:53 2022 ] Training epoch: 123 +[ Wed Sep 14 17:47:04 2022 ] Batch(93/123) done. Loss: 0.0018 lr:0.000100 network_time: 0.0277 +[ Wed Sep 14 17:47:25 2022 ] Eval epoch: 123 +[ Wed Sep 14 17:47:58 2022 ] Mean test loss of 258 batches: 1.957480549812317. 
+[ Wed Sep 14 17:47:58 2022 ] Top1: 64.07% +[ Wed Sep 14 17:47:59 2022 ] Top5: 90.03% +[ Wed Sep 14 17:47:59 2022 ] Training epoch: 124 +[ Wed Sep 14 17:48:53 2022 ] Batch(70/123) done. Loss: 0.0031 lr:0.000100 network_time: 0.0276 +[ Wed Sep 14 17:49:31 2022 ] Eval epoch: 124 +[ Wed Sep 14 17:50:04 2022 ] Mean test loss of 258 batches: 1.8941755294799805. +[ Wed Sep 14 17:50:04 2022 ] Top1: 62.70% +[ Wed Sep 14 17:50:04 2022 ] Top5: 89.49% +[ Wed Sep 14 17:50:05 2022 ] Training epoch: 125 +[ Wed Sep 14 17:50:43 2022 ] Batch(47/123) done. Loss: 0.0070 lr:0.000100 network_time: 0.0277 +[ Wed Sep 14 17:51:37 2022 ] Eval epoch: 125 +[ Wed Sep 14 17:52:10 2022 ] Mean test loss of 258 batches: 1.9753775596618652. +[ Wed Sep 14 17:52:10 2022 ] Top1: 64.40% +[ Wed Sep 14 17:52:10 2022 ] Top5: 90.04% +[ Wed Sep 14 17:52:10 2022 ] Training epoch: 126 +[ Wed Sep 14 17:52:31 2022 ] Batch(24/123) done. Loss: 0.0036 lr:0.000100 network_time: 0.0311 +[ Wed Sep 14 17:53:43 2022 ] Eval epoch: 126 +[ Wed Sep 14 17:54:16 2022 ] Mean test loss of 258 batches: 1.9079700708389282. +[ Wed Sep 14 17:54:16 2022 ] Top1: 63.95% +[ Wed Sep 14 17:54:16 2022 ] Top5: 89.97% +[ Wed Sep 14 17:54:16 2022 ] Training epoch: 127 +[ Wed Sep 14 17:54:20 2022 ] Batch(1/123) done. Loss: 0.0100 lr:0.000100 network_time: 0.0312 +[ Wed Sep 14 17:55:33 2022 ] Batch(101/123) done. Loss: 0.0022 lr:0.000100 network_time: 0.0288 +[ Wed Sep 14 17:55:48 2022 ] Eval epoch: 127 +[ Wed Sep 14 17:56:21 2022 ] Mean test loss of 258 batches: 1.9740208387374878. +[ Wed Sep 14 17:56:21 2022 ] Top1: 63.95% +[ Wed Sep 14 17:56:22 2022 ] Top5: 89.88% +[ Wed Sep 14 17:56:22 2022 ] Training epoch: 128 +[ Wed Sep 14 17:57:22 2022 ] Batch(78/123) done. Loss: 0.0012 lr:0.000100 network_time: 0.0290 +[ Wed Sep 14 17:57:55 2022 ] Eval epoch: 128 +[ Wed Sep 14 17:58:28 2022 ] Mean test loss of 258 batches: 1.942625641822815. 
+[ Wed Sep 14 17:58:28 2022 ] Top1: 63.92% +[ Wed Sep 14 17:58:28 2022 ] Top5: 90.06% +[ Wed Sep 14 17:58:28 2022 ] Training epoch: 129 +[ Wed Sep 14 17:59:12 2022 ] Batch(55/123) done. Loss: 0.0108 lr:0.000100 network_time: 0.0276 +[ Wed Sep 14 18:00:00 2022 ] Eval epoch: 129 +[ Wed Sep 14 18:00:33 2022 ] Mean test loss of 258 batches: 1.9056057929992676. +[ Wed Sep 14 18:00:33 2022 ] Top1: 61.71% +[ Wed Sep 14 18:00:33 2022 ] Top5: 89.24% +[ Wed Sep 14 18:00:34 2022 ] Training epoch: 130 +[ Wed Sep 14 18:01:01 2022 ] Batch(32/123) done. Loss: 0.0126 lr:0.000100 network_time: 0.0275 +[ Wed Sep 14 18:02:06 2022 ] Eval epoch: 130 +[ Wed Sep 14 18:02:39 2022 ] Mean test loss of 258 batches: 1.9844281673431396. +[ Wed Sep 14 18:02:39 2022 ] Top1: 64.03% +[ Wed Sep 14 18:02:39 2022 ] Top5: 89.79% +[ Wed Sep 14 18:02:39 2022 ] Training epoch: 131 +[ Wed Sep 14 18:02:49 2022 ] Batch(9/123) done. Loss: 0.0030 lr:0.000100 network_time: 0.0293 +[ Wed Sep 14 18:04:02 2022 ] Batch(109/123) done. Loss: 0.0041 lr:0.000100 network_time: 0.0286 +[ Wed Sep 14 18:04:11 2022 ] Eval epoch: 131 +[ Wed Sep 14 18:04:44 2022 ] Mean test loss of 258 batches: 2.0076076984405518. +[ Wed Sep 14 18:04:44 2022 ] Top1: 64.24% +[ Wed Sep 14 18:04:44 2022 ] Top5: 89.90% +[ Wed Sep 14 18:04:44 2022 ] Training epoch: 132 +[ Wed Sep 14 18:05:50 2022 ] Batch(86/123) done. Loss: 0.0024 lr:0.000100 network_time: 0.0298 +[ Wed Sep 14 18:06:17 2022 ] Eval epoch: 132 +[ Wed Sep 14 18:06:49 2022 ] Mean test loss of 258 batches: 1.7660404443740845. +[ Wed Sep 14 18:06:49 2022 ] Top1: 64.48% +[ Wed Sep 14 18:06:49 2022 ] Top5: 90.42% +[ Wed Sep 14 18:06:50 2022 ] Training epoch: 133 +[ Wed Sep 14 18:07:39 2022 ] Batch(63/123) done. Loss: 0.0091 lr:0.000100 network_time: 0.0277 +[ Wed Sep 14 18:08:22 2022 ] Eval epoch: 133 +[ Wed Sep 14 18:08:55 2022 ] Mean test loss of 258 batches: 1.7811264991760254. 
+[ Wed Sep 14 18:08:55 2022 ] Top1: 64.15% +[ Wed Sep 14 18:08:55 2022 ] Top5: 90.29% +[ Wed Sep 14 18:08:55 2022 ] Training epoch: 134 +[ Wed Sep 14 18:09:28 2022 ] Batch(40/123) done. Loss: 0.0097 lr:0.000100 network_time: 0.0268 +[ Wed Sep 14 18:10:28 2022 ] Eval epoch: 134 +[ Wed Sep 14 18:11:00 2022 ] Mean test loss of 258 batches: 1.7638517618179321. +[ Wed Sep 14 18:11:00 2022 ] Top1: 64.26% +[ Wed Sep 14 18:11:01 2022 ] Top5: 90.34% +[ Wed Sep 14 18:11:01 2022 ] Training epoch: 135 +[ Wed Sep 14 18:11:17 2022 ] Batch(17/123) done. Loss: 0.0032 lr:0.000100 network_time: 0.0278 +[ Wed Sep 14 18:12:29 2022 ] Batch(117/123) done. Loss: 0.0040 lr:0.000100 network_time: 0.0329 +[ Wed Sep 14 18:12:33 2022 ] Eval epoch: 135 +[ Wed Sep 14 18:13:06 2022 ] Mean test loss of 258 batches: 1.834794044494629. +[ Wed Sep 14 18:13:06 2022 ] Top1: 62.77% +[ Wed Sep 14 18:13:06 2022 ] Top5: 89.71% +[ Wed Sep 14 18:13:06 2022 ] Training epoch: 136 +[ Wed Sep 14 18:14:18 2022 ] Batch(94/123) done. Loss: 0.0225 lr:0.000100 network_time: 0.0307 +[ Wed Sep 14 18:14:39 2022 ] Eval epoch: 136 +[ Wed Sep 14 18:15:11 2022 ] Mean test loss of 258 batches: 1.848984718322754. +[ Wed Sep 14 18:15:11 2022 ] Top1: 64.85% +[ Wed Sep 14 18:15:11 2022 ] Top5: 90.39% +[ Wed Sep 14 18:15:11 2022 ] Training epoch: 137 +[ Wed Sep 14 18:16:07 2022 ] Batch(71/123) done. Loss: 0.0038 lr:0.000100 network_time: 0.0291 +[ Wed Sep 14 18:16:44 2022 ] Eval epoch: 137 +[ Wed Sep 14 18:17:16 2022 ] Mean test loss of 258 batches: 1.887627363204956. +[ Wed Sep 14 18:17:17 2022 ] Top1: 64.55% +[ Wed Sep 14 18:17:17 2022 ] Top5: 90.42% +[ Wed Sep 14 18:17:17 2022 ] Training epoch: 138 +[ Wed Sep 14 18:17:55 2022 ] Batch(48/123) done. Loss: 0.0091 lr:0.000100 network_time: 0.0314 +[ Wed Sep 14 18:18:49 2022 ] Eval epoch: 138 +[ Wed Sep 14 18:19:23 2022 ] Mean test loss of 258 batches: 1.8002877235412598. 
+[ Wed Sep 14 18:19:23 2022 ] Top1: 64.74% +[ Wed Sep 14 18:19:23 2022 ] Top5: 90.39% +[ Wed Sep 14 18:19:23 2022 ] Training epoch: 139 +[ Wed Sep 14 18:19:45 2022 ] Batch(25/123) done. Loss: 0.0043 lr:0.000100 network_time: 0.0300 +[ Wed Sep 14 18:20:55 2022 ] Eval epoch: 139 +[ Wed Sep 14 18:21:28 2022 ] Mean test loss of 258 batches: 1.872145652770996. +[ Wed Sep 14 18:21:28 2022 ] Top1: 64.27% +[ Wed Sep 14 18:21:28 2022 ] Top5: 90.06% +[ Wed Sep 14 18:21:28 2022 ] Training epoch: 140 +[ Wed Sep 14 18:21:34 2022 ] Batch(2/123) done. Loss: 0.0032 lr:0.000100 network_time: 0.0340 +[ Wed Sep 14 18:22:46 2022 ] Batch(102/123) done. Loss: 0.0034 lr:0.000100 network_time: 0.0267 +[ Wed Sep 14 18:23:01 2022 ] Eval epoch: 140 +[ Wed Sep 14 18:23:34 2022 ] Mean test loss of 258 batches: 1.8609108924865723. +[ Wed Sep 14 18:23:34 2022 ] Top1: 64.27% +[ Wed Sep 14 18:23:34 2022 ] Top5: 90.23% diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_motion_xsub/shift_gcn.py b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_motion_xsub/shift_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..0731e82fdb8bd0ae2e4c4ef4f29f7aab0c458bf4 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_motion_xsub/shift_gcn.py @@ -0,0 +1,216 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math + +import sys +sys.path.append("./model/Temporal_shift/") + +from cuda.shift import Shift + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + 
super(tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class Shift_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(Shift_tcn, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.bn = nn.BatchNorm2d(in_channels) + self.bn2 = nn.BatchNorm2d(in_channels) + bn_init(self.bn2, 1) + self.relu = nn.ReLU(inplace=True) + self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1) + self.shift_out = Shift(channel=out_channels, stride=stride, init_scale=1) + + self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1) + nn.init.kaiming_normal(self.temporal_linear.weight, mode='fan_out') + + def forward(self, x): + x = self.bn(x) + # shift1 + x = self.shift_in(x) + x = self.temporal_linear(x) + x = self.relu(x) + # shift2 + x = self.shift_out(x) + x = self.bn2(x) + return x + + +class Shift_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3): + super(Shift_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0,math.sqrt(1.0/out_channels)) + + self.Linear_bias = nn.Parameter(torch.zeros(1,1,out_channels,requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Linear_bias, 0) + + self.Feature_Mask = 
nn.Parameter(torch.ones(1,25,in_channels, requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Feature_Mask, 0) + + self.bn = nn.BatchNorm1d(25*out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + + index_array = np.empty(25*in_channels).astype(np.int) + for i in range(25): + for j in range(in_channels): + index_array[i*in_channels + j] = (i*in_channels + j + j*in_channels)%(in_channels*25) + self.shift_in = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + index_array = np.empty(25*out_channels).astype(np.int) + for i in range(25): + for j in range(out_channels): + index_array[i*out_channels + j] = (i*out_channels + j - j*out_channels)%(out_channels*25) + self.shift_out = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + + def forward(self, x0): + n, c, t, v = x0.size() + x = x0.permute(0,2,3,1).contiguous() + + # shift1 + x = x.view(n*t,v*c) + x = torch.index_select(x, 1, self.shift_in) + x = x.view(n*t,v,c) + x = x * (torch.tanh(self.Feature_Mask)+1) + + x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous() # nt,v,c + x = x + self.Linear_bias + + # shift2 + x = x.view(n*t,-1) + x = torch.index_select(x, 1, self.shift_out) + x = self.bn(x) + x = x.view(n,t,v,self.out_channels).permute(0,3,1,2) # n,c,t,v + + x = x + self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = Shift_gcn(in_channels, out_channels, A) + self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride) + self.relu = nn.ReLU() + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + else: + self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride) + 
+ def forward(self, x): + x = self.tcn1(self.gcn1(x)) + self.residual(x) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A) + self.l3 = TCN_GCN_unit(64, 64, A) + self.l4 = TCN_GCN_unit(64, 64, A) + self.l5 = TCN_GCN_unit(64, 128, A, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A) + self.l7 = TCN_GCN_unit(128, 128, A) + self.l8 = TCN_GCN_unit(128, 256, A, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A) + self.l10 = TCN_GCN_unit(256, 256, A) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x) + x = self.l2(x) + x = self.l3(x) + x = self.l4(x) + x = self.l5(x) + x = self.l6(x) + x = self.l7(x) + x = self.l8(x) + x = self.l9(x) + x = self.l10(x) + + # N*M,C,T,V + c_new = x.size(1) + x = x.view(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_xsub/config.yaml b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_xsub/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..846b3439448e9a5e910112abcb2d8ce627d87c45 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_xsub/config.yaml @@ -0,0 +1,56 @@ +Experiment_name: ntu_ShiftGCN_joint_xsub +base_lr: 0.1 +batch_size: 64 +config: 
./config/nturgbd-cross-subject/train_joint.yaml +device: +- 4 +- 5 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_ShiftGCN_joint_xsub +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_joint.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_joint.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_ShiftGCN_joint_xsub diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_xsub/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_xsub/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d6b038647984f8b282a8248907fed4fe0347a1cb --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_xsub/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08a6edb8ac93121efedccfaa08fe4ede9e0e78cb68578f758c768a9e17efa792 +size 4979902 diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_xsub/log.txt b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_xsub/log.txt new file mode 100644 index 
0000000000000000000000000000000000000000..1430eb7e7b4627bc9c31a6d68a34572f2f4698f3 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_xsub/log.txt @@ -0,0 +1,893 @@ +[ Wed Sep 14 13:20:57 2022 ] Parameters: +{'work_dir': './work_dir/ntu_ShiftGCN_joint_xsub', 'model_saved_name': './save_models/ntu_ShiftGCN_joint_xsub', 'Experiment_name': 'ntu_ShiftGCN_joint_xsub', 'config': './config/nturgbd-cross-subject/train_joint.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [4, 5], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Wed Sep 14 13:20:57 2022 ] Training epoch: 1 +[ Wed Sep 14 13:21:38 2022 ] Parameters: +{'work_dir': './work_dir/ntu_ShiftGCN_joint_xsub', 'model_saved_name': './save_models/ntu_ShiftGCN_joint_xsub', 'Experiment_name': 'ntu_ShiftGCN_joint_xsub', 'config': './config/nturgbd-cross-subject/train_joint.yaml', 'phase': 
'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [4, 5], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Wed Sep 14 13:21:38 2022 ] Training epoch: 1 +[ Wed Sep 14 13:26:13 2022 ] Parameters: +{'work_dir': './work_dir/ntu_ShiftGCN_joint_xsub', 'model_saved_name': './save_models/ntu_ShiftGCN_joint_xsub', 'Experiment_name': 'ntu_ShiftGCN_joint_xsub', 'config': './config/nturgbd-cross-subject/train_joint.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 
'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [4, 5], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Wed Sep 14 13:26:13 2022 ] Training epoch: 1 +[ Wed Sep 14 13:27:32 2022 ] Batch(99/123) done. Loss: 2.1354 lr:0.100000 network_time: 0.0319 +[ Wed Sep 14 13:27:49 2022 ] Eval epoch: 1 +[ Wed Sep 14 13:28:22 2022 ] Mean test loss of 258 batches: 4.33281946182251. 
+[ Wed Sep 14 13:28:22 2022 ] Top1: 13.93% +[ Wed Sep 14 13:28:22 2022 ] Top5: 38.90% +[ Wed Sep 14 13:28:22 2022 ] Training epoch: 2 +[ Wed Sep 14 13:30:08 2022 ] Parameters: +{'work_dir': './work_dir/ntu_ShiftGCN_joint_xsub', 'model_saved_name': './save_models/ntu_ShiftGCN_joint_xsub', 'Experiment_name': 'ntu_ShiftGCN_joint_xsub', 'config': './config/nturgbd-cross-subject/train_joint.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xsub/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [4, 5], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Wed Sep 14 13:30:08 2022 ] Training epoch: 1 +[ Wed Sep 14 13:31:26 2022 ] Batch(99/123) done. Loss: 2.1354 lr:0.100000 network_time: 0.0317 +[ Wed Sep 14 13:31:43 2022 ] Eval epoch: 1 +[ Wed Sep 14 13:32:15 2022 ] Mean test loss of 258 batches: 4.33281946182251. 
+[ Wed Sep 14 13:32:15 2022 ] Top1: 13.93% +[ Wed Sep 14 13:32:15 2022 ] Top5: 38.90% +[ Wed Sep 14 13:32:15 2022 ] Training epoch: 2 +[ Wed Sep 14 13:33:14 2022 ] Batch(76/123) done. Loss: 2.2529 lr:0.100000 network_time: 0.0313 +[ Wed Sep 14 13:33:48 2022 ] Eval epoch: 2 +[ Wed Sep 14 13:34:20 2022 ] Mean test loss of 258 batches: 3.571162700653076. +[ Wed Sep 14 13:34:20 2022 ] Top1: 19.44% +[ Wed Sep 14 13:34:21 2022 ] Top5: 50.74% +[ Wed Sep 14 13:34:21 2022 ] Training epoch: 3 +[ Wed Sep 14 13:35:03 2022 ] Batch(53/123) done. Loss: 1.8599 lr:0.100000 network_time: 0.0305 +[ Wed Sep 14 13:35:54 2022 ] Eval epoch: 3 +[ Wed Sep 14 13:36:26 2022 ] Mean test loss of 258 batches: 3.5653231143951416. +[ Wed Sep 14 13:36:26 2022 ] Top1: 23.73% +[ Wed Sep 14 13:36:26 2022 ] Top5: 58.72% +[ Wed Sep 14 13:36:26 2022 ] Training epoch: 4 +[ Wed Sep 14 13:36:52 2022 ] Batch(30/123) done. Loss: 1.6613 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 13:37:59 2022 ] Eval epoch: 4 +[ Wed Sep 14 13:38:31 2022 ] Mean test loss of 258 batches: 2.9171056747436523. +[ Wed Sep 14 13:38:31 2022 ] Top1: 29.33% +[ Wed Sep 14 13:38:31 2022 ] Top5: 64.11% +[ Wed Sep 14 13:38:31 2022 ] Training epoch: 5 +[ Wed Sep 14 13:38:40 2022 ] Batch(7/123) done. Loss: 1.1064 lr:0.100000 network_time: 0.0258 +[ Wed Sep 14 13:39:53 2022 ] Batch(107/123) done. Loss: 1.2393 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 13:40:04 2022 ] Eval epoch: 5 +[ Wed Sep 14 13:40:36 2022 ] Mean test loss of 258 batches: 2.6525120735168457. +[ Wed Sep 14 13:40:36 2022 ] Top1: 33.77% +[ Wed Sep 14 13:40:36 2022 ] Top5: 70.72% +[ Wed Sep 14 13:40:37 2022 ] Training epoch: 6 +[ Wed Sep 14 13:41:41 2022 ] Batch(84/123) done. Loss: 1.2044 lr:0.100000 network_time: 0.0322 +[ Wed Sep 14 13:42:09 2022 ] Eval epoch: 6 +[ Wed Sep 14 13:42:42 2022 ] Mean test loss of 258 batches: 2.4061315059661865. 
+[ Wed Sep 14 13:42:42 2022 ] Top1: 35.90% +[ Wed Sep 14 13:42:42 2022 ] Top5: 73.77% +[ Wed Sep 14 13:42:42 2022 ] Training epoch: 7 +[ Wed Sep 14 13:43:30 2022 ] Batch(61/123) done. Loss: 0.9143 lr:0.100000 network_time: 0.0260 +[ Wed Sep 14 13:44:15 2022 ] Eval epoch: 7 +[ Wed Sep 14 13:44:47 2022 ] Mean test loss of 258 batches: 2.4678521156311035. +[ Wed Sep 14 13:44:48 2022 ] Top1: 38.75% +[ Wed Sep 14 13:44:48 2022 ] Top5: 73.76% +[ Wed Sep 14 13:44:48 2022 ] Training epoch: 8 +[ Wed Sep 14 13:45:19 2022 ] Batch(38/123) done. Loss: 1.0245 lr:0.100000 network_time: 0.0257 +[ Wed Sep 14 13:46:20 2022 ] Eval epoch: 8 +[ Wed Sep 14 13:46:53 2022 ] Mean test loss of 258 batches: 2.290700912475586. +[ Wed Sep 14 13:46:53 2022 ] Top1: 39.76% +[ Wed Sep 14 13:46:53 2022 ] Top5: 77.05% +[ Wed Sep 14 13:46:53 2022 ] Training epoch: 9 +[ Wed Sep 14 13:47:08 2022 ] Batch(15/123) done. Loss: 1.0405 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 13:48:21 2022 ] Batch(115/123) done. Loss: 1.0858 lr:0.100000 network_time: 0.0317 +[ Wed Sep 14 13:48:26 2022 ] Eval epoch: 9 +[ Wed Sep 14 13:48:59 2022 ] Mean test loss of 258 batches: 2.499346971511841. +[ Wed Sep 14 13:48:59 2022 ] Top1: 40.40% +[ Wed Sep 14 13:48:59 2022 ] Top5: 74.43% +[ Wed Sep 14 13:48:59 2022 ] Training epoch: 10 +[ Wed Sep 14 13:50:10 2022 ] Batch(92/123) done. Loss: 1.0761 lr:0.100000 network_time: 0.0295 +[ Wed Sep 14 13:50:32 2022 ] Eval epoch: 10 +[ Wed Sep 14 13:51:05 2022 ] Mean test loss of 258 batches: 2.265429973602295. +[ Wed Sep 14 13:51:05 2022 ] Top1: 41.90% +[ Wed Sep 14 13:51:05 2022 ] Top5: 76.64% +[ Wed Sep 14 13:51:05 2022 ] Training epoch: 11 +[ Wed Sep 14 13:51:59 2022 ] Batch(69/123) done. Loss: 0.6436 lr:0.100000 network_time: 0.0291 +[ Wed Sep 14 13:52:38 2022 ] Eval epoch: 11 +[ Wed Sep 14 13:53:11 2022 ] Mean test loss of 258 batches: 2.014800786972046. 
+[ Wed Sep 14 13:53:11 2022 ] Top1: 47.50% +[ Wed Sep 14 13:53:11 2022 ] Top5: 81.51% +[ Wed Sep 14 13:53:11 2022 ] Training epoch: 12 +[ Wed Sep 14 13:53:49 2022 ] Batch(46/123) done. Loss: 0.6086 lr:0.100000 network_time: 0.0289 +[ Wed Sep 14 13:54:44 2022 ] Eval epoch: 12 +[ Wed Sep 14 13:55:17 2022 ] Mean test loss of 258 batches: 2.291834831237793. +[ Wed Sep 14 13:55:17 2022 ] Top1: 46.36% +[ Wed Sep 14 13:55:17 2022 ] Top5: 81.22% +[ Wed Sep 14 13:55:17 2022 ] Training epoch: 13 +[ Wed Sep 14 13:55:38 2022 ] Batch(23/123) done. Loss: 0.8663 lr:0.100000 network_time: 0.0259 +[ Wed Sep 14 13:56:50 2022 ] Eval epoch: 13 +[ Wed Sep 14 13:57:22 2022 ] Mean test loss of 258 batches: 2.2155113220214844. +[ Wed Sep 14 13:57:22 2022 ] Top1: 46.39% +[ Wed Sep 14 13:57:23 2022 ] Top5: 80.01% +[ Wed Sep 14 13:57:23 2022 ] Training epoch: 14 +[ Wed Sep 14 13:57:26 2022 ] Batch(0/123) done. Loss: 0.6118 lr:0.100000 network_time: 0.0475 +[ Wed Sep 14 13:58:39 2022 ] Batch(100/123) done. Loss: 0.4732 lr:0.100000 network_time: 0.0320 +[ Wed Sep 14 13:58:56 2022 ] Eval epoch: 14 +[ Wed Sep 14 13:59:28 2022 ] Mean test loss of 258 batches: 2.701786518096924. +[ Wed Sep 14 13:59:28 2022 ] Top1: 38.97% +[ Wed Sep 14 13:59:28 2022 ] Top5: 75.62% +[ Wed Sep 14 13:59:28 2022 ] Training epoch: 15 +[ Wed Sep 14 14:00:28 2022 ] Batch(77/123) done. Loss: 0.6637 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 14:01:01 2022 ] Eval epoch: 15 +[ Wed Sep 14 14:01:33 2022 ] Mean test loss of 258 batches: 2.2561497688293457. +[ Wed Sep 14 14:01:33 2022 ] Top1: 46.73% +[ Wed Sep 14 14:01:33 2022 ] Top5: 79.88% +[ Wed Sep 14 14:01:33 2022 ] Training epoch: 16 +[ Wed Sep 14 14:02:16 2022 ] Batch(54/123) done. Loss: 0.6604 lr:0.100000 network_time: 0.0298 +[ Wed Sep 14 14:03:06 2022 ] Eval epoch: 16 +[ Wed Sep 14 14:03:38 2022 ] Mean test loss of 258 batches: 2.2094640731811523. 
+[ Wed Sep 14 14:03:38 2022 ] Top1: 46.29% +[ Wed Sep 14 14:03:38 2022 ] Top5: 82.10% +[ Wed Sep 14 14:03:39 2022 ] Training epoch: 17 +[ Wed Sep 14 14:04:05 2022 ] Batch(31/123) done. Loss: 0.5614 lr:0.100000 network_time: 0.0300 +[ Wed Sep 14 14:05:12 2022 ] Eval epoch: 17 +[ Wed Sep 14 14:05:45 2022 ] Mean test loss of 258 batches: 2.6692111492156982. +[ Wed Sep 14 14:05:45 2022 ] Top1: 41.47% +[ Wed Sep 14 14:05:45 2022 ] Top5: 77.44% +[ Wed Sep 14 14:05:45 2022 ] Training epoch: 18 +[ Wed Sep 14 14:05:55 2022 ] Batch(8/123) done. Loss: 0.3214 lr:0.100000 network_time: 0.0334 +[ Wed Sep 14 14:07:08 2022 ] Batch(108/123) done. Loss: 0.4732 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 14:07:18 2022 ] Eval epoch: 18 +[ Wed Sep 14 14:07:50 2022 ] Mean test loss of 258 batches: 2.0434653759002686. +[ Wed Sep 14 14:07:50 2022 ] Top1: 51.87% +[ Wed Sep 14 14:07:51 2022 ] Top5: 83.21% +[ Wed Sep 14 14:07:51 2022 ] Training epoch: 19 +[ Wed Sep 14 14:08:57 2022 ] Batch(85/123) done. Loss: 0.4650 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 14:09:24 2022 ] Eval epoch: 19 +[ Wed Sep 14 14:09:56 2022 ] Mean test loss of 258 batches: 1.86018967628479. +[ Wed Sep 14 14:09:57 2022 ] Top1: 51.05% +[ Wed Sep 14 14:09:57 2022 ] Top5: 83.74% +[ Wed Sep 14 14:09:57 2022 ] Training epoch: 20 +[ Wed Sep 14 14:10:46 2022 ] Batch(62/123) done. Loss: 0.3363 lr:0.100000 network_time: 0.0292 +[ Wed Sep 14 14:11:30 2022 ] Eval epoch: 20 +[ Wed Sep 14 14:12:02 2022 ] Mean test loss of 258 batches: 2.408405065536499. +[ Wed Sep 14 14:12:02 2022 ] Top1: 46.35% +[ Wed Sep 14 14:12:03 2022 ] Top5: 79.66% +[ Wed Sep 14 14:12:03 2022 ] Training epoch: 21 +[ Wed Sep 14 14:12:35 2022 ] Batch(39/123) done. Loss: 0.5413 lr:0.100000 network_time: 0.0288 +[ Wed Sep 14 14:13:36 2022 ] Eval epoch: 21 +[ Wed Sep 14 14:14:08 2022 ] Mean test loss of 258 batches: 2.3089609146118164. 
+[ Wed Sep 14 14:14:08 2022 ] Top1: 45.38% +[ Wed Sep 14 14:14:08 2022 ] Top5: 82.33% +[ Wed Sep 14 14:14:09 2022 ] Training epoch: 22 +[ Wed Sep 14 14:14:24 2022 ] Batch(16/123) done. Loss: 0.4254 lr:0.100000 network_time: 0.0332 +[ Wed Sep 14 14:15:37 2022 ] Batch(116/123) done. Loss: 0.4475 lr:0.100000 network_time: 0.0265 +[ Wed Sep 14 14:15:42 2022 ] Eval epoch: 22 +[ Wed Sep 14 14:16:14 2022 ] Mean test loss of 258 batches: 2.318674087524414. +[ Wed Sep 14 14:16:14 2022 ] Top1: 47.51% +[ Wed Sep 14 14:16:15 2022 ] Top5: 82.03% +[ Wed Sep 14 14:16:15 2022 ] Training epoch: 23 +[ Wed Sep 14 14:17:26 2022 ] Batch(93/123) done. Loss: 0.5333 lr:0.100000 network_time: 0.0286 +[ Wed Sep 14 14:17:48 2022 ] Eval epoch: 23 +[ Wed Sep 14 14:18:20 2022 ] Mean test loss of 258 batches: 2.097595453262329. +[ Wed Sep 14 14:18:20 2022 ] Top1: 49.50% +[ Wed Sep 14 14:18:20 2022 ] Top5: 83.20% +[ Wed Sep 14 14:18:21 2022 ] Training epoch: 24 +[ Wed Sep 14 14:19:15 2022 ] Batch(70/123) done. Loss: 0.5075 lr:0.100000 network_time: 0.0293 +[ Wed Sep 14 14:19:54 2022 ] Eval epoch: 24 +[ Wed Sep 14 14:20:26 2022 ] Mean test loss of 258 batches: 2.4013638496398926. +[ Wed Sep 14 14:20:26 2022 ] Top1: 48.10% +[ Wed Sep 14 14:20:26 2022 ] Top5: 81.03% +[ Wed Sep 14 14:20:26 2022 ] Training epoch: 25 +[ Wed Sep 14 14:21:05 2022 ] Batch(47/123) done. Loss: 0.4802 lr:0.100000 network_time: 0.0293 +[ Wed Sep 14 14:22:00 2022 ] Eval epoch: 25 +[ Wed Sep 14 14:22:32 2022 ] Mean test loss of 258 batches: 2.116831064224243. +[ Wed Sep 14 14:22:33 2022 ] Top1: 49.18% +[ Wed Sep 14 14:22:33 2022 ] Top5: 83.03% +[ Wed Sep 14 14:22:33 2022 ] Training epoch: 26 +[ Wed Sep 14 14:22:54 2022 ] Batch(24/123) done. Loss: 0.7263 lr:0.100000 network_time: 0.0276 +[ Wed Sep 14 14:24:06 2022 ] Eval epoch: 26 +[ Wed Sep 14 14:24:38 2022 ] Mean test loss of 258 batches: 1.9685046672821045. 
+[ Wed Sep 14 14:24:38 2022 ] Top1: 53.72% +[ Wed Sep 14 14:24:38 2022 ] Top5: 85.21% +[ Wed Sep 14 14:24:38 2022 ] Training epoch: 27 +[ Wed Sep 14 14:24:43 2022 ] Batch(1/123) done. Loss: 0.1548 lr:0.100000 network_time: 0.0319 +[ Wed Sep 14 14:25:56 2022 ] Batch(101/123) done. Loss: 0.3913 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 14:26:11 2022 ] Eval epoch: 27 +[ Wed Sep 14 14:26:43 2022 ] Mean test loss of 258 batches: 2.30924391746521. +[ Wed Sep 14 14:26:43 2022 ] Top1: 50.16% +[ Wed Sep 14 14:26:43 2022 ] Top5: 82.26% +[ Wed Sep 14 14:26:44 2022 ] Training epoch: 28 +[ Wed Sep 14 14:27:44 2022 ] Batch(78/123) done. Loss: 0.5422 lr:0.100000 network_time: 0.0262 +[ Wed Sep 14 14:28:17 2022 ] Eval epoch: 28 +[ Wed Sep 14 14:28:48 2022 ] Mean test loss of 258 batches: 2.1971280574798584. +[ Wed Sep 14 14:28:49 2022 ] Top1: 52.06% +[ Wed Sep 14 14:28:49 2022 ] Top5: 84.11% +[ Wed Sep 14 14:28:49 2022 ] Training epoch: 29 +[ Wed Sep 14 14:29:32 2022 ] Batch(55/123) done. Loss: 0.3558 lr:0.100000 network_time: 0.0311 +[ Wed Sep 14 14:30:22 2022 ] Eval epoch: 29 +[ Wed Sep 14 14:30:53 2022 ] Mean test loss of 258 batches: 1.991234540939331. +[ Wed Sep 14 14:30:54 2022 ] Top1: 53.61% +[ Wed Sep 14 14:30:54 2022 ] Top5: 85.71% +[ Wed Sep 14 14:30:54 2022 ] Training epoch: 30 +[ Wed Sep 14 14:31:21 2022 ] Batch(32/123) done. Loss: 0.3585 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 14:32:26 2022 ] Eval epoch: 30 +[ Wed Sep 14 14:32:59 2022 ] Mean test loss of 258 batches: 1.8016409873962402. +[ Wed Sep 14 14:32:59 2022 ] Top1: 56.30% +[ Wed Sep 14 14:32:59 2022 ] Top5: 86.58% +[ Wed Sep 14 14:32:59 2022 ] Training epoch: 31 +[ Wed Sep 14 14:33:09 2022 ] Batch(9/123) done. Loss: 0.2505 lr:0.100000 network_time: 0.0281 +[ Wed Sep 14 14:34:22 2022 ] Batch(109/123) done. Loss: 0.2635 lr:0.100000 network_time: 0.0264 +[ Wed Sep 14 14:34:32 2022 ] Eval epoch: 31 +[ Wed Sep 14 14:35:05 2022 ] Mean test loss of 258 batches: 2.105398178100586. 
+[ Wed Sep 14 14:35:05 2022 ] Top1: 51.54% +[ Wed Sep 14 14:35:05 2022 ] Top5: 84.89% +[ Wed Sep 14 14:35:05 2022 ] Training epoch: 32 +[ Wed Sep 14 14:36:11 2022 ] Batch(86/123) done. Loss: 0.4799 lr:0.100000 network_time: 0.0263 +[ Wed Sep 14 14:36:38 2022 ] Eval epoch: 32 +[ Wed Sep 14 14:37:10 2022 ] Mean test loss of 258 batches: 2.0153965950012207. +[ Wed Sep 14 14:37:10 2022 ] Top1: 52.47% +[ Wed Sep 14 14:37:10 2022 ] Top5: 85.09% +[ Wed Sep 14 14:37:10 2022 ] Training epoch: 33 +[ Wed Sep 14 14:38:00 2022 ] Batch(63/123) done. Loss: 0.3095 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 14:38:43 2022 ] Eval epoch: 33 +[ Wed Sep 14 14:39:15 2022 ] Mean test loss of 258 batches: 2.7301554679870605. +[ Wed Sep 14 14:39:15 2022 ] Top1: 47.96% +[ Wed Sep 14 14:39:15 2022 ] Top5: 81.24% +[ Wed Sep 14 14:39:15 2022 ] Training epoch: 34 +[ Wed Sep 14 14:39:48 2022 ] Batch(40/123) done. Loss: 0.3923 lr:0.100000 network_time: 0.0274 +[ Wed Sep 14 14:40:48 2022 ] Eval epoch: 34 +[ Wed Sep 14 14:41:21 2022 ] Mean test loss of 258 batches: 1.9540212154388428. +[ Wed Sep 14 14:41:21 2022 ] Top1: 54.04% +[ Wed Sep 14 14:41:21 2022 ] Top5: 85.81% +[ Wed Sep 14 14:41:21 2022 ] Training epoch: 35 +[ Wed Sep 14 14:41:37 2022 ] Batch(17/123) done. Loss: 0.1987 lr:0.100000 network_time: 0.0280 +[ Wed Sep 14 14:42:50 2022 ] Batch(117/123) done. Loss: 0.3243 lr:0.100000 network_time: 0.0291 +[ Wed Sep 14 14:42:54 2022 ] Eval epoch: 35 +[ Wed Sep 14 14:43:26 2022 ] Mean test loss of 258 batches: 2.593190908432007. +[ Wed Sep 14 14:43:26 2022 ] Top1: 50.09% +[ Wed Sep 14 14:43:26 2022 ] Top5: 81.51% +[ Wed Sep 14 14:43:26 2022 ] Training epoch: 36 +[ Wed Sep 14 14:44:38 2022 ] Batch(94/123) done. Loss: 0.3176 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 14:44:59 2022 ] Eval epoch: 36 +[ Wed Sep 14 14:45:31 2022 ] Mean test loss of 258 batches: 2.1365630626678467. 
+[ Wed Sep 14 14:45:32 2022 ] Top1: 54.51% +[ Wed Sep 14 14:45:32 2022 ] Top5: 84.55% +[ Wed Sep 14 14:45:32 2022 ] Training epoch: 37 +[ Wed Sep 14 14:46:27 2022 ] Batch(71/123) done. Loss: 0.2958 lr:0.100000 network_time: 0.0255 +[ Wed Sep 14 14:47:05 2022 ] Eval epoch: 37 +[ Wed Sep 14 14:47:36 2022 ] Mean test loss of 258 batches: 2.1151015758514404. +[ Wed Sep 14 14:47:37 2022 ] Top1: 51.03% +[ Wed Sep 14 14:47:37 2022 ] Top5: 84.02% +[ Wed Sep 14 14:47:37 2022 ] Training epoch: 38 +[ Wed Sep 14 14:48:15 2022 ] Batch(48/123) done. Loss: 0.2557 lr:0.100000 network_time: 0.0309 +[ Wed Sep 14 14:49:10 2022 ] Eval epoch: 38 +[ Wed Sep 14 14:49:41 2022 ] Mean test loss of 258 batches: 2.130195140838623. +[ Wed Sep 14 14:49:42 2022 ] Top1: 52.40% +[ Wed Sep 14 14:49:42 2022 ] Top5: 83.91% +[ Wed Sep 14 14:49:42 2022 ] Training epoch: 39 +[ Wed Sep 14 14:50:03 2022 ] Batch(25/123) done. Loss: 0.2468 lr:0.100000 network_time: 0.0258 +[ Wed Sep 14 14:51:15 2022 ] Eval epoch: 39 +[ Wed Sep 14 14:51:47 2022 ] Mean test loss of 258 batches: 2.4296348094940186. +[ Wed Sep 14 14:51:47 2022 ] Top1: 52.08% +[ Wed Sep 14 14:51:47 2022 ] Top5: 83.84% +[ Wed Sep 14 14:51:47 2022 ] Training epoch: 40 +[ Wed Sep 14 14:51:52 2022 ] Batch(2/123) done. Loss: 0.4386 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 14:53:05 2022 ] Batch(102/123) done. Loss: 0.2768 lr:0.100000 network_time: 0.0305 +[ Wed Sep 14 14:53:20 2022 ] Eval epoch: 40 +[ Wed Sep 14 14:53:52 2022 ] Mean test loss of 258 batches: 2.3187801837921143. +[ Wed Sep 14 14:53:52 2022 ] Top1: 51.82% +[ Wed Sep 14 14:53:52 2022 ] Top5: 83.99% +[ Wed Sep 14 14:53:52 2022 ] Training epoch: 41 +[ Wed Sep 14 14:54:54 2022 ] Batch(79/123) done. Loss: 0.2464 lr:0.100000 network_time: 0.0303 +[ Wed Sep 14 14:55:25 2022 ] Eval epoch: 41 +[ Wed Sep 14 14:55:58 2022 ] Mean test loss of 258 batches: 2.4942824840545654. 
+[ Wed Sep 14 14:55:58 2022 ] Top1: 51.19% +[ Wed Sep 14 14:55:58 2022 ] Top5: 83.19% +[ Wed Sep 14 14:55:58 2022 ] Training epoch: 42 +[ Wed Sep 14 14:56:43 2022 ] Batch(56/123) done. Loss: 0.1342 lr:0.100000 network_time: 0.0293 +[ Wed Sep 14 14:57:31 2022 ] Eval epoch: 42 +[ Wed Sep 14 14:58:03 2022 ] Mean test loss of 258 batches: 2.3902950286865234. +[ Wed Sep 14 14:58:03 2022 ] Top1: 52.47% +[ Wed Sep 14 14:58:03 2022 ] Top5: 84.04% +[ Wed Sep 14 14:58:03 2022 ] Training epoch: 43 +[ Wed Sep 14 14:58:31 2022 ] Batch(33/123) done. Loss: 0.1424 lr:0.100000 network_time: 0.0311 +[ Wed Sep 14 14:59:36 2022 ] Eval epoch: 43 +[ Wed Sep 14 15:00:08 2022 ] Mean test loss of 258 batches: 2.4120872020721436. +[ Wed Sep 14 15:00:08 2022 ] Top1: 49.75% +[ Wed Sep 14 15:00:08 2022 ] Top5: 81.69% +[ Wed Sep 14 15:00:08 2022 ] Training epoch: 44 +[ Wed Sep 14 15:00:19 2022 ] Batch(10/123) done. Loss: 0.2357 lr:0.100000 network_time: 0.0313 +[ Wed Sep 14 15:01:32 2022 ] Batch(110/123) done. Loss: 0.2504 lr:0.100000 network_time: 0.0287 +[ Wed Sep 14 15:01:41 2022 ] Eval epoch: 44 +[ Wed Sep 14 15:02:13 2022 ] Mean test loss of 258 batches: 1.7786136865615845. +[ Wed Sep 14 15:02:14 2022 ] Top1: 56.93% +[ Wed Sep 14 15:02:14 2022 ] Top5: 86.68% +[ Wed Sep 14 15:02:14 2022 ] Training epoch: 45 +[ Wed Sep 14 15:03:21 2022 ] Batch(87/123) done. Loss: 0.2842 lr:0.100000 network_time: 0.0310 +[ Wed Sep 14 15:03:47 2022 ] Eval epoch: 45 +[ Wed Sep 14 15:04:19 2022 ] Mean test loss of 258 batches: 2.2016782760620117. +[ Wed Sep 14 15:04:19 2022 ] Top1: 55.22% +[ Wed Sep 14 15:04:19 2022 ] Top5: 86.05% +[ Wed Sep 14 15:04:19 2022 ] Training epoch: 46 +[ Wed Sep 14 15:05:10 2022 ] Batch(64/123) done. Loss: 0.3073 lr:0.100000 network_time: 0.0285 +[ Wed Sep 14 15:05:52 2022 ] Eval epoch: 46 +[ Wed Sep 14 15:06:24 2022 ] Mean test loss of 258 batches: 2.2162413597106934. 
+[ Wed Sep 14 15:06:24 2022 ] Top1: 52.64% +[ Wed Sep 14 15:06:24 2022 ] Top5: 84.02% +[ Wed Sep 14 15:06:24 2022 ] Training epoch: 47 +[ Wed Sep 14 15:06:58 2022 ] Batch(41/123) done. Loss: 0.6450 lr:0.100000 network_time: 0.0275 +[ Wed Sep 14 15:07:57 2022 ] Eval epoch: 47 +[ Wed Sep 14 15:08:29 2022 ] Mean test loss of 258 batches: 1.919111967086792. +[ Wed Sep 14 15:08:29 2022 ] Top1: 57.42% +[ Wed Sep 14 15:08:30 2022 ] Top5: 86.45% +[ Wed Sep 14 15:08:30 2022 ] Training epoch: 48 +[ Wed Sep 14 15:08:46 2022 ] Batch(18/123) done. Loss: 0.1862 lr:0.100000 network_time: 0.0268 +[ Wed Sep 14 15:09:59 2022 ] Batch(118/123) done. Loss: 0.2942 lr:0.100000 network_time: 0.0270 +[ Wed Sep 14 15:10:02 2022 ] Eval epoch: 48 +[ Wed Sep 14 15:10:35 2022 ] Mean test loss of 258 batches: 2.217275381088257. +[ Wed Sep 14 15:10:35 2022 ] Top1: 53.76% +[ Wed Sep 14 15:10:35 2022 ] Top5: 84.15% +[ Wed Sep 14 15:10:35 2022 ] Training epoch: 49 +[ Wed Sep 14 15:11:48 2022 ] Batch(95/123) done. Loss: 0.2444 lr:0.100000 network_time: 0.0267 +[ Wed Sep 14 15:12:08 2022 ] Eval epoch: 49 +[ Wed Sep 14 15:12:40 2022 ] Mean test loss of 258 batches: 1.9189571142196655. +[ Wed Sep 14 15:12:40 2022 ] Top1: 56.74% +[ Wed Sep 14 15:12:41 2022 ] Top5: 87.08% +[ Wed Sep 14 15:12:41 2022 ] Training epoch: 50 +[ Wed Sep 14 15:13:37 2022 ] Batch(72/123) done. Loss: 0.2989 lr:0.100000 network_time: 0.0322 +[ Wed Sep 14 15:14:14 2022 ] Eval epoch: 50 +[ Wed Sep 14 15:14:46 2022 ] Mean test loss of 258 batches: 1.8722784519195557. +[ Wed Sep 14 15:14:46 2022 ] Top1: 55.79% +[ Wed Sep 14 15:14:46 2022 ] Top5: 86.23% +[ Wed Sep 14 15:14:46 2022 ] Training epoch: 51 +[ Wed Sep 14 15:15:26 2022 ] Batch(49/123) done. Loss: 0.1550 lr:0.100000 network_time: 0.0309 +[ Wed Sep 14 15:16:19 2022 ] Eval epoch: 51 +[ Wed Sep 14 15:16:51 2022 ] Mean test loss of 258 batches: 1.8202694654464722. 
+[ Wed Sep 14 15:16:51 2022 ] Top1: 57.06% +[ Wed Sep 14 15:16:51 2022 ] Top5: 87.21% +[ Wed Sep 14 15:16:51 2022 ] Training epoch: 52 +[ Wed Sep 14 15:17:14 2022 ] Batch(26/123) done. Loss: 0.1605 lr:0.100000 network_time: 0.0289 +[ Wed Sep 14 15:18:24 2022 ] Eval epoch: 52 +[ Wed Sep 14 15:18:56 2022 ] Mean test loss of 258 batches: 2.127373695373535. +[ Wed Sep 14 15:18:56 2022 ] Top1: 53.71% +[ Wed Sep 14 15:18:57 2022 ] Top5: 84.94% +[ Wed Sep 14 15:18:57 2022 ] Training epoch: 53 +[ Wed Sep 14 15:19:02 2022 ] Batch(3/123) done. Loss: 0.2372 lr:0.100000 network_time: 0.0279 +[ Wed Sep 14 15:20:15 2022 ] Batch(103/123) done. Loss: 0.2728 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 15:20:29 2022 ] Eval epoch: 53 +[ Wed Sep 14 15:21:02 2022 ] Mean test loss of 258 batches: 2.177319049835205. +[ Wed Sep 14 15:21:02 2022 ] Top1: 51.14% +[ Wed Sep 14 15:21:02 2022 ] Top5: 83.88% +[ Wed Sep 14 15:21:02 2022 ] Training epoch: 54 +[ Wed Sep 14 15:22:04 2022 ] Batch(80/123) done. Loss: 0.0702 lr:0.100000 network_time: 0.0273 +[ Wed Sep 14 15:22:35 2022 ] Eval epoch: 54 +[ Wed Sep 14 15:23:07 2022 ] Mean test loss of 258 batches: 1.9530134201049805. +[ Wed Sep 14 15:23:07 2022 ] Top1: 57.14% +[ Wed Sep 14 15:23:07 2022 ] Top5: 87.07% +[ Wed Sep 14 15:23:08 2022 ] Training epoch: 55 +[ Wed Sep 14 15:23:53 2022 ] Batch(57/123) done. Loss: 0.1349 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 15:24:40 2022 ] Eval epoch: 55 +[ Wed Sep 14 15:25:12 2022 ] Mean test loss of 258 batches: 2.054978847503662. +[ Wed Sep 14 15:25:13 2022 ] Top1: 56.96% +[ Wed Sep 14 15:25:13 2022 ] Top5: 85.78% +[ Wed Sep 14 15:25:13 2022 ] Training epoch: 56 +[ Wed Sep 14 15:25:42 2022 ] Batch(34/123) done. Loss: 0.1589 lr:0.100000 network_time: 0.0277 +[ Wed Sep 14 15:26:46 2022 ] Eval epoch: 56 +[ Wed Sep 14 15:27:18 2022 ] Mean test loss of 258 batches: 2.158838987350464. 
+[ Wed Sep 14 15:27:18 2022 ] Top1: 53.88% +[ Wed Sep 14 15:27:18 2022 ] Top5: 84.66% +[ Wed Sep 14 15:27:19 2022 ] Training epoch: 57 +[ Wed Sep 14 15:27:30 2022 ] Batch(11/123) done. Loss: 0.3453 lr:0.100000 network_time: 0.0283 +[ Wed Sep 14 15:28:43 2022 ] Batch(111/123) done. Loss: 0.2935 lr:0.100000 network_time: 0.0468 +[ Wed Sep 14 15:28:51 2022 ] Eval epoch: 57 +[ Wed Sep 14 15:29:24 2022 ] Mean test loss of 258 batches: 2.115934133529663. +[ Wed Sep 14 15:29:24 2022 ] Top1: 55.32% +[ Wed Sep 14 15:29:24 2022 ] Top5: 86.03% +[ Wed Sep 14 15:29:24 2022 ] Training epoch: 58 +[ Wed Sep 14 15:30:32 2022 ] Batch(88/123) done. Loss: 0.3999 lr:0.100000 network_time: 0.0269 +[ Wed Sep 14 15:30:57 2022 ] Eval epoch: 58 +[ Wed Sep 14 15:31:29 2022 ] Mean test loss of 258 batches: 2.2344048023223877. +[ Wed Sep 14 15:31:29 2022 ] Top1: 53.47% +[ Wed Sep 14 15:31:29 2022 ] Top5: 84.13% +[ Wed Sep 14 15:31:29 2022 ] Training epoch: 59 +[ Wed Sep 14 15:32:21 2022 ] Batch(65/123) done. Loss: 0.2181 lr:0.100000 network_time: 0.0271 +[ Wed Sep 14 15:33:03 2022 ] Eval epoch: 59 +[ Wed Sep 14 15:33:35 2022 ] Mean test loss of 258 batches: 2.1006672382354736. +[ Wed Sep 14 15:33:35 2022 ] Top1: 57.98% +[ Wed Sep 14 15:33:35 2022 ] Top5: 86.38% +[ Wed Sep 14 15:33:35 2022 ] Training epoch: 60 +[ Wed Sep 14 15:34:10 2022 ] Batch(42/123) done. Loss: 0.1203 lr:0.100000 network_time: 0.0380 +[ Wed Sep 14 15:35:08 2022 ] Eval epoch: 60 +[ Wed Sep 14 15:35:40 2022 ] Mean test loss of 258 batches: 2.0645995140075684. +[ Wed Sep 14 15:35:41 2022 ] Top1: 56.20% +[ Wed Sep 14 15:35:41 2022 ] Top5: 84.93% +[ Wed Sep 14 15:35:41 2022 ] Training epoch: 61 +[ Wed Sep 14 15:35:58 2022 ] Batch(19/123) done. Loss: 0.1308 lr:0.010000 network_time: 0.0309 +[ Wed Sep 14 15:37:11 2022 ] Batch(119/123) done. Loss: 0.1849 lr:0.010000 network_time: 0.0279 +[ Wed Sep 14 15:37:13 2022 ] Eval epoch: 61 +[ Wed Sep 14 15:37:46 2022 ] Mean test loss of 258 batches: 1.7348182201385498. 
+[ Wed Sep 14 15:37:46 2022 ] Top1: 62.35% +[ Wed Sep 14 15:37:46 2022 ] Top5: 88.77% +[ Wed Sep 14 15:37:46 2022 ] Training epoch: 62 +[ Wed Sep 14 15:38:59 2022 ] Batch(96/123) done. Loss: 0.0961 lr:0.010000 network_time: 0.0270 +[ Wed Sep 14 15:39:19 2022 ] Eval epoch: 62 +[ Wed Sep 14 15:39:51 2022 ] Mean test loss of 258 batches: 1.711524486541748. +[ Wed Sep 14 15:39:51 2022 ] Top1: 63.01% +[ Wed Sep 14 15:39:51 2022 ] Top5: 89.26% +[ Wed Sep 14 15:39:51 2022 ] Training epoch: 63 +[ Wed Sep 14 15:40:48 2022 ] Batch(73/123) done. Loss: 0.0374 lr:0.010000 network_time: 0.0273 +[ Wed Sep 14 15:41:24 2022 ] Eval epoch: 63 +[ Wed Sep 14 15:41:56 2022 ] Mean test loss of 258 batches: 1.72061026096344. +[ Wed Sep 14 15:41:56 2022 ] Top1: 63.20% +[ Wed Sep 14 15:41:56 2022 ] Top5: 89.08% +[ Wed Sep 14 15:41:56 2022 ] Training epoch: 64 +[ Wed Sep 14 15:42:37 2022 ] Batch(50/123) done. Loss: 0.0324 lr:0.010000 network_time: 0.0283 +[ Wed Sep 14 15:43:29 2022 ] Eval epoch: 64 +[ Wed Sep 14 15:44:02 2022 ] Mean test loss of 258 batches: 1.731319546699524. +[ Wed Sep 14 15:44:02 2022 ] Top1: 63.34% +[ Wed Sep 14 15:44:02 2022 ] Top5: 89.07% +[ Wed Sep 14 15:44:02 2022 ] Training epoch: 65 +[ Wed Sep 14 15:44:25 2022 ] Batch(27/123) done. Loss: 0.0490 lr:0.010000 network_time: 0.0318 +[ Wed Sep 14 15:45:35 2022 ] Eval epoch: 65 +[ Wed Sep 14 15:46:07 2022 ] Mean test loss of 258 batches: 1.735913634300232. +[ Wed Sep 14 15:46:07 2022 ] Top1: 63.48% +[ Wed Sep 14 15:46:07 2022 ] Top5: 89.29% +[ Wed Sep 14 15:46:08 2022 ] Training epoch: 66 +[ Wed Sep 14 15:46:14 2022 ] Batch(4/123) done. Loss: 0.0260 lr:0.010000 network_time: 0.0324 +[ Wed Sep 14 15:47:27 2022 ] Batch(104/123) done. Loss: 0.0232 lr:0.010000 network_time: 0.0294 +[ Wed Sep 14 15:47:40 2022 ] Eval epoch: 66 +[ Wed Sep 14 15:48:13 2022 ] Mean test loss of 258 batches: 1.7407399415969849. 
+[ Wed Sep 14 15:48:13 2022 ] Top1: 63.42% +[ Wed Sep 14 15:48:13 2022 ] Top5: 89.34% +[ Wed Sep 14 15:48:13 2022 ] Training epoch: 67 +[ Wed Sep 14 15:49:15 2022 ] Batch(81/123) done. Loss: 0.0287 lr:0.010000 network_time: 0.0282 +[ Wed Sep 14 15:49:46 2022 ] Eval epoch: 67 +[ Wed Sep 14 15:50:18 2022 ] Mean test loss of 258 batches: 1.7624691724777222. +[ Wed Sep 14 15:50:18 2022 ] Top1: 63.43% +[ Wed Sep 14 15:50:18 2022 ] Top5: 89.25% +[ Wed Sep 14 15:50:18 2022 ] Training epoch: 68 +[ Wed Sep 14 15:51:04 2022 ] Batch(58/123) done. Loss: 0.0116 lr:0.010000 network_time: 0.0266 +[ Wed Sep 14 15:51:51 2022 ] Eval epoch: 68 +[ Wed Sep 14 15:52:23 2022 ] Mean test loss of 258 batches: 1.7701879739761353. +[ Wed Sep 14 15:52:24 2022 ] Top1: 63.40% +[ Wed Sep 14 15:52:24 2022 ] Top5: 89.14% +[ Wed Sep 14 15:52:24 2022 ] Training epoch: 69 +[ Wed Sep 14 15:52:53 2022 ] Batch(35/123) done. Loss: 0.0248 lr:0.010000 network_time: 0.0269 +[ Wed Sep 14 15:53:57 2022 ] Eval epoch: 69 +[ Wed Sep 14 15:54:29 2022 ] Mean test loss of 258 batches: 1.7476320266723633. +[ Wed Sep 14 15:54:29 2022 ] Top1: 63.65% +[ Wed Sep 14 15:54:29 2022 ] Top5: 89.06% +[ Wed Sep 14 15:54:29 2022 ] Training epoch: 70 +[ Wed Sep 14 15:54:42 2022 ] Batch(12/123) done. Loss: 0.0127 lr:0.010000 network_time: 0.0290 +[ Wed Sep 14 15:55:55 2022 ] Batch(112/123) done. Loss: 0.0187 lr:0.010000 network_time: 0.0298 +[ Wed Sep 14 15:56:02 2022 ] Eval epoch: 70 +[ Wed Sep 14 15:56:35 2022 ] Mean test loss of 258 batches: 1.746610403060913. +[ Wed Sep 14 15:56:35 2022 ] Top1: 63.94% +[ Wed Sep 14 15:56:35 2022 ] Top5: 89.23% +[ Wed Sep 14 15:56:35 2022 ] Training epoch: 71 +[ Wed Sep 14 15:57:43 2022 ] Batch(89/123) done. Loss: 0.0119 lr:0.010000 network_time: 0.0263 +[ Wed Sep 14 15:58:08 2022 ] Eval epoch: 71 +[ Wed Sep 14 15:58:40 2022 ] Mean test loss of 258 batches: 1.7734960317611694. 
+[ Wed Sep 14 15:58:40 2022 ] Top1: 63.64% +[ Wed Sep 14 15:58:40 2022 ] Top5: 89.36% +[ Wed Sep 14 15:58:40 2022 ] Training epoch: 72 +[ Wed Sep 14 15:59:32 2022 ] Batch(66/123) done. Loss: 0.0212 lr:0.010000 network_time: 0.0329 +[ Wed Sep 14 16:00:13 2022 ] Eval epoch: 72 +[ Wed Sep 14 16:00:46 2022 ] Mean test loss of 258 batches: 1.7869070768356323. +[ Wed Sep 14 16:00:46 2022 ] Top1: 63.77% +[ Wed Sep 14 16:00:46 2022 ] Top5: 89.18% +[ Wed Sep 14 16:00:46 2022 ] Training epoch: 73 +[ Wed Sep 14 16:01:21 2022 ] Batch(43/123) done. Loss: 0.0425 lr:0.010000 network_time: 0.0276 +[ Wed Sep 14 16:02:19 2022 ] Eval epoch: 73 +[ Wed Sep 14 16:02:51 2022 ] Mean test loss of 258 batches: 1.7861807346343994. +[ Wed Sep 14 16:02:51 2022 ] Top1: 63.49% +[ Wed Sep 14 16:02:52 2022 ] Top5: 89.21% +[ Wed Sep 14 16:02:52 2022 ] Training epoch: 74 +[ Wed Sep 14 16:03:10 2022 ] Batch(20/123) done. Loss: 0.0187 lr:0.010000 network_time: 0.0348 +[ Wed Sep 14 16:04:23 2022 ] Batch(120/123) done. Loss: 0.0111 lr:0.010000 network_time: 0.0269 +[ Wed Sep 14 16:04:25 2022 ] Eval epoch: 74 +[ Wed Sep 14 16:04:57 2022 ] Mean test loss of 258 batches: 1.8124498128890991. +[ Wed Sep 14 16:04:57 2022 ] Top1: 63.50% +[ Wed Sep 14 16:04:57 2022 ] Top5: 89.26% +[ Wed Sep 14 16:04:57 2022 ] Training epoch: 75 +[ Wed Sep 14 16:06:11 2022 ] Batch(97/123) done. Loss: 0.0189 lr:0.010000 network_time: 0.0304 +[ Wed Sep 14 16:06:30 2022 ] Eval epoch: 75 +[ Wed Sep 14 16:07:02 2022 ] Mean test loss of 258 batches: 1.7966654300689697. +[ Wed Sep 14 16:07:02 2022 ] Top1: 63.86% +[ Wed Sep 14 16:07:02 2022 ] Top5: 89.43% +[ Wed Sep 14 16:07:02 2022 ] Training epoch: 76 +[ Wed Sep 14 16:08:00 2022 ] Batch(74/123) done. Loss: 0.0252 lr:0.010000 network_time: 0.0298 +[ Wed Sep 14 16:08:35 2022 ] Eval epoch: 76 +[ Wed Sep 14 16:09:08 2022 ] Mean test loss of 258 batches: 1.8263705968856812. 
+[ Wed Sep 14 16:09:08 2022 ] Top1: 63.57% +[ Wed Sep 14 16:09:08 2022 ] Top5: 88.96% +[ Wed Sep 14 16:09:08 2022 ] Training epoch: 77 +[ Wed Sep 14 16:09:49 2022 ] Batch(51/123) done. Loss: 0.0034 lr:0.010000 network_time: 0.0268 +[ Wed Sep 14 16:10:41 2022 ] Eval epoch: 77 +[ Wed Sep 14 16:11:13 2022 ] Mean test loss of 258 batches: 1.7833702564239502. +[ Wed Sep 14 16:11:13 2022 ] Top1: 63.86% +[ Wed Sep 14 16:11:13 2022 ] Top5: 89.27% +[ Wed Sep 14 16:11:13 2022 ] Training epoch: 78 +[ Wed Sep 14 16:11:37 2022 ] Batch(28/123) done. Loss: 0.0234 lr:0.010000 network_time: 0.0280 +[ Wed Sep 14 16:12:46 2022 ] Eval epoch: 78 +[ Wed Sep 14 16:13:19 2022 ] Mean test loss of 258 batches: 1.8438570499420166. +[ Wed Sep 14 16:13:19 2022 ] Top1: 63.21% +[ Wed Sep 14 16:13:19 2022 ] Top5: 88.88% +[ Wed Sep 14 16:13:19 2022 ] Training epoch: 79 +[ Wed Sep 14 16:13:26 2022 ] Batch(5/123) done. Loss: 0.0124 lr:0.010000 network_time: 0.0236 +[ Wed Sep 14 16:14:39 2022 ] Batch(105/123) done. Loss: 0.0067 lr:0.010000 network_time: 0.0304 +[ Wed Sep 14 16:14:52 2022 ] Eval epoch: 79 +[ Wed Sep 14 16:15:24 2022 ] Mean test loss of 258 batches: 1.7994800806045532. +[ Wed Sep 14 16:15:24 2022 ] Top1: 63.84% +[ Wed Sep 14 16:15:24 2022 ] Top5: 89.23% +[ Wed Sep 14 16:15:24 2022 ] Training epoch: 80 +[ Wed Sep 14 16:16:27 2022 ] Batch(82/123) done. Loss: 0.0106 lr:0.010000 network_time: 0.0276 +[ Wed Sep 14 16:16:57 2022 ] Eval epoch: 80 +[ Wed Sep 14 16:17:28 2022 ] Mean test loss of 258 batches: 1.8134398460388184. +[ Wed Sep 14 16:17:29 2022 ] Top1: 63.86% +[ Wed Sep 14 16:17:29 2022 ] Top5: 89.26% +[ Wed Sep 14 16:17:29 2022 ] Training epoch: 81 +[ Wed Sep 14 16:18:15 2022 ] Batch(59/123) done. Loss: 0.0091 lr:0.001000 network_time: 0.0297 +[ Wed Sep 14 16:19:02 2022 ] Eval epoch: 81 +[ Wed Sep 14 16:19:34 2022 ] Mean test loss of 258 batches: 1.796744704246521. 
+[ Wed Sep 14 16:19:34 2022 ] Top1: 63.98% +[ Wed Sep 14 16:19:34 2022 ] Top5: 89.26% +[ Wed Sep 14 16:19:34 2022 ] Training epoch: 82 +[ Wed Sep 14 16:20:04 2022 ] Batch(36/123) done. Loss: 0.0092 lr:0.001000 network_time: 0.0268 +[ Wed Sep 14 16:21:07 2022 ] Eval epoch: 82 +[ Wed Sep 14 16:21:40 2022 ] Mean test loss of 258 batches: 1.843143343925476. +[ Wed Sep 14 16:21:40 2022 ] Top1: 63.77% +[ Wed Sep 14 16:21:40 2022 ] Top5: 89.06% +[ Wed Sep 14 16:21:40 2022 ] Training epoch: 83 +[ Wed Sep 14 16:21:53 2022 ] Batch(13/123) done. Loss: 0.0141 lr:0.001000 network_time: 0.0311 +[ Wed Sep 14 16:23:06 2022 ] Batch(113/123) done. Loss: 0.0083 lr:0.001000 network_time: 0.0270 +[ Wed Sep 14 16:23:13 2022 ] Eval epoch: 83 +[ Wed Sep 14 16:23:45 2022 ] Mean test loss of 258 batches: 1.8037279844284058. +[ Wed Sep 14 16:23:45 2022 ] Top1: 63.89% +[ Wed Sep 14 16:23:45 2022 ] Top5: 89.27% +[ Wed Sep 14 16:23:45 2022 ] Training epoch: 84 +[ Wed Sep 14 16:24:55 2022 ] Batch(90/123) done. Loss: 0.0043 lr:0.001000 network_time: 0.0329 +[ Wed Sep 14 16:25:18 2022 ] Eval epoch: 84 +[ Wed Sep 14 16:25:50 2022 ] Mean test loss of 258 batches: 1.8453911542892456. +[ Wed Sep 14 16:25:51 2022 ] Top1: 63.47% +[ Wed Sep 14 16:25:51 2022 ] Top5: 89.03% +[ Wed Sep 14 16:25:51 2022 ] Training epoch: 85 +[ Wed Sep 14 16:26:43 2022 ] Batch(67/123) done. Loss: 0.0063 lr:0.001000 network_time: 0.0268 +[ Wed Sep 14 16:27:24 2022 ] Eval epoch: 85 +[ Wed Sep 14 16:27:56 2022 ] Mean test loss of 258 batches: 1.8150367736816406. +[ Wed Sep 14 16:27:56 2022 ] Top1: 63.74% +[ Wed Sep 14 16:27:56 2022 ] Top5: 89.11% +[ Wed Sep 14 16:27:56 2022 ] Training epoch: 86 +[ Wed Sep 14 16:28:32 2022 ] Batch(44/123) done. Loss: 0.0100 lr:0.001000 network_time: 0.0273 +[ Wed Sep 14 16:29:29 2022 ] Eval epoch: 86 +[ Wed Sep 14 16:30:01 2022 ] Mean test loss of 258 batches: 1.7877106666564941. 
+[ Wed Sep 14 16:30:01 2022 ] Top1: 64.03% +[ Wed Sep 14 16:30:01 2022 ] Top5: 89.32% +[ Wed Sep 14 16:30:01 2022 ] Training epoch: 87 +[ Wed Sep 14 16:30:21 2022 ] Batch(21/123) done. Loss: 0.0049 lr:0.001000 network_time: 0.0266 +[ Wed Sep 14 16:31:34 2022 ] Batch(121/123) done. Loss: 0.0076 lr:0.001000 network_time: 0.0262 +[ Wed Sep 14 16:31:35 2022 ] Eval epoch: 87 +[ Wed Sep 14 16:32:07 2022 ] Mean test loss of 258 batches: 1.8418843746185303. +[ Wed Sep 14 16:32:07 2022 ] Top1: 63.81% +[ Wed Sep 14 16:32:07 2022 ] Top5: 88.97% +[ Wed Sep 14 16:32:07 2022 ] Training epoch: 88 +[ Wed Sep 14 16:33:23 2022 ] Batch(98/123) done. Loss: 0.0163 lr:0.001000 network_time: 0.0261 +[ Wed Sep 14 16:33:41 2022 ] Eval epoch: 88 +[ Wed Sep 14 16:34:13 2022 ] Mean test loss of 258 batches: 1.8131529092788696. +[ Wed Sep 14 16:34:13 2022 ] Top1: 63.85% +[ Wed Sep 14 16:34:13 2022 ] Top5: 89.12% +[ Wed Sep 14 16:34:13 2022 ] Training epoch: 89 +[ Wed Sep 14 16:35:11 2022 ] Batch(75/123) done. Loss: 0.0171 lr:0.001000 network_time: 0.0283 +[ Wed Sep 14 16:35:46 2022 ] Eval epoch: 89 +[ Wed Sep 14 16:36:18 2022 ] Mean test loss of 258 batches: 1.7912170886993408. +[ Wed Sep 14 16:36:18 2022 ] Top1: 64.00% +[ Wed Sep 14 16:36:18 2022 ] Top5: 89.34% +[ Wed Sep 14 16:36:18 2022 ] Training epoch: 90 +[ Wed Sep 14 16:37:00 2022 ] Batch(52/123) done. Loss: 0.0198 lr:0.001000 network_time: 0.0267 +[ Wed Sep 14 16:37:51 2022 ] Eval epoch: 90 +[ Wed Sep 14 16:38:23 2022 ] Mean test loss of 258 batches: 1.8016085624694824. +[ Wed Sep 14 16:38:24 2022 ] Top1: 64.03% +[ Wed Sep 14 16:38:24 2022 ] Top5: 89.33% +[ Wed Sep 14 16:38:24 2022 ] Training epoch: 91 +[ Wed Sep 14 16:38:49 2022 ] Batch(29/123) done. Loss: 0.0090 lr:0.001000 network_time: 0.0474 +[ Wed Sep 14 16:39:57 2022 ] Eval epoch: 91 +[ Wed Sep 14 16:40:29 2022 ] Mean test loss of 258 batches: 1.809605360031128. 
+[ Wed Sep 14 16:40:29 2022 ] Top1: 64.01% +[ Wed Sep 14 16:40:29 2022 ] Top5: 89.17% +[ Wed Sep 14 16:40:29 2022 ] Training epoch: 92 +[ Wed Sep 14 16:40:37 2022 ] Batch(6/123) done. Loss: 0.0152 lr:0.001000 network_time: 0.0268 +[ Wed Sep 14 16:41:50 2022 ] Batch(106/123) done. Loss: 0.0232 lr:0.001000 network_time: 0.0269 +[ Wed Sep 14 16:42:02 2022 ] Eval epoch: 92 +[ Wed Sep 14 16:42:34 2022 ] Mean test loss of 258 batches: 1.8451917171478271. +[ Wed Sep 14 16:42:34 2022 ] Top1: 63.58% +[ Wed Sep 14 16:42:35 2022 ] Top5: 89.08% +[ Wed Sep 14 16:42:35 2022 ] Training epoch: 93 +[ Wed Sep 14 16:43:39 2022 ] Batch(83/123) done. Loss: 0.0068 lr:0.001000 network_time: 0.0269 +[ Wed Sep 14 16:44:08 2022 ] Eval epoch: 93 +[ Wed Sep 14 16:44:40 2022 ] Mean test loss of 258 batches: 1.8375056982040405. +[ Wed Sep 14 16:44:40 2022 ] Top1: 63.52% +[ Wed Sep 14 16:44:40 2022 ] Top5: 89.05% +[ Wed Sep 14 16:44:40 2022 ] Training epoch: 94 +[ Wed Sep 14 16:45:28 2022 ] Batch(60/123) done. Loss: 0.0073 lr:0.001000 network_time: 0.0271 +[ Wed Sep 14 16:46:13 2022 ] Eval epoch: 94 +[ Wed Sep 14 16:46:45 2022 ] Mean test loss of 258 batches: 1.7938319444656372. +[ Wed Sep 14 16:46:46 2022 ] Top1: 64.17% +[ Wed Sep 14 16:46:46 2022 ] Top5: 89.06% +[ Wed Sep 14 16:46:46 2022 ] Training epoch: 95 +[ Wed Sep 14 16:47:17 2022 ] Batch(37/123) done. Loss: 0.0082 lr:0.001000 network_time: 0.0267 +[ Wed Sep 14 16:48:19 2022 ] Eval epoch: 95 +[ Wed Sep 14 16:48:52 2022 ] Mean test loss of 258 batches: 1.8545280694961548. +[ Wed Sep 14 16:48:52 2022 ] Top1: 63.48% +[ Wed Sep 14 16:48:52 2022 ] Top5: 89.26% +[ Wed Sep 14 16:48:52 2022 ] Training epoch: 96 +[ Wed Sep 14 16:49:06 2022 ] Batch(14/123) done. Loss: 0.0101 lr:0.001000 network_time: 0.0299 +[ Wed Sep 14 16:50:19 2022 ] Batch(114/123) done. Loss: 0.0086 lr:0.001000 network_time: 0.0273 +[ Wed Sep 14 16:50:25 2022 ] Eval epoch: 96 +[ Wed Sep 14 16:50:58 2022 ] Mean test loss of 258 batches: 1.7887837886810303. 
+[ Wed Sep 14 16:50:58 2022 ] Top1: 64.00% +[ Wed Sep 14 16:50:58 2022 ] Top5: 89.36% +[ Wed Sep 14 16:50:58 2022 ] Training epoch: 97 +[ Wed Sep 14 16:52:08 2022 ] Batch(91/123) done. Loss: 0.0042 lr:0.001000 network_time: 0.0311 +[ Wed Sep 14 16:52:31 2022 ] Eval epoch: 97 +[ Wed Sep 14 16:53:03 2022 ] Mean test loss of 258 batches: 1.8494559526443481. +[ Wed Sep 14 16:53:03 2022 ] Top1: 63.47% +[ Wed Sep 14 16:53:03 2022 ] Top5: 89.13% +[ Wed Sep 14 16:53:03 2022 ] Training epoch: 98 +[ Wed Sep 14 16:53:56 2022 ] Batch(68/123) done. Loss: 0.0138 lr:0.001000 network_time: 0.0329 +[ Wed Sep 14 16:54:36 2022 ] Eval epoch: 98 +[ Wed Sep 14 16:55:08 2022 ] Mean test loss of 258 batches: 1.7884374856948853. +[ Wed Sep 14 16:55:08 2022 ] Top1: 63.95% +[ Wed Sep 14 16:55:08 2022 ] Top5: 89.45% +[ Wed Sep 14 16:55:08 2022 ] Training epoch: 99 +[ Wed Sep 14 16:55:45 2022 ] Batch(45/123) done. Loss: 0.0193 lr:0.001000 network_time: 0.0327 +[ Wed Sep 14 16:56:41 2022 ] Eval epoch: 99 +[ Wed Sep 14 16:57:14 2022 ] Mean test loss of 258 batches: 1.8234803676605225. +[ Wed Sep 14 16:57:14 2022 ] Top1: 63.93% +[ Wed Sep 14 16:57:14 2022 ] Top5: 89.20% +[ Wed Sep 14 16:57:14 2022 ] Training epoch: 100 +[ Wed Sep 14 16:57:34 2022 ] Batch(22/123) done. Loss: 0.0035 lr:0.001000 network_time: 0.0282 +[ Wed Sep 14 16:58:47 2022 ] Batch(122/123) done. Loss: 0.0035 lr:0.001000 network_time: 0.0290 +[ Wed Sep 14 16:58:47 2022 ] Eval epoch: 100 +[ Wed Sep 14 16:59:19 2022 ] Mean test loss of 258 batches: 1.8526291847229004. +[ Wed Sep 14 16:59:19 2022 ] Top1: 63.60% +[ Wed Sep 14 16:59:19 2022 ] Top5: 89.09% +[ Wed Sep 14 16:59:20 2022 ] Training epoch: 101 +[ Wed Sep 14 17:00:36 2022 ] Batch(99/123) done. Loss: 0.0040 lr:0.000100 network_time: 0.0436 +[ Wed Sep 14 17:00:53 2022 ] Eval epoch: 101 +[ Wed Sep 14 17:01:25 2022 ] Mean test loss of 258 batches: 1.860023021697998. 
+[ Wed Sep 14 17:01:25 2022 ] Top1: 63.39% +[ Wed Sep 14 17:01:26 2022 ] Top5: 89.03% +[ Wed Sep 14 17:01:26 2022 ] Training epoch: 102 +[ Wed Sep 14 17:02:25 2022 ] Batch(76/123) done. Loss: 0.0103 lr:0.000100 network_time: 0.0254 +[ Wed Sep 14 17:02:59 2022 ] Eval epoch: 102 +[ Wed Sep 14 17:03:31 2022 ] Mean test loss of 258 batches: 1.8325798511505127. +[ Wed Sep 14 17:03:31 2022 ] Top1: 63.43% +[ Wed Sep 14 17:03:31 2022 ] Top5: 89.12% +[ Wed Sep 14 17:03:31 2022 ] Training epoch: 103 +[ Wed Sep 14 17:04:13 2022 ] Batch(53/123) done. Loss: 0.0046 lr:0.000100 network_time: 0.0269 +[ Wed Sep 14 17:05:04 2022 ] Eval epoch: 103 +[ Wed Sep 14 17:05:36 2022 ] Mean test loss of 258 batches: 1.7819689512252808. +[ Wed Sep 14 17:05:36 2022 ] Top1: 64.24% +[ Wed Sep 14 17:05:36 2022 ] Top5: 89.63% +[ Wed Sep 14 17:05:36 2022 ] Training epoch: 104 +[ Wed Sep 14 17:06:02 2022 ] Batch(30/123) done. Loss: 0.0073 lr:0.000100 network_time: 0.0252 +[ Wed Sep 14 17:07:09 2022 ] Eval epoch: 104 +[ Wed Sep 14 17:07:42 2022 ] Mean test loss of 258 batches: 1.8267995119094849. +[ Wed Sep 14 17:07:42 2022 ] Top1: 63.73% +[ Wed Sep 14 17:07:42 2022 ] Top5: 89.19% +[ Wed Sep 14 17:07:42 2022 ] Training epoch: 105 +[ Wed Sep 14 17:07:51 2022 ] Batch(7/123) done. Loss: 0.0092 lr:0.000100 network_time: 0.0260 +[ Wed Sep 14 17:09:04 2022 ] Batch(107/123) done. Loss: 0.0127 lr:0.000100 network_time: 0.0264 +[ Wed Sep 14 17:09:15 2022 ] Eval epoch: 105 +[ Wed Sep 14 17:09:47 2022 ] Mean test loss of 258 batches: 1.8645508289337158. +[ Wed Sep 14 17:09:47 2022 ] Top1: 63.26% +[ Wed Sep 14 17:09:47 2022 ] Top5: 88.95% +[ Wed Sep 14 17:09:47 2022 ] Training epoch: 106 +[ Wed Sep 14 17:10:52 2022 ] Batch(84/123) done. Loss: 0.0067 lr:0.000100 network_time: 0.0281 +[ Wed Sep 14 17:11:20 2022 ] Eval epoch: 106 +[ Wed Sep 14 17:11:53 2022 ] Mean test loss of 258 batches: 1.7852859497070312. 
+[ Wed Sep 14 17:11:53 2022 ] Top1: 64.12% +[ Wed Sep 14 17:11:53 2022 ] Top5: 89.42% +[ Wed Sep 14 17:11:53 2022 ] Training epoch: 107 +[ Wed Sep 14 17:12:41 2022 ] Batch(61/123) done. Loss: 0.0074 lr:0.000100 network_time: 0.0451 +[ Wed Sep 14 17:13:26 2022 ] Eval epoch: 107 +[ Wed Sep 14 17:13:58 2022 ] Mean test loss of 258 batches: 1.8736193180084229. +[ Wed Sep 14 17:13:58 2022 ] Top1: 63.23% +[ Wed Sep 14 17:13:58 2022 ] Top5: 88.91% +[ Wed Sep 14 17:13:58 2022 ] Training epoch: 108 +[ Wed Sep 14 17:14:29 2022 ] Batch(38/123) done. Loss: 0.0282 lr:0.000100 network_time: 0.0286 +[ Wed Sep 14 17:15:31 2022 ] Eval epoch: 108 +[ Wed Sep 14 17:16:03 2022 ] Mean test loss of 258 batches: 1.816180944442749. +[ Wed Sep 14 17:16:03 2022 ] Top1: 64.07% +[ Wed Sep 14 17:16:03 2022 ] Top5: 89.38% +[ Wed Sep 14 17:16:03 2022 ] Training epoch: 109 +[ Wed Sep 14 17:16:18 2022 ] Batch(15/123) done. Loss: 0.0097 lr:0.000100 network_time: 0.0254 +[ Wed Sep 14 17:17:30 2022 ] Batch(115/123) done. Loss: 0.0088 lr:0.000100 network_time: 0.0285 +[ Wed Sep 14 17:17:36 2022 ] Eval epoch: 109 +[ Wed Sep 14 17:18:08 2022 ] Mean test loss of 258 batches: 1.824914813041687. +[ Wed Sep 14 17:18:08 2022 ] Top1: 64.07% +[ Wed Sep 14 17:18:08 2022 ] Top5: 89.18% +[ Wed Sep 14 17:18:09 2022 ] Training epoch: 110 +[ Wed Sep 14 17:19:19 2022 ] Batch(92/123) done. Loss: 0.0085 lr:0.000100 network_time: 0.0321 +[ Wed Sep 14 17:19:41 2022 ] Eval epoch: 110 +[ Wed Sep 14 17:20:14 2022 ] Mean test loss of 258 batches: 1.8336659669876099. +[ Wed Sep 14 17:20:14 2022 ] Top1: 63.71% +[ Wed Sep 14 17:20:14 2022 ] Top5: 89.11% +[ Wed Sep 14 17:20:14 2022 ] Training epoch: 111 +[ Wed Sep 14 17:21:08 2022 ] Batch(69/123) done. Loss: 0.0051 lr:0.000100 network_time: 0.0321 +[ Wed Sep 14 17:21:47 2022 ] Eval epoch: 111 +[ Wed Sep 14 17:22:19 2022 ] Mean test loss of 258 batches: 1.8149291276931763. 
+[ Wed Sep 14 17:22:19 2022 ] Top1: 64.02% +[ Wed Sep 14 17:22:19 2022 ] Top5: 89.28% +[ Wed Sep 14 17:22:19 2022 ] Training epoch: 112 +[ Wed Sep 14 17:22:57 2022 ] Batch(46/123) done. Loss: 0.0036 lr:0.000100 network_time: 0.0277 +[ Wed Sep 14 17:23:53 2022 ] Eval epoch: 112 +[ Wed Sep 14 17:24:24 2022 ] Mean test loss of 258 batches: 1.805535912513733. +[ Wed Sep 14 17:24:25 2022 ] Top1: 63.88% +[ Wed Sep 14 17:24:25 2022 ] Top5: 89.33% +[ Wed Sep 14 17:24:25 2022 ] Training epoch: 113 +[ Wed Sep 14 17:24:45 2022 ] Batch(23/123) done. Loss: 0.0105 lr:0.000100 network_time: 0.0273 +[ Wed Sep 14 17:25:58 2022 ] Eval epoch: 113 +[ Wed Sep 14 17:26:30 2022 ] Mean test loss of 258 batches: 1.8021278381347656. +[ Wed Sep 14 17:26:31 2022 ] Top1: 63.98% +[ Wed Sep 14 17:26:31 2022 ] Top5: 89.25% +[ Wed Sep 14 17:26:31 2022 ] Training epoch: 114 +[ Wed Sep 14 17:26:34 2022 ] Batch(0/123) done. Loss: 0.0183 lr:0.000100 network_time: 0.0544 +[ Wed Sep 14 17:27:47 2022 ] Batch(100/123) done. Loss: 0.0082 lr:0.000100 network_time: 0.0264 +[ Wed Sep 14 17:28:03 2022 ] Eval epoch: 114 +[ Wed Sep 14 17:28:36 2022 ] Mean test loss of 258 batches: 1.8260608911514282. +[ Wed Sep 14 17:28:36 2022 ] Top1: 63.96% +[ Wed Sep 14 17:28:36 2022 ] Top5: 89.14% +[ Wed Sep 14 17:28:36 2022 ] Training epoch: 115 +[ Wed Sep 14 17:29:36 2022 ] Batch(77/123) done. Loss: 0.0054 lr:0.000100 network_time: 0.0308 +[ Wed Sep 14 17:30:09 2022 ] Eval epoch: 115 +[ Wed Sep 14 17:30:41 2022 ] Mean test loss of 258 batches: 1.7994446754455566. +[ Wed Sep 14 17:30:41 2022 ] Top1: 64.01% +[ Wed Sep 14 17:30:41 2022 ] Top5: 89.37% +[ Wed Sep 14 17:30:41 2022 ] Training epoch: 116 +[ Wed Sep 14 17:31:24 2022 ] Batch(54/123) done. Loss: 0.0119 lr:0.000100 network_time: 0.0250 +[ Wed Sep 14 17:32:14 2022 ] Eval epoch: 116 +[ Wed Sep 14 17:32:47 2022 ] Mean test loss of 258 batches: 1.830991506576538. 
+[ Wed Sep 14 17:32:47 2022 ] Top1: 63.83% +[ Wed Sep 14 17:32:47 2022 ] Top5: 89.22% +[ Wed Sep 14 17:32:47 2022 ] Training epoch: 117 +[ Wed Sep 14 17:33:14 2022 ] Batch(31/123) done. Loss: 0.0070 lr:0.000100 network_time: 0.0278 +[ Wed Sep 14 17:34:20 2022 ] Eval epoch: 117 +[ Wed Sep 14 17:34:53 2022 ] Mean test loss of 258 batches: 1.827774167060852. +[ Wed Sep 14 17:34:53 2022 ] Top1: 63.74% +[ Wed Sep 14 17:34:53 2022 ] Top5: 89.23% +[ Wed Sep 14 17:34:53 2022 ] Training epoch: 118 +[ Wed Sep 14 17:35:03 2022 ] Batch(8/123) done. Loss: 0.0177 lr:0.000100 network_time: 0.0267 +[ Wed Sep 14 17:36:16 2022 ] Batch(108/123) done. Loss: 0.0133 lr:0.000100 network_time: 0.0270 +[ Wed Sep 14 17:36:26 2022 ] Eval epoch: 118 +[ Wed Sep 14 17:36:59 2022 ] Mean test loss of 258 batches: 1.833808183670044. +[ Wed Sep 14 17:36:59 2022 ] Top1: 63.37% +[ Wed Sep 14 17:36:59 2022 ] Top5: 89.25% +[ Wed Sep 14 17:36:59 2022 ] Training epoch: 119 +[ Wed Sep 14 17:38:05 2022 ] Batch(85/123) done. Loss: 0.0074 lr:0.000100 network_time: 0.0282 +[ Wed Sep 14 17:38:32 2022 ] Eval epoch: 119 +[ Wed Sep 14 17:39:04 2022 ] Mean test loss of 258 batches: 1.8108863830566406. +[ Wed Sep 14 17:39:04 2022 ] Top1: 64.01% +[ Wed Sep 14 17:39:04 2022 ] Top5: 89.40% +[ Wed Sep 14 17:39:04 2022 ] Training epoch: 120 +[ Wed Sep 14 17:39:53 2022 ] Batch(62/123) done. Loss: 0.0039 lr:0.000100 network_time: 0.0308 +[ Wed Sep 14 17:40:37 2022 ] Eval epoch: 120 +[ Wed Sep 14 17:41:10 2022 ] Mean test loss of 258 batches: 1.8470932245254517. +[ Wed Sep 14 17:41:10 2022 ] Top1: 63.45% +[ Wed Sep 14 17:41:10 2022 ] Top5: 88.98% +[ Wed Sep 14 17:41:10 2022 ] Training epoch: 121 +[ Wed Sep 14 17:41:43 2022 ] Batch(39/123) done. Loss: 0.0086 lr:0.000100 network_time: 0.0267 +[ Wed Sep 14 17:42:43 2022 ] Eval epoch: 121 +[ Wed Sep 14 17:43:15 2022 ] Mean test loss of 258 batches: 1.8321284055709839. 
+[ Wed Sep 14 17:43:16 2022 ] Top1: 63.67% +[ Wed Sep 14 17:43:16 2022 ] Top5: 89.13% +[ Wed Sep 14 17:43:16 2022 ] Training epoch: 122 +[ Wed Sep 14 17:43:31 2022 ] Batch(16/123) done. Loss: 0.0431 lr:0.000100 network_time: 0.0316 +[ Wed Sep 14 17:44:44 2022 ] Batch(116/123) done. Loss: 0.0082 lr:0.000100 network_time: 0.0269 +[ Wed Sep 14 17:44:49 2022 ] Eval epoch: 122 +[ Wed Sep 14 17:45:21 2022 ] Mean test loss of 258 batches: 1.8278491497039795. +[ Wed Sep 14 17:45:21 2022 ] Top1: 63.84% +[ Wed Sep 14 17:45:21 2022 ] Top5: 89.29% +[ Wed Sep 14 17:45:21 2022 ] Training epoch: 123 +[ Wed Sep 14 17:46:33 2022 ] Batch(93/123) done. Loss: 0.0059 lr:0.000100 network_time: 0.0308 +[ Wed Sep 14 17:46:54 2022 ] Eval epoch: 123 +[ Wed Sep 14 17:47:27 2022 ] Mean test loss of 258 batches: 1.840074062347412. +[ Wed Sep 14 17:47:27 2022 ] Top1: 63.57% +[ Wed Sep 14 17:47:27 2022 ] Top5: 89.11% +[ Wed Sep 14 17:47:27 2022 ] Training epoch: 124 +[ Wed Sep 14 17:48:22 2022 ] Batch(70/123) done. Loss: 0.0087 lr:0.000100 network_time: 0.0309 +[ Wed Sep 14 17:49:00 2022 ] Eval epoch: 124 +[ Wed Sep 14 17:49:32 2022 ] Mean test loss of 258 batches: 1.8375566005706787. +[ Wed Sep 14 17:49:32 2022 ] Top1: 63.77% +[ Wed Sep 14 17:49:32 2022 ] Top5: 89.02% +[ Wed Sep 14 17:49:32 2022 ] Training epoch: 125 +[ Wed Sep 14 17:50:11 2022 ] Batch(47/123) done. Loss: 0.0099 lr:0.000100 network_time: 0.0278 +[ Wed Sep 14 17:51:06 2022 ] Eval epoch: 125 +[ Wed Sep 14 17:51:38 2022 ] Mean test loss of 258 batches: 1.7994847297668457. +[ Wed Sep 14 17:51:38 2022 ] Top1: 64.32% +[ Wed Sep 14 17:51:38 2022 ] Top5: 89.43% +[ Wed Sep 14 17:51:38 2022 ] Training epoch: 126 +[ Wed Sep 14 17:52:00 2022 ] Batch(24/123) done. Loss: 0.0080 lr:0.000100 network_time: 0.0276 +[ Wed Sep 14 17:53:12 2022 ] Eval epoch: 126 +[ Wed Sep 14 17:53:44 2022 ] Mean test loss of 258 batches: 1.8445631265640259. 
+[ Wed Sep 14 17:53:44 2022 ] Top1: 63.87% +[ Wed Sep 14 17:53:44 2022 ] Top5: 89.10% +[ Wed Sep 14 17:53:44 2022 ] Training epoch: 127 +[ Wed Sep 14 17:53:49 2022 ] Batch(1/123) done. Loss: 0.0095 lr:0.000100 network_time: 0.0316 +[ Wed Sep 14 17:55:02 2022 ] Batch(101/123) done. Loss: 0.0048 lr:0.000100 network_time: 0.0280 +[ Wed Sep 14 17:55:17 2022 ] Eval epoch: 127 +[ Wed Sep 14 17:55:50 2022 ] Mean test loss of 258 batches: 1.853245496749878. +[ Wed Sep 14 17:55:50 2022 ] Top1: 63.81% +[ Wed Sep 14 17:55:50 2022 ] Top5: 89.00% +[ Wed Sep 14 17:55:50 2022 ] Training epoch: 128 +[ Wed Sep 14 17:56:51 2022 ] Batch(78/123) done. Loss: 0.0021 lr:0.000100 network_time: 0.0309 +[ Wed Sep 14 17:57:23 2022 ] Eval epoch: 128 +[ Wed Sep 14 17:57:55 2022 ] Mean test loss of 258 batches: 1.8543254137039185. +[ Wed Sep 14 17:57:55 2022 ] Top1: 63.38% +[ Wed Sep 14 17:57:56 2022 ] Top5: 89.08% +[ Wed Sep 14 17:57:56 2022 ] Training epoch: 129 +[ Wed Sep 14 17:58:40 2022 ] Batch(55/123) done. Loss: 0.0053 lr:0.000100 network_time: 0.0322 +[ Wed Sep 14 17:59:29 2022 ] Eval epoch: 129 +[ Wed Sep 14 18:00:01 2022 ] Mean test loss of 258 batches: 1.8299524784088135. +[ Wed Sep 14 18:00:01 2022 ] Top1: 63.84% +[ Wed Sep 14 18:00:01 2022 ] Top5: 89.05% +[ Wed Sep 14 18:00:01 2022 ] Training epoch: 130 +[ Wed Sep 14 18:00:28 2022 ] Batch(32/123) done. Loss: 0.0081 lr:0.000100 network_time: 0.0266 +[ Wed Sep 14 18:01:34 2022 ] Eval epoch: 130 +[ Wed Sep 14 18:02:07 2022 ] Mean test loss of 258 batches: 1.827720046043396. +[ Wed Sep 14 18:02:07 2022 ] Top1: 63.86% +[ Wed Sep 14 18:02:07 2022 ] Top5: 89.22% +[ Wed Sep 14 18:02:07 2022 ] Training epoch: 131 +[ Wed Sep 14 18:02:18 2022 ] Batch(9/123) done. Loss: 0.0106 lr:0.000100 network_time: 0.0325 +[ Wed Sep 14 18:03:31 2022 ] Batch(109/123) done. Loss: 0.0237 lr:0.000100 network_time: 0.0271 +[ Wed Sep 14 18:03:40 2022 ] Eval epoch: 131 +[ Wed Sep 14 18:04:13 2022 ] Mean test loss of 258 batches: 1.862596869468689. 
+[ Wed Sep 14 18:04:13 2022 ] Top1: 63.49% +[ Wed Sep 14 18:04:13 2022 ] Top5: 88.98% +[ Wed Sep 14 18:04:13 2022 ] Training epoch: 132 +[ Wed Sep 14 18:05:19 2022 ] Batch(86/123) done. Loss: 0.0090 lr:0.000100 network_time: 0.0276 +[ Wed Sep 14 18:05:46 2022 ] Eval epoch: 132 +[ Wed Sep 14 18:06:18 2022 ] Mean test loss of 258 batches: 1.8246906995773315. +[ Wed Sep 14 18:06:18 2022 ] Top1: 63.55% +[ Wed Sep 14 18:06:18 2022 ] Top5: 89.17% +[ Wed Sep 14 18:06:18 2022 ] Training epoch: 133 +[ Wed Sep 14 18:07:08 2022 ] Batch(63/123) done. Loss: 0.0128 lr:0.000100 network_time: 0.0278 +[ Wed Sep 14 18:07:52 2022 ] Eval epoch: 133 +[ Wed Sep 14 18:08:24 2022 ] Mean test loss of 258 batches: 1.843893051147461. +[ Wed Sep 14 18:08:24 2022 ] Top1: 63.65% +[ Wed Sep 14 18:08:24 2022 ] Top5: 88.95% +[ Wed Sep 14 18:08:24 2022 ] Training epoch: 134 +[ Wed Sep 14 18:08:57 2022 ] Batch(40/123) done. Loss: 0.0098 lr:0.000100 network_time: 0.0323 +[ Wed Sep 14 18:09:57 2022 ] Eval epoch: 134 +[ Wed Sep 14 18:10:30 2022 ] Mean test loss of 258 batches: 1.8185917139053345. +[ Wed Sep 14 18:10:30 2022 ] Top1: 63.82% +[ Wed Sep 14 18:10:30 2022 ] Top5: 89.31% +[ Wed Sep 14 18:10:30 2022 ] Training epoch: 135 +[ Wed Sep 14 18:10:46 2022 ] Batch(17/123) done. Loss: 0.0097 lr:0.000100 network_time: 0.0330 +[ Wed Sep 14 18:11:59 2022 ] Batch(117/123) done. Loss: 0.0066 lr:0.000100 network_time: 0.0273 +[ Wed Sep 14 18:12:03 2022 ] Eval epoch: 135 +[ Wed Sep 14 18:12:36 2022 ] Mean test loss of 258 batches: 1.8323661088943481. +[ Wed Sep 14 18:12:36 2022 ] Top1: 63.80% +[ Wed Sep 14 18:12:36 2022 ] Top5: 89.21% +[ Wed Sep 14 18:12:36 2022 ] Training epoch: 136 +[ Wed Sep 14 18:13:49 2022 ] Batch(94/123) done. Loss: 0.0069 lr:0.000100 network_time: 0.0274 +[ Wed Sep 14 18:14:09 2022 ] Eval epoch: 136 +[ Wed Sep 14 18:14:42 2022 ] Mean test loss of 258 batches: 1.8389002084732056. 
+[ Wed Sep 14 18:14:42 2022 ] Top1: 63.81% +[ Wed Sep 14 18:14:42 2022 ] Top5: 89.14% +[ Wed Sep 14 18:14:42 2022 ] Training epoch: 137 +[ Wed Sep 14 18:15:37 2022 ] Batch(71/123) done. Loss: 0.0042 lr:0.000100 network_time: 0.0273 +[ Wed Sep 14 18:16:15 2022 ] Eval epoch: 137 +[ Wed Sep 14 18:16:47 2022 ] Mean test loss of 258 batches: 1.789794683456421. +[ Wed Sep 14 18:16:47 2022 ] Top1: 63.95% +[ Wed Sep 14 18:16:47 2022 ] Top5: 89.40% +[ Wed Sep 14 18:16:47 2022 ] Training epoch: 138 +[ Wed Sep 14 18:17:26 2022 ] Batch(48/123) done. Loss: 0.0349 lr:0.000100 network_time: 0.0269 +[ Wed Sep 14 18:18:20 2022 ] Eval epoch: 138 +[ Wed Sep 14 18:18:53 2022 ] Mean test loss of 258 batches: 1.8242590427398682. +[ Wed Sep 14 18:18:53 2022 ] Top1: 63.83% +[ Wed Sep 14 18:18:53 2022 ] Top5: 89.13% +[ Wed Sep 14 18:18:53 2022 ] Training epoch: 139 +[ Wed Sep 14 18:19:15 2022 ] Batch(25/123) done. Loss: 0.0128 lr:0.000100 network_time: 0.0299 +[ Wed Sep 14 18:20:26 2022 ] Eval epoch: 139 +[ Wed Sep 14 18:20:59 2022 ] Mean test loss of 258 batches: 1.815767526626587. +[ Wed Sep 14 18:20:59 2022 ] Top1: 63.94% +[ Wed Sep 14 18:20:59 2022 ] Top5: 89.37% +[ Wed Sep 14 18:20:59 2022 ] Training epoch: 140 +[ Wed Sep 14 18:21:04 2022 ] Batch(2/123) done. Loss: 0.0084 lr:0.000100 network_time: 0.0263 +[ Wed Sep 14 18:22:17 2022 ] Batch(102/123) done. Loss: 0.0049 lr:0.000100 network_time: 0.0259 +[ Wed Sep 14 18:22:32 2022 ] Eval epoch: 140 +[ Wed Sep 14 18:23:05 2022 ] Mean test loss of 258 batches: 1.8298001289367676. 
+[ Wed Sep 14 18:23:05 2022 ] Top1: 63.69% +[ Wed Sep 14 18:23:05 2022 ] Top5: 89.31% diff --git a/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_xsub/shift_gcn.py b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_xsub/shift_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..0731e82fdb8bd0ae2e4c4ef4f29f7aab0c458bf4 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xsub/ntu_ShiftGCN_joint_xsub/shift_gcn.py @@ -0,0 +1,216 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math + +import sys +sys.path.append("./model/Temporal_shift/") + +from cuda.shift import Shift + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class Shift_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(Shift_tcn, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.bn = nn.BatchNorm2d(in_channels) + self.bn2 = nn.BatchNorm2d(in_channels) + bn_init(self.bn2, 1) + self.relu = nn.ReLU(inplace=True) + self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1) + self.shift_out = Shift(channel=out_channels, 
stride=stride, init_scale=1) + + self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1) + nn.init.kaiming_normal(self.temporal_linear.weight, mode='fan_out') + + def forward(self, x): + x = self.bn(x) + # shift1 + x = self.shift_in(x) + x = self.temporal_linear(x) + x = self.relu(x) + # shift2 + x = self.shift_out(x) + x = self.bn2(x) + return x + + +class Shift_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3): + super(Shift_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0,math.sqrt(1.0/out_channels)) + + self.Linear_bias = nn.Parameter(torch.zeros(1,1,out_channels,requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Linear_bias, 0) + + self.Feature_Mask = nn.Parameter(torch.ones(1,25,in_channels, requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Feature_Mask, 0) + + self.bn = nn.BatchNorm1d(25*out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + + index_array = np.empty(25*in_channels).astype(np.int) + for i in range(25): + for j in range(in_channels): + index_array[i*in_channels + j] = (i*in_channels + j + j*in_channels)%(in_channels*25) + self.shift_in = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + index_array = np.empty(25*out_channels).astype(np.int) + for i in range(25): + for j in range(out_channels): + index_array[i*out_channels + j] = (i*out_channels + j - j*out_channels)%(out_channels*25) + self.shift_out = 
nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + + def forward(self, x0): + n, c, t, v = x0.size() + x = x0.permute(0,2,3,1).contiguous() + + # shift1 + x = x.view(n*t,v*c) + x = torch.index_select(x, 1, self.shift_in) + x = x.view(n*t,v,c) + x = x * (torch.tanh(self.Feature_Mask)+1) + + x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous() # nt,v,c + x = x + self.Linear_bias + + # shift2 + x = x.view(n*t,-1) + x = torch.index_select(x, 1, self.shift_out) + x = self.bn(x) + x = x.view(n,t,v,self.out_channels).permute(0,3,1,2) # n,c,t,v + + x = x + self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = Shift_gcn(in_channels, out_channels, A) + self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride) + self.relu = nn.ReLU() + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + else: + self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride) + + def forward(self, x): + x = self.tcn1(self.gcn1(x)) + self.residual(x) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A) + self.l3 = TCN_GCN_unit(64, 64, A) + self.l4 = TCN_GCN_unit(64, 64, A) + self.l5 = TCN_GCN_unit(64, 128, A, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A) + self.l7 = TCN_GCN_unit(128, 128, A) + self.l8 = TCN_GCN_unit(128, 256, A, stride=2) + self.l9 = TCN_GCN_unit(256, 256, 
A) + self.l10 = TCN_GCN_unit(256, 256, A) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x) + x = self.l2(x) + x = self.l3(x) + x = self.l4(x) + x = self.l5(x) + x = self.l6(x) + x = self.l7(x) + x = self.l8(x) + x = self.l9(x) + x = self.l10(x) + + # N*M,C,T,V + c_new = x.size(1) + x = x.view(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_motion_xview/config.yaml b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_motion_xview/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..045cbeaa47391d2de3325e60e5eae34b471368a1 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_motion_xview/config.yaml @@ -0,0 +1,56 @@ +Experiment_name: ntu_ShiftGCN_bone_motion_xview +base_lr: 0.1 +batch_size: 64 +config: ./config/nturgbd-cross-view/train_bone_motion.yaml +device: +- 2 +- 3 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_ShiftGCN_bone_motion_xview +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_bone_motion.npy + label_path: 
/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_bone_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_ShiftGCN_bone_motion_xview diff --git a/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_motion_xview/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_motion_xview/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..1eaef10c6a0428090c6605f2614a6acee18d4c9e --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_motion_xview/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:442f8022455e68553c84d6873a7ba2458ea90e3fe15beec0070d7d45b01ef029 +size 5718404 diff --git a/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_motion_xview/log.txt b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_motion_xview/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..3af9e4edce329cc860db193cba6c8c102762d4fb --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_motion_xview/log.txt @@ -0,0 +1,875 @@ +[ Thu Sep 15 09:03:57 2022 ] Parameters: +{'work_dir': './work_dir/ntu_ShiftGCN_bone_motion_xview', 'model_saved_name': './save_models/ntu_ShiftGCN_bone_motion_xview', 'Experiment_name': 'ntu_ShiftGCN_bone_motion_xview', 'config': './config/nturgbd-cross-view/train_bone_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': 
'/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_bone_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [2, 3], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Thu Sep 15 09:03:57 2022 ] Training epoch: 1 +[ Thu Sep 15 09:05:15 2022 ] Batch(99/123) done. Loss: 2.6131 lr:0.100000 network_time: 0.0251 +[ Thu Sep 15 09:05:32 2022 ] Eval epoch: 1 +[ Thu Sep 15 09:06:09 2022 ] Mean test loss of 296 batches: 6.484724998474121. +[ Thu Sep 15 09:06:09 2022 ] Top1: 11.21% +[ Thu Sep 15 09:06:09 2022 ] Top5: 35.48% +[ Thu Sep 15 09:06:09 2022 ] Training epoch: 2 +[ Thu Sep 15 09:07:08 2022 ] Batch(76/123) done. Loss: 2.1897 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 09:07:42 2022 ] Eval epoch: 2 +[ Thu Sep 15 09:08:19 2022 ] Mean test loss of 296 batches: 4.131118297576904. +[ Thu Sep 15 09:08:19 2022 ] Top1: 21.02% +[ Thu Sep 15 09:08:20 2022 ] Top5: 47.50% +[ Thu Sep 15 09:08:20 2022 ] Training epoch: 3 +[ Thu Sep 15 09:09:02 2022 ] Batch(53/123) done. Loss: 2.3481 lr:0.100000 network_time: 0.0302 +[ Thu Sep 15 09:09:52 2022 ] Eval epoch: 3 +[ Thu Sep 15 09:10:30 2022 ] Mean test loss of 296 batches: 3.105668783187866. 
+[ Thu Sep 15 09:10:30 2022 ] Top1: 28.74% +[ Thu Sep 15 09:10:30 2022 ] Top5: 61.51% +[ Thu Sep 15 09:10:30 2022 ] Training epoch: 4 +[ Thu Sep 15 09:10:55 2022 ] Batch(30/123) done. Loss: 1.0917 lr:0.100000 network_time: 0.0334 +[ Thu Sep 15 09:12:02 2022 ] Eval epoch: 4 +[ Thu Sep 15 09:12:39 2022 ] Mean test loss of 296 batches: 3.0978710651397705. +[ Thu Sep 15 09:12:39 2022 ] Top1: 30.22% +[ Thu Sep 15 09:12:39 2022 ] Top5: 63.70% +[ Thu Sep 15 09:12:40 2022 ] Training epoch: 5 +[ Thu Sep 15 09:12:48 2022 ] Batch(7/123) done. Loss: 1.7507 lr:0.100000 network_time: 0.0300 +[ Thu Sep 15 09:14:01 2022 ] Batch(107/123) done. Loss: 1.0336 lr:0.100000 network_time: 0.0251 +[ Thu Sep 15 09:14:12 2022 ] Eval epoch: 5 +[ Thu Sep 15 09:14:49 2022 ] Mean test loss of 296 batches: 2.619866371154785. +[ Thu Sep 15 09:14:49 2022 ] Top1: 34.71% +[ Thu Sep 15 09:14:49 2022 ] Top5: 73.11% +[ Thu Sep 15 09:14:50 2022 ] Training epoch: 6 +[ Thu Sep 15 09:15:54 2022 ] Batch(84/123) done. Loss: 1.3840 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 09:16:22 2022 ] Eval epoch: 6 +[ Thu Sep 15 09:16:59 2022 ] Mean test loss of 296 batches: 2.070925712585449. +[ Thu Sep 15 09:16:59 2022 ] Top1: 42.54% +[ Thu Sep 15 09:16:59 2022 ] Top5: 80.90% +[ Thu Sep 15 09:16:59 2022 ] Training epoch: 7 +[ Thu Sep 15 09:17:47 2022 ] Batch(61/123) done. Loss: 0.8255 lr:0.100000 network_time: 0.0312 +[ Thu Sep 15 09:18:32 2022 ] Eval epoch: 7 +[ Thu Sep 15 09:19:09 2022 ] Mean test loss of 296 batches: 2.4881949424743652. +[ Thu Sep 15 09:19:09 2022 ] Top1: 40.03% +[ Thu Sep 15 09:19:09 2022 ] Top5: 77.36% +[ Thu Sep 15 09:19:09 2022 ] Training epoch: 8 +[ Thu Sep 15 09:19:41 2022 ] Batch(38/123) done. Loss: 1.1871 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 09:20:42 2022 ] Eval epoch: 8 +[ Thu Sep 15 09:21:19 2022 ] Mean test loss of 296 batches: 2.448838949203491. 
+[ Thu Sep 15 09:21:19 2022 ] Top1: 37.61% +[ Thu Sep 15 09:21:19 2022 ] Top5: 77.34% +[ Thu Sep 15 09:21:19 2022 ] Training epoch: 9 +[ Thu Sep 15 09:21:34 2022 ] Batch(15/123) done. Loss: 0.8302 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 09:22:46 2022 ] Batch(115/123) done. Loss: 1.0505 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 09:22:52 2022 ] Eval epoch: 9 +[ Thu Sep 15 09:23:29 2022 ] Mean test loss of 296 batches: 2.613361358642578. +[ Thu Sep 15 09:23:29 2022 ] Top1: 40.27% +[ Thu Sep 15 09:23:29 2022 ] Top5: 80.23% +[ Thu Sep 15 09:23:29 2022 ] Training epoch: 10 +[ Thu Sep 15 09:24:39 2022 ] Batch(92/123) done. Loss: 0.7750 lr:0.100000 network_time: 0.0461 +[ Thu Sep 15 09:25:01 2022 ] Eval epoch: 10 +[ Thu Sep 15 09:25:39 2022 ] Mean test loss of 296 batches: 3.1923351287841797. +[ Thu Sep 15 09:25:39 2022 ] Top1: 36.97% +[ Thu Sep 15 09:25:39 2022 ] Top5: 69.45% +[ Thu Sep 15 09:25:39 2022 ] Training epoch: 11 +[ Thu Sep 15 09:26:33 2022 ] Batch(69/123) done. Loss: 0.6639 lr:0.100000 network_time: 0.0260 +[ Thu Sep 15 09:27:12 2022 ] Eval epoch: 11 +[ Thu Sep 15 09:27:48 2022 ] Mean test loss of 296 batches: 2.489170789718628. +[ Thu Sep 15 09:27:49 2022 ] Top1: 41.99% +[ Thu Sep 15 09:27:49 2022 ] Top5: 78.26% +[ Thu Sep 15 09:27:49 2022 ] Training epoch: 12 +[ Thu Sep 15 09:28:26 2022 ] Batch(46/123) done. Loss: 0.6471 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 09:29:21 2022 ] Eval epoch: 12 +[ Thu Sep 15 09:29:59 2022 ] Mean test loss of 296 batches: 2.168633460998535. +[ Thu Sep 15 09:29:59 2022 ] Top1: 48.05% +[ Thu Sep 15 09:29:59 2022 ] Top5: 85.01% +[ Thu Sep 15 09:29:59 2022 ] Training epoch: 13 +[ Thu Sep 15 09:30:20 2022 ] Batch(23/123) done. Loss: 0.8013 lr:0.100000 network_time: 0.0254 +[ Thu Sep 15 09:31:31 2022 ] Eval epoch: 13 +[ Thu Sep 15 09:32:08 2022 ] Mean test loss of 296 batches: 2.487778902053833. 
+[ Thu Sep 15 09:32:09 2022 ] Top1: 43.59% +[ Thu Sep 15 09:32:09 2022 ] Top5: 80.48% +[ Thu Sep 15 09:32:09 2022 ] Training epoch: 14 +[ Thu Sep 15 09:32:13 2022 ] Batch(0/123) done. Loss: 0.4309 lr:0.100000 network_time: 0.0530 +[ Thu Sep 15 09:33:25 2022 ] Batch(100/123) done. Loss: 0.7143 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 09:33:42 2022 ] Eval epoch: 14 +[ Thu Sep 15 09:34:19 2022 ] Mean test loss of 296 batches: 1.8329846858978271. +[ Thu Sep 15 09:34:19 2022 ] Top1: 55.72% +[ Thu Sep 15 09:34:19 2022 ] Top5: 86.46% +[ Thu Sep 15 09:34:19 2022 ] Training epoch: 15 +[ Thu Sep 15 09:35:19 2022 ] Batch(77/123) done. Loss: 0.6663 lr:0.100000 network_time: 0.0354 +[ Thu Sep 15 09:35:52 2022 ] Eval epoch: 15 +[ Thu Sep 15 09:36:29 2022 ] Mean test loss of 296 batches: 2.190173387527466. +[ Thu Sep 15 09:36:29 2022 ] Top1: 49.88% +[ Thu Sep 15 09:36:29 2022 ] Top5: 81.67% +[ Thu Sep 15 09:36:29 2022 ] Training epoch: 16 +[ Thu Sep 15 09:37:12 2022 ] Batch(54/123) done. Loss: 0.6985 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 09:38:02 2022 ] Eval epoch: 16 +[ Thu Sep 15 09:38:39 2022 ] Mean test loss of 296 batches: 3.664393663406372. +[ Thu Sep 15 09:38:39 2022 ] Top1: 35.97% +[ Thu Sep 15 09:38:39 2022 ] Top5: 72.74% +[ Thu Sep 15 09:38:39 2022 ] Training epoch: 17 +[ Thu Sep 15 09:39:06 2022 ] Batch(31/123) done. Loss: 0.4079 lr:0.100000 network_time: 0.0263 +[ Thu Sep 15 09:40:12 2022 ] Eval epoch: 17 +[ Thu Sep 15 09:40:49 2022 ] Mean test loss of 296 batches: 2.4024436473846436. +[ Thu Sep 15 09:40:49 2022 ] Top1: 46.30% +[ Thu Sep 15 09:40:49 2022 ] Top5: 84.18% +[ Thu Sep 15 09:40:49 2022 ] Training epoch: 18 +[ Thu Sep 15 09:40:59 2022 ] Batch(8/123) done. Loss: 0.4557 lr:0.100000 network_time: 0.0354 +[ Thu Sep 15 09:42:12 2022 ] Batch(108/123) done. Loss: 0.5491 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 09:42:22 2022 ] Eval epoch: 18 +[ Thu Sep 15 09:42:59 2022 ] Mean test loss of 296 batches: 2.2452008724212646. 
+[ Thu Sep 15 09:42:59 2022 ] Top1: 50.81% +[ Thu Sep 15 09:42:59 2022 ] Top5: 85.67% +[ Thu Sep 15 09:42:59 2022 ] Training epoch: 19 +[ Thu Sep 15 09:44:05 2022 ] Batch(85/123) done. Loss: 0.5944 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 09:44:32 2022 ] Eval epoch: 19 +[ Thu Sep 15 09:45:10 2022 ] Mean test loss of 296 batches: 2.6345906257629395. +[ Thu Sep 15 09:45:10 2022 ] Top1: 46.21% +[ Thu Sep 15 09:45:10 2022 ] Top5: 81.68% +[ Thu Sep 15 09:45:10 2022 ] Training epoch: 20 +[ Thu Sep 15 09:45:59 2022 ] Batch(62/123) done. Loss: 0.4808 lr:0.100000 network_time: 0.0317 +[ Thu Sep 15 09:46:43 2022 ] Eval epoch: 20 +[ Thu Sep 15 09:47:20 2022 ] Mean test loss of 296 batches: 2.1439366340637207. +[ Thu Sep 15 09:47:20 2022 ] Top1: 48.37% +[ Thu Sep 15 09:47:20 2022 ] Top5: 83.58% +[ Thu Sep 15 09:47:20 2022 ] Training epoch: 21 +[ Thu Sep 15 09:47:53 2022 ] Batch(39/123) done. Loss: 0.4347 lr:0.100000 network_time: 0.0258 +[ Thu Sep 15 09:48:53 2022 ] Eval epoch: 21 +[ Thu Sep 15 09:49:30 2022 ] Mean test loss of 296 batches: 2.1287572383880615. +[ Thu Sep 15 09:49:30 2022 ] Top1: 51.27% +[ Thu Sep 15 09:49:30 2022 ] Top5: 87.12% +[ Thu Sep 15 09:49:30 2022 ] Training epoch: 22 +[ Thu Sep 15 09:49:46 2022 ] Batch(16/123) done. Loss: 0.4875 lr:0.100000 network_time: 0.0281 +[ Thu Sep 15 09:50:58 2022 ] Batch(116/123) done. Loss: 0.6139 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 09:51:03 2022 ] Eval epoch: 22 +[ Thu Sep 15 09:51:40 2022 ] Mean test loss of 296 batches: 3.064183473587036. +[ Thu Sep 15 09:51:40 2022 ] Top1: 41.69% +[ Thu Sep 15 09:51:40 2022 ] Top5: 72.06% +[ Thu Sep 15 09:51:40 2022 ] Training epoch: 23 +[ Thu Sep 15 09:52:52 2022 ] Batch(93/123) done. Loss: 0.2867 lr:0.100000 network_time: 0.0262 +[ Thu Sep 15 09:53:13 2022 ] Eval epoch: 23 +[ Thu Sep 15 09:53:50 2022 ] Mean test loss of 296 batches: 2.9143168926239014. 
+[ Thu Sep 15 09:53:50 2022 ] Top1: 45.10% +[ Thu Sep 15 09:53:50 2022 ] Top5: 77.17% +[ Thu Sep 15 09:53:51 2022 ] Training epoch: 24 +[ Thu Sep 15 09:54:46 2022 ] Batch(70/123) done. Loss: 0.2261 lr:0.100000 network_time: 0.0319 +[ Thu Sep 15 09:55:24 2022 ] Eval epoch: 24 +[ Thu Sep 15 09:56:01 2022 ] Mean test loss of 296 batches: 2.717808246612549. +[ Thu Sep 15 09:56:01 2022 ] Top1: 46.10% +[ Thu Sep 15 09:56:01 2022 ] Top5: 79.62% +[ Thu Sep 15 09:56:01 2022 ] Training epoch: 25 +[ Thu Sep 15 09:56:40 2022 ] Batch(47/123) done. Loss: 0.3114 lr:0.100000 network_time: 0.0306 +[ Thu Sep 15 09:57:34 2022 ] Eval epoch: 25 +[ Thu Sep 15 09:58:12 2022 ] Mean test loss of 296 batches: 2.2825570106506348. +[ Thu Sep 15 09:58:12 2022 ] Top1: 49.15% +[ Thu Sep 15 09:58:12 2022 ] Top5: 84.54% +[ Thu Sep 15 09:58:12 2022 ] Training epoch: 26 +[ Thu Sep 15 09:58:34 2022 ] Batch(24/123) done. Loss: 0.2909 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 09:59:45 2022 ] Eval epoch: 26 +[ Thu Sep 15 10:00:23 2022 ] Mean test loss of 296 batches: 2.4419898986816406. +[ Thu Sep 15 10:00:23 2022 ] Top1: 49.13% +[ Thu Sep 15 10:00:23 2022 ] Top5: 83.83% +[ Thu Sep 15 10:00:23 2022 ] Training epoch: 27 +[ Thu Sep 15 10:00:28 2022 ] Batch(1/123) done. Loss: 0.2238 lr:0.100000 network_time: 0.0228 +[ Thu Sep 15 10:01:40 2022 ] Batch(101/123) done. Loss: 0.3946 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 10:01:56 2022 ] Eval epoch: 27 +[ Thu Sep 15 10:02:33 2022 ] Mean test loss of 296 batches: 1.6155883073806763. +[ Thu Sep 15 10:02:34 2022 ] Top1: 60.64% +[ Thu Sep 15 10:02:34 2022 ] Top5: 91.22% +[ Thu Sep 15 10:02:34 2022 ] Training epoch: 28 +[ Thu Sep 15 10:03:34 2022 ] Batch(78/123) done. Loss: 0.2941 lr:0.100000 network_time: 0.0258 +[ Thu Sep 15 10:04:06 2022 ] Eval epoch: 28 +[ Thu Sep 15 10:04:44 2022 ] Mean test loss of 296 batches: 1.4993915557861328. 
+[ Thu Sep 15 10:04:44 2022 ] Top1: 62.62% +[ Thu Sep 15 10:04:44 2022 ] Top5: 90.71% +[ Thu Sep 15 10:04:44 2022 ] Training epoch: 29 +[ Thu Sep 15 10:05:28 2022 ] Batch(55/123) done. Loss: 0.1900 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 10:06:17 2022 ] Eval epoch: 29 +[ Thu Sep 15 10:06:54 2022 ] Mean test loss of 296 batches: 3.2341105937957764. +[ Thu Sep 15 10:06:54 2022 ] Top1: 42.87% +[ Thu Sep 15 10:06:54 2022 ] Top5: 79.10% +[ Thu Sep 15 10:06:54 2022 ] Training epoch: 30 +[ Thu Sep 15 10:07:22 2022 ] Batch(32/123) done. Loss: 0.1860 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 10:08:27 2022 ] Eval epoch: 30 +[ Thu Sep 15 10:09:04 2022 ] Mean test loss of 296 batches: 2.356532573699951. +[ Thu Sep 15 10:09:04 2022 ] Top1: 53.23% +[ Thu Sep 15 10:09:04 2022 ] Top5: 87.02% +[ Thu Sep 15 10:09:05 2022 ] Training epoch: 31 +[ Thu Sep 15 10:09:15 2022 ] Batch(9/123) done. Loss: 0.2974 lr:0.100000 network_time: 0.0302 +[ Thu Sep 15 10:10:28 2022 ] Batch(109/123) done. Loss: 0.3141 lr:0.100000 network_time: 0.0280 +[ Thu Sep 15 10:10:37 2022 ] Eval epoch: 31 +[ Thu Sep 15 10:11:15 2022 ] Mean test loss of 296 batches: 1.9594652652740479. +[ Thu Sep 15 10:11:15 2022 ] Top1: 59.25% +[ Thu Sep 15 10:11:15 2022 ] Top5: 89.41% +[ Thu Sep 15 10:11:15 2022 ] Training epoch: 32 +[ Thu Sep 15 10:12:21 2022 ] Batch(86/123) done. Loss: 0.4962 lr:0.100000 network_time: 0.0281 +[ Thu Sep 15 10:12:47 2022 ] Eval epoch: 32 +[ Thu Sep 15 10:13:25 2022 ] Mean test loss of 296 batches: 2.337282657623291. +[ Thu Sep 15 10:13:25 2022 ] Top1: 56.34% +[ Thu Sep 15 10:13:25 2022 ] Top5: 85.71% +[ Thu Sep 15 10:13:25 2022 ] Training epoch: 33 +[ Thu Sep 15 10:14:15 2022 ] Batch(63/123) done. Loss: 0.2910 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 10:14:58 2022 ] Eval epoch: 33 +[ Thu Sep 15 10:15:35 2022 ] Mean test loss of 296 batches: 2.1571309566497803. 
+[ Thu Sep 15 10:15:35 2022 ] Top1: 54.95% +[ Thu Sep 15 10:15:35 2022 ] Top5: 86.98% +[ Thu Sep 15 10:15:35 2022 ] Training epoch: 34 +[ Thu Sep 15 10:16:08 2022 ] Batch(40/123) done. Loss: 0.2236 lr:0.100000 network_time: 0.0301 +[ Thu Sep 15 10:17:08 2022 ] Eval epoch: 34 +[ Thu Sep 15 10:17:45 2022 ] Mean test loss of 296 batches: 2.1891050338745117. +[ Thu Sep 15 10:17:45 2022 ] Top1: 53.70% +[ Thu Sep 15 10:17:45 2022 ] Top5: 86.30% +[ Thu Sep 15 10:17:46 2022 ] Training epoch: 35 +[ Thu Sep 15 10:18:02 2022 ] Batch(17/123) done. Loss: 0.2902 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 10:19:14 2022 ] Batch(117/123) done. Loss: 0.3418 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 10:19:18 2022 ] Eval epoch: 35 +[ Thu Sep 15 10:19:55 2022 ] Mean test loss of 296 batches: 2.0238428115844727. +[ Thu Sep 15 10:19:55 2022 ] Top1: 55.77% +[ Thu Sep 15 10:19:55 2022 ] Top5: 88.44% +[ Thu Sep 15 10:19:55 2022 ] Training epoch: 36 +[ Thu Sep 15 10:21:07 2022 ] Batch(94/123) done. Loss: 0.1522 lr:0.100000 network_time: 0.0259 +[ Thu Sep 15 10:21:28 2022 ] Eval epoch: 36 +[ Thu Sep 15 10:22:05 2022 ] Mean test loss of 296 batches: 4.892279624938965. +[ Thu Sep 15 10:22:05 2022 ] Top1: 29.76% +[ Thu Sep 15 10:22:06 2022 ] Top5: 57.60% +[ Thu Sep 15 10:22:06 2022 ] Training epoch: 37 +[ Thu Sep 15 10:23:01 2022 ] Batch(71/123) done. Loss: 0.1705 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 10:23:39 2022 ] Eval epoch: 37 +[ Thu Sep 15 10:24:16 2022 ] Mean test loss of 296 batches: 2.313214063644409. +[ Thu Sep 15 10:24:16 2022 ] Top1: 56.13% +[ Thu Sep 15 10:24:16 2022 ] Top5: 88.47% +[ Thu Sep 15 10:24:16 2022 ] Training epoch: 38 +[ Thu Sep 15 10:24:55 2022 ] Batch(48/123) done. Loss: 0.3477 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 10:25:49 2022 ] Eval epoch: 38 +[ Thu Sep 15 10:26:26 2022 ] Mean test loss of 296 batches: 2.116729736328125. 
+[ Thu Sep 15 10:26:26 2022 ] Top1: 57.32% +[ Thu Sep 15 10:26:26 2022 ] Top5: 87.82% +[ Thu Sep 15 10:26:26 2022 ] Training epoch: 39 +[ Thu Sep 15 10:26:48 2022 ] Batch(25/123) done. Loss: 0.1691 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 10:27:59 2022 ] Eval epoch: 39 +[ Thu Sep 15 10:28:37 2022 ] Mean test loss of 296 batches: 3.156540632247925. +[ Thu Sep 15 10:28:37 2022 ] Top1: 46.70% +[ Thu Sep 15 10:28:37 2022 ] Top5: 81.56% +[ Thu Sep 15 10:28:37 2022 ] Training epoch: 40 +[ Thu Sep 15 10:28:43 2022 ] Batch(2/123) done. Loss: 0.2628 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 10:29:55 2022 ] Batch(102/123) done. Loss: 0.2880 lr:0.100000 network_time: 0.0305 +[ Thu Sep 15 10:30:10 2022 ] Eval epoch: 40 +[ Thu Sep 15 10:30:47 2022 ] Mean test loss of 296 batches: 3.3064286708831787. +[ Thu Sep 15 10:30:48 2022 ] Top1: 46.53% +[ Thu Sep 15 10:30:48 2022 ] Top5: 79.17% +[ Thu Sep 15 10:30:48 2022 ] Training epoch: 41 +[ Thu Sep 15 10:31:49 2022 ] Batch(79/123) done. Loss: 0.3381 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 10:32:20 2022 ] Eval epoch: 41 +[ Thu Sep 15 10:32:57 2022 ] Mean test loss of 296 batches: 2.0563619136810303. +[ Thu Sep 15 10:32:57 2022 ] Top1: 56.81% +[ Thu Sep 15 10:32:57 2022 ] Top5: 87.28% +[ Thu Sep 15 10:32:57 2022 ] Training epoch: 42 +[ Thu Sep 15 10:33:42 2022 ] Batch(56/123) done. Loss: 0.6265 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 10:34:30 2022 ] Eval epoch: 42 +[ Thu Sep 15 10:35:07 2022 ] Mean test loss of 296 batches: 2.2891159057617188. +[ Thu Sep 15 10:35:07 2022 ] Top1: 53.65% +[ Thu Sep 15 10:35:07 2022 ] Top5: 86.78% +[ Thu Sep 15 10:35:07 2022 ] Training epoch: 43 +[ Thu Sep 15 10:35:35 2022 ] Batch(33/123) done. Loss: 0.0740 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 10:36:40 2022 ] Eval epoch: 43 +[ Thu Sep 15 10:37:17 2022 ] Mean test loss of 296 batches: 2.506985664367676. 
+[ Thu Sep 15 10:37:17 2022 ] Top1: 52.06% +[ Thu Sep 15 10:37:17 2022 ] Top5: 83.70% +[ Thu Sep 15 10:37:17 2022 ] Training epoch: 44 +[ Thu Sep 15 10:37:28 2022 ] Batch(10/123) done. Loss: 0.1948 lr:0.100000 network_time: 0.0254 +[ Thu Sep 15 10:38:41 2022 ] Batch(110/123) done. Loss: 0.1420 lr:0.100000 network_time: 0.0311 +[ Thu Sep 15 10:38:50 2022 ] Eval epoch: 44 +[ Thu Sep 15 10:39:27 2022 ] Mean test loss of 296 batches: 2.3365886211395264. +[ Thu Sep 15 10:39:27 2022 ] Top1: 54.13% +[ Thu Sep 15 10:39:27 2022 ] Top5: 86.19% +[ Thu Sep 15 10:39:27 2022 ] Training epoch: 45 +[ Thu Sep 15 10:40:34 2022 ] Batch(87/123) done. Loss: 0.1868 lr:0.100000 network_time: 0.0317 +[ Thu Sep 15 10:40:59 2022 ] Eval epoch: 45 +[ Thu Sep 15 10:41:36 2022 ] Mean test loss of 296 batches: 1.9444831609725952. +[ Thu Sep 15 10:41:37 2022 ] Top1: 56.08% +[ Thu Sep 15 10:41:37 2022 ] Top5: 88.44% +[ Thu Sep 15 10:41:37 2022 ] Training epoch: 46 +[ Thu Sep 15 10:42:27 2022 ] Batch(64/123) done. Loss: 0.1241 lr:0.100000 network_time: 0.0432 +[ Thu Sep 15 10:43:09 2022 ] Eval epoch: 46 +[ Thu Sep 15 10:43:46 2022 ] Mean test loss of 296 batches: 2.809426784515381. +[ Thu Sep 15 10:43:47 2022 ] Top1: 50.92% +[ Thu Sep 15 10:43:47 2022 ] Top5: 85.25% +[ Thu Sep 15 10:43:47 2022 ] Training epoch: 47 +[ Thu Sep 15 10:44:20 2022 ] Batch(41/123) done. Loss: 0.1681 lr:0.100000 network_time: 0.0275 +[ Thu Sep 15 10:45:19 2022 ] Eval epoch: 47 +[ Thu Sep 15 10:45:56 2022 ] Mean test loss of 296 batches: 1.7866301536560059. +[ Thu Sep 15 10:45:56 2022 ] Top1: 58.79% +[ Thu Sep 15 10:45:56 2022 ] Top5: 89.96% +[ Thu Sep 15 10:45:56 2022 ] Training epoch: 48 +[ Thu Sep 15 10:46:13 2022 ] Batch(18/123) done. Loss: 0.1326 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 10:47:26 2022 ] Batch(118/123) done. Loss: 0.1227 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 10:47:29 2022 ] Eval epoch: 48 +[ Thu Sep 15 10:48:06 2022 ] Mean test loss of 296 batches: 2.019131898880005. 
+[ Thu Sep 15 10:48:06 2022 ] Top1: 57.35% +[ Thu Sep 15 10:48:06 2022 ] Top5: 87.56% +[ Thu Sep 15 10:48:06 2022 ] Training epoch: 49 +[ Thu Sep 15 10:49:19 2022 ] Batch(95/123) done. Loss: 0.0989 lr:0.100000 network_time: 0.0324 +[ Thu Sep 15 10:49:39 2022 ] Eval epoch: 49 +[ Thu Sep 15 10:50:16 2022 ] Mean test loss of 296 batches: 2.910382032394409. +[ Thu Sep 15 10:50:16 2022 ] Top1: 48.01% +[ Thu Sep 15 10:50:16 2022 ] Top5: 81.00% +[ Thu Sep 15 10:50:16 2022 ] Training epoch: 50 +[ Thu Sep 15 10:51:12 2022 ] Batch(72/123) done. Loss: 0.1728 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 10:51:49 2022 ] Eval epoch: 50 +[ Thu Sep 15 10:52:26 2022 ] Mean test loss of 296 batches: 2.7986414432525635. +[ Thu Sep 15 10:52:26 2022 ] Top1: 51.86% +[ Thu Sep 15 10:52:26 2022 ] Top5: 84.63% +[ Thu Sep 15 10:52:26 2022 ] Training epoch: 51 +[ Thu Sep 15 10:53:06 2022 ] Batch(49/123) done. Loss: 0.2222 lr:0.100000 network_time: 0.0280 +[ Thu Sep 15 10:53:59 2022 ] Eval epoch: 51 +[ Thu Sep 15 10:54:36 2022 ] Mean test loss of 296 batches: 1.8727854490280151. +[ Thu Sep 15 10:54:36 2022 ] Top1: 61.93% +[ Thu Sep 15 10:54:36 2022 ] Top5: 89.79% +[ Thu Sep 15 10:54:36 2022 ] Training epoch: 52 +[ Thu Sep 15 10:54:59 2022 ] Batch(26/123) done. Loss: 0.1402 lr:0.100000 network_time: 0.0316 +[ Thu Sep 15 10:56:09 2022 ] Eval epoch: 52 +[ Thu Sep 15 10:56:46 2022 ] Mean test loss of 296 batches: 2.0904793739318848. +[ Thu Sep 15 10:56:46 2022 ] Top1: 56.88% +[ Thu Sep 15 10:56:46 2022 ] Top5: 87.20% +[ Thu Sep 15 10:56:46 2022 ] Training epoch: 53 +[ Thu Sep 15 10:56:52 2022 ] Batch(3/123) done. Loss: 0.1607 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 10:58:04 2022 ] Batch(103/123) done. Loss: 0.2134 lr:0.100000 network_time: 0.0319 +[ Thu Sep 15 10:58:18 2022 ] Eval epoch: 53 +[ Thu Sep 15 10:58:55 2022 ] Mean test loss of 296 batches: 2.0244033336639404. 
+[ Thu Sep 15 10:58:55 2022 ] Top1: 60.21% +[ Thu Sep 15 10:58:56 2022 ] Top5: 88.27% +[ Thu Sep 15 10:58:56 2022 ] Training epoch: 54 +[ Thu Sep 15 10:59:58 2022 ] Batch(80/123) done. Loss: 0.0372 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 11:00:29 2022 ] Eval epoch: 54 +[ Thu Sep 15 11:01:06 2022 ] Mean test loss of 296 batches: 2.1244254112243652. +[ Thu Sep 15 11:01:06 2022 ] Top1: 57.17% +[ Thu Sep 15 11:01:06 2022 ] Top5: 88.00% +[ Thu Sep 15 11:01:06 2022 ] Training epoch: 55 +[ Thu Sep 15 11:01:51 2022 ] Batch(57/123) done. Loss: 0.1212 lr:0.100000 network_time: 0.0307 +[ Thu Sep 15 11:02:38 2022 ] Eval epoch: 55 +[ Thu Sep 15 11:03:16 2022 ] Mean test loss of 296 batches: 2.4335763454437256. +[ Thu Sep 15 11:03:16 2022 ] Top1: 53.61% +[ Thu Sep 15 11:03:16 2022 ] Top5: 87.17% +[ Thu Sep 15 11:03:16 2022 ] Training epoch: 56 +[ Thu Sep 15 11:03:44 2022 ] Batch(34/123) done. Loss: 0.1479 lr:0.100000 network_time: 0.0284 +[ Thu Sep 15 11:04:48 2022 ] Eval epoch: 56 +[ Thu Sep 15 11:05:25 2022 ] Mean test loss of 296 batches: 2.1516454219818115. +[ Thu Sep 15 11:05:26 2022 ] Top1: 57.51% +[ Thu Sep 15 11:05:26 2022 ] Top5: 88.57% +[ Thu Sep 15 11:05:26 2022 ] Training epoch: 57 +[ Thu Sep 15 11:05:37 2022 ] Batch(11/123) done. Loss: 0.1997 lr:0.100000 network_time: 0.0306 +[ Thu Sep 15 11:06:50 2022 ] Batch(111/123) done. Loss: 0.1157 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 11:06:58 2022 ] Eval epoch: 57 +[ Thu Sep 15 11:07:36 2022 ] Mean test loss of 296 batches: 3.3739845752716064. +[ Thu Sep 15 11:07:36 2022 ] Top1: 47.72% +[ Thu Sep 15 11:07:36 2022 ] Top5: 80.09% +[ Thu Sep 15 11:07:36 2022 ] Training epoch: 58 +[ Thu Sep 15 11:08:43 2022 ] Batch(88/123) done. Loss: 0.2178 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 11:09:08 2022 ] Eval epoch: 58 +[ Thu Sep 15 11:09:46 2022 ] Mean test loss of 296 batches: 2.0241432189941406. 
+[ Thu Sep 15 11:09:46 2022 ] Top1: 57.62% +[ Thu Sep 15 11:09:46 2022 ] Top5: 88.07% +[ Thu Sep 15 11:09:46 2022 ] Training epoch: 59 +[ Thu Sep 15 11:10:37 2022 ] Batch(65/123) done. Loss: 0.2124 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 11:11:18 2022 ] Eval epoch: 59 +[ Thu Sep 15 11:11:55 2022 ] Mean test loss of 296 batches: 3.121405839920044. +[ Thu Sep 15 11:11:56 2022 ] Top1: 48.69% +[ Thu Sep 15 11:11:56 2022 ] Top5: 82.21% +[ Thu Sep 15 11:11:56 2022 ] Training epoch: 60 +[ Thu Sep 15 11:12:30 2022 ] Batch(42/123) done. Loss: 0.1682 lr:0.100000 network_time: 0.0312 +[ Thu Sep 15 11:13:29 2022 ] Eval epoch: 60 +[ Thu Sep 15 11:14:06 2022 ] Mean test loss of 296 batches: 1.6957861185073853. +[ Thu Sep 15 11:14:06 2022 ] Top1: 64.11% +[ Thu Sep 15 11:14:06 2022 ] Top5: 92.02% +[ Thu Sep 15 11:14:06 2022 ] Training epoch: 61 +[ Thu Sep 15 11:14:24 2022 ] Batch(19/123) done. Loss: 0.1335 lr:0.010000 network_time: 0.0360 +[ Thu Sep 15 11:15:36 2022 ] Batch(119/123) done. Loss: 0.0373 lr:0.010000 network_time: 0.0273 +[ Thu Sep 15 11:15:39 2022 ] Eval epoch: 61 +[ Thu Sep 15 11:16:16 2022 ] Mean test loss of 296 batches: 1.3807268142700195. +[ Thu Sep 15 11:16:16 2022 ] Top1: 69.15% +[ Thu Sep 15 11:16:16 2022 ] Top5: 93.66% +[ Thu Sep 15 11:16:16 2022 ] Training epoch: 62 +[ Thu Sep 15 11:17:30 2022 ] Batch(96/123) done. Loss: 0.0451 lr:0.010000 network_time: 0.0267 +[ Thu Sep 15 11:17:49 2022 ] Eval epoch: 62 +[ Thu Sep 15 11:18:26 2022 ] Mean test loss of 296 batches: 1.3717727661132812. +[ Thu Sep 15 11:18:26 2022 ] Top1: 69.63% +[ Thu Sep 15 11:18:26 2022 ] Top5: 93.85% +[ Thu Sep 15 11:18:26 2022 ] Training epoch: 63 +[ Thu Sep 15 11:19:23 2022 ] Batch(73/123) done. Loss: 0.0145 lr:0.010000 network_time: 0.0272 +[ Thu Sep 15 11:19:59 2022 ] Eval epoch: 63 +[ Thu Sep 15 11:20:37 2022 ] Mean test loss of 296 batches: 1.375012755393982. 
+[ Thu Sep 15 11:20:37 2022 ] Top1: 69.79% +[ Thu Sep 15 11:20:37 2022 ] Top5: 94.03% +[ Thu Sep 15 11:20:37 2022 ] Training epoch: 64 +[ Thu Sep 15 11:21:17 2022 ] Batch(50/123) done. Loss: 0.0140 lr:0.010000 network_time: 0.0269 +[ Thu Sep 15 11:22:09 2022 ] Eval epoch: 64 +[ Thu Sep 15 11:22:47 2022 ] Mean test loss of 296 batches: 1.3713973760604858. +[ Thu Sep 15 11:22:47 2022 ] Top1: 69.93% +[ Thu Sep 15 11:22:47 2022 ] Top5: 93.86% +[ Thu Sep 15 11:22:47 2022 ] Training epoch: 65 +[ Thu Sep 15 11:23:11 2022 ] Batch(27/123) done. Loss: 0.0095 lr:0.010000 network_time: 0.0533 +[ Thu Sep 15 11:24:20 2022 ] Eval epoch: 65 +[ Thu Sep 15 11:24:57 2022 ] Mean test loss of 296 batches: 1.639125943183899. +[ Thu Sep 15 11:24:57 2022 ] Top1: 66.05% +[ Thu Sep 15 11:24:57 2022 ] Top5: 91.74% +[ Thu Sep 15 11:24:57 2022 ] Training epoch: 66 +[ Thu Sep 15 11:25:04 2022 ] Batch(4/123) done. Loss: 0.0070 lr:0.010000 network_time: 0.0280 +[ Thu Sep 15 11:26:17 2022 ] Batch(104/123) done. Loss: 0.0246 lr:0.010000 network_time: 0.0277 +[ Thu Sep 15 11:26:30 2022 ] Eval epoch: 66 +[ Thu Sep 15 11:27:07 2022 ] Mean test loss of 296 batches: 1.4183951616287231. +[ Thu Sep 15 11:27:07 2022 ] Top1: 69.30% +[ Thu Sep 15 11:27:08 2022 ] Top5: 93.72% +[ Thu Sep 15 11:27:08 2022 ] Training epoch: 67 +[ Thu Sep 15 11:28:10 2022 ] Batch(81/123) done. Loss: 0.0100 lr:0.010000 network_time: 0.0314 +[ Thu Sep 15 11:28:41 2022 ] Eval epoch: 67 +[ Thu Sep 15 11:29:18 2022 ] Mean test loss of 296 batches: 1.3433952331542969. +[ Thu Sep 15 11:29:18 2022 ] Top1: 70.55% +[ Thu Sep 15 11:29:18 2022 ] Top5: 94.08% +[ Thu Sep 15 11:29:18 2022 ] Training epoch: 68 +[ Thu Sep 15 11:30:04 2022 ] Batch(58/123) done. Loss: 0.0051 lr:0.010000 network_time: 0.0274 +[ Thu Sep 15 11:30:51 2022 ] Eval epoch: 68 +[ Thu Sep 15 11:31:28 2022 ] Mean test loss of 296 batches: 1.3850868940353394. 
+[ Thu Sep 15 11:31:28 2022 ] Top1: 70.08% +[ Thu Sep 15 11:31:28 2022 ] Top5: 93.92% +[ Thu Sep 15 11:31:28 2022 ] Training epoch: 69 +[ Thu Sep 15 11:31:58 2022 ] Batch(35/123) done. Loss: 0.0060 lr:0.010000 network_time: 0.0276 +[ Thu Sep 15 11:33:01 2022 ] Eval epoch: 69 +[ Thu Sep 15 11:33:38 2022 ] Mean test loss of 296 batches: 1.418121099472046. +[ Thu Sep 15 11:33:38 2022 ] Top1: 69.64% +[ Thu Sep 15 11:33:39 2022 ] Top5: 93.59% +[ Thu Sep 15 11:33:39 2022 ] Training epoch: 70 +[ Thu Sep 15 11:33:52 2022 ] Batch(12/123) done. Loss: 0.0073 lr:0.010000 network_time: 0.0369 +[ Thu Sep 15 11:35:04 2022 ] Batch(112/123) done. Loss: 0.0065 lr:0.010000 network_time: 0.0280 +[ Thu Sep 15 11:35:12 2022 ] Eval epoch: 70 +[ Thu Sep 15 11:35:49 2022 ] Mean test loss of 296 batches: 1.3829166889190674. +[ Thu Sep 15 11:35:49 2022 ] Top1: 70.24% +[ Thu Sep 15 11:35:49 2022 ] Top5: 93.92% +[ Thu Sep 15 11:35:49 2022 ] Training epoch: 71 +[ Thu Sep 15 11:36:58 2022 ] Batch(89/123) done. Loss: 0.0047 lr:0.010000 network_time: 0.0278 +[ Thu Sep 15 11:37:22 2022 ] Eval epoch: 71 +[ Thu Sep 15 11:38:00 2022 ] Mean test loss of 296 batches: 1.3762127161026. +[ Thu Sep 15 11:38:00 2022 ] Top1: 70.66% +[ Thu Sep 15 11:38:00 2022 ] Top5: 93.93% +[ Thu Sep 15 11:38:00 2022 ] Training epoch: 72 +[ Thu Sep 15 11:38:52 2022 ] Batch(66/123) done. Loss: 0.0044 lr:0.010000 network_time: 0.0310 +[ Thu Sep 15 11:39:33 2022 ] Eval epoch: 72 +[ Thu Sep 15 11:40:10 2022 ] Mean test loss of 296 batches: 1.4460725784301758. +[ Thu Sep 15 11:40:11 2022 ] Top1: 69.75% +[ Thu Sep 15 11:40:11 2022 ] Top5: 93.60% +[ Thu Sep 15 11:40:11 2022 ] Training epoch: 73 +[ Thu Sep 15 11:40:46 2022 ] Batch(43/123) done. Loss: 0.0057 lr:0.010000 network_time: 0.0315 +[ Thu Sep 15 11:41:43 2022 ] Eval epoch: 73 +[ Thu Sep 15 11:42:20 2022 ] Mean test loss of 296 batches: 1.4069595336914062. 
+[ Thu Sep 15 11:42:20 2022 ] Top1: 69.82% +[ Thu Sep 15 11:42:20 2022 ] Top5: 93.64% +[ Thu Sep 15 11:42:20 2022 ] Training epoch: 74 +[ Thu Sep 15 11:42:39 2022 ] Batch(20/123) done. Loss: 0.0044 lr:0.010000 network_time: 0.0320 +[ Thu Sep 15 11:43:51 2022 ] Batch(120/123) done. Loss: 0.0036 lr:0.010000 network_time: 0.0270 +[ Thu Sep 15 11:43:53 2022 ] Eval epoch: 74 +[ Thu Sep 15 11:44:30 2022 ] Mean test loss of 296 batches: 1.4463022947311401. +[ Thu Sep 15 11:44:30 2022 ] Top1: 69.46% +[ Thu Sep 15 11:44:30 2022 ] Top5: 93.54% +[ Thu Sep 15 11:44:30 2022 ] Training epoch: 75 +[ Thu Sep 15 11:45:44 2022 ] Batch(97/123) done. Loss: 0.0038 lr:0.010000 network_time: 0.0259 +[ Thu Sep 15 11:46:03 2022 ] Eval epoch: 75 +[ Thu Sep 15 11:46:40 2022 ] Mean test loss of 296 batches: 1.4314552545547485. +[ Thu Sep 15 11:46:40 2022 ] Top1: 69.55% +[ Thu Sep 15 11:46:40 2022 ] Top5: 93.77% +[ Thu Sep 15 11:46:40 2022 ] Training epoch: 76 +[ Thu Sep 15 11:47:38 2022 ] Batch(74/123) done. Loss: 0.0031 lr:0.010000 network_time: 0.0260 +[ Thu Sep 15 11:48:13 2022 ] Eval epoch: 76 +[ Thu Sep 15 11:48:50 2022 ] Mean test loss of 296 batches: 1.3858096599578857. +[ Thu Sep 15 11:48:50 2022 ] Top1: 70.44% +[ Thu Sep 15 11:48:50 2022 ] Top5: 93.96% +[ Thu Sep 15 11:48:50 2022 ] Training epoch: 77 +[ Thu Sep 15 11:49:31 2022 ] Batch(51/123) done. Loss: 0.0052 lr:0.010000 network_time: 0.0306 +[ Thu Sep 15 11:50:23 2022 ] Eval epoch: 77 +[ Thu Sep 15 11:51:00 2022 ] Mean test loss of 296 batches: 1.3691257238388062. +[ Thu Sep 15 11:51:00 2022 ] Top1: 70.42% +[ Thu Sep 15 11:51:00 2022 ] Top5: 94.06% +[ Thu Sep 15 11:51:00 2022 ] Training epoch: 78 +[ Thu Sep 15 11:51:25 2022 ] Batch(28/123) done. Loss: 0.0079 lr:0.010000 network_time: 0.0284 +[ Thu Sep 15 11:52:33 2022 ] Eval epoch: 78 +[ Thu Sep 15 11:53:11 2022 ] Mean test loss of 296 batches: 1.4213460683822632. 
+[ Thu Sep 15 11:53:11 2022 ] Top1: 69.89% +[ Thu Sep 15 11:53:11 2022 ] Top5: 93.67% +[ Thu Sep 15 11:53:11 2022 ] Training epoch: 79 +[ Thu Sep 15 11:53:18 2022 ] Batch(5/123) done. Loss: 0.0075 lr:0.010000 network_time: 0.0271 +[ Thu Sep 15 11:54:31 2022 ] Batch(105/123) done. Loss: 0.0192 lr:0.010000 network_time: 0.0267 +[ Thu Sep 15 11:54:44 2022 ] Eval epoch: 79 +[ Thu Sep 15 11:55:21 2022 ] Mean test loss of 296 batches: 1.4418567419052124. +[ Thu Sep 15 11:55:21 2022 ] Top1: 69.80% +[ Thu Sep 15 11:55:21 2022 ] Top5: 93.80% +[ Thu Sep 15 11:55:21 2022 ] Training epoch: 80 +[ Thu Sep 15 11:56:25 2022 ] Batch(82/123) done. Loss: 0.0115 lr:0.010000 network_time: 0.0284 +[ Thu Sep 15 11:56:54 2022 ] Eval epoch: 80 +[ Thu Sep 15 11:57:31 2022 ] Mean test loss of 296 batches: 1.4773520231246948. +[ Thu Sep 15 11:57:31 2022 ] Top1: 69.66% +[ Thu Sep 15 11:57:31 2022 ] Top5: 93.76% +[ Thu Sep 15 11:57:31 2022 ] Training epoch: 81 +[ Thu Sep 15 11:58:18 2022 ] Batch(59/123) done. Loss: 0.0034 lr:0.001000 network_time: 0.0305 +[ Thu Sep 15 11:59:04 2022 ] Eval epoch: 81 +[ Thu Sep 15 11:59:41 2022 ] Mean test loss of 296 batches: 1.3706154823303223. +[ Thu Sep 15 11:59:42 2022 ] Top1: 70.39% +[ Thu Sep 15 11:59:42 2022 ] Top5: 94.08% +[ Thu Sep 15 11:59:42 2022 ] Training epoch: 82 +[ Thu Sep 15 12:00:12 2022 ] Batch(36/123) done. Loss: 0.0022 lr:0.001000 network_time: 0.0275 +[ Thu Sep 15 12:01:15 2022 ] Eval epoch: 82 +[ Thu Sep 15 12:01:52 2022 ] Mean test loss of 296 batches: 1.4759480953216553. +[ Thu Sep 15 12:01:52 2022 ] Top1: 69.46% +[ Thu Sep 15 12:01:52 2022 ] Top5: 93.50% +[ Thu Sep 15 12:01:52 2022 ] Training epoch: 83 +[ Thu Sep 15 12:02:05 2022 ] Batch(13/123) done. Loss: 0.0040 lr:0.001000 network_time: 0.0312 +[ Thu Sep 15 12:03:18 2022 ] Batch(113/123) done. Loss: 0.0082 lr:0.001000 network_time: 0.0307 +[ Thu Sep 15 12:03:25 2022 ] Eval epoch: 83 +[ Thu Sep 15 12:04:02 2022 ] Mean test loss of 296 batches: 1.4123865365982056. 
+[ Thu Sep 15 12:04:02 2022 ] Top1: 70.23% +[ Thu Sep 15 12:04:02 2022 ] Top5: 93.79% +[ Thu Sep 15 12:04:02 2022 ] Training epoch: 84 +[ Thu Sep 15 12:05:12 2022 ] Batch(90/123) done. Loss: 0.0043 lr:0.001000 network_time: 0.0350 +[ Thu Sep 15 12:05:35 2022 ] Eval epoch: 84 +[ Thu Sep 15 12:06:13 2022 ] Mean test loss of 296 batches: 1.458462119102478. +[ Thu Sep 15 12:06:13 2022 ] Top1: 69.72% +[ Thu Sep 15 12:06:13 2022 ] Top5: 93.77% +[ Thu Sep 15 12:06:13 2022 ] Training epoch: 85 +[ Thu Sep 15 12:07:05 2022 ] Batch(67/123) done. Loss: 0.0068 lr:0.001000 network_time: 0.0266 +[ Thu Sep 15 12:07:46 2022 ] Eval epoch: 85 +[ Thu Sep 15 12:08:23 2022 ] Mean test loss of 296 batches: 1.4187955856323242. +[ Thu Sep 15 12:08:23 2022 ] Top1: 70.33% +[ Thu Sep 15 12:08:23 2022 ] Top5: 93.83% +[ Thu Sep 15 12:08:23 2022 ] Training epoch: 86 +[ Thu Sep 15 12:08:59 2022 ] Batch(44/123) done. Loss: 0.0154 lr:0.001000 network_time: 0.0253 +[ Thu Sep 15 12:09:56 2022 ] Eval epoch: 86 +[ Thu Sep 15 12:10:33 2022 ] Mean test loss of 296 batches: 1.4447499513626099. +[ Thu Sep 15 12:10:33 2022 ] Top1: 70.12% +[ Thu Sep 15 12:10:33 2022 ] Top5: 94.02% +[ Thu Sep 15 12:10:33 2022 ] Training epoch: 87 +[ Thu Sep 15 12:10:53 2022 ] Batch(21/123) done. Loss: 0.0030 lr:0.001000 network_time: 0.0275 +[ Thu Sep 15 12:12:05 2022 ] Batch(121/123) done. Loss: 0.0054 lr:0.001000 network_time: 0.0264 +[ Thu Sep 15 12:12:06 2022 ] Eval epoch: 87 +[ Thu Sep 15 12:12:43 2022 ] Mean test loss of 296 batches: 1.4733309745788574. +[ Thu Sep 15 12:12:43 2022 ] Top1: 69.17% +[ Thu Sep 15 12:12:43 2022 ] Top5: 93.59% +[ Thu Sep 15 12:12:44 2022 ] Training epoch: 88 +[ Thu Sep 15 12:13:59 2022 ] Batch(98/123) done. Loss: 0.0022 lr:0.001000 network_time: 0.0256 +[ Thu Sep 15 12:14:16 2022 ] Eval epoch: 88 +[ Thu Sep 15 12:14:54 2022 ] Mean test loss of 296 batches: 1.4006907939910889. 
+[ Thu Sep 15 12:14:54 2022 ] Top1: 70.31% +[ Thu Sep 15 12:14:54 2022 ] Top5: 94.02% +[ Thu Sep 15 12:14:54 2022 ] Training epoch: 89 +[ Thu Sep 15 12:15:52 2022 ] Batch(75/123) done. Loss: 0.0043 lr:0.001000 network_time: 0.0307 +[ Thu Sep 15 12:16:26 2022 ] Eval epoch: 89 +[ Thu Sep 15 12:17:04 2022 ] Mean test loss of 296 batches: 1.4046229124069214. +[ Thu Sep 15 12:17:04 2022 ] Top1: 70.33% +[ Thu Sep 15 12:17:04 2022 ] Top5: 93.90% +[ Thu Sep 15 12:17:04 2022 ] Training epoch: 90 +[ Thu Sep 15 12:17:46 2022 ] Batch(52/123) done. Loss: 0.0100 lr:0.001000 network_time: 0.0313 +[ Thu Sep 15 12:18:37 2022 ] Eval epoch: 90 +[ Thu Sep 15 12:19:14 2022 ] Mean test loss of 296 batches: 1.3752048015594482. +[ Thu Sep 15 12:19:14 2022 ] Top1: 70.78% +[ Thu Sep 15 12:19:14 2022 ] Top5: 94.12% +[ Thu Sep 15 12:19:14 2022 ] Training epoch: 91 +[ Thu Sep 15 12:19:39 2022 ] Batch(29/123) done. Loss: 0.0030 lr:0.001000 network_time: 0.0270 +[ Thu Sep 15 12:20:47 2022 ] Eval epoch: 91 +[ Thu Sep 15 12:21:24 2022 ] Mean test loss of 296 batches: 1.4108084440231323. +[ Thu Sep 15 12:21:24 2022 ] Top1: 70.25% +[ Thu Sep 15 12:21:24 2022 ] Top5: 93.97% +[ Thu Sep 15 12:21:25 2022 ] Training epoch: 92 +[ Thu Sep 15 12:21:33 2022 ] Batch(6/123) done. Loss: 0.0027 lr:0.001000 network_time: 0.0364 +[ Thu Sep 15 12:22:46 2022 ] Batch(106/123) done. Loss: 0.0022 lr:0.001000 network_time: 0.0271 +[ Thu Sep 15 12:22:58 2022 ] Eval epoch: 92 +[ Thu Sep 15 12:23:35 2022 ] Mean test loss of 296 batches: 1.4383344650268555. +[ Thu Sep 15 12:23:35 2022 ] Top1: 70.00% +[ Thu Sep 15 12:23:35 2022 ] Top5: 93.89% +[ Thu Sep 15 12:23:35 2022 ] Training epoch: 93 +[ Thu Sep 15 12:24:39 2022 ] Batch(83/123) done. Loss: 0.0017 lr:0.001000 network_time: 0.0318 +[ Thu Sep 15 12:25:08 2022 ] Eval epoch: 93 +[ Thu Sep 15 12:25:45 2022 ] Mean test loss of 296 batches: 1.4299418926239014. 
+[ Thu Sep 15 12:25:45 2022 ] Top1: 70.14% +[ Thu Sep 15 12:25:45 2022 ] Top5: 93.83% +[ Thu Sep 15 12:25:45 2022 ] Training epoch: 94 +[ Thu Sep 15 12:26:33 2022 ] Batch(60/123) done. Loss: 0.0104 lr:0.001000 network_time: 0.0283 +[ Thu Sep 15 12:27:18 2022 ] Eval epoch: 94 +[ Thu Sep 15 12:27:56 2022 ] Mean test loss of 296 batches: 1.4101285934448242. +[ Thu Sep 15 12:27:56 2022 ] Top1: 70.43% +[ Thu Sep 15 12:27:56 2022 ] Top5: 94.02% +[ Thu Sep 15 12:27:56 2022 ] Training epoch: 95 +[ Thu Sep 15 12:28:27 2022 ] Batch(37/123) done. Loss: 0.0026 lr:0.001000 network_time: 0.0263 +[ Thu Sep 15 12:29:29 2022 ] Eval epoch: 95 +[ Thu Sep 15 12:30:06 2022 ] Mean test loss of 296 batches: 1.4179308414459229. +[ Thu Sep 15 12:30:06 2022 ] Top1: 70.16% +[ Thu Sep 15 12:30:06 2022 ] Top5: 93.98% +[ Thu Sep 15 12:30:06 2022 ] Training epoch: 96 +[ Thu Sep 15 12:30:20 2022 ] Batch(14/123) done. Loss: 0.0028 lr:0.001000 network_time: 0.0314 +[ Thu Sep 15 12:31:32 2022 ] Batch(114/123) done. Loss: 0.0038 lr:0.001000 network_time: 0.0262 +[ Thu Sep 15 12:31:38 2022 ] Eval epoch: 96 +[ Thu Sep 15 12:32:16 2022 ] Mean test loss of 296 batches: 1.460994005203247. +[ Thu Sep 15 12:32:16 2022 ] Top1: 69.51% +[ Thu Sep 15 12:32:16 2022 ] Top5: 93.63% +[ Thu Sep 15 12:32:16 2022 ] Training epoch: 97 +[ Thu Sep 15 12:33:26 2022 ] Batch(91/123) done. Loss: 0.0035 lr:0.001000 network_time: 0.0273 +[ Thu Sep 15 12:33:48 2022 ] Eval epoch: 97 +[ Thu Sep 15 12:34:25 2022 ] Mean test loss of 296 batches: 1.6118953227996826. +[ Thu Sep 15 12:34:25 2022 ] Top1: 66.60% +[ Thu Sep 15 12:34:26 2022 ] Top5: 92.86% +[ Thu Sep 15 12:34:26 2022 ] Training epoch: 98 +[ Thu Sep 15 12:35:19 2022 ] Batch(68/123) done. Loss: 0.0035 lr:0.001000 network_time: 0.0268 +[ Thu Sep 15 12:35:58 2022 ] Eval epoch: 98 +[ Thu Sep 15 12:36:35 2022 ] Mean test loss of 296 batches: 1.4163037538528442. 
+[ Thu Sep 15 12:36:35 2022 ] Top1: 69.98% +[ Thu Sep 15 12:36:35 2022 ] Top5: 93.93% +[ Thu Sep 15 12:36:35 2022 ] Training epoch: 99 +[ Thu Sep 15 12:37:12 2022 ] Batch(45/123) done. Loss: 0.0038 lr:0.001000 network_time: 0.0274 +[ Thu Sep 15 12:38:08 2022 ] Eval epoch: 99 +[ Thu Sep 15 12:38:45 2022 ] Mean test loss of 296 batches: 1.4162578582763672. +[ Thu Sep 15 12:38:45 2022 ] Top1: 70.16% +[ Thu Sep 15 12:38:45 2022 ] Top5: 93.90% +[ Thu Sep 15 12:38:45 2022 ] Training epoch: 100 +[ Thu Sep 15 12:39:05 2022 ] Batch(22/123) done. Loss: 0.0026 lr:0.001000 network_time: 0.0268 +[ Thu Sep 15 12:40:17 2022 ] Batch(122/123) done. Loss: 0.0139 lr:0.001000 network_time: 0.0411 +[ Thu Sep 15 12:40:18 2022 ] Eval epoch: 100 +[ Thu Sep 15 12:40:54 2022 ] Mean test loss of 296 batches: 1.3574291467666626. +[ Thu Sep 15 12:40:55 2022 ] Top1: 70.71% +[ Thu Sep 15 12:40:55 2022 ] Top5: 94.08% +[ Thu Sep 15 12:40:55 2022 ] Training epoch: 101 +[ Thu Sep 15 12:42:11 2022 ] Batch(99/123) done. Loss: 0.0035 lr:0.000100 network_time: 0.0306 +[ Thu Sep 15 12:42:28 2022 ] Eval epoch: 101 +[ Thu Sep 15 12:43:04 2022 ] Mean test loss of 296 batches: 1.4264718294143677. +[ Thu Sep 15 12:43:05 2022 ] Top1: 69.64% +[ Thu Sep 15 12:43:05 2022 ] Top5: 93.68% +[ Thu Sep 15 12:43:05 2022 ] Training epoch: 102 +[ Thu Sep 15 12:44:04 2022 ] Batch(76/123) done. Loss: 0.0024 lr:0.000100 network_time: 0.0310 +[ Thu Sep 15 12:44:38 2022 ] Eval epoch: 102 +[ Thu Sep 15 12:45:15 2022 ] Mean test loss of 296 batches: 1.4177623987197876. +[ Thu Sep 15 12:45:15 2022 ] Top1: 69.91% +[ Thu Sep 15 12:45:15 2022 ] Top5: 93.70% +[ Thu Sep 15 12:45:15 2022 ] Training epoch: 103 +[ Thu Sep 15 12:45:58 2022 ] Batch(53/123) done. Loss: 0.0093 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 12:46:48 2022 ] Eval epoch: 103 +[ Thu Sep 15 12:47:25 2022 ] Mean test loss of 296 batches: 1.4366966485977173. 
+[ Thu Sep 15 12:47:25 2022 ] Top1: 69.72% +[ Thu Sep 15 12:47:25 2022 ] Top5: 93.55% +[ Thu Sep 15 12:47:25 2022 ] Training epoch: 104 +[ Thu Sep 15 12:47:51 2022 ] Batch(30/123) done. Loss: 0.0222 lr:0.000100 network_time: 0.0321 +[ Thu Sep 15 12:48:58 2022 ] Eval epoch: 104 +[ Thu Sep 15 12:49:35 2022 ] Mean test loss of 296 batches: 1.4212855100631714. +[ Thu Sep 15 12:49:35 2022 ] Top1: 70.40% +[ Thu Sep 15 12:49:35 2022 ] Top5: 93.95% +[ Thu Sep 15 12:49:35 2022 ] Training epoch: 105 +[ Thu Sep 15 12:49:44 2022 ] Batch(7/123) done. Loss: 0.0030 lr:0.000100 network_time: 0.0271 +[ Thu Sep 15 12:50:57 2022 ] Batch(107/123) done. Loss: 0.0027 lr:0.000100 network_time: 0.0306 +[ Thu Sep 15 12:51:08 2022 ] Eval epoch: 105 +[ Thu Sep 15 12:51:46 2022 ] Mean test loss of 296 batches: 1.4680324792861938. +[ Thu Sep 15 12:51:46 2022 ] Top1: 69.88% +[ Thu Sep 15 12:51:46 2022 ] Top5: 93.79% +[ Thu Sep 15 12:51:46 2022 ] Training epoch: 106 +[ Thu Sep 15 12:52:51 2022 ] Batch(84/123) done. Loss: 0.0017 lr:0.000100 network_time: 0.0317 +[ Thu Sep 15 12:53:19 2022 ] Eval epoch: 106 +[ Thu Sep 15 12:53:56 2022 ] Mean test loss of 296 batches: 1.4331753253936768. +[ Thu Sep 15 12:53:56 2022 ] Top1: 70.09% +[ Thu Sep 15 12:53:56 2022 ] Top5: 93.93% +[ Thu Sep 15 12:53:56 2022 ] Training epoch: 107 +[ Thu Sep 15 12:54:44 2022 ] Batch(61/123) done. Loss: 0.0033 lr:0.000100 network_time: 0.0257 +[ Thu Sep 15 12:55:29 2022 ] Eval epoch: 107 +[ Thu Sep 15 12:56:07 2022 ] Mean test loss of 296 batches: 1.6662111282348633. +[ Thu Sep 15 12:56:07 2022 ] Top1: 66.17% +[ Thu Sep 15 12:56:07 2022 ] Top5: 92.62% +[ Thu Sep 15 12:56:07 2022 ] Training epoch: 108 +[ Thu Sep 15 12:56:39 2022 ] Batch(38/123) done. Loss: 0.0039 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 12:57:40 2022 ] Eval epoch: 108 +[ Thu Sep 15 12:58:17 2022 ] Mean test loss of 296 batches: 1.4470510482788086. 
+[ Thu Sep 15 12:58:17 2022 ] Top1: 70.26% +[ Thu Sep 15 12:58:17 2022 ] Top5: 93.64% +[ Thu Sep 15 12:58:17 2022 ] Training epoch: 109 +[ Thu Sep 15 12:58:32 2022 ] Batch(15/123) done. Loss: 0.0048 lr:0.000100 network_time: 0.0315 +[ Thu Sep 15 12:59:45 2022 ] Batch(115/123) done. Loss: 0.0048 lr:0.000100 network_time: 0.0312 +[ Thu Sep 15 12:59:50 2022 ] Eval epoch: 109 +[ Thu Sep 15 13:00:27 2022 ] Mean test loss of 296 batches: 1.437639594078064. +[ Thu Sep 15 13:00:27 2022 ] Top1: 69.84% +[ Thu Sep 15 13:00:27 2022 ] Top5: 93.76% +[ Thu Sep 15 13:00:27 2022 ] Training epoch: 110 +[ Thu Sep 15 13:01:38 2022 ] Batch(92/123) done. Loss: 0.0025 lr:0.000100 network_time: 0.0317 +[ Thu Sep 15 13:02:00 2022 ] Eval epoch: 110 +[ Thu Sep 15 13:02:37 2022 ] Mean test loss of 296 batches: 1.4859776496887207. +[ Thu Sep 15 13:02:37 2022 ] Top1: 69.20% +[ Thu Sep 15 13:02:37 2022 ] Top5: 93.53% +[ Thu Sep 15 13:02:37 2022 ] Training epoch: 111 +[ Thu Sep 15 13:03:31 2022 ] Batch(69/123) done. Loss: 0.0048 lr:0.000100 network_time: 0.0296 +[ Thu Sep 15 13:04:10 2022 ] Eval epoch: 111 +[ Thu Sep 15 13:04:47 2022 ] Mean test loss of 296 batches: 1.476363182067871. +[ Thu Sep 15 13:04:47 2022 ] Top1: 69.09% +[ Thu Sep 15 13:04:48 2022 ] Top5: 93.55% +[ Thu Sep 15 13:04:48 2022 ] Training epoch: 112 +[ Thu Sep 15 13:05:25 2022 ] Batch(46/123) done. Loss: 0.0027 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 13:06:20 2022 ] Eval epoch: 112 +[ Thu Sep 15 13:06:57 2022 ] Mean test loss of 296 batches: 1.3783425092697144. +[ Thu Sep 15 13:06:57 2022 ] Top1: 70.54% +[ Thu Sep 15 13:06:57 2022 ] Top5: 94.04% +[ Thu Sep 15 13:06:57 2022 ] Training epoch: 113 +[ Thu Sep 15 13:07:18 2022 ] Batch(23/123) done. Loss: 0.0211 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 13:08:30 2022 ] Eval epoch: 113 +[ Thu Sep 15 13:09:07 2022 ] Mean test loss of 296 batches: 1.4771239757537842. 
+[ Thu Sep 15 13:09:07 2022 ] Top1: 69.06% +[ Thu Sep 15 13:09:07 2022 ] Top5: 93.50% +[ Thu Sep 15 13:09:07 2022 ] Training epoch: 114 +[ Thu Sep 15 13:09:11 2022 ] Batch(0/123) done. Loss: 0.0049 lr:0.000100 network_time: 0.0648 +[ Thu Sep 15 13:10:23 2022 ] Batch(100/123) done. Loss: 0.0024 lr:0.000100 network_time: 0.0307 +[ Thu Sep 15 13:10:39 2022 ] Eval epoch: 114 +[ Thu Sep 15 13:11:16 2022 ] Mean test loss of 296 batches: 1.3957993984222412. +[ Thu Sep 15 13:11:17 2022 ] Top1: 70.67% +[ Thu Sep 15 13:11:17 2022 ] Top5: 94.07% +[ Thu Sep 15 13:11:17 2022 ] Training epoch: 115 +[ Thu Sep 15 13:12:17 2022 ] Batch(77/123) done. Loss: 0.0010 lr:0.000100 network_time: 0.0306 +[ Thu Sep 15 13:12:50 2022 ] Eval epoch: 115 +[ Thu Sep 15 13:13:26 2022 ] Mean test loss of 296 batches: 1.4090287685394287. +[ Thu Sep 15 13:13:27 2022 ] Top1: 70.11% +[ Thu Sep 15 13:13:27 2022 ] Top5: 93.97% +[ Thu Sep 15 13:13:27 2022 ] Training epoch: 116 +[ Thu Sep 15 13:14:10 2022 ] Batch(54/123) done. Loss: 0.0029 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 13:14:59 2022 ] Eval epoch: 116 +[ Thu Sep 15 13:15:37 2022 ] Mean test loss of 296 batches: 1.3961960077285767. +[ Thu Sep 15 13:15:37 2022 ] Top1: 70.73% +[ Thu Sep 15 13:15:37 2022 ] Top5: 94.07% +[ Thu Sep 15 13:15:37 2022 ] Training epoch: 117 +[ Thu Sep 15 13:16:03 2022 ] Batch(31/123) done. Loss: 0.0034 lr:0.000100 network_time: 0.0306 +[ Thu Sep 15 13:17:10 2022 ] Eval epoch: 117 +[ Thu Sep 15 13:17:47 2022 ] Mean test loss of 296 batches: 1.4363863468170166. +[ Thu Sep 15 13:17:47 2022 ] Top1: 69.97% +[ Thu Sep 15 13:17:47 2022 ] Top5: 93.85% +[ Thu Sep 15 13:17:47 2022 ] Training epoch: 118 +[ Thu Sep 15 13:17:57 2022 ] Batch(8/123) done. Loss: 0.0145 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 13:19:09 2022 ] Batch(108/123) done. Loss: 0.0053 lr:0.000100 network_time: 0.0416 +[ Thu Sep 15 13:19:20 2022 ] Eval epoch: 118 +[ Thu Sep 15 13:19:57 2022 ] Mean test loss of 296 batches: 1.4169232845306396. 
+[ Thu Sep 15 13:19:57 2022 ] Top1: 70.38% +[ Thu Sep 15 13:19:57 2022 ] Top5: 93.94% +[ Thu Sep 15 13:19:57 2022 ] Training epoch: 119 +[ Thu Sep 15 13:21:02 2022 ] Batch(85/123) done. Loss: 0.0050 lr:0.000100 network_time: 0.0327 +[ Thu Sep 15 13:21:29 2022 ] Eval epoch: 119 +[ Thu Sep 15 13:22:06 2022 ] Mean test loss of 296 batches: 1.435914158821106. +[ Thu Sep 15 13:22:07 2022 ] Top1: 69.98% +[ Thu Sep 15 13:22:07 2022 ] Top5: 93.87% +[ Thu Sep 15 13:22:07 2022 ] Training epoch: 120 +[ Thu Sep 15 13:22:55 2022 ] Batch(62/123) done. Loss: 0.0060 lr:0.000100 network_time: 0.0303 +[ Thu Sep 15 13:23:39 2022 ] Eval epoch: 120 +[ Thu Sep 15 13:24:17 2022 ] Mean test loss of 296 batches: 1.4465103149414062. +[ Thu Sep 15 13:24:17 2022 ] Top1: 69.86% +[ Thu Sep 15 13:24:17 2022 ] Top5: 93.39% +[ Thu Sep 15 13:24:17 2022 ] Training epoch: 121 +[ Thu Sep 15 13:24:49 2022 ] Batch(39/123) done. Loss: 0.0074 lr:0.000100 network_time: 0.0287 +[ Thu Sep 15 13:25:50 2022 ] Eval epoch: 121 +[ Thu Sep 15 13:26:27 2022 ] Mean test loss of 296 batches: 1.4033489227294922. +[ Thu Sep 15 13:26:27 2022 ] Top1: 70.19% +[ Thu Sep 15 13:26:27 2022 ] Top5: 93.92% +[ Thu Sep 15 13:26:27 2022 ] Training epoch: 122 +[ Thu Sep 15 13:26:42 2022 ] Batch(16/123) done. Loss: 0.0037 lr:0.000100 network_time: 0.0265 +[ Thu Sep 15 13:27:55 2022 ] Batch(116/123) done. Loss: 0.0018 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 13:28:00 2022 ] Eval epoch: 122 +[ Thu Sep 15 13:28:37 2022 ] Mean test loss of 296 batches: 1.5143283605575562. +[ Thu Sep 15 13:28:37 2022 ] Top1: 68.90% +[ Thu Sep 15 13:28:37 2022 ] Top5: 93.11% +[ Thu Sep 15 13:28:37 2022 ] Training epoch: 123 +[ Thu Sep 15 13:29:48 2022 ] Batch(93/123) done. Loss: 0.0075 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 13:30:09 2022 ] Eval epoch: 123 +[ Thu Sep 15 13:30:46 2022 ] Mean test loss of 296 batches: 1.4552180767059326. 
+[ Thu Sep 15 13:30:46 2022 ] Top1: 70.01% +[ Thu Sep 15 13:30:46 2022 ] Top5: 93.85% +[ Thu Sep 15 13:30:46 2022 ] Training epoch: 124 +[ Thu Sep 15 13:31:41 2022 ] Batch(70/123) done. Loss: 0.0040 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 13:32:19 2022 ] Eval epoch: 124 +[ Thu Sep 15 13:32:56 2022 ] Mean test loss of 296 batches: 1.397505283355713. +[ Thu Sep 15 13:32:56 2022 ] Top1: 70.35% +[ Thu Sep 15 13:32:56 2022 ] Top5: 93.86% +[ Thu Sep 15 13:32:56 2022 ] Training epoch: 125 +[ Thu Sep 15 13:33:34 2022 ] Batch(47/123) done. Loss: 0.0024 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 13:34:29 2022 ] Eval epoch: 125 +[ Thu Sep 15 13:35:06 2022 ] Mean test loss of 296 batches: 1.3981990814208984. +[ Thu Sep 15 13:35:06 2022 ] Top1: 70.37% +[ Thu Sep 15 13:35:06 2022 ] Top5: 93.90% +[ Thu Sep 15 13:35:06 2022 ] Training epoch: 126 +[ Thu Sep 15 13:35:28 2022 ] Batch(24/123) done. Loss: 0.0022 lr:0.000100 network_time: 0.0329 +[ Thu Sep 15 13:36:39 2022 ] Eval epoch: 126 +[ Thu Sep 15 13:37:16 2022 ] Mean test loss of 296 batches: 1.5163475275039673. +[ Thu Sep 15 13:37:16 2022 ] Top1: 69.13% +[ Thu Sep 15 13:37:16 2022 ] Top5: 93.27% +[ Thu Sep 15 13:37:16 2022 ] Training epoch: 127 +[ Thu Sep 15 13:37:21 2022 ] Batch(1/123) done. Loss: 0.0099 lr:0.000100 network_time: 0.0288 +[ Thu Sep 15 13:38:34 2022 ] Batch(101/123) done. Loss: 0.0705 lr:0.000100 network_time: 0.0499 +[ Thu Sep 15 13:38:49 2022 ] Eval epoch: 127 +[ Thu Sep 15 13:39:26 2022 ] Mean test loss of 296 batches: 1.3958736658096313. +[ Thu Sep 15 13:39:26 2022 ] Top1: 70.35% +[ Thu Sep 15 13:39:26 2022 ] Top5: 94.03% +[ Thu Sep 15 13:39:26 2022 ] Training epoch: 128 +[ Thu Sep 15 13:40:27 2022 ] Batch(78/123) done. Loss: 0.0040 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 13:40:59 2022 ] Eval epoch: 128 +[ Thu Sep 15 13:41:36 2022 ] Mean test loss of 296 batches: 1.3976573944091797. 
+[ Thu Sep 15 13:41:36 2022 ] Top1: 70.38% +[ Thu Sep 15 13:41:36 2022 ] Top5: 94.00% +[ Thu Sep 15 13:41:36 2022 ] Training epoch: 129 +[ Thu Sep 15 13:42:20 2022 ] Batch(55/123) done. Loss: 0.0042 lr:0.000100 network_time: 0.0314 +[ Thu Sep 15 13:43:09 2022 ] Eval epoch: 129 +[ Thu Sep 15 13:43:46 2022 ] Mean test loss of 296 batches: 1.416542649269104. +[ Thu Sep 15 13:43:46 2022 ] Top1: 70.19% +[ Thu Sep 15 13:43:46 2022 ] Top5: 93.83% +[ Thu Sep 15 13:43:46 2022 ] Training epoch: 130 +[ Thu Sep 15 13:44:13 2022 ] Batch(32/123) done. Loss: 0.0038 lr:0.000100 network_time: 0.0292 +[ Thu Sep 15 13:45:19 2022 ] Eval epoch: 130 +[ Thu Sep 15 13:45:56 2022 ] Mean test loss of 296 batches: 1.4036211967468262. +[ Thu Sep 15 13:45:56 2022 ] Top1: 70.32% +[ Thu Sep 15 13:45:56 2022 ] Top5: 93.90% +[ Thu Sep 15 13:45:56 2022 ] Training epoch: 131 +[ Thu Sep 15 13:46:06 2022 ] Batch(9/123) done. Loss: 0.0030 lr:0.000100 network_time: 0.0341 +[ Thu Sep 15 13:47:19 2022 ] Batch(109/123) done. Loss: 0.0017 lr:0.000100 network_time: 0.0301 +[ Thu Sep 15 13:47:28 2022 ] Eval epoch: 131 +[ Thu Sep 15 13:48:05 2022 ] Mean test loss of 296 batches: 1.4399094581604004. +[ Thu Sep 15 13:48:05 2022 ] Top1: 69.91% +[ Thu Sep 15 13:48:05 2022 ] Top5: 93.78% +[ Thu Sep 15 13:48:06 2022 ] Training epoch: 132 +[ Thu Sep 15 13:49:12 2022 ] Batch(86/123) done. Loss: 0.0026 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 13:49:38 2022 ] Eval epoch: 132 +[ Thu Sep 15 13:50:15 2022 ] Mean test loss of 296 batches: 1.4652665853500366. +[ Thu Sep 15 13:50:15 2022 ] Top1: 69.33% +[ Thu Sep 15 13:50:15 2022 ] Top5: 93.65% +[ Thu Sep 15 13:50:15 2022 ] Training epoch: 133 +[ Thu Sep 15 13:51:05 2022 ] Batch(63/123) done. Loss: 0.0025 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 13:51:48 2022 ] Eval epoch: 133 +[ Thu Sep 15 13:52:25 2022 ] Mean test loss of 296 batches: 1.4147453308105469. 
+[ Thu Sep 15 13:52:25 2022 ] Top1: 70.33% +[ Thu Sep 15 13:52:25 2022 ] Top5: 93.96% +[ Thu Sep 15 13:52:25 2022 ] Training epoch: 134 +[ Thu Sep 15 13:52:58 2022 ] Batch(40/123) done. Loss: 0.0045 lr:0.000100 network_time: 0.0313 +[ Thu Sep 15 13:53:58 2022 ] Eval epoch: 134 +[ Thu Sep 15 13:54:35 2022 ] Mean test loss of 296 batches: 1.5046864748001099. +[ Thu Sep 15 13:54:35 2022 ] Top1: 68.84% +[ Thu Sep 15 13:54:35 2022 ] Top5: 93.06% +[ Thu Sep 15 13:54:35 2022 ] Training epoch: 135 +[ Thu Sep 15 13:54:51 2022 ] Batch(17/123) done. Loss: 0.0074 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 13:56:04 2022 ] Batch(117/123) done. Loss: 0.0062 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 13:56:08 2022 ] Eval epoch: 135 +[ Thu Sep 15 13:56:45 2022 ] Mean test loss of 296 batches: 1.4389432668685913. +[ Thu Sep 15 13:56:45 2022 ] Top1: 69.70% +[ Thu Sep 15 13:56:45 2022 ] Top5: 93.60% +[ Thu Sep 15 13:56:45 2022 ] Training epoch: 136 +[ Thu Sep 15 13:57:57 2022 ] Batch(94/123) done. Loss: 0.0042 lr:0.000100 network_time: 0.0412 +[ Thu Sep 15 13:58:17 2022 ] Eval epoch: 136 +[ Thu Sep 15 13:58:55 2022 ] Mean test loss of 296 batches: 1.3823795318603516. +[ Thu Sep 15 13:58:55 2022 ] Top1: 70.65% +[ Thu Sep 15 13:58:55 2022 ] Top5: 94.19% +[ Thu Sep 15 13:58:55 2022 ] Training epoch: 137 +[ Thu Sep 15 13:59:50 2022 ] Batch(71/123) done. Loss: 0.0023 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 14:00:28 2022 ] Eval epoch: 137 +[ Thu Sep 15 14:01:05 2022 ] Mean test loss of 296 batches: 1.379677653312683. +[ Thu Sep 15 14:01:05 2022 ] Top1: 70.68% +[ Thu Sep 15 14:01:05 2022 ] Top5: 94.12% +[ Thu Sep 15 14:01:05 2022 ] Training epoch: 138 +[ Thu Sep 15 14:01:44 2022 ] Batch(48/123) done. Loss: 0.0042 lr:0.000100 network_time: 0.0264 +[ Thu Sep 15 14:02:38 2022 ] Eval epoch: 138 +[ Thu Sep 15 14:03:15 2022 ] Mean test loss of 296 batches: 1.4020054340362549. 
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable  # kept: removing a module-level import could break external users
import numpy as np
import math

import sys
sys.path.append("./model/Temporal_shift/")

# Project-local CUDA extension implementing the learnable temporal shift op.
from cuda.shift import Shift


def import_class(name):
    """Import and return the object named by a dotted path, e.g. 'graph.ntu_rgb_d.Graph'."""
    components = name.split('.')
    mod = __import__(components[0])
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod


def conv_init(conv):
    """Kaiming-initialize a conv layer's weight (fan_out) and zero its bias.

    NOTE: the non-underscore initializers (nn.init.kaiming_normal /
    nn.init.constant) were deprecated and later removed from PyTorch;
    the in-place underscore variants are the supported API.
    """
    nn.init.kaiming_normal_(conv.weight, mode='fan_out')
    nn.init.constant_(conv.bias, 0)


def bn_init(bn, scale):
    """Initialize a batch-norm layer: weight = scale, bias = 0."""
    nn.init.constant_(bn.weight, scale)
    nn.init.constant_(bn.bias, 0)


class tcn(nn.Module):
    """Plain temporal convolution (k x 1 over time) + BN.

    Used as the residual-path projection in TCN_GCN_unit. No activation is
    applied here on purpose: the enclosing unit applies ReLU after summation.
    """

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(tcn, self).__init__()
        pad = int((kernel_size - 1) / 2)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0),
                              stride=(stride, 1))

        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()
        conv_init(self.conv)
        bn_init(self.bn, 1)

    def forward(self, x):
        x = self.bn(self.conv(x))
        return x


class Shift_tcn(nn.Module):
    """Temporal shift block: BN -> shift -> 1x1 conv -> ReLU -> shift -> BN."""

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(Shift_tcn, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.bn = nn.BatchNorm2d(in_channels)
        self.bn2 = nn.BatchNorm2d(in_channels)
        bn_init(self.bn2, 1)
        self.relu = nn.ReLU(inplace=True)
        # Shift is the custom CUDA op; stride is applied on the output shift.
        self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1)
        self.shift_out = Shift(channel=out_channels, stride=stride, init_scale=1)

        self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1)
        nn.init.kaiming_normal_(self.temporal_linear.weight, mode='fan_out')

    def forward(self, x):
        x = self.bn(x)
        # shift1
        x = self.shift_in(x)
        x = self.temporal_linear(x)
        x = self.relu(x)
        # shift2
        x = self.shift_out(x)
        x = self.bn2(x)
        return x


class Shift_gcn(nn.Module):
    """Spatial shift-GCN unit.

    Implements the shift operation across the 25 skeleton joints via
    precomputed index permutations, a pointwise linear projection, a learned
    feature mask, and a residual connection.

    NOTE: parameters are created directly on 'cuda' (as in the original
    release), so this module requires a GPU. The joint count 25 is the
    NTU RGB+D skeleton and is hard-coded throughout this class.
    """

    def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3):
        super(Shift_gcn, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        if in_channels != out_channels:
            self.down = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.down = lambda x: x

        self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True)
        nn.init.normal_(self.Linear_weight, 0, math.sqrt(1.0 / out_channels))

        self.Linear_bias = nn.Parameter(torch.zeros(1, 1, out_channels, requires_grad=True, device='cuda'), requires_grad=True)
        nn.init.constant_(self.Linear_bias, 0)

        # Mask starts at 0 so tanh(mask)+1 == 1, i.e. identity at init.
        self.Feature_Mask = nn.Parameter(torch.ones(1, 25, in_channels, requires_grad=True, device='cuda'), requires_grad=True)
        nn.init.constant_(self.Feature_Mask, 0)

        self.bn = nn.BatchNorm1d(25 * out_channels)
        self.relu = nn.ReLU()

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)

        # Precomputed permutation indices implementing the spatial shift.
        # np.int was removed in NumPy 1.24; use an explicit int64 dtype
        # (index_select requires a LongTensor anyway).
        index_array = np.empty(25 * in_channels, dtype=np.int64)
        for i in range(25):
            for j in range(in_channels):
                index_array[i * in_channels + j] = (i * in_channels + j + j * in_channels) % (in_channels * 25)
        self.shift_in = nn.Parameter(torch.from_numpy(index_array), requires_grad=False)

        index_array = np.empty(25 * out_channels, dtype=np.int64)
        for i in range(25):
            for j in range(out_channels):
                index_array[i * out_channels + j] = (i * out_channels + j - j * out_channels) % (out_channels * 25)
        self.shift_out = nn.Parameter(torch.from_numpy(index_array), requires_grad=False)

    def forward(self, x0):
        """x0: (N, C, T, V) -> (N, out_channels, T, V)."""
        n, c, t, v = x0.size()
        x = x0.permute(0, 2, 3, 1).contiguous()

        # shift1: permute channels across joints, then mask
        x = x.view(n * t, v * c)
        x = torch.index_select(x, 1, self.shift_in)
        x = x.view(n * t, v, c)
        x = x * (torch.tanh(self.Feature_Mask) + 1)

        # pointwise linear projection: (nt, v, c) @ (c, d) -> (nt, v, d)
        x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous()
        x = x + self.Linear_bias

        # shift2: inverse-direction permutation, then BN over (V * C_out)
        x = x.view(n * t, -1)
        x = torch.index_select(x, 1, self.shift_out)
        x = self.bn(x)
        x = x.view(n, t, v, self.out_channels).permute(0, 3, 1, 2)  # n,c,t,v

        x = x + self.down(x0)
        x = self.relu(x)
        return x


class TCN_GCN_unit(nn.Module):
    """One Shift-GCN layer: spatial shift-GCN followed by temporal shift-TCN,
    with a residual connection (identity, zero, or strided 1x1 tcn projection).
    """

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = Shift_gcn(in_channels, out_channels, A)
        self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU()

        if not residual:
            self.residual = lambda x: 0

        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x
        else:
            self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        # residual is computed on the ORIGINAL input (RHS evaluates before
        # the rebinding of x).
        x = self.tcn1(self.gcn1(x)) + self.residual(x)
        return self.relu(x)


class Model(nn.Module):
    """Shift-GCN skeleton action-recognition network.

    Input: (N, C, T, V, M) = (batch, channels, frames, joints, persons).
    Output: (N, num_class) logits, averaged over persons and spacetime.
    """

    def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3):
        super(Model, self).__init__()

        if graph is None:
            raise ValueError()
        else:
            Graph = import_class(graph)
            self.graph = Graph(**graph_args)

        A = self.graph.A
        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)

        # BUG FIX: the first layer previously hard-coded 3 input channels,
        # silently ignoring the in_channels argument. Default (3) preserves
        # the original behavior.
        self.l1 = TCN_GCN_unit(in_channels, 64, A, residual=False)
        self.l2 = TCN_GCN_unit(64, 64, A)
        self.l3 = TCN_GCN_unit(64, 64, A)
        self.l4 = TCN_GCN_unit(64, 64, A)
        self.l5 = TCN_GCN_unit(64, 128, A, stride=2)
        self.l6 = TCN_GCN_unit(128, 128, A)
        self.l7 = TCN_GCN_unit(128, 128, A)
        self.l8 = TCN_GCN_unit(128, 256, A, stride=2)
        self.l9 = TCN_GCN_unit(256, 256, A)
        self.l10 = TCN_GCN_unit(256, 256, A)

        self.fc = nn.Linear(256, num_class)
        nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
        bn_init(self.data_bn, 1)

    def forward(self, x):
        N, C, T, V, M = x.size()

        # Fold persons/joints/channels together for BN over the whole skeleton,
        # then unfold into (N*M, C, T, V) for the per-person GCN stack.
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)

        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = self.l5(x)
        x = self.l6(x)
        x = self.l7(x)
        x = self.l8(x)
        x = self.l9(x)
        x = self.l10(x)

        # N*M,C,T,V -> global average over spacetime, then over persons
        c_new = x.size(1)
        x = x.view(N, M, c_new, -1)
        x = x.mean(3).mean(1)

        return self.fc(x)
./config/nturgbd-cross-view/train_bone.yaml +device: +- 0 +- 1 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_ShiftGCN_bone_xview +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_bone.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_bone.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_ShiftGCN_bone_xview diff --git a/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_xview/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_xview/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..61fccb9d427ca5dad3509cf2ee40852a28d48bc8 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_xview/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5deb152290b33631084b1e4dee75da9f31a6b7cbe41798dc304918ae577a06df +size 5718404 diff --git a/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_xview/log.txt b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_xview/log.txt new file mode 100644 index 
0000000000000000000000000000000000000000..b48572e6dd9fa1297a7378b1acdc3d72325f724d --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_bone_xview/log.txt @@ -0,0 +1,875 @@ +[ Thu Sep 15 09:03:50 2022 ] Parameters: +{'work_dir': './work_dir/ntu_ShiftGCN_bone_xview', 'model_saved_name': './save_models/ntu_ShiftGCN_bone_xview', 'Experiment_name': 'ntu_ShiftGCN_bone_xview', 'config': './config/nturgbd-cross-view/train_bone.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_bone.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [0, 1], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Thu Sep 15 09:03:50 2022 ] Training epoch: 1 +[ Thu Sep 15 09:05:11 2022 ] Batch(99/123) done. Loss: 2.6740 lr:0.100000 network_time: 0.0308 +[ Thu Sep 15 09:05:28 2022 ] Eval epoch: 1 +[ Thu Sep 15 09:06:05 2022 ] Mean test loss of 296 batches: 4.13906717300415. 
+[ Thu Sep 15 09:06:05 2022 ] Top1: 12.17% +[ Thu Sep 15 09:06:05 2022 ] Top5: 36.13% +[ Thu Sep 15 09:06:05 2022 ] Training epoch: 2 +[ Thu Sep 15 09:07:04 2022 ] Batch(76/123) done. Loss: 2.0076 lr:0.100000 network_time: 0.0306 +[ Thu Sep 15 09:07:38 2022 ] Eval epoch: 2 +[ Thu Sep 15 09:08:16 2022 ] Mean test loss of 296 batches: 3.7832303047180176. +[ Thu Sep 15 09:08:16 2022 ] Top1: 19.16% +[ Thu Sep 15 09:08:16 2022 ] Top5: 46.11% +[ Thu Sep 15 09:08:16 2022 ] Training epoch: 3 +[ Thu Sep 15 09:08:59 2022 ] Batch(53/123) done. Loss: 2.3582 lr:0.100000 network_time: 0.0302 +[ Thu Sep 15 09:09:49 2022 ] Eval epoch: 3 +[ Thu Sep 15 09:10:27 2022 ] Mean test loss of 296 batches: 3.2134745121002197. +[ Thu Sep 15 09:10:27 2022 ] Top1: 24.46% +[ Thu Sep 15 09:10:27 2022 ] Top5: 61.18% +[ Thu Sep 15 09:10:27 2022 ] Training epoch: 4 +[ Thu Sep 15 09:10:52 2022 ] Batch(30/123) done. Loss: 1.5009 lr:0.100000 network_time: 0.0513 +[ Thu Sep 15 09:12:00 2022 ] Eval epoch: 4 +[ Thu Sep 15 09:12:38 2022 ] Mean test loss of 296 batches: 3.1750173568725586. +[ Thu Sep 15 09:12:38 2022 ] Top1: 25.35% +[ Thu Sep 15 09:12:38 2022 ] Top5: 58.36% +[ Thu Sep 15 09:12:38 2022 ] Training epoch: 5 +[ Thu Sep 15 09:12:47 2022 ] Batch(7/123) done. Loss: 1.6397 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 09:14:00 2022 ] Batch(107/123) done. Loss: 1.4090 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 09:14:11 2022 ] Eval epoch: 5 +[ Thu Sep 15 09:14:49 2022 ] Mean test loss of 296 batches: 2.7258002758026123. +[ Thu Sep 15 09:14:49 2022 ] Top1: 35.73% +[ Thu Sep 15 09:14:49 2022 ] Top5: 66.08% +[ Thu Sep 15 09:14:49 2022 ] Training epoch: 6 +[ Thu Sep 15 09:15:55 2022 ] Batch(84/123) done. Loss: 1.4054 lr:0.100000 network_time: 0.0297 +[ Thu Sep 15 09:16:23 2022 ] Eval epoch: 6 +[ Thu Sep 15 09:17:00 2022 ] Mean test loss of 296 batches: 2.7449729442596436. 
+[ Thu Sep 15 09:17:00 2022 ] Top1: 34.15% +[ Thu Sep 15 09:17:01 2022 ] Top5: 70.44% +[ Thu Sep 15 09:17:01 2022 ] Training epoch: 7 +[ Thu Sep 15 09:17:49 2022 ] Batch(61/123) done. Loss: 1.1807 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 09:18:34 2022 ] Eval epoch: 7 +[ Thu Sep 15 09:19:11 2022 ] Mean test loss of 296 batches: 2.3552918434143066. +[ Thu Sep 15 09:19:11 2022 ] Top1: 39.92% +[ Thu Sep 15 09:19:11 2022 ] Top5: 75.92% +[ Thu Sep 15 09:19:12 2022 ] Training epoch: 8 +[ Thu Sep 15 09:19:43 2022 ] Batch(38/123) done. Loss: 1.0275 lr:0.100000 network_time: 0.0287 +[ Thu Sep 15 09:20:45 2022 ] Eval epoch: 8 +[ Thu Sep 15 09:21:23 2022 ] Mean test loss of 296 batches: 2.121938467025757. +[ Thu Sep 15 09:21:23 2022 ] Top1: 41.10% +[ Thu Sep 15 09:21:23 2022 ] Top5: 80.00% +[ Thu Sep 15 09:21:23 2022 ] Training epoch: 9 +[ Thu Sep 15 09:21:38 2022 ] Batch(15/123) done. Loss: 1.1593 lr:0.100000 network_time: 0.0351 +[ Thu Sep 15 09:22:51 2022 ] Batch(115/123) done. Loss: 1.2646 lr:0.100000 network_time: 0.0294 +[ Thu Sep 15 09:22:56 2022 ] Eval epoch: 9 +[ Thu Sep 15 09:23:34 2022 ] Mean test loss of 296 batches: 2.7253289222717285. +[ Thu Sep 15 09:23:34 2022 ] Top1: 38.13% +[ Thu Sep 15 09:23:34 2022 ] Top5: 73.09% +[ Thu Sep 15 09:23:34 2022 ] Training epoch: 10 +[ Thu Sep 15 09:24:45 2022 ] Batch(92/123) done. Loss: 1.1856 lr:0.100000 network_time: 0.0296 +[ Thu Sep 15 09:25:08 2022 ] Eval epoch: 10 +[ Thu Sep 15 09:25:45 2022 ] Mean test loss of 296 batches: 2.0424673557281494. +[ Thu Sep 15 09:25:45 2022 ] Top1: 47.45% +[ Thu Sep 15 09:25:46 2022 ] Top5: 83.56% +[ Thu Sep 15 09:25:46 2022 ] Training epoch: 11 +[ Thu Sep 15 09:26:40 2022 ] Batch(69/123) done. Loss: 1.0547 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 09:27:19 2022 ] Eval epoch: 11 +[ Thu Sep 15 09:27:56 2022 ] Mean test loss of 296 batches: 1.9141647815704346. 
+[ Thu Sep 15 09:27:56 2022 ] Top1: 49.58% +[ Thu Sep 15 09:27:56 2022 ] Top5: 83.57% +[ Thu Sep 15 09:27:56 2022 ] Training epoch: 12 +[ Thu Sep 15 09:28:33 2022 ] Batch(46/123) done. Loss: 0.7644 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 09:29:29 2022 ] Eval epoch: 12 +[ Thu Sep 15 09:30:06 2022 ] Mean test loss of 296 batches: 1.7302242517471313. +[ Thu Sep 15 09:30:06 2022 ] Top1: 51.30% +[ Thu Sep 15 09:30:06 2022 ] Top5: 85.26% +[ Thu Sep 15 09:30:06 2022 ] Training epoch: 13 +[ Thu Sep 15 09:30:26 2022 ] Batch(23/123) done. Loss: 0.8732 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 09:31:39 2022 ] Eval epoch: 13 +[ Thu Sep 15 09:32:16 2022 ] Mean test loss of 296 batches: 1.7739286422729492. +[ Thu Sep 15 09:32:16 2022 ] Top1: 52.45% +[ Thu Sep 15 09:32:16 2022 ] Top5: 86.34% +[ Thu Sep 15 09:32:16 2022 ] Training epoch: 14 +[ Thu Sep 15 09:32:20 2022 ] Batch(0/123) done. Loss: 0.5218 lr:0.100000 network_time: 0.0542 +[ Thu Sep 15 09:33:33 2022 ] Batch(100/123) done. Loss: 0.7886 lr:0.100000 network_time: 0.0254 +[ Thu Sep 15 09:33:49 2022 ] Eval epoch: 14 +[ Thu Sep 15 09:34:26 2022 ] Mean test loss of 296 batches: 1.6972070932388306. +[ Thu Sep 15 09:34:26 2022 ] Top1: 53.01% +[ Thu Sep 15 09:34:27 2022 ] Top5: 87.28% +[ Thu Sep 15 09:34:27 2022 ] Training epoch: 15 +[ Thu Sep 15 09:35:26 2022 ] Batch(77/123) done. Loss: 0.8448 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 09:36:00 2022 ] Eval epoch: 15 +[ Thu Sep 15 09:36:36 2022 ] Mean test loss of 296 batches: 1.97208571434021. +[ Thu Sep 15 09:36:36 2022 ] Top1: 48.83% +[ Thu Sep 15 09:36:37 2022 ] Top5: 82.75% +[ Thu Sep 15 09:36:37 2022 ] Training epoch: 16 +[ Thu Sep 15 09:37:20 2022 ] Batch(54/123) done. Loss: 0.6288 lr:0.100000 network_time: 0.0257 +[ Thu Sep 15 09:38:09 2022 ] Eval epoch: 16 +[ Thu Sep 15 09:38:46 2022 ] Mean test loss of 296 batches: 2.459351062774658. 
+[ Thu Sep 15 09:38:46 2022 ] Top1: 48.01% +[ Thu Sep 15 09:38:46 2022 ] Top5: 81.74% +[ Thu Sep 15 09:38:47 2022 ] Training epoch: 17 +[ Thu Sep 15 09:39:13 2022 ] Batch(31/123) done. Loss: 0.5582 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 09:40:19 2022 ] Eval epoch: 17 +[ Thu Sep 15 09:40:56 2022 ] Mean test loss of 296 batches: 2.2460615634918213. +[ Thu Sep 15 09:40:57 2022 ] Top1: 49.27% +[ Thu Sep 15 09:40:57 2022 ] Top5: 80.32% +[ Thu Sep 15 09:40:57 2022 ] Training epoch: 18 +[ Thu Sep 15 09:41:06 2022 ] Batch(8/123) done. Loss: 0.3564 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 09:42:19 2022 ] Batch(108/123) done. Loss: 0.8401 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 09:42:30 2022 ] Eval epoch: 18 +[ Thu Sep 15 09:43:07 2022 ] Mean test loss of 296 batches: 1.9964317083358765. +[ Thu Sep 15 09:43:07 2022 ] Top1: 52.15% +[ Thu Sep 15 09:43:07 2022 ] Top5: 86.16% +[ Thu Sep 15 09:43:07 2022 ] Training epoch: 19 +[ Thu Sep 15 09:44:12 2022 ] Batch(85/123) done. Loss: 0.5355 lr:0.100000 network_time: 0.0252 +[ Thu Sep 15 09:44:40 2022 ] Eval epoch: 19 +[ Thu Sep 15 09:45:16 2022 ] Mean test loss of 296 batches: 2.220966339111328. +[ Thu Sep 15 09:45:17 2022 ] Top1: 48.38% +[ Thu Sep 15 09:45:17 2022 ] Top5: 85.00% +[ Thu Sep 15 09:45:17 2022 ] Training epoch: 20 +[ Thu Sep 15 09:46:06 2022 ] Batch(62/123) done. Loss: 0.5404 lr:0.100000 network_time: 0.0263 +[ Thu Sep 15 09:46:50 2022 ] Eval epoch: 20 +[ Thu Sep 15 09:47:27 2022 ] Mean test loss of 296 batches: 1.5105681419372559. +[ Thu Sep 15 09:47:27 2022 ] Top1: 59.52% +[ Thu Sep 15 09:47:27 2022 ] Top5: 89.67% +[ Thu Sep 15 09:47:27 2022 ] Training epoch: 21 +[ Thu Sep 15 09:47:59 2022 ] Batch(39/123) done. Loss: 0.4978 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 09:49:00 2022 ] Eval epoch: 21 +[ Thu Sep 15 09:49:37 2022 ] Mean test loss of 296 batches: 2.4977619647979736. 
+[ Thu Sep 15 09:49:37 2022 ] Top1: 48.45% +[ Thu Sep 15 09:49:37 2022 ] Top5: 82.50% +[ Thu Sep 15 09:49:37 2022 ] Training epoch: 22 +[ Thu Sep 15 09:49:52 2022 ] Batch(16/123) done. Loss: 0.5871 lr:0.100000 network_time: 0.0299 +[ Thu Sep 15 09:51:05 2022 ] Batch(116/123) done. Loss: 0.6700 lr:0.100000 network_time: 0.0313 +[ Thu Sep 15 09:51:10 2022 ] Eval epoch: 22 +[ Thu Sep 15 09:51:47 2022 ] Mean test loss of 296 batches: 1.8339920043945312. +[ Thu Sep 15 09:51:47 2022 ] Top1: 55.89% +[ Thu Sep 15 09:51:47 2022 ] Top5: 87.98% +[ Thu Sep 15 09:51:47 2022 ] Training epoch: 23 +[ Thu Sep 15 09:52:58 2022 ] Batch(93/123) done. Loss: 0.5115 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 09:53:20 2022 ] Eval epoch: 23 +[ Thu Sep 15 09:53:57 2022 ] Mean test loss of 296 batches: 1.8879512548446655. +[ Thu Sep 15 09:53:57 2022 ] Top1: 54.29% +[ Thu Sep 15 09:53:57 2022 ] Top5: 86.34% +[ Thu Sep 15 09:53:57 2022 ] Training epoch: 24 +[ Thu Sep 15 09:54:52 2022 ] Batch(70/123) done. Loss: 0.4546 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 09:55:30 2022 ] Eval epoch: 24 +[ Thu Sep 15 09:56:07 2022 ] Mean test loss of 296 batches: 1.7269116640090942. +[ Thu Sep 15 09:56:07 2022 ] Top1: 58.01% +[ Thu Sep 15 09:56:07 2022 ] Top5: 88.70% +[ Thu Sep 15 09:56:07 2022 ] Training epoch: 25 +[ Thu Sep 15 09:56:45 2022 ] Batch(47/123) done. Loss: 0.4607 lr:0.100000 network_time: 0.0301 +[ Thu Sep 15 09:57:40 2022 ] Eval epoch: 25 +[ Thu Sep 15 09:58:17 2022 ] Mean test loss of 296 batches: 1.8617223501205444. +[ Thu Sep 15 09:58:17 2022 ] Top1: 55.51% +[ Thu Sep 15 09:58:17 2022 ] Top5: 85.25% +[ Thu Sep 15 09:58:17 2022 ] Training epoch: 26 +[ Thu Sep 15 09:58:38 2022 ] Batch(24/123) done. Loss: 0.2577 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 09:59:50 2022 ] Eval epoch: 26 +[ Thu Sep 15 10:00:27 2022 ] Mean test loss of 296 batches: 1.5492947101593018. 
+[ Thu Sep 15 10:00:27 2022 ] Top1: 59.60% +[ Thu Sep 15 10:00:27 2022 ] Top5: 89.55% +[ Thu Sep 15 10:00:27 2022 ] Training epoch: 27 +[ Thu Sep 15 10:00:31 2022 ] Batch(1/123) done. Loss: 0.2179 lr:0.100000 network_time: 0.0242 +[ Thu Sep 15 10:01:44 2022 ] Batch(101/123) done. Loss: 0.5261 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 10:02:00 2022 ] Eval epoch: 27 +[ Thu Sep 15 10:02:37 2022 ] Mean test loss of 296 batches: 1.7541275024414062. +[ Thu Sep 15 10:02:37 2022 ] Top1: 57.52% +[ Thu Sep 15 10:02:37 2022 ] Top5: 89.57% +[ Thu Sep 15 10:02:38 2022 ] Training epoch: 28 +[ Thu Sep 15 10:03:38 2022 ] Batch(78/123) done. Loss: 0.3746 lr:0.100000 network_time: 0.0275 +[ Thu Sep 15 10:04:10 2022 ] Eval epoch: 28 +[ Thu Sep 15 10:04:47 2022 ] Mean test loss of 296 batches: 1.7211239337921143. +[ Thu Sep 15 10:04:47 2022 ] Top1: 57.03% +[ Thu Sep 15 10:04:48 2022 ] Top5: 87.88% +[ Thu Sep 15 10:04:48 2022 ] Training epoch: 29 +[ Thu Sep 15 10:05:32 2022 ] Batch(55/123) done. Loss: 0.3726 lr:0.100000 network_time: 0.0251 +[ Thu Sep 15 10:06:21 2022 ] Eval epoch: 29 +[ Thu Sep 15 10:06:57 2022 ] Mean test loss of 296 batches: 1.8222599029541016. +[ Thu Sep 15 10:06:57 2022 ] Top1: 57.99% +[ Thu Sep 15 10:06:58 2022 ] Top5: 89.09% +[ Thu Sep 15 10:06:58 2022 ] Training epoch: 30 +[ Thu Sep 15 10:07:25 2022 ] Batch(32/123) done. Loss: 0.2967 lr:0.100000 network_time: 0.0319 +[ Thu Sep 15 10:08:31 2022 ] Eval epoch: 30 +[ Thu Sep 15 10:09:07 2022 ] Mean test loss of 296 batches: 1.8540018796920776. +[ Thu Sep 15 10:09:08 2022 ] Top1: 55.35% +[ Thu Sep 15 10:09:08 2022 ] Top5: 89.17% +[ Thu Sep 15 10:09:08 2022 ] Training epoch: 31 +[ Thu Sep 15 10:09:17 2022 ] Batch(9/123) done. Loss: 0.2456 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 10:10:30 2022 ] Batch(109/123) done. Loss: 0.2646 lr:0.100000 network_time: 0.0297 +[ Thu Sep 15 10:10:40 2022 ] Eval epoch: 31 +[ Thu Sep 15 10:11:17 2022 ] Mean test loss of 296 batches: 1.9810574054718018. 
+[ Thu Sep 15 10:11:17 2022 ] Top1: 56.85% +[ Thu Sep 15 10:11:17 2022 ] Top5: 86.92% +[ Thu Sep 15 10:11:17 2022 ] Training epoch: 32 +[ Thu Sep 15 10:12:24 2022 ] Batch(86/123) done. Loss: 0.2999 lr:0.100000 network_time: 0.0305 +[ Thu Sep 15 10:12:50 2022 ] Eval epoch: 32 +[ Thu Sep 15 10:13:28 2022 ] Mean test loss of 296 batches: 1.7129456996917725. +[ Thu Sep 15 10:13:28 2022 ] Top1: 58.91% +[ Thu Sep 15 10:13:28 2022 ] Top5: 89.43% +[ Thu Sep 15 10:13:28 2022 ] Training epoch: 33 +[ Thu Sep 15 10:14:18 2022 ] Batch(63/123) done. Loss: 0.2233 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 10:15:01 2022 ] Eval epoch: 33 +[ Thu Sep 15 10:15:38 2022 ] Mean test loss of 296 batches: 2.0769295692443848. +[ Thu Sep 15 10:15:38 2022 ] Top1: 54.74% +[ Thu Sep 15 10:15:38 2022 ] Top5: 86.14% +[ Thu Sep 15 10:15:38 2022 ] Training epoch: 34 +[ Thu Sep 15 10:16:11 2022 ] Batch(40/123) done. Loss: 0.2054 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 10:17:11 2022 ] Eval epoch: 34 +[ Thu Sep 15 10:17:48 2022 ] Mean test loss of 296 batches: 2.1247406005859375. +[ Thu Sep 15 10:17:48 2022 ] Top1: 54.54% +[ Thu Sep 15 10:17:48 2022 ] Top5: 85.31% +[ Thu Sep 15 10:17:49 2022 ] Training epoch: 35 +[ Thu Sep 15 10:18:05 2022 ] Batch(17/123) done. Loss: 0.4368 lr:0.100000 network_time: 0.0302 +[ Thu Sep 15 10:19:17 2022 ] Batch(117/123) done. Loss: 0.4212 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 10:19:21 2022 ] Eval epoch: 35 +[ Thu Sep 15 10:19:58 2022 ] Mean test loss of 296 batches: 2.210211753845215. +[ Thu Sep 15 10:19:58 2022 ] Top1: 52.54% +[ Thu Sep 15 10:19:59 2022 ] Top5: 85.84% +[ Thu Sep 15 10:19:59 2022 ] Training epoch: 36 +[ Thu Sep 15 10:21:11 2022 ] Batch(94/123) done. Loss: 0.2953 lr:0.100000 network_time: 0.0260 +[ Thu Sep 15 10:21:32 2022 ] Eval epoch: 36 +[ Thu Sep 15 10:22:08 2022 ] Mean test loss of 296 batches: 1.7848888635635376. 
+[ Thu Sep 15 10:22:08 2022 ] Top1: 59.08% +[ Thu Sep 15 10:22:09 2022 ] Top5: 90.09% +[ Thu Sep 15 10:22:09 2022 ] Training epoch: 37 +[ Thu Sep 15 10:23:04 2022 ] Batch(71/123) done. Loss: 0.2519 lr:0.100000 network_time: 0.0309 +[ Thu Sep 15 10:23:42 2022 ] Eval epoch: 37 +[ Thu Sep 15 10:24:18 2022 ] Mean test loss of 296 batches: 1.9224181175231934. +[ Thu Sep 15 10:24:18 2022 ] Top1: 57.91% +[ Thu Sep 15 10:24:19 2022 ] Top5: 88.04% +[ Thu Sep 15 10:24:19 2022 ] Training epoch: 38 +[ Thu Sep 15 10:24:57 2022 ] Batch(48/123) done. Loss: 0.3168 lr:0.100000 network_time: 0.0275 +[ Thu Sep 15 10:25:52 2022 ] Eval epoch: 38 +[ Thu Sep 15 10:26:29 2022 ] Mean test loss of 296 batches: 1.7332240343093872. +[ Thu Sep 15 10:26:29 2022 ] Top1: 59.56% +[ Thu Sep 15 10:26:29 2022 ] Top5: 89.11% +[ Thu Sep 15 10:26:29 2022 ] Training epoch: 39 +[ Thu Sep 15 10:26:51 2022 ] Batch(25/123) done. Loss: 0.2456 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 10:28:02 2022 ] Eval epoch: 39 +[ Thu Sep 15 10:28:39 2022 ] Mean test loss of 296 batches: 2.2077972888946533. +[ Thu Sep 15 10:28:39 2022 ] Top1: 52.85% +[ Thu Sep 15 10:28:39 2022 ] Top5: 86.06% +[ Thu Sep 15 10:28:39 2022 ] Training epoch: 40 +[ Thu Sep 15 10:28:45 2022 ] Batch(2/123) done. Loss: 0.2163 lr:0.100000 network_time: 0.0303 +[ Thu Sep 15 10:29:58 2022 ] Batch(102/123) done. Loss: 0.3630 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 10:30:12 2022 ] Eval epoch: 40 +[ Thu Sep 15 10:30:49 2022 ] Mean test loss of 296 batches: 1.7486727237701416. +[ Thu Sep 15 10:30:49 2022 ] Top1: 57.74% +[ Thu Sep 15 10:30:50 2022 ] Top5: 88.91% +[ Thu Sep 15 10:30:50 2022 ] Training epoch: 41 +[ Thu Sep 15 10:31:51 2022 ] Batch(79/123) done. Loss: 0.1285 lr:0.100000 network_time: 0.0304 +[ Thu Sep 15 10:32:22 2022 ] Eval epoch: 41 +[ Thu Sep 15 10:32:59 2022 ] Mean test loss of 296 batches: 2.24014949798584. 
+[ Thu Sep 15 10:32:59 2022 ] Top1: 53.09% +[ Thu Sep 15 10:32:59 2022 ] Top5: 86.23% +[ Thu Sep 15 10:33:00 2022 ] Training epoch: 42 +[ Thu Sep 15 10:33:44 2022 ] Batch(56/123) done. Loss: 0.2312 lr:0.100000 network_time: 0.0296 +[ Thu Sep 15 10:34:32 2022 ] Eval epoch: 42 +[ Thu Sep 15 10:35:10 2022 ] Mean test loss of 296 batches: 1.6431164741516113. +[ Thu Sep 15 10:35:10 2022 ] Top1: 61.43% +[ Thu Sep 15 10:35:10 2022 ] Top5: 90.29% +[ Thu Sep 15 10:35:10 2022 ] Training epoch: 43 +[ Thu Sep 15 10:35:37 2022 ] Batch(33/123) done. Loss: 0.1273 lr:0.100000 network_time: 0.0294 +[ Thu Sep 15 10:36:43 2022 ] Eval epoch: 43 +[ Thu Sep 15 10:37:20 2022 ] Mean test loss of 296 batches: 2.0126781463623047. +[ Thu Sep 15 10:37:20 2022 ] Top1: 57.65% +[ Thu Sep 15 10:37:20 2022 ] Top5: 88.02% +[ Thu Sep 15 10:37:20 2022 ] Training epoch: 44 +[ Thu Sep 15 10:37:31 2022 ] Batch(10/123) done. Loss: 0.1384 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 10:38:44 2022 ] Batch(110/123) done. Loss: 0.1920 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 10:38:53 2022 ] Eval epoch: 44 +[ Thu Sep 15 10:39:30 2022 ] Mean test loss of 296 batches: 1.7614115476608276. +[ Thu Sep 15 10:39:30 2022 ] Top1: 58.85% +[ Thu Sep 15 10:39:30 2022 ] Top5: 89.00% +[ Thu Sep 15 10:39:30 2022 ] Training epoch: 45 +[ Thu Sep 15 10:40:37 2022 ] Batch(87/123) done. Loss: 0.3256 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 10:41:03 2022 ] Eval epoch: 45 +[ Thu Sep 15 10:41:40 2022 ] Mean test loss of 296 batches: 2.09079909324646. +[ Thu Sep 15 10:41:40 2022 ] Top1: 55.14% +[ Thu Sep 15 10:41:40 2022 ] Top5: 85.76% +[ Thu Sep 15 10:41:40 2022 ] Training epoch: 46 +[ Thu Sep 15 10:42:31 2022 ] Batch(64/123) done. Loss: 0.2347 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 10:43:13 2022 ] Eval epoch: 46 +[ Thu Sep 15 10:43:50 2022 ] Mean test loss of 296 batches: 2.158007860183716. 
+[ Thu Sep 15 10:43:50 2022 ] Top1: 57.34% +[ Thu Sep 15 10:43:50 2022 ] Top5: 87.80% +[ Thu Sep 15 10:43:50 2022 ] Training epoch: 47 +[ Thu Sep 15 10:44:24 2022 ] Batch(41/123) done. Loss: 0.2093 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 10:45:23 2022 ] Eval epoch: 47 +[ Thu Sep 15 10:46:01 2022 ] Mean test loss of 296 batches: 2.112342119216919. +[ Thu Sep 15 10:46:01 2022 ] Top1: 55.79% +[ Thu Sep 15 10:46:01 2022 ] Top5: 87.32% +[ Thu Sep 15 10:46:01 2022 ] Training epoch: 48 +[ Thu Sep 15 10:46:18 2022 ] Batch(18/123) done. Loss: 0.2621 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 10:47:31 2022 ] Batch(118/123) done. Loss: 0.3443 lr:0.100000 network_time: 0.0322 +[ Thu Sep 15 10:47:34 2022 ] Eval epoch: 48 +[ Thu Sep 15 10:48:11 2022 ] Mean test loss of 296 batches: 1.8050693273544312. +[ Thu Sep 15 10:48:11 2022 ] Top1: 59.34% +[ Thu Sep 15 10:48:11 2022 ] Top5: 88.33% +[ Thu Sep 15 10:48:11 2022 ] Training epoch: 49 +[ Thu Sep 15 10:49:25 2022 ] Batch(95/123) done. Loss: 0.1170 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 10:49:45 2022 ] Eval epoch: 49 +[ Thu Sep 15 10:50:22 2022 ] Mean test loss of 296 batches: 1.852452039718628. +[ Thu Sep 15 10:50:22 2022 ] Top1: 59.14% +[ Thu Sep 15 10:50:22 2022 ] Top5: 87.89% +[ Thu Sep 15 10:50:22 2022 ] Training epoch: 50 +[ Thu Sep 15 10:51:19 2022 ] Batch(72/123) done. Loss: 0.0930 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 10:51:55 2022 ] Eval epoch: 50 +[ Thu Sep 15 10:52:33 2022 ] Mean test loss of 296 batches: 2.087465286254883. +[ Thu Sep 15 10:52:33 2022 ] Top1: 58.65% +[ Thu Sep 15 10:52:33 2022 ] Top5: 86.39% +[ Thu Sep 15 10:52:33 2022 ] Training epoch: 51 +[ Thu Sep 15 10:53:13 2022 ] Batch(49/123) done. Loss: 0.1617 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 10:54:06 2022 ] Eval epoch: 51 +[ Thu Sep 15 10:54:43 2022 ] Mean test loss of 296 batches: 1.905587911605835. 
+[ Thu Sep 15 10:54:43 2022 ] Top1: 59.78% +[ Thu Sep 15 10:54:44 2022 ] Top5: 88.90% +[ Thu Sep 15 10:54:44 2022 ] Training epoch: 52 +[ Thu Sep 15 10:55:06 2022 ] Batch(26/123) done. Loss: 0.2051 lr:0.100000 network_time: 0.0260 +[ Thu Sep 15 10:56:17 2022 ] Eval epoch: 52 +[ Thu Sep 15 10:56:54 2022 ] Mean test loss of 296 batches: 1.7152221202850342. +[ Thu Sep 15 10:56:54 2022 ] Top1: 60.78% +[ Thu Sep 15 10:56:54 2022 ] Top5: 89.10% +[ Thu Sep 15 10:56:54 2022 ] Training epoch: 53 +[ Thu Sep 15 10:57:00 2022 ] Batch(3/123) done. Loss: 0.2539 lr:0.100000 network_time: 0.0261 +[ Thu Sep 15 10:58:13 2022 ] Batch(103/123) done. Loss: 0.2035 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 10:58:27 2022 ] Eval epoch: 53 +[ Thu Sep 15 10:59:04 2022 ] Mean test loss of 296 batches: 1.6236615180969238. +[ Thu Sep 15 10:59:04 2022 ] Top1: 61.91% +[ Thu Sep 15 10:59:05 2022 ] Top5: 89.67% +[ Thu Sep 15 10:59:05 2022 ] Training epoch: 54 +[ Thu Sep 15 11:00:07 2022 ] Batch(80/123) done. Loss: 0.1902 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 11:00:38 2022 ] Eval epoch: 54 +[ Thu Sep 15 11:01:15 2022 ] Mean test loss of 296 batches: 1.7064021825790405. +[ Thu Sep 15 11:01:15 2022 ] Top1: 62.66% +[ Thu Sep 15 11:01:15 2022 ] Top5: 89.67% +[ Thu Sep 15 11:01:15 2022 ] Training epoch: 55 +[ Thu Sep 15 11:02:01 2022 ] Batch(57/123) done. Loss: 0.2334 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 11:02:48 2022 ] Eval epoch: 55 +[ Thu Sep 15 11:03:26 2022 ] Mean test loss of 296 batches: 2.3813352584838867. +[ Thu Sep 15 11:03:26 2022 ] Top1: 54.53% +[ Thu Sep 15 11:03:26 2022 ] Top5: 85.84% +[ Thu Sep 15 11:03:26 2022 ] Training epoch: 56 +[ Thu Sep 15 11:03:55 2022 ] Batch(34/123) done. Loss: 0.2033 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 11:04:59 2022 ] Eval epoch: 56 +[ Thu Sep 15 11:05:36 2022 ] Mean test loss of 296 batches: 1.7573802471160889. 
+[ Thu Sep 15 11:05:36 2022 ] Top1: 62.56% +[ Thu Sep 15 11:05:36 2022 ] Top5: 90.39% +[ Thu Sep 15 11:05:36 2022 ] Training epoch: 57 +[ Thu Sep 15 11:05:48 2022 ] Batch(11/123) done. Loss: 0.1427 lr:0.100000 network_time: 0.0299 +[ Thu Sep 15 11:07:01 2022 ] Batch(111/123) done. Loss: 0.2397 lr:0.100000 network_time: 0.0333 +[ Thu Sep 15 11:07:10 2022 ] Eval epoch: 57 +[ Thu Sep 15 11:07:47 2022 ] Mean test loss of 296 batches: 1.8162293434143066. +[ Thu Sep 15 11:07:47 2022 ] Top1: 59.19% +[ Thu Sep 15 11:07:47 2022 ] Top5: 89.99% +[ Thu Sep 15 11:07:47 2022 ] Training epoch: 58 +[ Thu Sep 15 11:08:55 2022 ] Batch(88/123) done. Loss: 0.2798 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 11:09:20 2022 ] Eval epoch: 58 +[ Thu Sep 15 11:09:58 2022 ] Mean test loss of 296 batches: 2.3236100673675537. +[ Thu Sep 15 11:09:58 2022 ] Top1: 55.70% +[ Thu Sep 15 11:09:58 2022 ] Top5: 87.75% +[ Thu Sep 15 11:09:58 2022 ] Training epoch: 59 +[ Thu Sep 15 11:10:49 2022 ] Batch(65/123) done. Loss: 0.1962 lr:0.100000 network_time: 0.0308 +[ Thu Sep 15 11:11:31 2022 ] Eval epoch: 59 +[ Thu Sep 15 11:12:08 2022 ] Mean test loss of 296 batches: 2.053630828857422. +[ Thu Sep 15 11:12:08 2022 ] Top1: 58.58% +[ Thu Sep 15 11:12:08 2022 ] Top5: 88.25% +[ Thu Sep 15 11:12:08 2022 ] Training epoch: 60 +[ Thu Sep 15 11:12:43 2022 ] Batch(42/123) done. Loss: 0.1015 lr:0.100000 network_time: 0.0249 +[ Thu Sep 15 11:13:42 2022 ] Eval epoch: 60 +[ Thu Sep 15 11:14:19 2022 ] Mean test loss of 296 batches: 1.6993542909622192. +[ Thu Sep 15 11:14:19 2022 ] Top1: 62.03% +[ Thu Sep 15 11:14:19 2022 ] Top5: 90.67% +[ Thu Sep 15 11:14:19 2022 ] Training epoch: 61 +[ Thu Sep 15 11:14:37 2022 ] Batch(19/123) done. Loss: 0.0292 lr:0.010000 network_time: 0.0269 +[ Thu Sep 15 11:15:50 2022 ] Batch(119/123) done. Loss: 0.0511 lr:0.010000 network_time: 0.0273 +[ Thu Sep 15 11:15:52 2022 ] Eval epoch: 61 +[ Thu Sep 15 11:16:29 2022 ] Mean test loss of 296 batches: 1.3484454154968262. 
+[ Thu Sep 15 11:16:30 2022 ] Top1: 68.61% +[ Thu Sep 15 11:16:30 2022 ] Top5: 93.01% +[ Thu Sep 15 11:16:30 2022 ] Training epoch: 62 +[ Thu Sep 15 11:17:43 2022 ] Batch(96/123) done. Loss: 0.0489 lr:0.010000 network_time: 0.0305 +[ Thu Sep 15 11:18:03 2022 ] Eval epoch: 62 +[ Thu Sep 15 11:18:40 2022 ] Mean test loss of 296 batches: 1.3323984146118164. +[ Thu Sep 15 11:18:40 2022 ] Top1: 69.78% +[ Thu Sep 15 11:18:40 2022 ] Top5: 93.25% +[ Thu Sep 15 11:18:40 2022 ] Training epoch: 63 +[ Thu Sep 15 11:19:37 2022 ] Batch(73/123) done. Loss: 0.0094 lr:0.010000 network_time: 0.0258 +[ Thu Sep 15 11:20:13 2022 ] Eval epoch: 63 +[ Thu Sep 15 11:20:50 2022 ] Mean test loss of 296 batches: 1.361667513847351. +[ Thu Sep 15 11:20:50 2022 ] Top1: 69.23% +[ Thu Sep 15 11:20:50 2022 ] Top5: 93.19% +[ Thu Sep 15 11:20:50 2022 ] Training epoch: 64 +[ Thu Sep 15 11:21:30 2022 ] Batch(50/123) done. Loss: 0.0066 lr:0.010000 network_time: 0.0264 +[ Thu Sep 15 11:22:23 2022 ] Eval epoch: 64 +[ Thu Sep 15 11:23:00 2022 ] Mean test loss of 296 batches: 1.3657195568084717. +[ Thu Sep 15 11:23:00 2022 ] Top1: 69.12% +[ Thu Sep 15 11:23:00 2022 ] Top5: 92.94% +[ Thu Sep 15 11:23:00 2022 ] Training epoch: 65 +[ Thu Sep 15 11:23:24 2022 ] Batch(27/123) done. Loss: 0.0252 lr:0.010000 network_time: 0.0274 +[ Thu Sep 15 11:24:33 2022 ] Eval epoch: 65 +[ Thu Sep 15 11:25:10 2022 ] Mean test loss of 296 batches: 1.3531643152236938. +[ Thu Sep 15 11:25:10 2022 ] Top1: 69.38% +[ Thu Sep 15 11:25:11 2022 ] Top5: 93.13% +[ Thu Sep 15 11:25:11 2022 ] Training epoch: 66 +[ Thu Sep 15 11:25:17 2022 ] Batch(4/123) done. Loss: 0.0047 lr:0.010000 network_time: 0.0257 +[ Thu Sep 15 11:26:30 2022 ] Batch(104/123) done. Loss: 0.0067 lr:0.010000 network_time: 0.0280 +[ Thu Sep 15 11:26:44 2022 ] Eval epoch: 66 +[ Thu Sep 15 11:27:21 2022 ] Mean test loss of 296 batches: 1.3787643909454346. 
+[ Thu Sep 15 11:27:21 2022 ] Top1: 69.09% +[ Thu Sep 15 11:27:21 2022 ] Top5: 93.00% +[ Thu Sep 15 11:27:21 2022 ] Training epoch: 67 +[ Thu Sep 15 11:28:24 2022 ] Batch(81/123) done. Loss: 0.0074 lr:0.010000 network_time: 0.0309 +[ Thu Sep 15 11:28:54 2022 ] Eval epoch: 67 +[ Thu Sep 15 11:29:31 2022 ] Mean test loss of 296 batches: 1.3409534692764282. +[ Thu Sep 15 11:29:31 2022 ] Top1: 69.84% +[ Thu Sep 15 11:29:31 2022 ] Top5: 93.29% +[ Thu Sep 15 11:29:31 2022 ] Training epoch: 68 +[ Thu Sep 15 11:30:17 2022 ] Batch(58/123) done. Loss: 0.0077 lr:0.010000 network_time: 0.0301 +[ Thu Sep 15 11:31:04 2022 ] Eval epoch: 68 +[ Thu Sep 15 11:31:41 2022 ] Mean test loss of 296 batches: 1.368935465812683. +[ Thu Sep 15 11:31:41 2022 ] Top1: 69.40% +[ Thu Sep 15 11:31:41 2022 ] Top5: 92.97% +[ Thu Sep 15 11:31:42 2022 ] Training epoch: 69 +[ Thu Sep 15 11:32:11 2022 ] Batch(35/123) done. Loss: 0.0123 lr:0.010000 network_time: 0.0272 +[ Thu Sep 15 11:33:15 2022 ] Eval epoch: 69 +[ Thu Sep 15 11:33:52 2022 ] Mean test loss of 296 batches: 1.3770889043807983. +[ Thu Sep 15 11:33:52 2022 ] Top1: 69.31% +[ Thu Sep 15 11:33:52 2022 ] Top5: 93.02% +[ Thu Sep 15 11:33:52 2022 ] Training epoch: 70 +[ Thu Sep 15 11:34:04 2022 ] Batch(12/123) done. Loss: 0.0053 lr:0.010000 network_time: 0.0257 +[ Thu Sep 15 11:35:17 2022 ] Batch(112/123) done. Loss: 0.0088 lr:0.010000 network_time: 0.0270 +[ Thu Sep 15 11:35:25 2022 ] Eval epoch: 70 +[ Thu Sep 15 11:36:02 2022 ] Mean test loss of 296 batches: 1.3863496780395508. +[ Thu Sep 15 11:36:02 2022 ] Top1: 69.12% +[ Thu Sep 15 11:36:02 2022 ] Top5: 92.98% +[ Thu Sep 15 11:36:02 2022 ] Training epoch: 71 +[ Thu Sep 15 11:37:11 2022 ] Batch(89/123) done. Loss: 0.0057 lr:0.010000 network_time: 0.0291 +[ Thu Sep 15 11:37:36 2022 ] Eval epoch: 71 +[ Thu Sep 15 11:38:13 2022 ] Mean test loss of 296 batches: 1.344552993774414. 
+[ Thu Sep 15 11:38:13 2022 ] Top1: 69.96% +[ Thu Sep 15 11:38:13 2022 ] Top5: 93.27% +[ Thu Sep 15 11:38:13 2022 ] Training epoch: 72 +[ Thu Sep 15 11:39:05 2022 ] Batch(66/123) done. Loss: 0.0115 lr:0.010000 network_time: 0.0268 +[ Thu Sep 15 11:39:47 2022 ] Eval epoch: 72 +[ Thu Sep 15 11:40:24 2022 ] Mean test loss of 296 batches: 1.3615833520889282. +[ Thu Sep 15 11:40:24 2022 ] Top1: 69.70% +[ Thu Sep 15 11:40:24 2022 ] Top5: 93.27% +[ Thu Sep 15 11:40:24 2022 ] Training epoch: 73 +[ Thu Sep 15 11:40:59 2022 ] Batch(43/123) done. Loss: 0.0044 lr:0.010000 network_time: 0.0278 +[ Thu Sep 15 11:41:57 2022 ] Eval epoch: 73 +[ Thu Sep 15 11:42:34 2022 ] Mean test loss of 296 batches: 1.357302188873291. +[ Thu Sep 15 11:42:34 2022 ] Top1: 69.78% +[ Thu Sep 15 11:42:34 2022 ] Top5: 93.29% +[ Thu Sep 15 11:42:34 2022 ] Training epoch: 74 +[ Thu Sep 15 11:42:52 2022 ] Batch(20/123) done. Loss: 0.0073 lr:0.010000 network_time: 0.0312 +[ Thu Sep 15 11:44:05 2022 ] Batch(120/123) done. Loss: 0.0049 lr:0.010000 network_time: 0.0256 +[ Thu Sep 15 11:44:07 2022 ] Eval epoch: 74 +[ Thu Sep 15 11:44:44 2022 ] Mean test loss of 296 batches: 1.3960591554641724. +[ Thu Sep 15 11:44:44 2022 ] Top1: 69.39% +[ Thu Sep 15 11:44:44 2022 ] Top5: 93.07% +[ Thu Sep 15 11:44:44 2022 ] Training epoch: 75 +[ Thu Sep 15 11:45:59 2022 ] Batch(97/123) done. Loss: 0.0030 lr:0.010000 network_time: 0.0263 +[ Thu Sep 15 11:46:17 2022 ] Eval epoch: 75 +[ Thu Sep 15 11:46:54 2022 ] Mean test loss of 296 batches: 1.3555740118026733. +[ Thu Sep 15 11:46:54 2022 ] Top1: 69.99% +[ Thu Sep 15 11:46:55 2022 ] Top5: 93.27% +[ Thu Sep 15 11:46:55 2022 ] Training epoch: 76 +[ Thu Sep 15 11:47:53 2022 ] Batch(74/123) done. Loss: 0.0033 lr:0.010000 network_time: 0.0308 +[ Thu Sep 15 11:48:28 2022 ] Eval epoch: 76 +[ Thu Sep 15 11:49:05 2022 ] Mean test loss of 296 batches: 1.3962376117706299. 
+[ Thu Sep 15 11:49:05 2022 ] Top1: 69.70% +[ Thu Sep 15 11:49:05 2022 ] Top5: 92.89% +[ Thu Sep 15 11:49:05 2022 ] Training epoch: 77 +[ Thu Sep 15 11:49:46 2022 ] Batch(51/123) done. Loss: 0.0025 lr:0.010000 network_time: 0.0273 +[ Thu Sep 15 11:50:38 2022 ] Eval epoch: 77 +[ Thu Sep 15 11:51:15 2022 ] Mean test loss of 296 batches: 1.3623369932174683. +[ Thu Sep 15 11:51:16 2022 ] Top1: 70.01% +[ Thu Sep 15 11:51:16 2022 ] Top5: 93.27% +[ Thu Sep 15 11:51:16 2022 ] Training epoch: 78 +[ Thu Sep 15 11:51:40 2022 ] Batch(28/123) done. Loss: 0.0022 lr:0.010000 network_time: 0.0318 +[ Thu Sep 15 11:52:49 2022 ] Eval epoch: 78 +[ Thu Sep 15 11:53:26 2022 ] Mean test loss of 296 batches: 1.38022780418396. +[ Thu Sep 15 11:53:26 2022 ] Top1: 69.73% +[ Thu Sep 15 11:53:26 2022 ] Top5: 93.17% +[ Thu Sep 15 11:53:26 2022 ] Training epoch: 79 +[ Thu Sep 15 11:53:33 2022 ] Batch(5/123) done. Loss: 0.0195 lr:0.010000 network_time: 0.0266 +[ Thu Sep 15 11:54:46 2022 ] Batch(105/123) done. Loss: 0.0093 lr:0.010000 network_time: 0.0262 +[ Thu Sep 15 11:54:59 2022 ] Eval epoch: 79 +[ Thu Sep 15 11:55:36 2022 ] Mean test loss of 296 batches: 1.3709092140197754. +[ Thu Sep 15 11:55:36 2022 ] Top1: 69.84% +[ Thu Sep 15 11:55:36 2022 ] Top5: 93.15% +[ Thu Sep 15 11:55:36 2022 ] Training epoch: 80 +[ Thu Sep 15 11:56:40 2022 ] Batch(82/123) done. Loss: 0.0055 lr:0.010000 network_time: 0.0318 +[ Thu Sep 15 11:57:09 2022 ] Eval epoch: 80 +[ Thu Sep 15 11:57:46 2022 ] Mean test loss of 296 batches: 1.402287483215332. +[ Thu Sep 15 11:57:46 2022 ] Top1: 69.55% +[ Thu Sep 15 11:57:46 2022 ] Top5: 92.88% +[ Thu Sep 15 11:57:46 2022 ] Training epoch: 81 +[ Thu Sep 15 11:58:33 2022 ] Batch(59/123) done. Loss: 0.0033 lr:0.001000 network_time: 0.0270 +[ Thu Sep 15 11:59:19 2022 ] Eval epoch: 81 +[ Thu Sep 15 11:59:56 2022 ] Mean test loss of 296 batches: 1.348851203918457. 
+[ Thu Sep 15 11:59:56 2022 ] Top1: 70.00% +[ Thu Sep 15 11:59:56 2022 ] Top5: 93.37% +[ Thu Sep 15 11:59:56 2022 ] Training epoch: 82 +[ Thu Sep 15 12:00:26 2022 ] Batch(36/123) done. Loss: 0.0039 lr:0.001000 network_time: 0.0267 +[ Thu Sep 15 12:01:29 2022 ] Eval epoch: 82 +[ Thu Sep 15 12:02:06 2022 ] Mean test loss of 296 batches: 1.3820340633392334. +[ Thu Sep 15 12:02:06 2022 ] Top1: 69.62% +[ Thu Sep 15 12:02:06 2022 ] Top5: 93.22% +[ Thu Sep 15 12:02:06 2022 ] Training epoch: 83 +[ Thu Sep 15 12:02:19 2022 ] Batch(13/123) done. Loss: 0.0063 lr:0.001000 network_time: 0.0291 +[ Thu Sep 15 12:03:32 2022 ] Batch(113/123) done. Loss: 0.0157 lr:0.001000 network_time: 0.0277 +[ Thu Sep 15 12:03:39 2022 ] Eval epoch: 83 +[ Thu Sep 15 12:04:16 2022 ] Mean test loss of 296 batches: 1.3838192224502563. +[ Thu Sep 15 12:04:16 2022 ] Top1: 69.71% +[ Thu Sep 15 12:04:16 2022 ] Top5: 93.11% +[ Thu Sep 15 12:04:17 2022 ] Training epoch: 84 +[ Thu Sep 15 12:05:26 2022 ] Batch(90/123) done. Loss: 0.0081 lr:0.001000 network_time: 0.0255 +[ Thu Sep 15 12:05:49 2022 ] Eval epoch: 84 +[ Thu Sep 15 12:06:26 2022 ] Mean test loss of 296 batches: 1.3555415868759155. +[ Thu Sep 15 12:06:27 2022 ] Top1: 70.05% +[ Thu Sep 15 12:06:27 2022 ] Top5: 93.31% +[ Thu Sep 15 12:06:27 2022 ] Training epoch: 85 +[ Thu Sep 15 12:07:19 2022 ] Batch(67/123) done. Loss: 0.0021 lr:0.001000 network_time: 0.0260 +[ Thu Sep 15 12:08:00 2022 ] Eval epoch: 85 +[ Thu Sep 15 12:08:37 2022 ] Mean test loss of 296 batches: 1.3763482570648193. +[ Thu Sep 15 12:08:37 2022 ] Top1: 69.96% +[ Thu Sep 15 12:08:37 2022 ] Top5: 93.12% +[ Thu Sep 15 12:08:37 2022 ] Training epoch: 86 +[ Thu Sep 15 12:09:13 2022 ] Batch(44/123) done. Loss: 0.0639 lr:0.001000 network_time: 0.0294 +[ Thu Sep 15 12:10:10 2022 ] Eval epoch: 86 +[ Thu Sep 15 12:10:47 2022 ] Mean test loss of 296 batches: 1.3721981048583984. 
+[ Thu Sep 15 12:10:47 2022 ] Top1: 69.88% +[ Thu Sep 15 12:10:47 2022 ] Top5: 93.12% +[ Thu Sep 15 12:10:47 2022 ] Training epoch: 87 +[ Thu Sep 15 12:11:07 2022 ] Batch(21/123) done. Loss: 0.0064 lr:0.001000 network_time: 0.0290 +[ Thu Sep 15 12:12:20 2022 ] Batch(121/123) done. Loss: 0.0061 lr:0.001000 network_time: 0.0421 +[ Thu Sep 15 12:12:20 2022 ] Eval epoch: 87 +[ Thu Sep 15 12:12:57 2022 ] Mean test loss of 296 batches: 1.3810360431671143. +[ Thu Sep 15 12:12:57 2022 ] Top1: 69.66% +[ Thu Sep 15 12:12:57 2022 ] Top5: 93.09% +[ Thu Sep 15 12:12:57 2022 ] Training epoch: 88 +[ Thu Sep 15 12:14:13 2022 ] Batch(98/123) done. Loss: 0.0034 lr:0.001000 network_time: 0.0271 +[ Thu Sep 15 12:14:31 2022 ] Eval epoch: 88 +[ Thu Sep 15 12:15:07 2022 ] Mean test loss of 296 batches: 1.3972258567810059. +[ Thu Sep 15 12:15:07 2022 ] Top1: 69.61% +[ Thu Sep 15 12:15:07 2022 ] Top5: 93.00% +[ Thu Sep 15 12:15:08 2022 ] Training epoch: 89 +[ Thu Sep 15 12:16:06 2022 ] Batch(75/123) done. Loss: 0.0148 lr:0.001000 network_time: 0.0313 +[ Thu Sep 15 12:16:41 2022 ] Eval epoch: 89 +[ Thu Sep 15 12:17:18 2022 ] Mean test loss of 296 batches: 1.3643290996551514. +[ Thu Sep 15 12:17:18 2022 ] Top1: 69.97% +[ Thu Sep 15 12:17:18 2022 ] Top5: 93.34% +[ Thu Sep 15 12:17:18 2022 ] Training epoch: 90 +[ Thu Sep 15 12:17:59 2022 ] Batch(52/123) done. Loss: 0.0081 lr:0.001000 network_time: 0.0261 +[ Thu Sep 15 12:18:51 2022 ] Eval epoch: 90 +[ Thu Sep 15 12:19:28 2022 ] Mean test loss of 296 batches: 1.3755717277526855. +[ Thu Sep 15 12:19:28 2022 ] Top1: 69.98% +[ Thu Sep 15 12:19:28 2022 ] Top5: 93.14% +[ Thu Sep 15 12:19:28 2022 ] Training epoch: 91 +[ Thu Sep 15 12:19:53 2022 ] Batch(29/123) done. Loss: 0.0027 lr:0.001000 network_time: 0.0275 +[ Thu Sep 15 12:21:01 2022 ] Eval epoch: 91 +[ Thu Sep 15 12:21:38 2022 ] Mean test loss of 296 batches: 1.3874123096466064. 
+[ Thu Sep 15 12:21:38 2022 ] Top1: 69.63% +[ Thu Sep 15 12:21:38 2022 ] Top5: 93.17% +[ Thu Sep 15 12:21:38 2022 ] Training epoch: 92 +[ Thu Sep 15 12:21:47 2022 ] Batch(6/123) done. Loss: 0.0041 lr:0.001000 network_time: 0.0274 +[ Thu Sep 15 12:23:00 2022 ] Batch(106/123) done. Loss: 0.0038 lr:0.001000 network_time: 0.0290 +[ Thu Sep 15 12:23:12 2022 ] Eval epoch: 92 +[ Thu Sep 15 12:23:49 2022 ] Mean test loss of 296 batches: 1.3890985250473022. +[ Thu Sep 15 12:23:49 2022 ] Top1: 69.61% +[ Thu Sep 15 12:23:49 2022 ] Top5: 93.04% +[ Thu Sep 15 12:23:49 2022 ] Training epoch: 93 +[ Thu Sep 15 12:24:53 2022 ] Batch(83/123) done. Loss: 0.0086 lr:0.001000 network_time: 0.0306 +[ Thu Sep 15 12:25:22 2022 ] Eval epoch: 93 +[ Thu Sep 15 12:25:59 2022 ] Mean test loss of 296 batches: 1.379187822341919. +[ Thu Sep 15 12:25:59 2022 ] Top1: 69.88% +[ Thu Sep 15 12:26:00 2022 ] Top5: 93.11% +[ Thu Sep 15 12:26:00 2022 ] Training epoch: 94 +[ Thu Sep 15 12:26:47 2022 ] Batch(60/123) done. Loss: 0.0080 lr:0.001000 network_time: 0.0272 +[ Thu Sep 15 12:27:33 2022 ] Eval epoch: 94 +[ Thu Sep 15 12:28:10 2022 ] Mean test loss of 296 batches: 1.3838298320770264. +[ Thu Sep 15 12:28:10 2022 ] Top1: 69.74% +[ Thu Sep 15 12:28:10 2022 ] Top5: 93.10% +[ Thu Sep 15 12:28:10 2022 ] Training epoch: 95 +[ Thu Sep 15 12:28:41 2022 ] Batch(37/123) done. Loss: 0.0027 lr:0.001000 network_time: 0.0272 +[ Thu Sep 15 12:29:43 2022 ] Eval epoch: 95 +[ Thu Sep 15 12:30:20 2022 ] Mean test loss of 296 batches: 1.3837475776672363. +[ Thu Sep 15 12:30:20 2022 ] Top1: 69.87% +[ Thu Sep 15 12:30:20 2022 ] Top5: 93.04% +[ Thu Sep 15 12:30:20 2022 ] Training epoch: 96 +[ Thu Sep 15 12:30:34 2022 ] Batch(14/123) done. Loss: 0.0077 lr:0.001000 network_time: 0.0262 +[ Thu Sep 15 12:31:47 2022 ] Batch(114/123) done. Loss: 0.0020 lr:0.001000 network_time: 0.0305 +[ Thu Sep 15 12:31:53 2022 ] Eval epoch: 96 +[ Thu Sep 15 12:32:30 2022 ] Mean test loss of 296 batches: 1.431807041168213. 
+[ Thu Sep 15 12:32:30 2022 ] Top1: 69.07% +[ Thu Sep 15 12:32:31 2022 ] Top5: 92.89% +[ Thu Sep 15 12:32:31 2022 ] Training epoch: 97 +[ Thu Sep 15 12:33:41 2022 ] Batch(91/123) done. Loss: 0.0155 lr:0.001000 network_time: 0.0277 +[ Thu Sep 15 12:34:04 2022 ] Eval epoch: 97 +[ Thu Sep 15 12:34:41 2022 ] Mean test loss of 296 batches: 1.4066472053527832. +[ Thu Sep 15 12:34:41 2022 ] Top1: 69.57% +[ Thu Sep 15 12:34:41 2022 ] Top5: 93.02% +[ Thu Sep 15 12:34:41 2022 ] Training epoch: 98 +[ Thu Sep 15 12:35:34 2022 ] Batch(68/123) done. Loss: 0.0044 lr:0.001000 network_time: 0.0265 +[ Thu Sep 15 12:36:14 2022 ] Eval epoch: 98 +[ Thu Sep 15 12:36:51 2022 ] Mean test loss of 296 batches: 1.3874813318252563. +[ Thu Sep 15 12:36:51 2022 ] Top1: 69.70% +[ Thu Sep 15 12:36:51 2022 ] Top5: 93.10% +[ Thu Sep 15 12:36:51 2022 ] Training epoch: 99 +[ Thu Sep 15 12:37:27 2022 ] Batch(45/123) done. Loss: 0.0061 lr:0.001000 network_time: 0.0260 +[ Thu Sep 15 12:38:24 2022 ] Eval epoch: 99 +[ Thu Sep 15 12:39:01 2022 ] Mean test loss of 296 batches: 1.4042295217514038. +[ Thu Sep 15 12:39:01 2022 ] Top1: 69.63% +[ Thu Sep 15 12:39:01 2022 ] Top5: 93.14% +[ Thu Sep 15 12:39:01 2022 ] Training epoch: 100 +[ Thu Sep 15 12:39:20 2022 ] Batch(22/123) done. Loss: 0.0027 lr:0.001000 network_time: 0.0281 +[ Thu Sep 15 12:40:33 2022 ] Batch(122/123) done. Loss: 0.0149 lr:0.001000 network_time: 0.0274 +[ Thu Sep 15 12:40:34 2022 ] Eval epoch: 100 +[ Thu Sep 15 12:41:11 2022 ] Mean test loss of 296 batches: 1.3698790073394775. +[ Thu Sep 15 12:41:11 2022 ] Top1: 69.92% +[ Thu Sep 15 12:41:11 2022 ] Top5: 93.39% +[ Thu Sep 15 12:41:11 2022 ] Training epoch: 101 +[ Thu Sep 15 12:42:27 2022 ] Batch(99/123) done. Loss: 0.0026 lr:0.000100 network_time: 0.0365 +[ Thu Sep 15 12:42:44 2022 ] Eval epoch: 101 +[ Thu Sep 15 12:43:21 2022 ] Mean test loss of 296 batches: 1.384576678276062. 
+[ Thu Sep 15 12:43:21 2022 ] Top1: 69.62% +[ Thu Sep 15 12:43:21 2022 ] Top5: 93.15% +[ Thu Sep 15 12:43:21 2022 ] Training epoch: 102 +[ Thu Sep 15 12:44:20 2022 ] Batch(76/123) done. Loss: 0.0066 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 12:44:54 2022 ] Eval epoch: 102 +[ Thu Sep 15 12:45:31 2022 ] Mean test loss of 296 batches: 1.3877416849136353. +[ Thu Sep 15 12:45:31 2022 ] Top1: 69.81% +[ Thu Sep 15 12:45:31 2022 ] Top5: 93.10% +[ Thu Sep 15 12:45:31 2022 ] Training epoch: 103 +[ Thu Sep 15 12:46:14 2022 ] Batch(53/123) done. Loss: 0.0104 lr:0.000100 network_time: 0.0283 +[ Thu Sep 15 12:47:04 2022 ] Eval epoch: 103 +[ Thu Sep 15 12:47:41 2022 ] Mean test loss of 296 batches: 1.4137234687805176. +[ Thu Sep 15 12:47:41 2022 ] Top1: 69.38% +[ Thu Sep 15 12:47:41 2022 ] Top5: 93.06% +[ Thu Sep 15 12:47:41 2022 ] Training epoch: 104 +[ Thu Sep 15 12:48:07 2022 ] Batch(30/123) done. Loss: 0.0088 lr:0.000100 network_time: 0.0303 +[ Thu Sep 15 12:49:14 2022 ] Eval epoch: 104 +[ Thu Sep 15 12:49:51 2022 ] Mean test loss of 296 batches: 1.3649687767028809. +[ Thu Sep 15 12:49:51 2022 ] Top1: 70.33% +[ Thu Sep 15 12:49:51 2022 ] Top5: 93.36% +[ Thu Sep 15 12:49:51 2022 ] Training epoch: 105 +[ Thu Sep 15 12:50:00 2022 ] Batch(7/123) done. Loss: 0.0052 lr:0.000100 network_time: 0.0303 +[ Thu Sep 15 12:51:13 2022 ] Batch(107/123) done. Loss: 0.0038 lr:0.000100 network_time: 0.0263 +[ Thu Sep 15 12:51:24 2022 ] Eval epoch: 105 +[ Thu Sep 15 12:52:01 2022 ] Mean test loss of 296 batches: 1.3972617387771606. +[ Thu Sep 15 12:52:02 2022 ] Top1: 69.69% +[ Thu Sep 15 12:52:02 2022 ] Top5: 93.09% +[ Thu Sep 15 12:52:02 2022 ] Training epoch: 106 +[ Thu Sep 15 12:53:07 2022 ] Batch(84/123) done. Loss: 0.0032 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 12:53:35 2022 ] Eval epoch: 106 +[ Thu Sep 15 12:54:12 2022 ] Mean test loss of 296 batches: 1.3901668787002563. 
+[ Thu Sep 15 12:54:12 2022 ] Top1: 69.78% +[ Thu Sep 15 12:54:12 2022 ] Top5: 93.29% +[ Thu Sep 15 12:54:12 2022 ] Training epoch: 107 +[ Thu Sep 15 12:55:00 2022 ] Batch(61/123) done. Loss: 0.0034 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 12:55:45 2022 ] Eval epoch: 107 +[ Thu Sep 15 12:56:22 2022 ] Mean test loss of 296 batches: 1.407670259475708. +[ Thu Sep 15 12:56:22 2022 ] Top1: 69.36% +[ Thu Sep 15 12:56:22 2022 ] Top5: 93.06% +[ Thu Sep 15 12:56:22 2022 ] Training epoch: 108 +[ Thu Sep 15 12:56:54 2022 ] Batch(38/123) done. Loss: 0.0030 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 12:57:56 2022 ] Eval epoch: 108 +[ Thu Sep 15 12:58:33 2022 ] Mean test loss of 296 batches: 1.4096759557724. +[ Thu Sep 15 12:58:33 2022 ] Top1: 69.60% +[ Thu Sep 15 12:58:33 2022 ] Top5: 92.99% +[ Thu Sep 15 12:58:33 2022 ] Training epoch: 109 +[ Thu Sep 15 12:58:47 2022 ] Batch(15/123) done. Loss: 0.0158 lr:0.000100 network_time: 0.0263 +[ Thu Sep 15 13:00:01 2022 ] Batch(115/123) done. Loss: 0.0069 lr:0.000100 network_time: 0.0271 +[ Thu Sep 15 13:00:06 2022 ] Eval epoch: 109 +[ Thu Sep 15 13:00:43 2022 ] Mean test loss of 296 batches: 1.4066957235336304. +[ Thu Sep 15 13:00:43 2022 ] Top1: 69.54% +[ Thu Sep 15 13:00:43 2022 ] Top5: 93.02% +[ Thu Sep 15 13:00:43 2022 ] Training epoch: 110 +[ Thu Sep 15 13:01:54 2022 ] Batch(92/123) done. Loss: 0.0026 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 13:02:16 2022 ] Eval epoch: 110 +[ Thu Sep 15 13:02:53 2022 ] Mean test loss of 296 batches: 1.3835705518722534. +[ Thu Sep 15 13:02:53 2022 ] Top1: 69.88% +[ Thu Sep 15 13:02:53 2022 ] Top5: 93.20% +[ Thu Sep 15 13:02:53 2022 ] Training epoch: 111 +[ Thu Sep 15 13:03:47 2022 ] Batch(69/123) done. Loss: 0.0019 lr:0.000100 network_time: 0.0265 +[ Thu Sep 15 13:04:26 2022 ] Eval epoch: 111 +[ Thu Sep 15 13:05:03 2022 ] Mean test loss of 296 batches: 1.3800050020217896. 
+[ Thu Sep 15 13:05:03 2022 ] Top1: 69.69% +[ Thu Sep 15 13:05:03 2022 ] Top5: 93.25% +[ Thu Sep 15 13:05:03 2022 ] Training epoch: 112 +[ Thu Sep 15 13:05:41 2022 ] Batch(46/123) done. Loss: 0.0028 lr:0.000100 network_time: 0.0264 +[ Thu Sep 15 13:06:36 2022 ] Eval epoch: 112 +[ Thu Sep 15 13:07:13 2022 ] Mean test loss of 296 batches: 1.38374924659729. +[ Thu Sep 15 13:07:14 2022 ] Top1: 69.60% +[ Thu Sep 15 13:07:14 2022 ] Top5: 93.28% +[ Thu Sep 15 13:07:14 2022 ] Training epoch: 113 +[ Thu Sep 15 13:07:34 2022 ] Batch(23/123) done. Loss: 0.0083 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 13:08:47 2022 ] Eval epoch: 113 +[ Thu Sep 15 13:09:24 2022 ] Mean test loss of 296 batches: 1.4301618337631226. +[ Thu Sep 15 13:09:24 2022 ] Top1: 69.08% +[ Thu Sep 15 13:09:24 2022 ] Top5: 92.84% +[ Thu Sep 15 13:09:24 2022 ] Training epoch: 114 +[ Thu Sep 15 13:09:27 2022 ] Batch(0/123) done. Loss: 0.0022 lr:0.000100 network_time: 0.0561 +[ Thu Sep 15 13:10:41 2022 ] Batch(100/123) done. Loss: 0.0127 lr:0.000100 network_time: 0.0263 +[ Thu Sep 15 13:10:57 2022 ] Eval epoch: 114 +[ Thu Sep 15 13:11:34 2022 ] Mean test loss of 296 batches: 1.3947770595550537. +[ Thu Sep 15 13:11:34 2022 ] Top1: 69.63% +[ Thu Sep 15 13:11:34 2022 ] Top5: 93.05% +[ Thu Sep 15 13:11:34 2022 ] Training epoch: 115 +[ Thu Sep 15 13:12:34 2022 ] Batch(77/123) done. Loss: 0.0068 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 13:13:07 2022 ] Eval epoch: 115 +[ Thu Sep 15 13:13:44 2022 ] Mean test loss of 296 batches: 1.3894644975662231. +[ Thu Sep 15 13:13:44 2022 ] Top1: 69.58% +[ Thu Sep 15 13:13:44 2022 ] Top5: 93.03% +[ Thu Sep 15 13:13:44 2022 ] Training epoch: 116 +[ Thu Sep 15 13:14:28 2022 ] Batch(54/123) done. Loss: 0.0118 lr:0.000100 network_time: 0.0303 +[ Thu Sep 15 13:15:17 2022 ] Eval epoch: 116 +[ Thu Sep 15 13:15:55 2022 ] Mean test loss of 296 batches: 1.3956997394561768. 
+[ Thu Sep 15 13:15:55 2022 ] Top1: 69.87% +[ Thu Sep 15 13:15:55 2022 ] Top5: 92.98% +[ Thu Sep 15 13:15:55 2022 ] Training epoch: 117 +[ Thu Sep 15 13:16:21 2022 ] Batch(31/123) done. Loss: 0.0023 lr:0.000100 network_time: 0.0271 +[ Thu Sep 15 13:17:28 2022 ] Eval epoch: 117 +[ Thu Sep 15 13:18:05 2022 ] Mean test loss of 296 batches: 1.4064886569976807. +[ Thu Sep 15 13:18:05 2022 ] Top1: 69.64% +[ Thu Sep 15 13:18:05 2022 ] Top5: 93.04% +[ Thu Sep 15 13:18:05 2022 ] Training epoch: 118 +[ Thu Sep 15 13:18:15 2022 ] Batch(8/123) done. Loss: 0.0087 lr:0.000100 network_time: 0.0254 +[ Thu Sep 15 13:19:28 2022 ] Batch(108/123) done. Loss: 0.0084 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 13:19:38 2022 ] Eval epoch: 118 +[ Thu Sep 15 13:20:16 2022 ] Mean test loss of 296 batches: 1.3757609128952026. +[ Thu Sep 15 13:20:16 2022 ] Top1: 69.97% +[ Thu Sep 15 13:20:16 2022 ] Top5: 93.31% +[ Thu Sep 15 13:20:16 2022 ] Training epoch: 119 +[ Thu Sep 15 13:21:22 2022 ] Batch(85/123) done. Loss: 0.0081 lr:0.000100 network_time: 0.0268 +[ Thu Sep 15 13:21:49 2022 ] Eval epoch: 119 +[ Thu Sep 15 13:22:26 2022 ] Mean test loss of 296 batches: 1.4105119705200195. +[ Thu Sep 15 13:22:27 2022 ] Top1: 69.50% +[ Thu Sep 15 13:22:27 2022 ] Top5: 92.96% +[ Thu Sep 15 13:22:27 2022 ] Training epoch: 120 +[ Thu Sep 15 13:23:16 2022 ] Batch(62/123) done. Loss: 0.0050 lr:0.000100 network_time: 0.0262 +[ Thu Sep 15 13:24:00 2022 ] Eval epoch: 120 +[ Thu Sep 15 13:24:37 2022 ] Mean test loss of 296 batches: 1.399190902709961. +[ Thu Sep 15 13:24:37 2022 ] Top1: 69.63% +[ Thu Sep 15 13:24:37 2022 ] Top5: 92.98% +[ Thu Sep 15 13:24:37 2022 ] Training epoch: 121 +[ Thu Sep 15 13:25:09 2022 ] Batch(39/123) done. Loss: 0.0024 lr:0.000100 network_time: 0.0299 +[ Thu Sep 15 13:26:10 2022 ] Eval epoch: 121 +[ Thu Sep 15 13:26:47 2022 ] Mean test loss of 296 batches: 1.3894983530044556. 
+[ Thu Sep 15 13:26:47 2022 ] Top1: 69.64% +[ Thu Sep 15 13:26:47 2022 ] Top5: 93.08% +[ Thu Sep 15 13:26:47 2022 ] Training epoch: 122 +[ Thu Sep 15 13:27:03 2022 ] Batch(16/123) done. Loss: 0.0064 lr:0.000100 network_time: 0.0252 +[ Thu Sep 15 13:28:16 2022 ] Batch(116/123) done. Loss: 0.0035 lr:0.000100 network_time: 0.0265 +[ Thu Sep 15 13:28:20 2022 ] Eval epoch: 122 +[ Thu Sep 15 13:28:57 2022 ] Mean test loss of 296 batches: 1.3649495840072632. +[ Thu Sep 15 13:28:57 2022 ] Top1: 70.31% +[ Thu Sep 15 13:28:57 2022 ] Top5: 93.20% +[ Thu Sep 15 13:28:57 2022 ] Training epoch: 123 +[ Thu Sep 15 13:30:08 2022 ] Batch(93/123) done. Loss: 0.0045 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 13:30:30 2022 ] Eval epoch: 123 +[ Thu Sep 15 13:31:07 2022 ] Mean test loss of 296 batches: 1.4097572565078735. +[ Thu Sep 15 13:31:07 2022 ] Top1: 69.45% +[ Thu Sep 15 13:31:07 2022 ] Top5: 92.94% +[ Thu Sep 15 13:31:07 2022 ] Training epoch: 124 +[ Thu Sep 15 13:32:02 2022 ] Batch(70/123) done. Loss: 0.0120 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 13:32:40 2022 ] Eval epoch: 124 +[ Thu Sep 15 13:33:17 2022 ] Mean test loss of 296 batches: 1.3734040260314941. +[ Thu Sep 15 13:33:17 2022 ] Top1: 70.04% +[ Thu Sep 15 13:33:17 2022 ] Top5: 93.28% +[ Thu Sep 15 13:33:17 2022 ] Training epoch: 125 +[ Thu Sep 15 13:33:55 2022 ] Batch(47/123) done. Loss: 0.0026 lr:0.000100 network_time: 0.0248 +[ Thu Sep 15 13:34:50 2022 ] Eval epoch: 125 +[ Thu Sep 15 13:35:27 2022 ] Mean test loss of 296 batches: 1.396437406539917. +[ Thu Sep 15 13:35:27 2022 ] Top1: 69.73% +[ Thu Sep 15 13:35:27 2022 ] Top5: 93.10% +[ Thu Sep 15 13:35:27 2022 ] Training epoch: 126 +[ Thu Sep 15 13:35:48 2022 ] Batch(24/123) done. Loss: 0.0024 lr:0.000100 network_time: 0.0296 +[ Thu Sep 15 13:37:00 2022 ] Eval epoch: 126 +[ Thu Sep 15 13:37:37 2022 ] Mean test loss of 296 batches: 1.3989466428756714. 
+[ Thu Sep 15 13:37:37 2022 ] Top1: 69.76% +[ Thu Sep 15 13:37:38 2022 ] Top5: 93.13% +[ Thu Sep 15 13:37:38 2022 ] Training epoch: 127 +[ Thu Sep 15 13:37:42 2022 ] Batch(1/123) done. Loss: 0.0046 lr:0.000100 network_time: 0.0527 +[ Thu Sep 15 13:38:55 2022 ] Batch(101/123) done. Loss: 0.0047 lr:0.000100 network_time: 0.0265 +[ Thu Sep 15 13:39:11 2022 ] Eval epoch: 127 +[ Thu Sep 15 13:39:48 2022 ] Mean test loss of 296 batches: 1.3757375478744507. +[ Thu Sep 15 13:39:48 2022 ] Top1: 70.15% +[ Thu Sep 15 13:39:48 2022 ] Top5: 93.25% +[ Thu Sep 15 13:39:48 2022 ] Training epoch: 128 +[ Thu Sep 15 13:40:48 2022 ] Batch(78/123) done. Loss: 0.0085 lr:0.000100 network_time: 0.0313 +[ Thu Sep 15 13:41:21 2022 ] Eval epoch: 128 +[ Thu Sep 15 13:41:58 2022 ] Mean test loss of 296 batches: 1.3980305194854736. +[ Thu Sep 15 13:41:58 2022 ] Top1: 69.69% +[ Thu Sep 15 13:41:58 2022 ] Top5: 93.00% +[ Thu Sep 15 13:41:58 2022 ] Training epoch: 129 +[ Thu Sep 15 13:42:42 2022 ] Batch(55/123) done. Loss: 0.0077 lr:0.000100 network_time: 0.0245 +[ Thu Sep 15 13:43:31 2022 ] Eval epoch: 129 +[ Thu Sep 15 13:44:08 2022 ] Mean test loss of 296 batches: 1.3790334463119507. +[ Thu Sep 15 13:44:08 2022 ] Top1: 69.61% +[ Thu Sep 15 13:44:08 2022 ] Top5: 93.20% +[ Thu Sep 15 13:44:08 2022 ] Training epoch: 130 +[ Thu Sep 15 13:44:35 2022 ] Batch(32/123) done. Loss: 0.0154 lr:0.000100 network_time: 0.0262 +[ Thu Sep 15 13:45:41 2022 ] Eval epoch: 130 +[ Thu Sep 15 13:46:18 2022 ] Mean test loss of 296 batches: 1.403220772743225. +[ Thu Sep 15 13:46:18 2022 ] Top1: 69.63% +[ Thu Sep 15 13:46:19 2022 ] Top5: 93.13% +[ Thu Sep 15 13:46:19 2022 ] Training epoch: 131 +[ Thu Sep 15 13:46:29 2022 ] Batch(9/123) done. Loss: 0.0017 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 13:47:42 2022 ] Batch(109/123) done. Loss: 0.0067 lr:0.000100 network_time: 0.0304 +[ Thu Sep 15 13:47:52 2022 ] Eval epoch: 131 +[ Thu Sep 15 13:48:29 2022 ] Mean test loss of 296 batches: 1.3794163465499878. 
+[ Thu Sep 15 13:48:29 2022 ] Top1: 70.11% +[ Thu Sep 15 13:48:29 2022 ] Top5: 93.14% +[ Thu Sep 15 13:48:29 2022 ] Training epoch: 132 +[ Thu Sep 15 13:49:35 2022 ] Batch(86/123) done. Loss: 0.0022 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 13:50:02 2022 ] Eval epoch: 132 +[ Thu Sep 15 13:50:39 2022 ] Mean test loss of 296 batches: 1.3776257038116455. +[ Thu Sep 15 13:50:39 2022 ] Top1: 69.87% +[ Thu Sep 15 13:50:39 2022 ] Top5: 93.34% +[ Thu Sep 15 13:50:39 2022 ] Training epoch: 133 +[ Thu Sep 15 13:51:29 2022 ] Batch(63/123) done. Loss: 0.0022 lr:0.000100 network_time: 0.0464 +[ Thu Sep 15 13:52:12 2022 ] Eval epoch: 133 +[ Thu Sep 15 13:52:49 2022 ] Mean test loss of 296 batches: 1.4044573307037354. +[ Thu Sep 15 13:52:49 2022 ] Top1: 69.47% +[ Thu Sep 15 13:52:50 2022 ] Top5: 93.05% +[ Thu Sep 15 13:52:50 2022 ] Training epoch: 134 +[ Thu Sep 15 13:53:22 2022 ] Batch(40/123) done. Loss: 0.0027 lr:0.000100 network_time: 0.0256 +[ Thu Sep 15 13:54:22 2022 ] Eval epoch: 134 +[ Thu Sep 15 13:54:59 2022 ] Mean test loss of 296 batches: 1.3846921920776367. +[ Thu Sep 15 13:54:59 2022 ] Top1: 69.70% +[ Thu Sep 15 13:55:00 2022 ] Top5: 93.18% +[ Thu Sep 15 13:55:00 2022 ] Training epoch: 135 +[ Thu Sep 15 13:55:16 2022 ] Batch(17/123) done. Loss: 0.0051 lr:0.000100 network_time: 0.0311 +[ Thu Sep 15 13:56:29 2022 ] Batch(117/123) done. Loss: 0.0081 lr:0.000100 network_time: 0.0293 +[ Thu Sep 15 13:56:32 2022 ] Eval epoch: 135 +[ Thu Sep 15 13:57:09 2022 ] Mean test loss of 296 batches: 1.3706209659576416. +[ Thu Sep 15 13:57:09 2022 ] Top1: 70.06% +[ Thu Sep 15 13:57:10 2022 ] Top5: 93.23% +[ Thu Sep 15 13:57:10 2022 ] Training epoch: 136 +[ Thu Sep 15 13:58:22 2022 ] Batch(94/123) done. Loss: 0.0104 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 13:58:43 2022 ] Eval epoch: 136 +[ Thu Sep 15 13:59:20 2022 ] Mean test loss of 296 batches: 1.3784817457199097. 
+[ Thu Sep 15 13:59:20 2022 ] Top1: 70.18% +[ Thu Sep 15 13:59:20 2022 ] Top5: 93.11% +[ Thu Sep 15 13:59:20 2022 ] Training epoch: 137 +[ Thu Sep 15 14:00:15 2022 ] Batch(71/123) done. Loss: 0.0059 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 14:00:53 2022 ] Eval epoch: 137 +[ Thu Sep 15 14:01:30 2022 ] Mean test loss of 296 batches: 1.384010910987854. +[ Thu Sep 15 14:01:30 2022 ] Top1: 69.90% +[ Thu Sep 15 14:01:30 2022 ] Top5: 93.18% +[ Thu Sep 15 14:01:30 2022 ] Training epoch: 138 +[ Thu Sep 15 14:02:08 2022 ] Batch(48/123) done. Loss: 0.0030 lr:0.000100 network_time: 0.0253 +[ Thu Sep 15 14:03:03 2022 ] Eval epoch: 138 +[ Thu Sep 15 14:03:40 2022 ] Mean test loss of 296 batches: 1.3741310834884644. +[ Thu Sep 15 14:03:40 2022 ] Top1: 70.05% +[ Thu Sep 15 14:03:40 2022 ] Top5: 93.30% +[ Thu Sep 15 14:03:40 2022 ] Training epoch: 139 +[ Thu Sep 15 14:04:02 2022 ] Batch(25/123) done. Loss: 0.0078 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 14:05:13 2022 ] Eval epoch: 139 +[ Thu Sep 15 14:05:50 2022 ] Mean test loss of 296 batches: 1.4210567474365234. +[ Thu Sep 15 14:05:50 2022 ] Top1: 69.16% +[ Thu Sep 15 14:05:51 2022 ] Top5: 92.96% +[ Thu Sep 15 14:05:51 2022 ] Training epoch: 140 +[ Thu Sep 15 14:05:56 2022 ] Batch(2/123) done. Loss: 0.0046 lr:0.000100 network_time: 0.0261 +[ Thu Sep 15 14:07:09 2022 ] Batch(102/123) done. Loss: 0.0065 lr:0.000100 network_time: 0.0370 +[ Thu Sep 15 14:07:24 2022 ] Eval epoch: 140 +[ Thu Sep 15 14:08:01 2022 ] Mean test loss of 296 batches: 1.4052220582962036. 
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import math

import sys
sys.path.append("./model/Temporal_shift/")

# Project-local CUDA extension providing the learnable temporal shift op.
from cuda.shift import Shift


def import_class(name):
    """Import and return an attribute given its dotted path, e.g. 'graph.ntu_rgb_d.Graph'."""
    components = name.split('.')
    mod = __import__(components[0])
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod


def conv_init(conv):
    """Kaiming-normal init for a conv layer's weight; zero its bias.

    NOTE: the no-underscore initializers (nn.init.kaiming_normal, .constant,
    .normal) were deprecated in PyTorch 0.4; use the in-place `_` variants.
    """
    nn.init.kaiming_normal_(conv.weight, mode='fan_out')
    nn.init.constant_(conv.bias, 0)


def bn_init(bn, scale):
    """Initialize a BatchNorm layer: weight = `scale`, bias = 0."""
    nn.init.constant_(bn.weight, scale)
    nn.init.constant_(bn.bias, 0)


class tcn(nn.Module):
    """Plain temporal convolution (k x 1 over the time axis) + BatchNorm.

    Used as the residual-path projection in TCN_GCN_unit when the channel
    count or stride changes. No activation is applied here.
    """

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(tcn, self).__init__()
        pad = int((kernel_size - 1) / 2)  # 'same' padding along time
        self.conv = nn.Conv2d(in_channels, out_channels,
                              kernel_size=(kernel_size, 1),
                              padding=(pad, 0),
                              stride=(stride, 1))

        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()  # kept for interface parity; not used in forward
        conv_init(self.conv)
        bn_init(self.bn, 1)

    def forward(self, x):
        x = self.bn(self.conv(x))
        return x


class Shift_tcn(nn.Module):
    """Temporal shift block: BN -> shift -> 1x1 conv -> ReLU -> shift -> BN."""

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(Shift_tcn, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.bn = nn.BatchNorm2d(in_channels)
        self.bn2 = nn.BatchNorm2d(in_channels)
        bn_init(self.bn2, 1)
        self.relu = nn.ReLU(inplace=True)
        # Learnable temporal shifts (CUDA op); stride is applied on the way out.
        self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1)
        self.shift_out = Shift(channel=out_channels, stride=stride, init_scale=1)

        # 1x1 conv mixes channels between the two shifts.
        self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1)
        nn.init.kaiming_normal_(self.temporal_linear.weight, mode='fan_out')

    def forward(self, x):
        x = self.bn(x)
        # shift1
        x = self.shift_in(x)
        x = self.temporal_linear(x)
        x = self.relu(x)
        # shift2
        x = self.shift_out(x)
        x = self.bn2(x)
        return x


class Shift_gcn(nn.Module):
    """Spatial shift-GCN block over 25 joints.

    Replaces graph convolution with fixed circular channel shifts
    (precomputed index permutations) around a per-joint linear map,
    plus a learnable per-joint feature mask.
    """

    def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3):
        super(Shift_gcn, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        if in_channels != out_channels:
            # 1x1 projection so the residual matches the output channels.
            self.down = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.down = lambda x: x

        # NOTE: parameters are created directly on CUDA, as in the original
        # release — this model requires a GPU.
        self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True)
        nn.init.normal_(self.Linear_weight, 0, math.sqrt(1.0 / out_channels))

        self.Linear_bias = nn.Parameter(torch.zeros(1, 1, out_channels, requires_grad=True, device='cuda'), requires_grad=True)
        nn.init.constant_(self.Linear_bias, 0)

        # Mask starts at all-ones after tanh(0)+1 = 1 in forward.
        self.Feature_Mask = nn.Parameter(torch.ones(1, 25, in_channels, requires_grad=True, device='cuda'), requires_grad=True)
        nn.init.constant_(self.Feature_Mask, 0)

        self.bn = nn.BatchNorm1d(25 * out_channels)
        self.relu = nn.ReLU()

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)

        # Precompute circular shift permutations. Must be int64 so the
        # resulting tensor is a LongTensor usable by torch.index_select.
        # (np.int was removed in NumPy 1.24; np.int64 matches the original
        # platform behavior.)
        index_array = np.empty(25 * in_channels).astype(np.int64)
        for i in range(25):
            for j in range(in_channels):
                index_array[i * in_channels + j] = (i * in_channels + j + j * in_channels) % (in_channels * 25)
        self.shift_in = nn.Parameter(torch.from_numpy(index_array), requires_grad=False)

        index_array = np.empty(25 * out_channels).astype(np.int64)
        for i in range(25):
            for j in range(out_channels):
                index_array[i * out_channels + j] = (i * out_channels + j - j * out_channels) % (out_channels * 25)
        self.shift_out = nn.Parameter(torch.from_numpy(index_array), requires_grad=False)

    def forward(self, x0):
        # x0: (N, C, T, V)
        n, c, t, v = x0.size()
        x = x0.permute(0, 2, 3, 1).contiguous()

        # shift1: circular shift across the flattened (joint, channel) axis
        x = x.view(n * t, v * c)
        x = torch.index_select(x, 1, self.shift_in)
        x = x.view(n * t, v, c)
        x = x * (torch.tanh(self.Feature_Mask) + 1)

        # Per-joint linear map: (nt, v, c) @ (c, d) -> (nt, v, d)
        x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous()
        x = x + self.Linear_bias

        # shift2 + BN over the flattened (joint, channel) axis
        x = x.view(n * t, -1)
        x = torch.index_select(x, 1, self.shift_out)
        x = self.bn(x)
        x = x.view(n, t, v, self.out_channels).permute(0, 3, 1, 2)  # (N, C', T, V)

        x = x + self.down(x0)
        x = self.relu(x)
        return x


class TCN_GCN_unit(nn.Module):
    """One Shift-GCN layer: spatial shift-GCN followed by temporal shift-TCN,
    with a residual connection around the pair."""

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = Shift_gcn(in_channels, out_channels, A)
        self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU()

        if not residual:
            self.residual = lambda x: 0
        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x
        else:
            # Project the residual when shape changes.
            self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        x = self.tcn1(self.gcn1(x)) + self.residual(x)
        return self.relu(x)


class Model(nn.Module):
    """Shift-GCN skeleton action-recognition backbone (10 TCN-GCN layers).

    Input:  (N, C, T, V, M) = (batch, channels, frames, joints, persons).
    Output: (N, num_class) classification logits, averaged over persons
    and spatio-temporal positions.
    """

    def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3):
        super(Model, self).__init__()

        if graph is None:
            raise ValueError()
        else:
            Graph = import_class(graph)
            self.graph = Graph(**graph_args)

        A = self.graph.A
        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)

        # Fix: first layer previously hardcoded 3 input channels, silently
        # ignoring the `in_channels` argument. Default (3) is unchanged.
        self.l1 = TCN_GCN_unit(in_channels, 64, A, residual=False)
        self.l2 = TCN_GCN_unit(64, 64, A)
        self.l3 = TCN_GCN_unit(64, 64, A)
        self.l4 = TCN_GCN_unit(64, 64, A)
        self.l5 = TCN_GCN_unit(64, 128, A, stride=2)
        self.l6 = TCN_GCN_unit(128, 128, A)
        self.l7 = TCN_GCN_unit(128, 128, A)
        self.l8 = TCN_GCN_unit(128, 256, A, stride=2)
        self.l9 = TCN_GCN_unit(256, 256, A)
        self.l10 = TCN_GCN_unit(256, 256, A)

        self.fc = nn.Linear(256, num_class)
        nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
        bn_init(self.data_bn, 1)

    def forward(self, x):
        N, C, T, V, M = x.size()

        # Fold persons/joints/channels together for BatchNorm1d, then unfold
        # so each person is a separate sample: (N*M, C, T, V).
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)

        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = self.l5(x)
        x = self.l6(x)
        x = self.l7(x)
        x = self.l8(x)
        x = self.l9(x)
        x = self.l10(x)

        # (N*M, C, T*V) -> global average over positions, then over persons.
        c_new = x.size(1)
        x = x.view(N, M, c_new, -1)
        x = x.mean(3).mean(1)

        return self.fc(x)
/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_joint_motion.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_ShiftGCN_joint_motion_xview diff --git a/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_motion_xview/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_motion_xview/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..883b64e77e70140e9024d00820a931c9f5f15323 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_motion_xview/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b82b1dab9a4878ebe0df0030fcd14a3a345fa5dd850baa1e7dbda1c8ecdb0da1 +size 5718404 diff --git a/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_motion_xview/log.txt b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_motion_xview/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..291973c1c12de8711f6caa987893e7071cca8ad9 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_motion_xview/log.txt @@ -0,0 +1,875 @@ +[ Thu Sep 15 09:04:13 2022 ] Parameters: +{'work_dir': './work_dir/ntu_ShiftGCN_joint_motion_xview', 'model_saved_name': './save_models/ntu_ShiftGCN_joint_motion_xview', 'Experiment_name': 'ntu_ShiftGCN_joint_motion_xview', 'config': './config/nturgbd-cross-view/train_joint_motion.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': 
'/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_joint_motion.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [6, 7], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Thu Sep 15 09:04:13 2022 ] Training epoch: 1 +[ Thu Sep 15 09:05:32 2022 ] Batch(99/123) done. Loss: 2.1944 lr:0.100000 network_time: 0.0258 +[ Thu Sep 15 09:05:49 2022 ] Eval epoch: 1 +[ Thu Sep 15 09:06:26 2022 ] Mean test loss of 296 batches: 10.541125297546387. +[ Thu Sep 15 09:06:26 2022 ] Top1: 17.99% +[ Thu Sep 15 09:06:26 2022 ] Top5: 45.42% +[ Thu Sep 15 09:06:26 2022 ] Training epoch: 2 +[ Thu Sep 15 09:07:25 2022 ] Batch(76/123) done. Loss: 2.0463 lr:0.100000 network_time: 0.0287 +[ Thu Sep 15 09:07:59 2022 ] Eval epoch: 2 +[ Thu Sep 15 09:08:37 2022 ] Mean test loss of 296 batches: 5.925306797027588. +[ Thu Sep 15 09:08:37 2022 ] Top1: 19.27% +[ Thu Sep 15 09:08:37 2022 ] Top5: 43.40% +[ Thu Sep 15 09:08:37 2022 ] Training epoch: 3 +[ Thu Sep 15 09:09:19 2022 ] Batch(53/123) done. Loss: 2.1677 lr:0.100000 network_time: 0.0263 +[ Thu Sep 15 09:10:10 2022 ] Eval epoch: 3 +[ Thu Sep 15 09:10:47 2022 ] Mean test loss of 296 batches: 3.929342746734619. 
+[ Thu Sep 15 09:10:47 2022 ] Top1: 28.47% +[ Thu Sep 15 09:10:47 2022 ] Top5: 61.66% +[ Thu Sep 15 09:10:47 2022 ] Training epoch: 4 +[ Thu Sep 15 09:11:13 2022 ] Batch(30/123) done. Loss: 1.0779 lr:0.100000 network_time: 0.0248 +[ Thu Sep 15 09:12:20 2022 ] Eval epoch: 4 +[ Thu Sep 15 09:12:57 2022 ] Mean test loss of 296 batches: 3.401308298110962. +[ Thu Sep 15 09:12:57 2022 ] Top1: 34.66% +[ Thu Sep 15 09:12:57 2022 ] Top5: 69.70% +[ Thu Sep 15 09:12:57 2022 ] Training epoch: 5 +[ Thu Sep 15 09:13:06 2022 ] Batch(7/123) done. Loss: 1.5257 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 09:14:19 2022 ] Batch(107/123) done. Loss: 1.1335 lr:0.100000 network_time: 0.0281 +[ Thu Sep 15 09:14:30 2022 ] Eval epoch: 5 +[ Thu Sep 15 09:15:07 2022 ] Mean test loss of 296 batches: 2.9881510734558105. +[ Thu Sep 15 09:15:07 2022 ] Top1: 32.56% +[ Thu Sep 15 09:15:07 2022 ] Top5: 68.72% +[ Thu Sep 15 09:15:07 2022 ] Training epoch: 6 +[ Thu Sep 15 09:16:12 2022 ] Batch(84/123) done. Loss: 1.2525 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 09:16:40 2022 ] Eval epoch: 6 +[ Thu Sep 15 09:17:17 2022 ] Mean test loss of 296 batches: 2.075395345687866. +[ Thu Sep 15 09:17:17 2022 ] Top1: 44.00% +[ Thu Sep 15 09:17:17 2022 ] Top5: 81.28% +[ Thu Sep 15 09:17:17 2022 ] Training epoch: 7 +[ Thu Sep 15 09:18:05 2022 ] Batch(61/123) done. Loss: 0.9061 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 09:18:50 2022 ] Eval epoch: 7 +[ Thu Sep 15 09:19:27 2022 ] Mean test loss of 296 batches: 2.427981376647949. +[ Thu Sep 15 09:19:27 2022 ] Top1: 44.78% +[ Thu Sep 15 09:19:27 2022 ] Top5: 79.71% +[ Thu Sep 15 09:19:27 2022 ] Training epoch: 8 +[ Thu Sep 15 09:19:58 2022 ] Batch(38/123) done. Loss: 1.0554 lr:0.100000 network_time: 0.0328 +[ Thu Sep 15 09:21:00 2022 ] Eval epoch: 8 +[ Thu Sep 15 09:21:37 2022 ] Mean test loss of 296 batches: 3.348461389541626. 
+[ Thu Sep 15 09:21:37 2022 ] Top1: 34.86% +[ Thu Sep 15 09:21:37 2022 ] Top5: 67.18% +[ Thu Sep 15 09:21:37 2022 ] Training epoch: 9 +[ Thu Sep 15 09:21:51 2022 ] Batch(15/123) done. Loss: 0.8747 lr:0.100000 network_time: 0.0329 +[ Thu Sep 15 09:23:05 2022 ] Batch(115/123) done. Loss: 0.8749 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 09:23:10 2022 ] Eval epoch: 9 +[ Thu Sep 15 09:23:47 2022 ] Mean test loss of 296 batches: 3.7175636291503906. +[ Thu Sep 15 09:23:47 2022 ] Top1: 35.33% +[ Thu Sep 15 09:23:47 2022 ] Top5: 69.98% +[ Thu Sep 15 09:23:47 2022 ] Training epoch: 10 +[ Thu Sep 15 09:24:58 2022 ] Batch(92/123) done. Loss: 0.7818 lr:0.100000 network_time: 0.0285 +[ Thu Sep 15 09:25:20 2022 ] Eval epoch: 10 +[ Thu Sep 15 09:25:57 2022 ] Mean test loss of 296 batches: 2.8265042304992676. +[ Thu Sep 15 09:25:57 2022 ] Top1: 44.61% +[ Thu Sep 15 09:25:57 2022 ] Top5: 77.70% +[ Thu Sep 15 09:25:57 2022 ] Training epoch: 11 +[ Thu Sep 15 09:26:51 2022 ] Batch(69/123) done. Loss: 0.8064 lr:0.100000 network_time: 0.0305 +[ Thu Sep 15 09:27:30 2022 ] Eval epoch: 11 +[ Thu Sep 15 09:28:07 2022 ] Mean test loss of 296 batches: 3.3214447498321533. +[ Thu Sep 15 09:28:07 2022 ] Top1: 39.36% +[ Thu Sep 15 09:28:07 2022 ] Top5: 71.50% +[ Thu Sep 15 09:28:07 2022 ] Training epoch: 12 +[ Thu Sep 15 09:28:44 2022 ] Batch(46/123) done. Loss: 0.8027 lr:0.100000 network_time: 0.0260 +[ Thu Sep 15 09:29:40 2022 ] Eval epoch: 12 +[ Thu Sep 15 09:30:17 2022 ] Mean test loss of 296 batches: 4.5313615798950195. +[ Thu Sep 15 09:30:17 2022 ] Top1: 27.54% +[ Thu Sep 15 09:30:17 2022 ] Top5: 62.45% +[ Thu Sep 15 09:30:17 2022 ] Training epoch: 13 +[ Thu Sep 15 09:30:37 2022 ] Batch(23/123) done. Loss: 0.8135 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 09:31:50 2022 ] Eval epoch: 13 +[ Thu Sep 15 09:32:27 2022 ] Mean test loss of 296 batches: 1.8461687564849854. 
+[ Thu Sep 15 09:32:27 2022 ] Top1: 51.31% +[ Thu Sep 15 09:32:27 2022 ] Top5: 84.40% +[ Thu Sep 15 09:32:27 2022 ] Training epoch: 14 +[ Thu Sep 15 09:32:30 2022 ] Batch(0/123) done. Loss: 0.5064 lr:0.100000 network_time: 0.0625 +[ Thu Sep 15 09:33:43 2022 ] Batch(100/123) done. Loss: 0.7618 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 09:34:00 2022 ] Eval epoch: 14 +[ Thu Sep 15 09:34:37 2022 ] Mean test loss of 296 batches: 2.3183469772338867. +[ Thu Sep 15 09:34:37 2022 ] Top1: 47.93% +[ Thu Sep 15 09:34:37 2022 ] Top5: 83.30% +[ Thu Sep 15 09:34:37 2022 ] Training epoch: 15 +[ Thu Sep 15 09:35:36 2022 ] Batch(77/123) done. Loss: 0.5750 lr:0.100000 network_time: 0.0282 +[ Thu Sep 15 09:36:10 2022 ] Eval epoch: 15 +[ Thu Sep 15 09:36:46 2022 ] Mean test loss of 296 batches: 3.4477028846740723. +[ Thu Sep 15 09:36:46 2022 ] Top1: 34.15% +[ Thu Sep 15 09:36:47 2022 ] Top5: 67.81% +[ Thu Sep 15 09:36:47 2022 ] Training epoch: 16 +[ Thu Sep 15 09:37:29 2022 ] Batch(54/123) done. Loss: 0.8148 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 09:38:19 2022 ] Eval epoch: 16 +[ Thu Sep 15 09:38:56 2022 ] Mean test loss of 296 batches: 1.9735115766525269. +[ Thu Sep 15 09:38:56 2022 ] Top1: 52.53% +[ Thu Sep 15 09:38:56 2022 ] Top5: 84.91% +[ Thu Sep 15 09:38:56 2022 ] Training epoch: 17 +[ Thu Sep 15 09:39:22 2022 ] Batch(31/123) done. Loss: 0.3617 lr:0.100000 network_time: 0.0290 +[ Thu Sep 15 09:40:29 2022 ] Eval epoch: 17 +[ Thu Sep 15 09:41:06 2022 ] Mean test loss of 296 batches: 2.4605064392089844. +[ Thu Sep 15 09:41:06 2022 ] Top1: 46.31% +[ Thu Sep 15 09:41:06 2022 ] Top5: 81.74% +[ Thu Sep 15 09:41:06 2022 ] Training epoch: 18 +[ Thu Sep 15 09:41:15 2022 ] Batch(8/123) done. Loss: 0.3591 lr:0.100000 network_time: 0.0281 +[ Thu Sep 15 09:42:28 2022 ] Batch(108/123) done. Loss: 0.5530 lr:0.100000 network_time: 0.0330 +[ Thu Sep 15 09:42:39 2022 ] Eval epoch: 18 +[ Thu Sep 15 09:43:15 2022 ] Mean test loss of 296 batches: 1.5425431728363037. 
+[ Thu Sep 15 09:43:16 2022 ] Top1: 61.83% +[ Thu Sep 15 09:43:16 2022 ] Top5: 90.28% +[ Thu Sep 15 09:43:16 2022 ] Training epoch: 19 +[ Thu Sep 15 09:44:21 2022 ] Batch(85/123) done. Loss: 0.8794 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 09:44:49 2022 ] Eval epoch: 19 +[ Thu Sep 15 09:45:25 2022 ] Mean test loss of 296 batches: 2.3788092136383057. +[ Thu Sep 15 09:45:26 2022 ] Top1: 47.92% +[ Thu Sep 15 09:45:26 2022 ] Top5: 79.72% +[ Thu Sep 15 09:45:26 2022 ] Training epoch: 20 +[ Thu Sep 15 09:46:15 2022 ] Batch(62/123) done. Loss: 0.6587 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 09:46:59 2022 ] Eval epoch: 20 +[ Thu Sep 15 09:47:36 2022 ] Mean test loss of 296 batches: 2.0377092361450195. +[ Thu Sep 15 09:47:36 2022 ] Top1: 51.38% +[ Thu Sep 15 09:47:36 2022 ] Top5: 83.51% +[ Thu Sep 15 09:47:36 2022 ] Training epoch: 21 +[ Thu Sep 15 09:48:08 2022 ] Batch(39/123) done. Loss: 0.3342 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 09:49:09 2022 ] Eval epoch: 21 +[ Thu Sep 15 09:49:46 2022 ] Mean test loss of 296 batches: 2.4133481979370117. +[ Thu Sep 15 09:49:46 2022 ] Top1: 47.33% +[ Thu Sep 15 09:49:46 2022 ] Top5: 77.92% +[ Thu Sep 15 09:49:46 2022 ] Training epoch: 22 +[ Thu Sep 15 09:50:01 2022 ] Batch(16/123) done. Loss: 0.4003 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 09:51:14 2022 ] Batch(116/123) done. Loss: 0.6447 lr:0.100000 network_time: 0.0290 +[ Thu Sep 15 09:51:19 2022 ] Eval epoch: 22 +[ Thu Sep 15 09:51:56 2022 ] Mean test loss of 296 batches: 1.8490498065948486. +[ Thu Sep 15 09:51:56 2022 ] Top1: 53.59% +[ Thu Sep 15 09:51:56 2022 ] Top5: 85.17% +[ Thu Sep 15 09:51:56 2022 ] Training epoch: 23 +[ Thu Sep 15 09:53:07 2022 ] Batch(93/123) done. Loss: 0.2608 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 09:53:28 2022 ] Eval epoch: 23 +[ Thu Sep 15 09:54:05 2022 ] Mean test loss of 296 batches: 2.395524740219116. 
+[ Thu Sep 15 09:54:05 2022 ] Top1: 51.77% +[ Thu Sep 15 09:54:05 2022 ] Top5: 84.31% +[ Thu Sep 15 09:54:05 2022 ] Training epoch: 24 +[ Thu Sep 15 09:55:00 2022 ] Batch(70/123) done. Loss: 0.2402 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 09:55:38 2022 ] Eval epoch: 24 +[ Thu Sep 15 09:56:15 2022 ] Mean test loss of 296 batches: 2.6345374584198. +[ Thu Sep 15 09:56:15 2022 ] Top1: 46.95% +[ Thu Sep 15 09:56:15 2022 ] Top5: 81.84% +[ Thu Sep 15 09:56:15 2022 ] Training epoch: 25 +[ Thu Sep 15 09:56:53 2022 ] Batch(47/123) done. Loss: 0.2885 lr:0.100000 network_time: 0.0298 +[ Thu Sep 15 09:57:48 2022 ] Eval epoch: 25 +[ Thu Sep 15 09:58:25 2022 ] Mean test loss of 296 batches: 2.4668877124786377. +[ Thu Sep 15 09:58:25 2022 ] Top1: 44.89% +[ Thu Sep 15 09:58:25 2022 ] Top5: 79.24% +[ Thu Sep 15 09:58:25 2022 ] Training epoch: 26 +[ Thu Sep 15 09:58:46 2022 ] Batch(24/123) done. Loss: 0.2295 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 09:59:58 2022 ] Eval epoch: 26 +[ Thu Sep 15 10:00:35 2022 ] Mean test loss of 296 batches: 2.8713467121124268. +[ Thu Sep 15 10:00:35 2022 ] Top1: 47.52% +[ Thu Sep 15 10:00:35 2022 ] Top5: 79.01% +[ Thu Sep 15 10:00:35 2022 ] Training epoch: 27 +[ Thu Sep 15 10:00:39 2022 ] Batch(1/123) done. Loss: 0.1833 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 10:01:52 2022 ] Batch(101/123) done. Loss: 0.4430 lr:0.100000 network_time: 0.0289 +[ Thu Sep 15 10:02:08 2022 ] Eval epoch: 27 +[ Thu Sep 15 10:02:45 2022 ] Mean test loss of 296 batches: 1.65923273563385. +[ Thu Sep 15 10:02:45 2022 ] Top1: 59.49% +[ Thu Sep 15 10:02:45 2022 ] Top5: 90.04% +[ Thu Sep 15 10:02:45 2022 ] Training epoch: 28 +[ Thu Sep 15 10:03:46 2022 ] Batch(78/123) done. Loss: 0.2704 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 10:04:18 2022 ] Eval epoch: 28 +[ Thu Sep 15 10:04:55 2022 ] Mean test loss of 296 batches: 1.9268420934677124. 
+[ Thu Sep 15 10:04:55 2022 ] Top1: 54.65% +[ Thu Sep 15 10:04:55 2022 ] Top5: 85.89% +[ Thu Sep 15 10:04:55 2022 ] Training epoch: 29 +[ Thu Sep 15 10:05:39 2022 ] Batch(55/123) done. Loss: 0.2265 lr:0.100000 network_time: 0.0290 +[ Thu Sep 15 10:06:29 2022 ] Eval epoch: 29 +[ Thu Sep 15 10:07:05 2022 ] Mean test loss of 296 batches: 3.465728759765625. +[ Thu Sep 15 10:07:05 2022 ] Top1: 42.20% +[ Thu Sep 15 10:07:06 2022 ] Top5: 76.88% +[ Thu Sep 15 10:07:06 2022 ] Training epoch: 30 +[ Thu Sep 15 10:07:32 2022 ] Batch(32/123) done. Loss: 0.1872 lr:0.100000 network_time: 0.0323 +[ Thu Sep 15 10:08:39 2022 ] Eval epoch: 30 +[ Thu Sep 15 10:09:16 2022 ] Mean test loss of 296 batches: 3.8087360858917236. +[ Thu Sep 15 10:09:16 2022 ] Top1: 41.27% +[ Thu Sep 15 10:09:16 2022 ] Top5: 72.26% +[ Thu Sep 15 10:09:16 2022 ] Training epoch: 31 +[ Thu Sep 15 10:09:26 2022 ] Batch(9/123) done. Loss: 0.2641 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 10:10:39 2022 ] Batch(109/123) done. Loss: 0.2358 lr:0.100000 network_time: 0.0363 +[ Thu Sep 15 10:10:49 2022 ] Eval epoch: 31 +[ Thu Sep 15 10:11:26 2022 ] Mean test loss of 296 batches: 2.352151393890381. +[ Thu Sep 15 10:11:26 2022 ] Top1: 52.22% +[ Thu Sep 15 10:11:26 2022 ] Top5: 85.93% +[ Thu Sep 15 10:11:26 2022 ] Training epoch: 32 +[ Thu Sep 15 10:12:32 2022 ] Batch(86/123) done. Loss: 0.3734 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 10:12:58 2022 ] Eval epoch: 32 +[ Thu Sep 15 10:13:35 2022 ] Mean test loss of 296 batches: 2.314934492111206. +[ Thu Sep 15 10:13:35 2022 ] Top1: 56.50% +[ Thu Sep 15 10:13:36 2022 ] Top5: 88.80% +[ Thu Sep 15 10:13:36 2022 ] Training epoch: 33 +[ Thu Sep 15 10:14:25 2022 ] Batch(63/123) done. Loss: 0.2993 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 10:15:09 2022 ] Eval epoch: 33 +[ Thu Sep 15 10:15:45 2022 ] Mean test loss of 296 batches: 3.762425661087036. 
+[ Thu Sep 15 10:15:45 2022 ] Top1: 39.09% +[ Thu Sep 15 10:15:45 2022 ] Top5: 69.74% +[ Thu Sep 15 10:15:45 2022 ] Training epoch: 34 +[ Thu Sep 15 10:16:18 2022 ] Batch(40/123) done. Loss: 0.1483 lr:0.100000 network_time: 0.0278 +[ Thu Sep 15 10:17:18 2022 ] Eval epoch: 34 +[ Thu Sep 15 10:17:55 2022 ] Mean test loss of 296 batches: 2.0906965732574463. +[ Thu Sep 15 10:17:55 2022 ] Top1: 55.70% +[ Thu Sep 15 10:17:55 2022 ] Top5: 85.20% +[ Thu Sep 15 10:17:55 2022 ] Training epoch: 35 +[ Thu Sep 15 10:18:11 2022 ] Batch(17/123) done. Loss: 0.3407 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 10:19:24 2022 ] Batch(117/123) done. Loss: 0.4128 lr:0.100000 network_time: 0.0316 +[ Thu Sep 15 10:19:28 2022 ] Eval epoch: 35 +[ Thu Sep 15 10:20:05 2022 ] Mean test loss of 296 batches: 2.946030378341675. +[ Thu Sep 15 10:20:05 2022 ] Top1: 50.82% +[ Thu Sep 15 10:20:05 2022 ] Top5: 80.44% +[ Thu Sep 15 10:20:05 2022 ] Training epoch: 36 +[ Thu Sep 15 10:21:17 2022 ] Batch(94/123) done. Loss: 0.3085 lr:0.100000 network_time: 0.0320 +[ Thu Sep 15 10:21:38 2022 ] Eval epoch: 36 +[ Thu Sep 15 10:22:15 2022 ] Mean test loss of 296 batches: 1.9085206985473633. +[ Thu Sep 15 10:22:15 2022 ] Top1: 59.22% +[ Thu Sep 15 10:22:15 2022 ] Top5: 89.57% +[ Thu Sep 15 10:22:15 2022 ] Training epoch: 37 +[ Thu Sep 15 10:23:11 2022 ] Batch(71/123) done. Loss: 0.2065 lr:0.100000 network_time: 0.0264 +[ Thu Sep 15 10:23:48 2022 ] Eval epoch: 37 +[ Thu Sep 15 10:24:25 2022 ] Mean test loss of 296 batches: 2.25624942779541. +[ Thu Sep 15 10:24:25 2022 ] Top1: 54.09% +[ Thu Sep 15 10:24:25 2022 ] Top5: 85.28% +[ Thu Sep 15 10:24:25 2022 ] Training epoch: 38 +[ Thu Sep 15 10:25:04 2022 ] Batch(48/123) done. Loss: 0.3258 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 10:25:58 2022 ] Eval epoch: 38 +[ Thu Sep 15 10:26:35 2022 ] Mean test loss of 296 batches: 2.1770968437194824. 
+[ Thu Sep 15 10:26:35 2022 ] Top1: 55.12% +[ Thu Sep 15 10:26:35 2022 ] Top5: 85.95% +[ Thu Sep 15 10:26:35 2022 ] Training epoch: 39 +[ Thu Sep 15 10:26:57 2022 ] Batch(25/123) done. Loss: 0.1509 lr:0.100000 network_time: 0.0306 +[ Thu Sep 15 10:28:08 2022 ] Eval epoch: 39 +[ Thu Sep 15 10:28:45 2022 ] Mean test loss of 296 batches: 2.8569211959838867. +[ Thu Sep 15 10:28:45 2022 ] Top1: 48.70% +[ Thu Sep 15 10:28:45 2022 ] Top5: 81.25% +[ Thu Sep 15 10:28:45 2022 ] Training epoch: 40 +[ Thu Sep 15 10:28:50 2022 ] Batch(2/123) done. Loss: 0.1428 lr:0.100000 network_time: 0.0366 +[ Thu Sep 15 10:30:03 2022 ] Batch(102/123) done. Loss: 0.3519 lr:0.100000 network_time: 0.0275 +[ Thu Sep 15 10:30:18 2022 ] Eval epoch: 40 +[ Thu Sep 15 10:30:54 2022 ] Mean test loss of 296 batches: 2.048165798187256. +[ Thu Sep 15 10:30:54 2022 ] Top1: 58.69% +[ Thu Sep 15 10:30:54 2022 ] Top5: 87.45% +[ Thu Sep 15 10:30:54 2022 ] Training epoch: 41 +[ Thu Sep 15 10:31:56 2022 ] Batch(79/123) done. Loss: 0.1384 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 10:32:27 2022 ] Eval epoch: 41 +[ Thu Sep 15 10:33:04 2022 ] Mean test loss of 296 batches: 2.2625927925109863. +[ Thu Sep 15 10:33:04 2022 ] Top1: 58.39% +[ Thu Sep 15 10:33:04 2022 ] Top5: 85.02% +[ Thu Sep 15 10:33:04 2022 ] Training epoch: 42 +[ Thu Sep 15 10:33:48 2022 ] Batch(56/123) done. Loss: 0.4280 lr:0.100000 network_time: 0.0308 +[ Thu Sep 15 10:34:37 2022 ] Eval epoch: 42 +[ Thu Sep 15 10:35:14 2022 ] Mean test loss of 296 batches: 2.092979669570923. +[ Thu Sep 15 10:35:14 2022 ] Top1: 58.09% +[ Thu Sep 15 10:35:14 2022 ] Top5: 88.46% +[ Thu Sep 15 10:35:14 2022 ] Training epoch: 43 +[ Thu Sep 15 10:35:41 2022 ] Batch(33/123) done. Loss: 0.1257 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 10:36:47 2022 ] Eval epoch: 43 +[ Thu Sep 15 10:37:23 2022 ] Mean test loss of 296 batches: 2.8443000316619873. 
+[ Thu Sep 15 10:37:24 2022 ] Top1: 48.70% +[ Thu Sep 15 10:37:24 2022 ] Top5: 83.03% +[ Thu Sep 15 10:37:24 2022 ] Training epoch: 44 +[ Thu Sep 15 10:37:34 2022 ] Batch(10/123) done. Loss: 0.1756 lr:0.100000 network_time: 0.0317 +[ Thu Sep 15 10:38:47 2022 ] Batch(110/123) done. Loss: 0.1498 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 10:38:57 2022 ] Eval epoch: 44 +[ Thu Sep 15 10:39:33 2022 ] Mean test loss of 296 batches: 2.139049530029297. +[ Thu Sep 15 10:39:33 2022 ] Top1: 59.14% +[ Thu Sep 15 10:39:33 2022 ] Top5: 86.23% +[ Thu Sep 15 10:39:34 2022 ] Training epoch: 45 +[ Thu Sep 15 10:40:40 2022 ] Batch(87/123) done. Loss: 0.2130 lr:0.100000 network_time: 0.0279 +[ Thu Sep 15 10:41:06 2022 ] Eval epoch: 45 +[ Thu Sep 15 10:41:43 2022 ] Mean test loss of 296 batches: 3.214433431625366. +[ Thu Sep 15 10:41:43 2022 ] Top1: 46.93% +[ Thu Sep 15 10:41:43 2022 ] Top5: 78.46% +[ Thu Sep 15 10:41:43 2022 ] Training epoch: 46 +[ Thu Sep 15 10:42:33 2022 ] Batch(64/123) done. Loss: 0.2074 lr:0.100000 network_time: 0.0299 +[ Thu Sep 15 10:43:16 2022 ] Eval epoch: 46 +[ Thu Sep 15 10:43:52 2022 ] Mean test loss of 296 batches: 3.47165584564209. +[ Thu Sep 15 10:43:53 2022 ] Top1: 43.88% +[ Thu Sep 15 10:43:53 2022 ] Top5: 73.30% +[ Thu Sep 15 10:43:53 2022 ] Training epoch: 47 +[ Thu Sep 15 10:44:26 2022 ] Batch(41/123) done. Loss: 0.2009 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 10:45:25 2022 ] Eval epoch: 47 +[ Thu Sep 15 10:46:02 2022 ] Mean test loss of 296 batches: 2.4172680377960205. +[ Thu Sep 15 10:46:02 2022 ] Top1: 55.10% +[ Thu Sep 15 10:46:03 2022 ] Top5: 86.04% +[ Thu Sep 15 10:46:03 2022 ] Training epoch: 48 +[ Thu Sep 15 10:46:19 2022 ] Batch(18/123) done. Loss: 0.2132 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 10:47:33 2022 ] Batch(118/123) done. Loss: 0.1894 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 10:47:36 2022 ] Eval epoch: 48 +[ Thu Sep 15 10:48:13 2022 ] Mean test loss of 296 batches: 2.062666654586792. 
+[ Thu Sep 15 10:48:13 2022 ] Top1: 57.45% +[ Thu Sep 15 10:48:13 2022 ] Top5: 87.50% +[ Thu Sep 15 10:48:13 2022 ] Training epoch: 49 +[ Thu Sep 15 10:49:26 2022 ] Batch(95/123) done. Loss: 0.1065 lr:0.100000 network_time: 0.0323 +[ Thu Sep 15 10:49:46 2022 ] Eval epoch: 49 +[ Thu Sep 15 10:50:23 2022 ] Mean test loss of 296 batches: 1.8771799802780151. +[ Thu Sep 15 10:50:23 2022 ] Top1: 62.06% +[ Thu Sep 15 10:50:23 2022 ] Top5: 88.94% +[ Thu Sep 15 10:50:23 2022 ] Training epoch: 50 +[ Thu Sep 15 10:51:19 2022 ] Batch(72/123) done. Loss: 0.4280 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 10:51:56 2022 ] Eval epoch: 50 +[ Thu Sep 15 10:52:33 2022 ] Mean test loss of 296 batches: 1.9319062232971191. +[ Thu Sep 15 10:52:34 2022 ] Top1: 59.02% +[ Thu Sep 15 10:52:34 2022 ] Top5: 89.02% +[ Thu Sep 15 10:52:34 2022 ] Training epoch: 51 +[ Thu Sep 15 10:53:13 2022 ] Batch(49/123) done. Loss: 0.2071 lr:0.100000 network_time: 0.0282 +[ Thu Sep 15 10:54:07 2022 ] Eval epoch: 51 +[ Thu Sep 15 10:54:44 2022 ] Mean test loss of 296 batches: 2.2423434257507324. +[ Thu Sep 15 10:54:44 2022 ] Top1: 57.01% +[ Thu Sep 15 10:54:44 2022 ] Top5: 87.52% +[ Thu Sep 15 10:54:44 2022 ] Training epoch: 52 +[ Thu Sep 15 10:55:07 2022 ] Batch(26/123) done. Loss: 0.2673 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 10:56:17 2022 ] Eval epoch: 52 +[ Thu Sep 15 10:56:54 2022 ] Mean test loss of 296 batches: 3.0437023639678955. +[ Thu Sep 15 10:56:54 2022 ] Top1: 47.34% +[ Thu Sep 15 10:56:54 2022 ] Top5: 81.49% +[ Thu Sep 15 10:56:54 2022 ] Training epoch: 53 +[ Thu Sep 15 10:57:00 2022 ] Batch(3/123) done. Loss: 0.2166 lr:0.100000 network_time: 0.0295 +[ Thu Sep 15 10:58:13 2022 ] Batch(103/123) done. Loss: 0.1694 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 10:58:28 2022 ] Eval epoch: 53 +[ Thu Sep 15 10:59:04 2022 ] Mean test loss of 296 batches: 3.3700997829437256. 
+[ Thu Sep 15 10:59:05 2022 ] Top1: 49.00% +[ Thu Sep 15 10:59:05 2022 ] Top5: 79.19% +[ Thu Sep 15 10:59:05 2022 ] Training epoch: 54 +[ Thu Sep 15 11:00:07 2022 ] Batch(80/123) done. Loss: 0.0829 lr:0.100000 network_time: 0.0264 +[ Thu Sep 15 11:00:38 2022 ] Eval epoch: 54 +[ Thu Sep 15 11:01:15 2022 ] Mean test loss of 296 batches: 3.5582878589630127. +[ Thu Sep 15 11:01:15 2022 ] Top1: 46.13% +[ Thu Sep 15 11:01:15 2022 ] Top5: 77.47% +[ Thu Sep 15 11:01:16 2022 ] Training epoch: 55 +[ Thu Sep 15 11:02:01 2022 ] Batch(57/123) done. Loss: 0.2957 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 11:02:49 2022 ] Eval epoch: 55 +[ Thu Sep 15 11:03:26 2022 ] Mean test loss of 296 batches: 2.6279397010803223. +[ Thu Sep 15 11:03:26 2022 ] Top1: 52.80% +[ Thu Sep 15 11:03:26 2022 ] Top5: 83.73% +[ Thu Sep 15 11:03:26 2022 ] Training epoch: 56 +[ Thu Sep 15 11:03:54 2022 ] Batch(34/123) done. Loss: 0.2123 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 11:04:59 2022 ] Eval epoch: 56 +[ Thu Sep 15 11:05:36 2022 ] Mean test loss of 296 batches: 2.581186294555664. +[ Thu Sep 15 11:05:36 2022 ] Top1: 52.96% +[ Thu Sep 15 11:05:36 2022 ] Top5: 84.26% +[ Thu Sep 15 11:05:36 2022 ] Training epoch: 57 +[ Thu Sep 15 11:05:48 2022 ] Batch(11/123) done. Loss: 0.0678 lr:0.100000 network_time: 0.0300 +[ Thu Sep 15 11:07:01 2022 ] Batch(111/123) done. Loss: 0.1838 lr:0.100000 network_time: 0.0324 +[ Thu Sep 15 11:07:09 2022 ] Eval epoch: 57 +[ Thu Sep 15 11:07:46 2022 ] Mean test loss of 296 batches: 2.2672312259674072. +[ Thu Sep 15 11:07:46 2022 ] Top1: 57.43% +[ Thu Sep 15 11:07:46 2022 ] Top5: 88.60% +[ Thu Sep 15 11:07:47 2022 ] Training epoch: 58 +[ Thu Sep 15 11:08:54 2022 ] Batch(88/123) done. Loss: 0.1328 lr:0.100000 network_time: 0.0246 +[ Thu Sep 15 11:09:19 2022 ] Eval epoch: 58 +[ Thu Sep 15 11:09:56 2022 ] Mean test loss of 296 batches: 1.9263916015625. 
+[ Thu Sep 15 11:09:57 2022 ] Top1: 61.58% +[ Thu Sep 15 11:09:57 2022 ] Top5: 89.66% +[ Thu Sep 15 11:09:57 2022 ] Training epoch: 59 +[ Thu Sep 15 11:10:48 2022 ] Batch(65/123) done. Loss: 0.2005 lr:0.100000 network_time: 0.0253 +[ Thu Sep 15 11:11:30 2022 ] Eval epoch: 59 +[ Thu Sep 15 11:12:07 2022 ] Mean test loss of 296 batches: 2.8199093341827393. +[ Thu Sep 15 11:12:07 2022 ] Top1: 49.94% +[ Thu Sep 15 11:12:07 2022 ] Top5: 79.42% +[ Thu Sep 15 11:12:07 2022 ] Training epoch: 60 +[ Thu Sep 15 11:12:41 2022 ] Batch(42/123) done. Loss: 0.0885 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 11:13:40 2022 ] Eval epoch: 60 +[ Thu Sep 15 11:14:17 2022 ] Mean test loss of 296 batches: 3.3927879333496094. +[ Thu Sep 15 11:14:17 2022 ] Top1: 40.08% +[ Thu Sep 15 11:14:17 2022 ] Top5: 72.51% +[ Thu Sep 15 11:14:17 2022 ] Training epoch: 61 +[ Thu Sep 15 11:14:34 2022 ] Batch(19/123) done. Loss: 0.0690 lr:0.010000 network_time: 0.0292 +[ Thu Sep 15 11:15:48 2022 ] Batch(119/123) done. Loss: 0.0368 lr:0.010000 network_time: 0.0276 +[ Thu Sep 15 11:15:50 2022 ] Eval epoch: 61 +[ Thu Sep 15 11:16:27 2022 ] Mean test loss of 296 batches: 1.446427822113037. +[ Thu Sep 15 11:16:27 2022 ] Top1: 68.69% +[ Thu Sep 15 11:16:27 2022 ] Top5: 92.92% +[ Thu Sep 15 11:16:27 2022 ] Training epoch: 62 +[ Thu Sep 15 11:17:41 2022 ] Batch(96/123) done. Loss: 0.0195 lr:0.010000 network_time: 0.0275 +[ Thu Sep 15 11:18:00 2022 ] Eval epoch: 62 +[ Thu Sep 15 11:18:38 2022 ] Mean test loss of 296 batches: 1.4903637170791626. +[ Thu Sep 15 11:18:38 2022 ] Top1: 68.93% +[ Thu Sep 15 11:18:38 2022 ] Top5: 92.89% +[ Thu Sep 15 11:18:38 2022 ] Training epoch: 63 +[ Thu Sep 15 11:19:35 2022 ] Batch(73/123) done. Loss: 0.0048 lr:0.010000 network_time: 0.0261 +[ Thu Sep 15 11:20:11 2022 ] Eval epoch: 63 +[ Thu Sep 15 11:20:49 2022 ] Mean test loss of 296 batches: 1.4617441892623901. 
+[ Thu Sep 15 11:20:49 2022 ] Top1: 69.23% +[ Thu Sep 15 11:20:49 2022 ] Top5: 93.29% +[ Thu Sep 15 11:20:49 2022 ] Training epoch: 64 +[ Thu Sep 15 11:21:29 2022 ] Batch(50/123) done. Loss: 0.0093 lr:0.010000 network_time: 0.0297 +[ Thu Sep 15 11:22:22 2022 ] Eval epoch: 64 +[ Thu Sep 15 11:22:59 2022 ] Mean test loss of 296 batches: 1.4043320417404175. +[ Thu Sep 15 11:22:59 2022 ] Top1: 69.11% +[ Thu Sep 15 11:22:59 2022 ] Top5: 93.14% +[ Thu Sep 15 11:22:59 2022 ] Training epoch: 65 +[ Thu Sep 15 11:23:22 2022 ] Batch(27/123) done. Loss: 0.0061 lr:0.010000 network_time: 0.0273 +[ Thu Sep 15 11:24:32 2022 ] Eval epoch: 65 +[ Thu Sep 15 11:25:09 2022 ] Mean test loss of 296 batches: 1.7129192352294922. +[ Thu Sep 15 11:25:09 2022 ] Top1: 65.01% +[ Thu Sep 15 11:25:09 2022 ] Top5: 90.94% +[ Thu Sep 15 11:25:09 2022 ] Training epoch: 66 +[ Thu Sep 15 11:25:15 2022 ] Batch(4/123) done. Loss: 0.0059 lr:0.010000 network_time: 0.0324 +[ Thu Sep 15 11:26:29 2022 ] Batch(104/123) done. Loss: 0.0032 lr:0.010000 network_time: 0.0278 +[ Thu Sep 15 11:26:42 2022 ] Eval epoch: 66 +[ Thu Sep 15 11:27:19 2022 ] Mean test loss of 296 batches: 1.4624316692352295. +[ Thu Sep 15 11:27:19 2022 ] Top1: 69.75% +[ Thu Sep 15 11:27:19 2022 ] Top5: 93.28% +[ Thu Sep 15 11:27:19 2022 ] Training epoch: 67 +[ Thu Sep 15 11:28:22 2022 ] Batch(81/123) done. Loss: 0.0090 lr:0.010000 network_time: 0.0263 +[ Thu Sep 15 11:28:52 2022 ] Eval epoch: 67 +[ Thu Sep 15 11:29:30 2022 ] Mean test loss of 296 batches: 1.4172018766403198. +[ Thu Sep 15 11:29:30 2022 ] Top1: 69.97% +[ Thu Sep 15 11:29:30 2022 ] Top5: 93.46% +[ Thu Sep 15 11:29:30 2022 ] Training epoch: 68 +[ Thu Sep 15 11:30:16 2022 ] Batch(58/123) done. Loss: 0.0062 lr:0.010000 network_time: 0.0296 +[ Thu Sep 15 11:31:03 2022 ] Eval epoch: 68 +[ Thu Sep 15 11:31:40 2022 ] Mean test loss of 296 batches: 1.3846447467803955. 
+[ Thu Sep 15 11:31:40 2022 ] Top1: 70.11% +[ Thu Sep 15 11:31:40 2022 ] Top5: 93.37% +[ Thu Sep 15 11:31:40 2022 ] Training epoch: 69 +[ Thu Sep 15 11:32:09 2022 ] Batch(35/123) done. Loss: 0.0082 lr:0.010000 network_time: 0.0279 +[ Thu Sep 15 11:33:13 2022 ] Eval epoch: 69 +[ Thu Sep 15 11:33:50 2022 ] Mean test loss of 296 batches: 1.4668055772781372. +[ Thu Sep 15 11:33:50 2022 ] Top1: 69.23% +[ Thu Sep 15 11:33:51 2022 ] Top5: 92.94% +[ Thu Sep 15 11:33:51 2022 ] Training epoch: 70 +[ Thu Sep 15 11:34:03 2022 ] Batch(12/123) done. Loss: 0.0038 lr:0.010000 network_time: 0.0274 +[ Thu Sep 15 11:35:16 2022 ] Batch(112/123) done. Loss: 0.0046 lr:0.010000 network_time: 0.0265 +[ Thu Sep 15 11:35:24 2022 ] Eval epoch: 70 +[ Thu Sep 15 11:36:01 2022 ] Mean test loss of 296 batches: 1.475498914718628. +[ Thu Sep 15 11:36:01 2022 ] Top1: 69.66% +[ Thu Sep 15 11:36:01 2022 ] Top5: 93.07% +[ Thu Sep 15 11:36:01 2022 ] Training epoch: 71 +[ Thu Sep 15 11:37:10 2022 ] Batch(89/123) done. Loss: 0.0075 lr:0.010000 network_time: 0.0275 +[ Thu Sep 15 11:37:34 2022 ] Eval epoch: 71 +[ Thu Sep 15 11:38:12 2022 ] Mean test loss of 296 batches: 1.38612961769104. +[ Thu Sep 15 11:38:12 2022 ] Top1: 70.43% +[ Thu Sep 15 11:38:12 2022 ] Top5: 93.33% +[ Thu Sep 15 11:38:12 2022 ] Training epoch: 72 +[ Thu Sep 15 11:39:04 2022 ] Batch(66/123) done. Loss: 0.0061 lr:0.010000 network_time: 0.0317 +[ Thu Sep 15 11:39:45 2022 ] Eval epoch: 72 +[ Thu Sep 15 11:40:22 2022 ] Mean test loss of 296 batches: 1.4636784791946411. +[ Thu Sep 15 11:40:22 2022 ] Top1: 69.33% +[ Thu Sep 15 11:40:22 2022 ] Top5: 93.08% +[ Thu Sep 15 11:40:22 2022 ] Training epoch: 73 +[ Thu Sep 15 11:40:57 2022 ] Batch(43/123) done. Loss: 0.0052 lr:0.010000 network_time: 0.0319 +[ Thu Sep 15 11:41:55 2022 ] Eval epoch: 73 +[ Thu Sep 15 11:42:32 2022 ] Mean test loss of 296 batches: 1.4635051488876343. 
+[ Thu Sep 15 11:42:32 2022 ] Top1: 68.60% +[ Thu Sep 15 11:42:32 2022 ] Top5: 92.85% +[ Thu Sep 15 11:42:32 2022 ] Training epoch: 74 +[ Thu Sep 15 11:42:50 2022 ] Batch(20/123) done. Loss: 0.0046 lr:0.010000 network_time: 0.0318 +[ Thu Sep 15 11:44:04 2022 ] Batch(120/123) done. Loss: 0.0214 lr:0.010000 network_time: 0.0264 +[ Thu Sep 15 11:44:05 2022 ] Eval epoch: 74 +[ Thu Sep 15 11:44:42 2022 ] Mean test loss of 296 batches: 1.4525089263916016. +[ Thu Sep 15 11:44:42 2022 ] Top1: 69.45% +[ Thu Sep 15 11:44:43 2022 ] Top5: 93.14% +[ Thu Sep 15 11:44:43 2022 ] Training epoch: 75 +[ Thu Sep 15 11:45:57 2022 ] Batch(97/123) done. Loss: 0.0081 lr:0.010000 network_time: 0.0305 +[ Thu Sep 15 11:46:16 2022 ] Eval epoch: 75 +[ Thu Sep 15 11:46:53 2022 ] Mean test loss of 296 batches: 1.447242021560669. +[ Thu Sep 15 11:46:53 2022 ] Top1: 69.90% +[ Thu Sep 15 11:46:53 2022 ] Top5: 93.36% +[ Thu Sep 15 11:46:53 2022 ] Training epoch: 76 +[ Thu Sep 15 11:47:50 2022 ] Batch(74/123) done. Loss: 0.0088 lr:0.010000 network_time: 0.0265 +[ Thu Sep 15 11:48:26 2022 ] Eval epoch: 76 +[ Thu Sep 15 11:49:02 2022 ] Mean test loss of 296 batches: 1.430863857269287. +[ Thu Sep 15 11:49:02 2022 ] Top1: 70.17% +[ Thu Sep 15 11:49:02 2022 ] Top5: 93.21% +[ Thu Sep 15 11:49:02 2022 ] Training epoch: 77 +[ Thu Sep 15 11:49:43 2022 ] Batch(51/123) done. Loss: 0.0032 lr:0.010000 network_time: 0.0277 +[ Thu Sep 15 11:50:35 2022 ] Eval epoch: 77 +[ Thu Sep 15 11:51:12 2022 ] Mean test loss of 296 batches: 1.4083019495010376. +[ Thu Sep 15 11:51:12 2022 ] Top1: 70.50% +[ Thu Sep 15 11:51:12 2022 ] Top5: 93.33% +[ Thu Sep 15 11:51:12 2022 ] Training epoch: 78 +[ Thu Sep 15 11:51:36 2022 ] Batch(28/123) done. Loss: 0.0049 lr:0.010000 network_time: 0.0278 +[ Thu Sep 15 11:52:45 2022 ] Eval epoch: 78 +[ Thu Sep 15 11:53:22 2022 ] Mean test loss of 296 batches: 1.428770661354065. 
+[ Thu Sep 15 11:53:22 2022 ] Top1: 70.01% +[ Thu Sep 15 11:53:22 2022 ] Top5: 93.38% +[ Thu Sep 15 11:53:22 2022 ] Training epoch: 79 +[ Thu Sep 15 11:53:29 2022 ] Batch(5/123) done. Loss: 0.0054 lr:0.010000 network_time: 0.0313 +[ Thu Sep 15 11:54:43 2022 ] Batch(105/123) done. Loss: 0.0595 lr:0.010000 network_time: 0.0287 +[ Thu Sep 15 11:54:55 2022 ] Eval epoch: 79 +[ Thu Sep 15 11:55:32 2022 ] Mean test loss of 296 batches: 1.5185917615890503. +[ Thu Sep 15 11:55:32 2022 ] Top1: 69.06% +[ Thu Sep 15 11:55:33 2022 ] Top5: 92.88% +[ Thu Sep 15 11:55:33 2022 ] Training epoch: 80 +[ Thu Sep 15 11:56:36 2022 ] Batch(82/123) done. Loss: 0.0085 lr:0.010000 network_time: 0.0304 +[ Thu Sep 15 11:57:05 2022 ] Eval epoch: 80 +[ Thu Sep 15 11:57:42 2022 ] Mean test loss of 296 batches: 1.4583548307418823. +[ Thu Sep 15 11:57:42 2022 ] Top1: 70.02% +[ Thu Sep 15 11:57:43 2022 ] Top5: 93.27% +[ Thu Sep 15 11:57:43 2022 ] Training epoch: 81 +[ Thu Sep 15 11:58:29 2022 ] Batch(59/123) done. Loss: 0.0044 lr:0.001000 network_time: 0.0269 +[ Thu Sep 15 11:59:16 2022 ] Eval epoch: 81 +[ Thu Sep 15 11:59:53 2022 ] Mean test loss of 296 batches: 1.4041979312896729. +[ Thu Sep 15 11:59:53 2022 ] Top1: 70.12% +[ Thu Sep 15 11:59:53 2022 ] Top5: 93.19% +[ Thu Sep 15 11:59:53 2022 ] Training epoch: 82 +[ Thu Sep 15 12:00:23 2022 ] Batch(36/123) done. Loss: 0.0032 lr:0.001000 network_time: 0.0277 +[ Thu Sep 15 12:01:26 2022 ] Eval epoch: 82 +[ Thu Sep 15 12:02:03 2022 ] Mean test loss of 296 batches: 1.486446738243103. +[ Thu Sep 15 12:02:03 2022 ] Top1: 68.72% +[ Thu Sep 15 12:02:03 2022 ] Top5: 93.05% +[ Thu Sep 15 12:02:04 2022 ] Training epoch: 83 +[ Thu Sep 15 12:02:17 2022 ] Batch(13/123) done. Loss: 0.0059 lr:0.001000 network_time: 0.0281 +[ Thu Sep 15 12:03:30 2022 ] Batch(113/123) done. Loss: 0.0190 lr:0.001000 network_time: 0.0320 +[ Thu Sep 15 12:03:37 2022 ] Eval epoch: 83 +[ Thu Sep 15 12:04:14 2022 ] Mean test loss of 296 batches: 1.4069422483444214. 
+[ Thu Sep 15 12:04:14 2022 ] Top1: 70.15% +[ Thu Sep 15 12:04:14 2022 ] Top5: 93.29% +[ Thu Sep 15 12:04:14 2022 ] Training epoch: 84 +[ Thu Sep 15 12:05:23 2022 ] Batch(90/123) done. Loss: 0.0049 lr:0.001000 network_time: 0.0266 +[ Thu Sep 15 12:05:47 2022 ] Eval epoch: 84 +[ Thu Sep 15 12:06:24 2022 ] Mean test loss of 296 batches: 1.4547550678253174. +[ Thu Sep 15 12:06:24 2022 ] Top1: 69.92% +[ Thu Sep 15 12:06:24 2022 ] Top5: 93.27% +[ Thu Sep 15 12:06:24 2022 ] Training epoch: 85 +[ Thu Sep 15 12:07:16 2022 ] Batch(67/123) done. Loss: 0.0050 lr:0.001000 network_time: 0.0362 +[ Thu Sep 15 12:07:57 2022 ] Eval epoch: 85 +[ Thu Sep 15 12:08:34 2022 ] Mean test loss of 296 batches: 1.4078006744384766. +[ Thu Sep 15 12:08:34 2022 ] Top1: 70.47% +[ Thu Sep 15 12:08:34 2022 ] Top5: 93.32% +[ Thu Sep 15 12:08:34 2022 ] Training epoch: 86 +[ Thu Sep 15 12:09:09 2022 ] Batch(44/123) done. Loss: 0.0218 lr:0.001000 network_time: 0.0277 +[ Thu Sep 15 12:10:07 2022 ] Eval epoch: 86 +[ Thu Sep 15 12:10:44 2022 ] Mean test loss of 296 batches: 1.561448335647583. +[ Thu Sep 15 12:10:44 2022 ] Top1: 69.39% +[ Thu Sep 15 12:10:44 2022 ] Top5: 92.89% +[ Thu Sep 15 12:10:44 2022 ] Training epoch: 87 +[ Thu Sep 15 12:11:03 2022 ] Batch(21/123) done. Loss: 0.0055 lr:0.001000 network_time: 0.0280 +[ Thu Sep 15 12:12:16 2022 ] Batch(121/123) done. Loss: 0.0061 lr:0.001000 network_time: 0.0267 +[ Thu Sep 15 12:12:17 2022 ] Eval epoch: 87 +[ Thu Sep 15 12:12:54 2022 ] Mean test loss of 296 batches: 1.4587470293045044. +[ Thu Sep 15 12:12:54 2022 ] Top1: 69.48% +[ Thu Sep 15 12:12:54 2022 ] Top5: 93.18% +[ Thu Sep 15 12:12:54 2022 ] Training epoch: 88 +[ Thu Sep 15 12:14:09 2022 ] Batch(98/123) done. Loss: 0.0033 lr:0.001000 network_time: 0.0272 +[ Thu Sep 15 12:14:27 2022 ] Eval epoch: 88 +[ Thu Sep 15 12:15:04 2022 ] Mean test loss of 296 batches: 1.4414913654327393. 
+[ Thu Sep 15 12:15:04 2022 ] Top1: 70.05% +[ Thu Sep 15 12:15:04 2022 ] Top5: 93.25% +[ Thu Sep 15 12:15:04 2022 ] Training epoch: 89 +[ Thu Sep 15 12:16:02 2022 ] Batch(75/123) done. Loss: 0.0063 lr:0.001000 network_time: 0.0322 +[ Thu Sep 15 12:16:37 2022 ] Eval epoch: 89 +[ Thu Sep 15 12:17:14 2022 ] Mean test loss of 296 batches: 1.4233782291412354. +[ Thu Sep 15 12:17:14 2022 ] Top1: 69.97% +[ Thu Sep 15 12:17:14 2022 ] Top5: 93.03% +[ Thu Sep 15 12:17:14 2022 ] Training epoch: 90 +[ Thu Sep 15 12:17:56 2022 ] Batch(52/123) done. Loss: 0.0098 lr:0.001000 network_time: 0.0279 +[ Thu Sep 15 12:18:47 2022 ] Eval epoch: 90 +[ Thu Sep 15 12:19:25 2022 ] Mean test loss of 296 batches: 1.417391061782837. +[ Thu Sep 15 12:19:25 2022 ] Top1: 70.33% +[ Thu Sep 15 12:19:25 2022 ] Top5: 93.28% +[ Thu Sep 15 12:19:25 2022 ] Training epoch: 91 +[ Thu Sep 15 12:19:49 2022 ] Batch(29/123) done. Loss: 0.0014 lr:0.001000 network_time: 0.0267 +[ Thu Sep 15 12:20:58 2022 ] Eval epoch: 91 +[ Thu Sep 15 12:21:35 2022 ] Mean test loss of 296 batches: 1.486148476600647. +[ Thu Sep 15 12:21:35 2022 ] Top1: 69.62% +[ Thu Sep 15 12:21:35 2022 ] Top5: 93.13% +[ Thu Sep 15 12:21:35 2022 ] Training epoch: 92 +[ Thu Sep 15 12:21:42 2022 ] Batch(6/123) done. Loss: 0.0022 lr:0.001000 network_time: 0.0298 +[ Thu Sep 15 12:22:56 2022 ] Batch(106/123) done. Loss: 0.0027 lr:0.001000 network_time: 0.0274 +[ Thu Sep 15 12:23:08 2022 ] Eval epoch: 92 +[ Thu Sep 15 12:23:45 2022 ] Mean test loss of 296 batches: 1.4244694709777832. +[ Thu Sep 15 12:23:45 2022 ] Top1: 69.89% +[ Thu Sep 15 12:23:45 2022 ] Top5: 93.38% +[ Thu Sep 15 12:23:45 2022 ] Training epoch: 93 +[ Thu Sep 15 12:24:49 2022 ] Batch(83/123) done. Loss: 0.0020 lr:0.001000 network_time: 0.0276 +[ Thu Sep 15 12:25:18 2022 ] Eval epoch: 93 +[ Thu Sep 15 12:25:54 2022 ] Mean test loss of 296 batches: 1.497768521308899. 
+[ Thu Sep 15 12:25:54 2022 ] Top1: 69.78% +[ Thu Sep 15 12:25:54 2022 ] Top5: 93.28% +[ Thu Sep 15 12:25:54 2022 ] Training epoch: 94 +[ Thu Sep 15 12:26:42 2022 ] Batch(60/123) done. Loss: 0.0054 lr:0.001000 network_time: 0.0272 +[ Thu Sep 15 12:27:27 2022 ] Eval epoch: 94 +[ Thu Sep 15 12:28:04 2022 ] Mean test loss of 296 batches: 1.4625046253204346. +[ Thu Sep 15 12:28:04 2022 ] Top1: 70.20% +[ Thu Sep 15 12:28:04 2022 ] Top5: 93.32% +[ Thu Sep 15 12:28:04 2022 ] Training epoch: 95 +[ Thu Sep 15 12:28:34 2022 ] Batch(37/123) done. Loss: 0.0040 lr:0.001000 network_time: 0.0266 +[ Thu Sep 15 12:29:37 2022 ] Eval epoch: 95 +[ Thu Sep 15 12:30:14 2022 ] Mean test loss of 296 batches: 1.62168288230896. +[ Thu Sep 15 12:30:14 2022 ] Top1: 69.27% +[ Thu Sep 15 12:30:14 2022 ] Top5: 92.49% +[ Thu Sep 15 12:30:14 2022 ] Training epoch: 96 +[ Thu Sep 15 12:30:27 2022 ] Batch(14/123) done. Loss: 0.0028 lr:0.001000 network_time: 0.0313 +[ Thu Sep 15 12:31:41 2022 ] Batch(114/123) done. Loss: 0.0019 lr:0.001000 network_time: 0.0280 +[ Thu Sep 15 12:31:47 2022 ] Eval epoch: 96 +[ Thu Sep 15 12:32:24 2022 ] Mean test loss of 296 batches: 1.4777637720108032. +[ Thu Sep 15 12:32:24 2022 ] Top1: 68.63% +[ Thu Sep 15 12:32:24 2022 ] Top5: 93.06% +[ Thu Sep 15 12:32:24 2022 ] Training epoch: 97 +[ Thu Sep 15 12:33:34 2022 ] Batch(91/123) done. Loss: 0.0044 lr:0.001000 network_time: 0.0307 +[ Thu Sep 15 12:33:56 2022 ] Eval epoch: 97 +[ Thu Sep 15 12:34:33 2022 ] Mean test loss of 296 batches: 1.6676489114761353. +[ Thu Sep 15 12:34:33 2022 ] Top1: 65.04% +[ Thu Sep 15 12:34:34 2022 ] Top5: 91.66% +[ Thu Sep 15 12:34:34 2022 ] Training epoch: 98 +[ Thu Sep 15 12:35:27 2022 ] Batch(68/123) done. Loss: 0.0101 lr:0.001000 network_time: 0.0270 +[ Thu Sep 15 12:36:06 2022 ] Eval epoch: 98 +[ Thu Sep 15 12:36:43 2022 ] Mean test loss of 296 batches: 1.4118143320083618. 
+[ Thu Sep 15 12:36:44 2022 ] Top1: 70.33% +[ Thu Sep 15 12:36:44 2022 ] Top5: 93.40% +[ Thu Sep 15 12:36:44 2022 ] Training epoch: 99 +[ Thu Sep 15 12:37:20 2022 ] Batch(45/123) done. Loss: 0.0054 lr:0.001000 network_time: 0.0270 +[ Thu Sep 15 12:38:16 2022 ] Eval epoch: 99 +[ Thu Sep 15 12:38:53 2022 ] Mean test loss of 296 batches: 1.4273409843444824. +[ Thu Sep 15 12:38:54 2022 ] Top1: 70.23% +[ Thu Sep 15 12:38:54 2022 ] Top5: 93.40% +[ Thu Sep 15 12:38:54 2022 ] Training epoch: 100 +[ Thu Sep 15 12:39:13 2022 ] Batch(22/123) done. Loss: 0.0027 lr:0.001000 network_time: 0.0262 +[ Thu Sep 15 12:40:27 2022 ] Batch(122/123) done. Loss: 0.0058 lr:0.001000 network_time: 0.0270 +[ Thu Sep 15 12:40:27 2022 ] Eval epoch: 100 +[ Thu Sep 15 12:41:04 2022 ] Mean test loss of 296 batches: 1.384246826171875. +[ Thu Sep 15 12:41:04 2022 ] Top1: 70.40% +[ Thu Sep 15 12:41:04 2022 ] Top5: 93.22% +[ Thu Sep 15 12:41:04 2022 ] Training epoch: 101 +[ Thu Sep 15 12:42:19 2022 ] Batch(99/123) done. Loss: 0.0025 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 12:42:36 2022 ] Eval epoch: 101 +[ Thu Sep 15 12:43:14 2022 ] Mean test loss of 296 batches: 1.4325897693634033. +[ Thu Sep 15 12:43:14 2022 ] Top1: 69.26% +[ Thu Sep 15 12:43:14 2022 ] Top5: 93.10% +[ Thu Sep 15 12:43:14 2022 ] Training epoch: 102 +[ Thu Sep 15 12:44:13 2022 ] Batch(76/123) done. Loss: 0.0019 lr:0.000100 network_time: 0.0255 +[ Thu Sep 15 12:44:47 2022 ] Eval epoch: 102 +[ Thu Sep 15 12:45:24 2022 ] Mean test loss of 296 batches: 1.43014395236969. +[ Thu Sep 15 12:45:24 2022 ] Top1: 69.73% +[ Thu Sep 15 12:45:24 2022 ] Top5: 93.13% +[ Thu Sep 15 12:45:24 2022 ] Training epoch: 103 +[ Thu Sep 15 12:46:06 2022 ] Batch(53/123) done. Loss: 0.0082 lr:0.000100 network_time: 0.0277 +[ Thu Sep 15 12:46:57 2022 ] Eval epoch: 103 +[ Thu Sep 15 12:47:34 2022 ] Mean test loss of 296 batches: 1.4462734460830688. 
+[ Thu Sep 15 12:47:34 2022 ] Top1: 69.95% +[ Thu Sep 15 12:47:34 2022 ] Top5: 93.07% +[ Thu Sep 15 12:47:34 2022 ] Training epoch: 104 +[ Thu Sep 15 12:47:59 2022 ] Batch(30/123) done. Loss: 0.0493 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 12:49:07 2022 ] Eval epoch: 104 +[ Thu Sep 15 12:49:44 2022 ] Mean test loss of 296 batches: 1.5018680095672607. +[ Thu Sep 15 12:49:44 2022 ] Top1: 69.89% +[ Thu Sep 15 12:49:44 2022 ] Top5: 93.18% +[ Thu Sep 15 12:49:44 2022 ] Training epoch: 105 +[ Thu Sep 15 12:49:53 2022 ] Batch(7/123) done. Loss: 0.0042 lr:0.000100 network_time: 0.0367 +[ Thu Sep 15 12:51:06 2022 ] Batch(107/123) done. Loss: 0.0033 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 12:51:18 2022 ] Eval epoch: 105 +[ Thu Sep 15 12:51:54 2022 ] Mean test loss of 296 batches: 1.450966715812683. +[ Thu Sep 15 12:51:54 2022 ] Top1: 70.04% +[ Thu Sep 15 12:51:55 2022 ] Top5: 93.29% +[ Thu Sep 15 12:51:55 2022 ] Training epoch: 106 +[ Thu Sep 15 12:52:59 2022 ] Batch(84/123) done. Loss: 0.0018 lr:0.000100 network_time: 0.0267 +[ Thu Sep 15 12:53:27 2022 ] Eval epoch: 106 +[ Thu Sep 15 12:54:05 2022 ] Mean test loss of 296 batches: 1.4319416284561157. +[ Thu Sep 15 12:54:05 2022 ] Top1: 69.99% +[ Thu Sep 15 12:54:05 2022 ] Top5: 93.30% +[ Thu Sep 15 12:54:05 2022 ] Training epoch: 107 +[ Thu Sep 15 12:54:53 2022 ] Batch(61/123) done. Loss: 0.0019 lr:0.000100 network_time: 0.0304 +[ Thu Sep 15 12:55:38 2022 ] Eval epoch: 107 +[ Thu Sep 15 12:56:15 2022 ] Mean test loss of 296 batches: 1.7199466228485107. +[ Thu Sep 15 12:56:15 2022 ] Top1: 64.81% +[ Thu Sep 15 12:56:15 2022 ] Top5: 91.61% +[ Thu Sep 15 12:56:15 2022 ] Training epoch: 108 +[ Thu Sep 15 12:56:46 2022 ] Batch(38/123) done. Loss: 0.0026 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 12:57:48 2022 ] Eval epoch: 108 +[ Thu Sep 15 12:58:25 2022 ] Mean test loss of 296 batches: 1.5287234783172607. 
+[ Thu Sep 15 12:58:25 2022 ] Top1: 69.48% +[ Thu Sep 15 12:58:25 2022 ] Top5: 92.95% +[ Thu Sep 15 12:58:25 2022 ] Training epoch: 109 +[ Thu Sep 15 12:58:40 2022 ] Batch(15/123) done. Loss: 0.0031 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 12:59:53 2022 ] Batch(115/123) done. Loss: 0.0054 lr:0.000100 network_time: 0.0303 +[ Thu Sep 15 12:59:58 2022 ] Eval epoch: 109 +[ Thu Sep 15 13:00:35 2022 ] Mean test loss of 296 batches: 1.4482325315475464. +[ Thu Sep 15 13:00:35 2022 ] Top1: 69.81% +[ Thu Sep 15 13:00:35 2022 ] Top5: 93.17% +[ Thu Sep 15 13:00:35 2022 ] Training epoch: 110 +[ Thu Sep 15 13:01:46 2022 ] Batch(92/123) done. Loss: 0.0021 lr:0.000100 network_time: 0.0281 +[ Thu Sep 15 13:02:08 2022 ] Eval epoch: 110 +[ Thu Sep 15 13:02:45 2022 ] Mean test loss of 296 batches: 1.4567145109176636. +[ Thu Sep 15 13:02:45 2022 ] Top1: 69.43% +[ Thu Sep 15 13:02:46 2022 ] Top5: 93.03% +[ Thu Sep 15 13:02:46 2022 ] Training epoch: 111 +[ Thu Sep 15 13:03:39 2022 ] Batch(69/123) done. Loss: 0.0099 lr:0.000100 network_time: 0.0267 +[ Thu Sep 15 13:04:18 2022 ] Eval epoch: 111 +[ Thu Sep 15 13:04:56 2022 ] Mean test loss of 296 batches: 1.4794142246246338. +[ Thu Sep 15 13:04:56 2022 ] Top1: 68.49% +[ Thu Sep 15 13:04:56 2022 ] Top5: 92.95% +[ Thu Sep 15 13:04:56 2022 ] Training epoch: 112 +[ Thu Sep 15 13:05:33 2022 ] Batch(46/123) done. Loss: 0.0017 lr:0.000100 network_time: 0.0307 +[ Thu Sep 15 13:06:29 2022 ] Eval epoch: 112 +[ Thu Sep 15 13:07:05 2022 ] Mean test loss of 296 batches: 1.3977859020233154. +[ Thu Sep 15 13:07:05 2022 ] Top1: 70.41% +[ Thu Sep 15 13:07:05 2022 ] Top5: 93.40% +[ Thu Sep 15 13:07:05 2022 ] Training epoch: 113 +[ Thu Sep 15 13:07:26 2022 ] Batch(23/123) done. Loss: 0.0077 lr:0.000100 network_time: 0.0511 +[ Thu Sep 15 13:08:38 2022 ] Eval epoch: 113 +[ Thu Sep 15 13:09:15 2022 ] Mean test loss of 296 batches: 1.4809592962265015. 
+[ Thu Sep 15 13:09:15 2022 ] Top1: 68.42% +[ Thu Sep 15 13:09:15 2022 ] Top5: 93.01% +[ Thu Sep 15 13:09:15 2022 ] Training epoch: 114 +[ Thu Sep 15 13:09:19 2022 ] Batch(0/123) done. Loss: 0.0033 lr:0.000100 network_time: 0.0442 +[ Thu Sep 15 13:10:32 2022 ] Batch(100/123) done. Loss: 0.0019 lr:0.000100 network_time: 0.0268 +[ Thu Sep 15 13:10:48 2022 ] Eval epoch: 114 +[ Thu Sep 15 13:11:25 2022 ] Mean test loss of 296 batches: 1.551892638206482. +[ Thu Sep 15 13:11:25 2022 ] Top1: 69.58% +[ Thu Sep 15 13:11:26 2022 ] Top5: 92.97% +[ Thu Sep 15 13:11:26 2022 ] Training epoch: 115 +[ Thu Sep 15 13:12:25 2022 ] Batch(77/123) done. Loss: 0.0016 lr:0.000100 network_time: 0.0323 +[ Thu Sep 15 13:12:59 2022 ] Eval epoch: 115 +[ Thu Sep 15 13:13:36 2022 ] Mean test loss of 296 batches: 1.4836949110031128. +[ Thu Sep 15 13:13:36 2022 ] Top1: 69.60% +[ Thu Sep 15 13:13:36 2022 ] Top5: 93.14% +[ Thu Sep 15 13:13:36 2022 ] Training epoch: 116 +[ Thu Sep 15 13:14:18 2022 ] Batch(54/123) done. Loss: 0.0052 lr:0.000100 network_time: 0.0311 +[ Thu Sep 15 13:15:09 2022 ] Eval epoch: 116 +[ Thu Sep 15 13:15:45 2022 ] Mean test loss of 296 batches: 1.4088884592056274. +[ Thu Sep 15 13:15:45 2022 ] Top1: 70.47% +[ Thu Sep 15 13:15:46 2022 ] Top5: 93.39% +[ Thu Sep 15 13:15:46 2022 ] Training epoch: 117 +[ Thu Sep 15 13:16:12 2022 ] Batch(31/123) done. Loss: 0.0085 lr:0.000100 network_time: 0.0317 +[ Thu Sep 15 13:17:18 2022 ] Eval epoch: 117 +[ Thu Sep 15 13:17:55 2022 ] Mean test loss of 296 batches: 1.4486162662506104. +[ Thu Sep 15 13:17:55 2022 ] Top1: 69.71% +[ Thu Sep 15 13:17:55 2022 ] Top5: 93.20% +[ Thu Sep 15 13:17:55 2022 ] Training epoch: 118 +[ Thu Sep 15 13:18:04 2022 ] Batch(8/123) done. Loss: 0.0101 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 13:19:18 2022 ] Batch(108/123) done. Loss: 0.0046 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 13:19:28 2022 ] Eval epoch: 118 +[ Thu Sep 15 13:20:05 2022 ] Mean test loss of 296 batches: 1.4738733768463135. 
+[ Thu Sep 15 13:20:05 2022 ] Top1: 69.81% +[ Thu Sep 15 13:20:05 2022 ] Top5: 93.08% +[ Thu Sep 15 13:20:05 2022 ] Training epoch: 119 +[ Thu Sep 15 13:21:10 2022 ] Batch(85/123) done. Loss: 0.0063 lr:0.000100 network_time: 0.0308 +[ Thu Sep 15 13:21:38 2022 ] Eval epoch: 119 +[ Thu Sep 15 13:22:15 2022 ] Mean test loss of 296 batches: 1.4473400115966797. +[ Thu Sep 15 13:22:15 2022 ] Top1: 70.14% +[ Thu Sep 15 13:22:15 2022 ] Top5: 93.16% +[ Thu Sep 15 13:22:15 2022 ] Training epoch: 120 +[ Thu Sep 15 13:23:03 2022 ] Batch(62/123) done. Loss: 0.0040 lr:0.000100 network_time: 0.0253 +[ Thu Sep 15 13:23:47 2022 ] Eval epoch: 120 +[ Thu Sep 15 13:24:24 2022 ] Mean test loss of 296 batches: 1.477660894393921. +[ Thu Sep 15 13:24:24 2022 ] Top1: 69.07% +[ Thu Sep 15 13:24:25 2022 ] Top5: 92.72% +[ Thu Sep 15 13:24:25 2022 ] Training epoch: 121 +[ Thu Sep 15 13:24:56 2022 ] Batch(39/123) done. Loss: 0.0221 lr:0.000100 network_time: 0.0315 +[ Thu Sep 15 13:25:57 2022 ] Eval epoch: 121 +[ Thu Sep 15 13:26:34 2022 ] Mean test loss of 296 batches: 1.4495548009872437. +[ Thu Sep 15 13:26:34 2022 ] Top1: 70.14% +[ Thu Sep 15 13:26:35 2022 ] Top5: 93.33% +[ Thu Sep 15 13:26:35 2022 ] Training epoch: 122 +[ Thu Sep 15 13:26:49 2022 ] Batch(16/123) done. Loss: 0.0039 lr:0.000100 network_time: 0.0368 +[ Thu Sep 15 13:28:03 2022 ] Batch(116/123) done. Loss: 0.0021 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 13:28:07 2022 ] Eval epoch: 122 +[ Thu Sep 15 13:28:44 2022 ] Mean test loss of 296 batches: 1.506091594696045. +[ Thu Sep 15 13:28:44 2022 ] Top1: 68.75% +[ Thu Sep 15 13:28:45 2022 ] Top5: 92.70% +[ Thu Sep 15 13:28:45 2022 ] Training epoch: 123 +[ Thu Sep 15 13:29:56 2022 ] Batch(93/123) done. Loss: 0.0041 lr:0.000100 network_time: 0.0298 +[ Thu Sep 15 13:30:17 2022 ] Eval epoch: 123 +[ Thu Sep 15 13:30:54 2022 ] Mean test loss of 296 batches: 1.482643961906433. 
+[ Thu Sep 15 13:30:54 2022 ] Top1: 69.69% +[ Thu Sep 15 13:30:55 2022 ] Top5: 93.18% +[ Thu Sep 15 13:30:55 2022 ] Training epoch: 124 +[ Thu Sep 15 13:31:49 2022 ] Batch(70/123) done. Loss: 0.0037 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 13:32:27 2022 ] Eval epoch: 124 +[ Thu Sep 15 13:33:04 2022 ] Mean test loss of 296 batches: 1.4302045106887817. +[ Thu Sep 15 13:33:04 2022 ] Top1: 70.19% +[ Thu Sep 15 13:33:04 2022 ] Top5: 93.38% +[ Thu Sep 15 13:33:04 2022 ] Training epoch: 125 +[ Thu Sep 15 13:33:42 2022 ] Batch(47/123) done. Loss: 0.0033 lr:0.000100 network_time: 0.0284 +[ Thu Sep 15 13:34:37 2022 ] Eval epoch: 125 +[ Thu Sep 15 13:35:14 2022 ] Mean test loss of 296 batches: 1.4475640058517456. +[ Thu Sep 15 13:35:14 2022 ] Top1: 70.01% +[ Thu Sep 15 13:35:14 2022 ] Top5: 93.23% +[ Thu Sep 15 13:35:14 2022 ] Training epoch: 126 +[ Thu Sep 15 13:35:35 2022 ] Batch(24/123) done. Loss: 0.0058 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 13:36:47 2022 ] Eval epoch: 126 +[ Thu Sep 15 13:37:24 2022 ] Mean test loss of 296 batches: 1.4791288375854492. +[ Thu Sep 15 13:37:24 2022 ] Top1: 69.18% +[ Thu Sep 15 13:37:25 2022 ] Top5: 92.77% +[ Thu Sep 15 13:37:25 2022 ] Training epoch: 127 +[ Thu Sep 15 13:37:28 2022 ] Batch(1/123) done. Loss: 0.0066 lr:0.000100 network_time: 0.0265 +[ Thu Sep 15 13:38:42 2022 ] Batch(101/123) done. Loss: 0.0212 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 13:38:57 2022 ] Eval epoch: 127 +[ Thu Sep 15 13:39:34 2022 ] Mean test loss of 296 batches: 1.4047034978866577. +[ Thu Sep 15 13:39:34 2022 ] Top1: 69.97% +[ Thu Sep 15 13:39:34 2022 ] Top5: 93.42% +[ Thu Sep 15 13:39:34 2022 ] Training epoch: 128 +[ Thu Sep 15 13:40:34 2022 ] Batch(78/123) done. Loss: 0.0046 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 13:41:07 2022 ] Eval epoch: 128 +[ Thu Sep 15 13:41:44 2022 ] Mean test loss of 296 batches: 1.3966552019119263. 
+[ Thu Sep 15 13:41:44 2022 ] Top1: 70.59% +[ Thu Sep 15 13:41:44 2022 ] Top5: 93.44% +[ Thu Sep 15 13:41:44 2022 ] Training epoch: 129 +[ Thu Sep 15 13:42:27 2022 ] Batch(55/123) done. Loss: 0.0037 lr:0.000100 network_time: 0.0311 +[ Thu Sep 15 13:43:17 2022 ] Eval epoch: 129 +[ Thu Sep 15 13:43:53 2022 ] Mean test loss of 296 batches: 1.4890860319137573. +[ Thu Sep 15 13:43:53 2022 ] Top1: 69.82% +[ Thu Sep 15 13:43:54 2022 ] Top5: 92.96% +[ Thu Sep 15 13:43:54 2022 ] Training epoch: 130 +[ Thu Sep 15 13:44:20 2022 ] Batch(32/123) done. Loss: 0.0027 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 13:45:26 2022 ] Eval epoch: 130 +[ Thu Sep 15 13:46:04 2022 ] Mean test loss of 296 batches: 1.480393648147583. +[ Thu Sep 15 13:46:04 2022 ] Top1: 69.79% +[ Thu Sep 15 13:46:04 2022 ] Top5: 93.20% +[ Thu Sep 15 13:46:04 2022 ] Training epoch: 131 +[ Thu Sep 15 13:46:14 2022 ] Batch(9/123) done. Loss: 0.0023 lr:0.000100 network_time: 0.0297 +[ Thu Sep 15 13:47:27 2022 ] Batch(109/123) done. Loss: 0.0024 lr:0.000100 network_time: 0.0327 +[ Thu Sep 15 13:47:37 2022 ] Eval epoch: 131 +[ Thu Sep 15 13:48:14 2022 ] Mean test loss of 296 batches: 1.4267818927764893. +[ Thu Sep 15 13:48:14 2022 ] Top1: 69.95% +[ Thu Sep 15 13:48:14 2022 ] Top5: 93.27% +[ Thu Sep 15 13:48:14 2022 ] Training epoch: 132 +[ Thu Sep 15 13:49:20 2022 ] Batch(86/123) done. Loss: 0.0032 lr:0.000100 network_time: 0.0267 +[ Thu Sep 15 13:49:47 2022 ] Eval epoch: 132 +[ Thu Sep 15 13:50:23 2022 ] Mean test loss of 296 batches: 1.4464603662490845. +[ Thu Sep 15 13:50:23 2022 ] Top1: 69.00% +[ Thu Sep 15 13:50:23 2022 ] Top5: 93.10% +[ Thu Sep 15 13:50:24 2022 ] Training epoch: 133 +[ Thu Sep 15 13:51:13 2022 ] Batch(63/123) done. Loss: 0.0043 lr:0.000100 network_time: 0.0266 +[ Thu Sep 15 13:51:56 2022 ] Eval epoch: 133 +[ Thu Sep 15 13:52:33 2022 ] Mean test loss of 296 batches: 1.4272266626358032. 
+[ Thu Sep 15 13:52:34 2022 ] Top1: 70.21% +[ Thu Sep 15 13:52:34 2022 ] Top5: 93.29% +[ Thu Sep 15 13:52:34 2022 ] Training epoch: 134 +[ Thu Sep 15 13:53:06 2022 ] Batch(40/123) done. Loss: 0.0031 lr:0.000100 network_time: 0.0303 +[ Thu Sep 15 13:54:06 2022 ] Eval epoch: 134 +[ Thu Sep 15 13:54:43 2022 ] Mean test loss of 296 batches: 1.571119785308838. +[ Thu Sep 15 13:54:43 2022 ] Top1: 67.32% +[ Thu Sep 15 13:54:43 2022 ] Top5: 92.12% +[ Thu Sep 15 13:54:43 2022 ] Training epoch: 135 +[ Thu Sep 15 13:54:59 2022 ] Batch(17/123) done. Loss: 0.0095 lr:0.000100 network_time: 0.0282 +[ Thu Sep 15 13:56:12 2022 ] Batch(117/123) done. Loss: 0.0313 lr:0.000100 network_time: 0.0299 +[ Thu Sep 15 13:56:16 2022 ] Eval epoch: 135 +[ Thu Sep 15 13:56:53 2022 ] Mean test loss of 296 batches: 1.4661633968353271. +[ Thu Sep 15 13:56:53 2022 ] Top1: 68.90% +[ Thu Sep 15 13:56:53 2022 ] Top5: 93.14% +[ Thu Sep 15 13:56:53 2022 ] Training epoch: 136 +[ Thu Sep 15 13:58:05 2022 ] Batch(94/123) done. Loss: 0.0037 lr:0.000100 network_time: 0.0301 +[ Thu Sep 15 13:58:26 2022 ] Eval epoch: 136 +[ Thu Sep 15 13:59:03 2022 ] Mean test loss of 296 batches: 1.4121479988098145. +[ Thu Sep 15 13:59:03 2022 ] Top1: 70.44% +[ Thu Sep 15 13:59:03 2022 ] Top5: 93.42% +[ Thu Sep 15 13:59:03 2022 ] Training epoch: 137 +[ Thu Sep 15 13:59:58 2022 ] Batch(71/123) done. Loss: 0.0039 lr:0.000100 network_time: 0.0319 +[ Thu Sep 15 14:00:36 2022 ] Eval epoch: 137 +[ Thu Sep 15 14:01:13 2022 ] Mean test loss of 296 batches: 1.4106135368347168. +[ Thu Sep 15 14:01:13 2022 ] Top1: 70.15% +[ Thu Sep 15 14:01:13 2022 ] Top5: 93.44% +[ Thu Sep 15 14:01:13 2022 ] Training epoch: 138 +[ Thu Sep 15 14:01:52 2022 ] Batch(48/123) done. Loss: 0.0055 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 14:02:46 2022 ] Eval epoch: 138 +[ Thu Sep 15 14:03:23 2022 ] Mean test loss of 296 batches: 1.3850427865982056. 
+[ Thu Sep 15 14:03:23 2022 ] Top1: 70.23% +[ Thu Sep 15 14:03:23 2022 ] Top5: 93.52% +[ Thu Sep 15 14:03:23 2022 ] Training epoch: 139 +[ Thu Sep 15 14:03:45 2022 ] Batch(25/123) done. Loss: 0.0363 lr:0.000100 network_time: 0.0314 +[ Thu Sep 15 14:04:56 2022 ] Eval epoch: 139 +[ Thu Sep 15 14:05:33 2022 ] Mean test loss of 296 batches: 1.7598429918289185. +[ Thu Sep 15 14:05:33 2022 ] Top1: 64.46% +[ Thu Sep 15 14:05:33 2022 ] Top5: 91.52% +[ Thu Sep 15 14:05:33 2022 ] Training epoch: 140 +[ Thu Sep 15 14:05:38 2022 ] Batch(2/123) done. Loss: 0.0020 lr:0.000100 network_time: 0.0359 +[ Thu Sep 15 14:06:51 2022 ] Batch(102/123) done. Loss: 0.0357 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 14:07:06 2022 ] Eval epoch: 140 +[ Thu Sep 15 14:07:43 2022 ] Mean test loss of 296 batches: 1.5276402235031128. +[ Thu Sep 15 14:07:43 2022 ] Top1: 68.23% +[ Thu Sep 15 14:07:43 2022 ] Top5: 92.48% diff --git a/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_motion_xview/shift_gcn.py b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_motion_xview/shift_gcn.py new file mode 100644 index 0000000000000000000000000000000000000000..0731e82fdb8bd0ae2e4c4ef4f29f7aab0c458bf4 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_motion_xview/shift_gcn.py @@ -0,0 +1,216 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math + +import sys +sys.path.append("./model/Temporal_shift/") + +from cuda.shift import Shift + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, 
stride=1): + super(tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class Shift_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(Shift_tcn, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + self.bn = nn.BatchNorm2d(in_channels) + self.bn2 = nn.BatchNorm2d(in_channels) + bn_init(self.bn2, 1) + self.relu = nn.ReLU(inplace=True) + self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1) + self.shift_out = Shift(channel=out_channels, stride=stride, init_scale=1) + + self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1) + nn.init.kaiming_normal(self.temporal_linear.weight, mode='fan_out') + + def forward(self, x): + x = self.bn(x) + # shift1 + x = self.shift_in(x) + x = self.temporal_linear(x) + x = self.relu(x) + # shift2 + x = self.shift_out(x) + x = self.bn2(x) + return x + + +class Shift_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3): + super(Shift_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0,math.sqrt(1.0/out_channels)) + + self.Linear_bias = nn.Parameter(torch.zeros(1,1,out_channels,requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Linear_bias, 0) + + self.Feature_Mask = 
nn.Parameter(torch.ones(1,25,in_channels, requires_grad=True,device='cuda'),requires_grad=True) + nn.init.constant(self.Feature_Mask, 0) + + self.bn = nn.BatchNorm1d(25*out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + + index_array = np.empty(25*in_channels).astype(np.int) + for i in range(25): + for j in range(in_channels): + index_array[i*in_channels + j] = (i*in_channels + j + j*in_channels)%(in_channels*25) + self.shift_in = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + index_array = np.empty(25*out_channels).astype(np.int) + for i in range(25): + for j in range(out_channels): + index_array[i*out_channels + j] = (i*out_channels + j - j*out_channels)%(out_channels*25) + self.shift_out = nn.Parameter(torch.from_numpy(index_array),requires_grad=False) + + + def forward(self, x0): + n, c, t, v = x0.size() + x = x0.permute(0,2,3,1).contiguous() + + # shift1 + x = x.view(n*t,v*c) + x = torch.index_select(x, 1, self.shift_in) + x = x.view(n*t,v,c) + x = x * (torch.tanh(self.Feature_Mask)+1) + + x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous() # nt,v,c + x = x + self.Linear_bias + + # shift2 + x = x.view(n*t,-1) + x = torch.index_select(x, 1, self.shift_out) + x = self.bn(x) + x = x.view(n,t,v,self.out_channels).permute(0,3,1,2) # n,c,t,v + + x = x + self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, stride=1, residual=True): + super(TCN_GCN_unit, self).__init__() + self.gcn1 = Shift_gcn(in_channels, out_channels, A) + self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride) + self.relu = nn.ReLU() + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + else: + self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride) + 
+ def forward(self, x): + x = self.tcn1(self.gcn1(x)) + self.residual(x) + return self.relu(x) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(3, 64, A, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A) + self.l3 = TCN_GCN_unit(64, 64, A) + self.l4 = TCN_GCN_unit(64, 64, A) + self.l5 = TCN_GCN_unit(64, 128, A, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A) + self.l7 = TCN_GCN_unit(128, 128, A) + self.l8 = TCN_GCN_unit(128, 256, A, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A) + self.l10 = TCN_GCN_unit(256, 256, A) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x): + N, C, T, V, M = x.size() + + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x) + x = self.l2(x) + x = self.l3(x) + x = self.l4(x) + x = self.l5(x) + x = self.l6(x) + x = self.l7(x) + x = self.l8(x) + x = self.l9(x) + x = self.l10(x) + + # N*M,C,T,V + c_new = x.size(1) + x = x.view(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_xview/config.yaml b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_xview/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0f128f4c7a952a3d746ec83e526d2d3de054fbeb --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_xview/config.yaml @@ -0,0 +1,56 @@ +Experiment_name: ntu_ShiftGCN_joint_xview +base_lr: 0.1 +batch_size: 64 +config: 
./config/nturgbd-cross-view/train_joint.yaml +device: +- 4 +- 5 +eval_interval: 5 +feeder: feeders.feeder.Feeder +ignore_weights: [] +log_interval: 100 +model: model.shift_gcn.Model +model_args: + graph: graph.ntu_rgb_d.Graph + graph_args: + labeling_mode: spatial + num_class: 60 + num_person: 2 + num_point: 25 +model_saved_name: ./save_models/ntu_ShiftGCN_joint_xview +nesterov: true +num_epoch: 140 +num_worker: 32 +only_train_epoch: 1 +only_train_part: true +optimizer: SGD +phase: train +print_log: true +save_interval: 2 +save_score: false +seed: 1 +show_topk: +- 1 +- 5 +start_epoch: 0 +step: +- 60 +- 80 +- 100 +test_batch_size: 64 +test_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_joint.npy + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl +train_feeder_args: + data_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_joint.npy + debug: false + label_path: /data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl + normalization: false + random_choose: false + random_move: false + random_shift: false + window_size: -1 +warm_up_epoch: 0 +weight_decay: 0.0001 +weights: null +work_dir: ./work_dir/ntu_ShiftGCN_joint_xview diff --git a/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_xview/eval_results/best_acc.pkl b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_xview/eval_results/best_acc.pkl new file mode 100644 index 0000000000000000000000000000000000000000..aa5784706457831d37b278d02b4311cd31fdea00 --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_xview/eval_results/best_acc.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f006343b67531929375512c89416586b1f5aea9668adf5ac77bc3b6dec9e1b2 +size 5718404 diff --git a/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_xview/log.txt b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_xview/log.txt new file mode 100644 index 
0000000000000000000000000000000000000000..dd686864e455f21d86a55a7bac43b241fcc57fdb --- /dev/null +++ b/ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_xview/log.txt @@ -0,0 +1,875 @@ +[ Thu Sep 15 09:04:06 2022 ] Parameters: +{'work_dir': './work_dir/ntu_ShiftGCN_joint_xview', 'model_saved_name': './save_models/ntu_ShiftGCN_joint_xview', 'Experiment_name': 'ntu_ShiftGCN_joint_xview', 'config': './config/nturgbd-cross-view/train_joint.yaml', 'phase': 'train', 'save_score': False, 'seed': 1, 'log_interval': 100, 'save_interval': 2, 'eval_interval': 5, 'print_log': True, 'show_topk': [1, 5], 'feeder': 'feeders.feeder.Feeder', 'num_worker': 32, 'train_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/train_label.pkl', 'debug': False, 'random_choose': False, 'random_shift': False, 'random_move': False, 'window_size': -1, 'normalization': False}, 'test_feeder_args': {'data_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_data_joint.npy', 'label_path': '/data/lhd/long_tailed_skeleton_data/MS-G3D-data/ntu/xview/val_label.pkl'}, 'model': 'model.shift_gcn.Model', 'model_args': {'num_class': 60, 'num_point': 25, 'num_person': 2, 'graph': 'graph.ntu_rgb_d.Graph', 'graph_args': {'labeling_mode': 'spatial'}}, 'weights': None, 'ignore_weights': [], 'base_lr': 0.1, 'step': [60, 80, 100], 'device': [4, 5], 'optimizer': 'SGD', 'nesterov': True, 'batch_size': 64, 'test_batch_size': 64, 'start_epoch': 0, 'num_epoch': 140, 'weight_decay': 0.0001, 'only_train_part': True, 'only_train_epoch': 1, 'warm_up_epoch': 0} + +[ Thu Sep 15 09:04:06 2022 ] Training epoch: 1 +[ Thu Sep 15 09:05:24 2022 ] Batch(99/123) done. Loss: 2.4710 lr:0.100000 network_time: 0.0264 +[ Thu Sep 15 09:05:41 2022 ] Eval epoch: 1 +[ Thu Sep 15 09:06:18 2022 ] Mean test loss of 296 batches: 4.5494608879089355. 
+[ Thu Sep 15 09:06:18 2022 ] Top1: 13.94% +[ Thu Sep 15 09:06:18 2022 ] Top5: 41.49% +[ Thu Sep 15 09:06:18 2022 ] Training epoch: 2 +[ Thu Sep 15 09:07:17 2022 ] Batch(76/123) done. Loss: 2.0748 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 09:07:51 2022 ] Eval epoch: 2 +[ Thu Sep 15 09:08:28 2022 ] Mean test loss of 296 batches: 3.793429374694824. +[ Thu Sep 15 09:08:28 2022 ] Top1: 23.36% +[ Thu Sep 15 09:08:28 2022 ] Top5: 52.06% +[ Thu Sep 15 09:08:28 2022 ] Training epoch: 3 +[ Thu Sep 15 09:09:10 2022 ] Batch(53/123) done. Loss: 2.3235 lr:0.100000 network_time: 0.0275 +[ Thu Sep 15 09:10:01 2022 ] Eval epoch: 3 +[ Thu Sep 15 09:10:37 2022 ] Mean test loss of 296 batches: 2.9083969593048096. +[ Thu Sep 15 09:10:37 2022 ] Top1: 28.67% +[ Thu Sep 15 09:10:37 2022 ] Top5: 63.46% +[ Thu Sep 15 09:10:37 2022 ] Training epoch: 4 +[ Thu Sep 15 09:11:03 2022 ] Batch(30/123) done. Loss: 1.4093 lr:0.100000 network_time: 0.0301 +[ Thu Sep 15 09:12:10 2022 ] Eval epoch: 4 +[ Thu Sep 15 09:12:46 2022 ] Mean test loss of 296 batches: 2.6307458877563477. +[ Thu Sep 15 09:12:47 2022 ] Top1: 32.71% +[ Thu Sep 15 09:12:47 2022 ] Top5: 69.74% +[ Thu Sep 15 09:12:47 2022 ] Training epoch: 5 +[ Thu Sep 15 09:12:55 2022 ] Batch(7/123) done. Loss: 1.6495 lr:0.100000 network_time: 0.0299 +[ Thu Sep 15 09:14:08 2022 ] Batch(107/123) done. Loss: 1.4556 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 09:14:20 2022 ] Eval epoch: 5 +[ Thu Sep 15 09:14:56 2022 ] Mean test loss of 296 batches: 2.3194708824157715. +[ Thu Sep 15 09:14:56 2022 ] Top1: 36.92% +[ Thu Sep 15 09:14:56 2022 ] Top5: 75.58% +[ Thu Sep 15 09:14:56 2022 ] Training epoch: 6 +[ Thu Sep 15 09:16:01 2022 ] Batch(84/123) done. Loss: 1.6715 lr:0.100000 network_time: 0.0285 +[ Thu Sep 15 09:16:30 2022 ] Eval epoch: 6 +[ Thu Sep 15 09:17:06 2022 ] Mean test loss of 296 batches: 2.2074134349823. 
+[ Thu Sep 15 09:17:06 2022 ] Top1: 39.45% +[ Thu Sep 15 09:17:06 2022 ] Top5: 76.71% +[ Thu Sep 15 09:17:06 2022 ] Training epoch: 7 +[ Thu Sep 15 09:17:54 2022 ] Batch(61/123) done. Loss: 0.9604 lr:0.100000 network_time: 0.0263 +[ Thu Sep 15 09:18:39 2022 ] Eval epoch: 7 +[ Thu Sep 15 09:19:15 2022 ] Mean test loss of 296 batches: 2.154940128326416. +[ Thu Sep 15 09:19:16 2022 ] Top1: 43.12% +[ Thu Sep 15 09:19:16 2022 ] Top5: 80.07% +[ Thu Sep 15 09:19:16 2022 ] Training epoch: 8 +[ Thu Sep 15 09:19:47 2022 ] Batch(38/123) done. Loss: 1.1079 lr:0.100000 network_time: 0.0286 +[ Thu Sep 15 09:20:49 2022 ] Eval epoch: 8 +[ Thu Sep 15 09:21:25 2022 ] Mean test loss of 296 batches: 1.9786752462387085. +[ Thu Sep 15 09:21:25 2022 ] Top1: 45.31% +[ Thu Sep 15 09:21:25 2022 ] Top5: 83.43% +[ Thu Sep 15 09:21:25 2022 ] Training epoch: 9 +[ Thu Sep 15 09:21:40 2022 ] Batch(15/123) done. Loss: 1.1296 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 09:22:53 2022 ] Batch(115/123) done. Loss: 1.3751 lr:0.100000 network_time: 0.0343 +[ Thu Sep 15 09:22:58 2022 ] Eval epoch: 9 +[ Thu Sep 15 09:23:34 2022 ] Mean test loss of 296 batches: 2.078497886657715. +[ Thu Sep 15 09:23:34 2022 ] Top1: 44.35% +[ Thu Sep 15 09:23:35 2022 ] Top5: 80.46% +[ Thu Sep 15 09:23:35 2022 ] Training epoch: 10 +[ Thu Sep 15 09:24:46 2022 ] Batch(92/123) done. Loss: 1.0879 lr:0.100000 network_time: 0.0275 +[ Thu Sep 15 09:25:08 2022 ] Eval epoch: 10 +[ Thu Sep 15 09:25:45 2022 ] Mean test loss of 296 batches: 1.9804260730743408. +[ Thu Sep 15 09:25:45 2022 ] Top1: 48.89% +[ Thu Sep 15 09:25:45 2022 ] Top5: 84.91% +[ Thu Sep 15 09:25:45 2022 ] Training epoch: 11 +[ Thu Sep 15 09:26:39 2022 ] Batch(69/123) done. Loss: 1.0778 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 09:27:18 2022 ] Eval epoch: 11 +[ Thu Sep 15 09:27:54 2022 ] Mean test loss of 296 batches: 1.7119996547698975. 
+[ Thu Sep 15 09:27:54 2022 ] Top1: 50.82% +[ Thu Sep 15 09:27:54 2022 ] Top5: 86.51% +[ Thu Sep 15 09:27:54 2022 ] Training epoch: 12 +[ Thu Sep 15 09:28:32 2022 ] Batch(46/123) done. Loss: 0.9247 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 09:29:27 2022 ] Eval epoch: 12 +[ Thu Sep 15 09:30:04 2022 ] Mean test loss of 296 batches: 1.580631136894226. +[ Thu Sep 15 09:30:04 2022 ] Top1: 53.48% +[ Thu Sep 15 09:30:04 2022 ] Top5: 87.62% +[ Thu Sep 15 09:30:04 2022 ] Training epoch: 13 +[ Thu Sep 15 09:30:24 2022 ] Batch(23/123) done. Loss: 0.9638 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 09:31:37 2022 ] Eval epoch: 13 +[ Thu Sep 15 09:32:13 2022 ] Mean test loss of 296 batches: 2.1848692893981934. +[ Thu Sep 15 09:32:14 2022 ] Top1: 47.56% +[ Thu Sep 15 09:32:14 2022 ] Top5: 83.97% +[ Thu Sep 15 09:32:14 2022 ] Training epoch: 14 +[ Thu Sep 15 09:32:17 2022 ] Batch(0/123) done. Loss: 0.7401 lr:0.100000 network_time: 0.0418 +[ Thu Sep 15 09:33:30 2022 ] Batch(100/123) done. Loss: 0.8434 lr:0.100000 network_time: 0.0295 +[ Thu Sep 15 09:33:47 2022 ] Eval epoch: 14 +[ Thu Sep 15 09:34:23 2022 ] Mean test loss of 296 batches: 2.22471284866333. +[ Thu Sep 15 09:34:23 2022 ] Top1: 49.05% +[ Thu Sep 15 09:34:23 2022 ] Top5: 82.84% +[ Thu Sep 15 09:34:23 2022 ] Training epoch: 15 +[ Thu Sep 15 09:35:23 2022 ] Batch(77/123) done. Loss: 0.8124 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 09:35:56 2022 ] Eval epoch: 15 +[ Thu Sep 15 09:36:32 2022 ] Mean test loss of 296 batches: 1.5644292831420898. +[ Thu Sep 15 09:36:32 2022 ] Top1: 57.00% +[ Thu Sep 15 09:36:32 2022 ] Top5: 88.16% +[ Thu Sep 15 09:36:32 2022 ] Training epoch: 16 +[ Thu Sep 15 09:37:16 2022 ] Batch(54/123) done. Loss: 0.7071 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 09:38:05 2022 ] Eval epoch: 16 +[ Thu Sep 15 09:38:42 2022 ] Mean test loss of 296 batches: 1.570395827293396. 
+[ Thu Sep 15 09:38:43 2022 ] Top1: 56.85% +[ Thu Sep 15 09:38:43 2022 ] Top5: 89.48% +[ Thu Sep 15 09:38:43 2022 ] Training epoch: 17 +[ Thu Sep 15 09:39:09 2022 ] Batch(31/123) done. Loss: 0.4795 lr:0.100000 network_time: 0.0317 +[ Thu Sep 15 09:40:16 2022 ] Eval epoch: 17 +[ Thu Sep 15 09:40:52 2022 ] Mean test loss of 296 batches: 1.7061970233917236. +[ Thu Sep 15 09:40:52 2022 ] Top1: 56.53% +[ Thu Sep 15 09:40:53 2022 ] Top5: 88.44% +[ Thu Sep 15 09:40:53 2022 ] Training epoch: 18 +[ Thu Sep 15 09:41:02 2022 ] Batch(8/123) done. Loss: 0.3583 lr:0.100000 network_time: 0.0303 +[ Thu Sep 15 09:42:15 2022 ] Batch(108/123) done. Loss: 0.7888 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 09:42:26 2022 ] Eval epoch: 18 +[ Thu Sep 15 09:43:02 2022 ] Mean test loss of 296 batches: 1.7308231592178345. +[ Thu Sep 15 09:43:02 2022 ] Top1: 56.21% +[ Thu Sep 15 09:43:02 2022 ] Top5: 87.89% +[ Thu Sep 15 09:43:02 2022 ] Training epoch: 19 +[ Thu Sep 15 09:44:08 2022 ] Batch(85/123) done. Loss: 0.7148 lr:0.100000 network_time: 0.0301 +[ Thu Sep 15 09:44:35 2022 ] Eval epoch: 19 +[ Thu Sep 15 09:45:12 2022 ] Mean test loss of 296 batches: 1.6061136722564697. +[ Thu Sep 15 09:45:12 2022 ] Top1: 58.27% +[ Thu Sep 15 09:45:12 2022 ] Top5: 88.89% +[ Thu Sep 15 09:45:12 2022 ] Training epoch: 20 +[ Thu Sep 15 09:46:01 2022 ] Batch(62/123) done. Loss: 0.6098 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 09:46:45 2022 ] Eval epoch: 20 +[ Thu Sep 15 09:47:22 2022 ] Mean test loss of 296 batches: 1.3890010118484497. +[ Thu Sep 15 09:47:22 2022 ] Top1: 60.65% +[ Thu Sep 15 09:47:22 2022 ] Top5: 91.32% +[ Thu Sep 15 09:47:22 2022 ] Training epoch: 21 +[ Thu Sep 15 09:47:54 2022 ] Batch(39/123) done. Loss: 0.4251 lr:0.100000 network_time: 0.0309 +[ Thu Sep 15 09:48:55 2022 ] Eval epoch: 21 +[ Thu Sep 15 09:49:32 2022 ] Mean test loss of 296 batches: 1.7081384658813477. 
+[ Thu Sep 15 09:49:32 2022 ] Top1: 56.28% +[ Thu Sep 15 09:49:32 2022 ] Top5: 88.57% +[ Thu Sep 15 09:49:32 2022 ] Training epoch: 22 +[ Thu Sep 15 09:49:47 2022 ] Batch(16/123) done. Loss: 0.7831 lr:0.100000 network_time: 0.0275 +[ Thu Sep 15 09:51:00 2022 ] Batch(116/123) done. Loss: 0.8177 lr:0.100000 network_time: 0.0282 +[ Thu Sep 15 09:51:05 2022 ] Eval epoch: 22 +[ Thu Sep 15 09:51:42 2022 ] Mean test loss of 296 batches: 1.4028172492980957. +[ Thu Sep 15 09:51:42 2022 ] Top1: 62.32% +[ Thu Sep 15 09:51:42 2022 ] Top5: 90.79% +[ Thu Sep 15 09:51:42 2022 ] Training epoch: 23 +[ Thu Sep 15 09:52:53 2022 ] Batch(93/123) done. Loss: 0.3823 lr:0.100000 network_time: 0.0301 +[ Thu Sep 15 09:53:15 2022 ] Eval epoch: 23 +[ Thu Sep 15 09:53:51 2022 ] Mean test loss of 296 batches: 1.325274109840393. +[ Thu Sep 15 09:53:51 2022 ] Top1: 63.32% +[ Thu Sep 15 09:53:51 2022 ] Top5: 92.03% +[ Thu Sep 15 09:53:51 2022 ] Training epoch: 24 +[ Thu Sep 15 09:54:46 2022 ] Batch(70/123) done. Loss: 0.5978 lr:0.100000 network_time: 0.0313 +[ Thu Sep 15 09:55:25 2022 ] Eval epoch: 24 +[ Thu Sep 15 09:56:02 2022 ] Mean test loss of 296 batches: 1.606492280960083. +[ Thu Sep 15 09:56:02 2022 ] Top1: 57.72% +[ Thu Sep 15 09:56:02 2022 ] Top5: 90.43% +[ Thu Sep 15 09:56:02 2022 ] Training epoch: 25 +[ Thu Sep 15 09:56:40 2022 ] Batch(47/123) done. Loss: 0.5708 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 09:57:35 2022 ] Eval epoch: 25 +[ Thu Sep 15 09:58:12 2022 ] Mean test loss of 296 batches: 1.8277171850204468. +[ Thu Sep 15 09:58:12 2022 ] Top1: 53.95% +[ Thu Sep 15 09:58:12 2022 ] Top5: 86.96% +[ Thu Sep 15 09:58:12 2022 ] Training epoch: 26 +[ Thu Sep 15 09:58:33 2022 ] Batch(24/123) done. Loss: 0.3502 lr:0.100000 network_time: 0.0279 +[ Thu Sep 15 09:59:45 2022 ] Eval epoch: 26 +[ Thu Sep 15 10:00:22 2022 ] Mean test loss of 296 batches: 1.806972622871399. 
+[ Thu Sep 15 10:00:22 2022 ] Top1: 57.47% +[ Thu Sep 15 10:00:22 2022 ] Top5: 88.36% +[ Thu Sep 15 10:00:22 2022 ] Training epoch: 27 +[ Thu Sep 15 10:00:27 2022 ] Batch(1/123) done. Loss: 0.4201 lr:0.100000 network_time: 0.0323 +[ Thu Sep 15 10:01:40 2022 ] Batch(101/123) done. Loss: 0.4634 lr:0.100000 network_time: 0.0266 +[ Thu Sep 15 10:01:55 2022 ] Eval epoch: 27 +[ Thu Sep 15 10:02:32 2022 ] Mean test loss of 296 batches: 1.500139832496643. +[ Thu Sep 15 10:02:32 2022 ] Top1: 61.13% +[ Thu Sep 15 10:02:32 2022 ] Top5: 91.16% +[ Thu Sep 15 10:02:32 2022 ] Training epoch: 28 +[ Thu Sep 15 10:03:33 2022 ] Batch(78/123) done. Loss: 0.3999 lr:0.100000 network_time: 0.0253 +[ Thu Sep 15 10:04:06 2022 ] Eval epoch: 28 +[ Thu Sep 15 10:04:42 2022 ] Mean test loss of 296 batches: 1.5600544214248657. +[ Thu Sep 15 10:04:42 2022 ] Top1: 60.61% +[ Thu Sep 15 10:04:42 2022 ] Top5: 91.09% +[ Thu Sep 15 10:04:42 2022 ] Training epoch: 29 +[ Thu Sep 15 10:05:26 2022 ] Batch(55/123) done. Loss: 0.3269 lr:0.100000 network_time: 0.0268 +[ Thu Sep 15 10:06:16 2022 ] Eval epoch: 29 +[ Thu Sep 15 10:06:53 2022 ] Mean test loss of 296 batches: 1.700828194618225. +[ Thu Sep 15 10:06:53 2022 ] Top1: 58.58% +[ Thu Sep 15 10:06:53 2022 ] Top5: 89.62% +[ Thu Sep 15 10:06:53 2022 ] Training epoch: 30 +[ Thu Sep 15 10:07:20 2022 ] Batch(32/123) done. Loss: 0.2925 lr:0.100000 network_time: 0.0275 +[ Thu Sep 15 10:08:26 2022 ] Eval epoch: 30 +[ Thu Sep 15 10:09:03 2022 ] Mean test loss of 296 batches: 1.8058421611785889. +[ Thu Sep 15 10:09:03 2022 ] Top1: 58.60% +[ Thu Sep 15 10:09:03 2022 ] Top5: 89.45% +[ Thu Sep 15 10:09:03 2022 ] Training epoch: 31 +[ Thu Sep 15 10:09:13 2022 ] Batch(9/123) done. Loss: 0.4014 lr:0.100000 network_time: 0.0338 +[ Thu Sep 15 10:10:27 2022 ] Batch(109/123) done. Loss: 0.3556 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 10:10:36 2022 ] Eval epoch: 31 +[ Thu Sep 15 10:11:13 2022 ] Mean test loss of 296 batches: 2.189121961593628. 
+[ Thu Sep 15 10:11:13 2022 ] Top1: 58.31% +[ Thu Sep 15 10:11:14 2022 ] Top5: 88.21% +[ Thu Sep 15 10:11:14 2022 ] Training epoch: 32 +[ Thu Sep 15 10:12:20 2022 ] Batch(86/123) done. Loss: 0.3996 lr:0.100000 network_time: 0.0259 +[ Thu Sep 15 10:12:47 2022 ] Eval epoch: 32 +[ Thu Sep 15 10:13:23 2022 ] Mean test loss of 296 batches: 1.7273542881011963. +[ Thu Sep 15 10:13:23 2022 ] Top1: 60.12% +[ Thu Sep 15 10:13:23 2022 ] Top5: 89.47% +[ Thu Sep 15 10:13:23 2022 ] Training epoch: 33 +[ Thu Sep 15 10:14:13 2022 ] Batch(63/123) done. Loss: 0.4219 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 10:14:57 2022 ] Eval epoch: 33 +[ Thu Sep 15 10:15:33 2022 ] Mean test loss of 296 batches: 1.6284067630767822. +[ Thu Sep 15 10:15:33 2022 ] Top1: 60.47% +[ Thu Sep 15 10:15:33 2022 ] Top5: 90.42% +[ Thu Sep 15 10:15:33 2022 ] Training epoch: 34 +[ Thu Sep 15 10:16:06 2022 ] Batch(40/123) done. Loss: 0.2914 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 10:17:07 2022 ] Eval epoch: 34 +[ Thu Sep 15 10:17:43 2022 ] Mean test loss of 296 batches: 1.6068403720855713. +[ Thu Sep 15 10:17:43 2022 ] Top1: 61.50% +[ Thu Sep 15 10:17:43 2022 ] Top5: 90.37% +[ Thu Sep 15 10:17:44 2022 ] Training epoch: 35 +[ Thu Sep 15 10:18:00 2022 ] Batch(17/123) done. Loss: 0.4441 lr:0.100000 network_time: 0.0307 +[ Thu Sep 15 10:19:13 2022 ] Batch(117/123) done. Loss: 0.4526 lr:0.100000 network_time: 0.0263 +[ Thu Sep 15 10:19:17 2022 ] Eval epoch: 35 +[ Thu Sep 15 10:19:54 2022 ] Mean test loss of 296 batches: 1.449270486831665. +[ Thu Sep 15 10:19:54 2022 ] Top1: 63.68% +[ Thu Sep 15 10:19:54 2022 ] Top5: 91.47% +[ Thu Sep 15 10:19:54 2022 ] Training epoch: 36 +[ Thu Sep 15 10:21:06 2022 ] Batch(94/123) done. Loss: 0.2393 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 10:21:27 2022 ] Eval epoch: 36 +[ Thu Sep 15 10:22:04 2022 ] Mean test loss of 296 batches: 1.6495709419250488. 
+[ Thu Sep 15 10:22:04 2022 ] Top1: 62.72% +[ Thu Sep 15 10:22:04 2022 ] Top5: 91.21% +[ Thu Sep 15 10:22:04 2022 ] Training epoch: 37 +[ Thu Sep 15 10:23:00 2022 ] Batch(71/123) done. Loss: 0.3236 lr:0.100000 network_time: 0.0272 +[ Thu Sep 15 10:23:37 2022 ] Eval epoch: 37 +[ Thu Sep 15 10:24:14 2022 ] Mean test loss of 296 batches: 1.7332868576049805. +[ Thu Sep 15 10:24:14 2022 ] Top1: 61.78% +[ Thu Sep 15 10:24:14 2022 ] Top5: 90.24% +[ Thu Sep 15 10:24:14 2022 ] Training epoch: 38 +[ Thu Sep 15 10:24:53 2022 ] Batch(48/123) done. Loss: 0.3468 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 10:25:48 2022 ] Eval epoch: 38 +[ Thu Sep 15 10:26:24 2022 ] Mean test loss of 296 batches: 1.4512298107147217. +[ Thu Sep 15 10:26:24 2022 ] Top1: 65.08% +[ Thu Sep 15 10:26:25 2022 ] Top5: 92.43% +[ Thu Sep 15 10:26:25 2022 ] Training epoch: 39 +[ Thu Sep 15 10:26:47 2022 ] Batch(25/123) done. Loss: 0.3424 lr:0.100000 network_time: 0.0263 +[ Thu Sep 15 10:27:58 2022 ] Eval epoch: 39 +[ Thu Sep 15 10:28:34 2022 ] Mean test loss of 296 batches: 1.6204822063446045. +[ Thu Sep 15 10:28:34 2022 ] Top1: 62.54% +[ Thu Sep 15 10:28:34 2022 ] Top5: 90.80% +[ Thu Sep 15 10:28:35 2022 ] Training epoch: 40 +[ Thu Sep 15 10:28:40 2022 ] Batch(2/123) done. Loss: 0.3034 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 10:29:53 2022 ] Batch(102/123) done. Loss: 0.3192 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 10:30:08 2022 ] Eval epoch: 40 +[ Thu Sep 15 10:30:44 2022 ] Mean test loss of 296 batches: 1.4410736560821533. +[ Thu Sep 15 10:30:44 2022 ] Top1: 66.62% +[ Thu Sep 15 10:30:44 2022 ] Top5: 91.79% +[ Thu Sep 15 10:30:44 2022 ] Training epoch: 41 +[ Thu Sep 15 10:31:46 2022 ] Batch(79/123) done. Loss: 0.2623 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 10:32:17 2022 ] Eval epoch: 41 +[ Thu Sep 15 10:32:53 2022 ] Mean test loss of 296 batches: 1.588296890258789. 
+[ Thu Sep 15 10:32:53 2022 ] Top1: 62.40% +[ Thu Sep 15 10:32:54 2022 ] Top5: 90.74% +[ Thu Sep 15 10:32:54 2022 ] Training epoch: 42 +[ Thu Sep 15 10:33:38 2022 ] Batch(56/123) done. Loss: 0.1636 lr:0.100000 network_time: 0.0397 +[ Thu Sep 15 10:34:27 2022 ] Eval epoch: 42 +[ Thu Sep 15 10:35:03 2022 ] Mean test loss of 296 batches: 1.521134853363037. +[ Thu Sep 15 10:35:03 2022 ] Top1: 65.15% +[ Thu Sep 15 10:35:03 2022 ] Top5: 91.62% +[ Thu Sep 15 10:35:03 2022 ] Training epoch: 43 +[ Thu Sep 15 10:35:31 2022 ] Batch(33/123) done. Loss: 0.0915 lr:0.100000 network_time: 0.0258 +[ Thu Sep 15 10:36:37 2022 ] Eval epoch: 43 +[ Thu Sep 15 10:37:14 2022 ] Mean test loss of 296 batches: 1.7537553310394287. +[ Thu Sep 15 10:37:14 2022 ] Top1: 60.21% +[ Thu Sep 15 10:37:14 2022 ] Top5: 89.51% +[ Thu Sep 15 10:37:14 2022 ] Training epoch: 44 +[ Thu Sep 15 10:37:25 2022 ] Batch(10/123) done. Loss: 0.2490 lr:0.100000 network_time: 0.0298 +[ Thu Sep 15 10:38:38 2022 ] Batch(110/123) done. Loss: 0.2769 lr:0.100000 network_time: 0.0256 +[ Thu Sep 15 10:38:47 2022 ] Eval epoch: 44 +[ Thu Sep 15 10:39:24 2022 ] Mean test loss of 296 batches: 1.8540397882461548. +[ Thu Sep 15 10:39:24 2022 ] Top1: 60.90% +[ Thu Sep 15 10:39:24 2022 ] Top5: 89.72% +[ Thu Sep 15 10:39:24 2022 ] Training epoch: 45 +[ Thu Sep 15 10:40:31 2022 ] Batch(87/123) done. Loss: 0.2806 lr:0.100000 network_time: 0.0263 +[ Thu Sep 15 10:40:57 2022 ] Eval epoch: 45 +[ Thu Sep 15 10:41:33 2022 ] Mean test loss of 296 batches: 1.5426244735717773. +[ Thu Sep 15 10:41:33 2022 ] Top1: 62.81% +[ Thu Sep 15 10:41:33 2022 ] Top5: 91.48% +[ Thu Sep 15 10:41:34 2022 ] Training epoch: 46 +[ Thu Sep 15 10:42:24 2022 ] Batch(64/123) done. Loss: 0.3137 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 10:43:06 2022 ] Eval epoch: 46 +[ Thu Sep 15 10:43:43 2022 ] Mean test loss of 296 batches: 1.4742553234100342. 
+[ Thu Sep 15 10:43:43 2022 ] Top1: 65.26% +[ Thu Sep 15 10:43:43 2022 ] Top5: 91.89% +[ Thu Sep 15 10:43:43 2022 ] Training epoch: 47 +[ Thu Sep 15 10:44:17 2022 ] Batch(41/123) done. Loss: 0.0989 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 10:45:16 2022 ] Eval epoch: 47 +[ Thu Sep 15 10:45:53 2022 ] Mean test loss of 296 batches: 1.3184282779693604. +[ Thu Sep 15 10:45:53 2022 ] Top1: 66.14% +[ Thu Sep 15 10:45:53 2022 ] Top5: 92.15% +[ Thu Sep 15 10:45:53 2022 ] Training epoch: 48 +[ Thu Sep 15 10:46:10 2022 ] Batch(18/123) done. Loss: 0.2938 lr:0.100000 network_time: 0.0289 +[ Thu Sep 15 10:47:23 2022 ] Batch(118/123) done. Loss: 0.3250 lr:0.100000 network_time: 0.0328 +[ Thu Sep 15 10:47:26 2022 ] Eval epoch: 48 +[ Thu Sep 15 10:48:03 2022 ] Mean test loss of 296 batches: 1.5897901058197021. +[ Thu Sep 15 10:48:03 2022 ] Top1: 62.58% +[ Thu Sep 15 10:48:03 2022 ] Top5: 89.61% +[ Thu Sep 15 10:48:03 2022 ] Training epoch: 49 +[ Thu Sep 15 10:49:17 2022 ] Batch(95/123) done. Loss: 0.1619 lr:0.100000 network_time: 0.0267 +[ Thu Sep 15 10:49:37 2022 ] Eval epoch: 49 +[ Thu Sep 15 10:50:13 2022 ] Mean test loss of 296 batches: 1.4348037242889404. +[ Thu Sep 15 10:50:13 2022 ] Top1: 63.71% +[ Thu Sep 15 10:50:13 2022 ] Top5: 91.36% +[ Thu Sep 15 10:50:13 2022 ] Training epoch: 50 +[ Thu Sep 15 10:51:10 2022 ] Batch(72/123) done. Loss: 0.2159 lr:0.100000 network_time: 0.0276 +[ Thu Sep 15 10:51:46 2022 ] Eval epoch: 50 +[ Thu Sep 15 10:52:23 2022 ] Mean test loss of 296 batches: 1.639190912246704. +[ Thu Sep 15 10:52:23 2022 ] Top1: 63.15% +[ Thu Sep 15 10:52:23 2022 ] Top5: 91.06% +[ Thu Sep 15 10:52:23 2022 ] Training epoch: 51 +[ Thu Sep 15 10:53:03 2022 ] Batch(49/123) done. Loss: 0.2818 lr:0.100000 network_time: 0.0265 +[ Thu Sep 15 10:53:56 2022 ] Eval epoch: 51 +[ Thu Sep 15 10:54:33 2022 ] Mean test loss of 296 batches: 1.5886286497116089. 
+[ Thu Sep 15 10:54:33 2022 ] Top1: 63.52% +[ Thu Sep 15 10:54:33 2022 ] Top5: 91.31% +[ Thu Sep 15 10:54:33 2022 ] Training epoch: 52 +[ Thu Sep 15 10:54:56 2022 ] Batch(26/123) done. Loss: 0.2218 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 10:56:06 2022 ] Eval epoch: 52 +[ Thu Sep 15 10:56:43 2022 ] Mean test loss of 296 batches: 1.4753092527389526. +[ Thu Sep 15 10:56:43 2022 ] Top1: 64.69% +[ Thu Sep 15 10:56:43 2022 ] Top5: 91.19% +[ Thu Sep 15 10:56:43 2022 ] Training epoch: 53 +[ Thu Sep 15 10:56:49 2022 ] Batch(3/123) done. Loss: 0.1650 lr:0.100000 network_time: 0.0438 +[ Thu Sep 15 10:58:02 2022 ] Batch(103/123) done. Loss: 0.3629 lr:0.100000 network_time: 0.0315 +[ Thu Sep 15 10:58:16 2022 ] Eval epoch: 53 +[ Thu Sep 15 10:58:53 2022 ] Mean test loss of 296 batches: 1.3907619714736938. +[ Thu Sep 15 10:58:53 2022 ] Top1: 66.63% +[ Thu Sep 15 10:58:53 2022 ] Top5: 91.94% +[ Thu Sep 15 10:58:53 2022 ] Training epoch: 54 +[ Thu Sep 15 10:59:56 2022 ] Batch(80/123) done. Loss: 0.2070 lr:0.100000 network_time: 0.0308 +[ Thu Sep 15 11:00:26 2022 ] Eval epoch: 54 +[ Thu Sep 15 11:01:03 2022 ] Mean test loss of 296 batches: 1.785662293434143. +[ Thu Sep 15 11:01:03 2022 ] Top1: 60.80% +[ Thu Sep 15 11:01:03 2022 ] Top5: 89.75% +[ Thu Sep 15 11:01:03 2022 ] Training epoch: 55 +[ Thu Sep 15 11:01:49 2022 ] Batch(57/123) done. Loss: 0.1848 lr:0.100000 network_time: 0.0270 +[ Thu Sep 15 11:02:36 2022 ] Eval epoch: 55 +[ Thu Sep 15 11:03:13 2022 ] Mean test loss of 296 batches: 1.7430919408798218. +[ Thu Sep 15 11:03:13 2022 ] Top1: 62.70% +[ Thu Sep 15 11:03:13 2022 ] Top5: 90.84% +[ Thu Sep 15 11:03:13 2022 ] Training epoch: 56 +[ Thu Sep 15 11:03:42 2022 ] Batch(34/123) done. Loss: 0.2585 lr:0.100000 network_time: 0.0271 +[ Thu Sep 15 11:04:47 2022 ] Eval epoch: 56 +[ Thu Sep 15 11:05:23 2022 ] Mean test loss of 296 batches: 1.6383695602416992. 
+[ Thu Sep 15 11:05:23 2022 ] Top1: 64.11% +[ Thu Sep 15 11:05:23 2022 ] Top5: 90.91% +[ Thu Sep 15 11:05:23 2022 ] Training epoch: 57 +[ Thu Sep 15 11:05:35 2022 ] Batch(11/123) done. Loss: 0.2636 lr:0.100000 network_time: 0.0274 +[ Thu Sep 15 11:06:48 2022 ] Batch(111/123) done. Loss: 0.3122 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 11:06:56 2022 ] Eval epoch: 57 +[ Thu Sep 15 11:07:33 2022 ] Mean test loss of 296 batches: 2.0154056549072266. +[ Thu Sep 15 11:07:33 2022 ] Top1: 57.01% +[ Thu Sep 15 11:07:33 2022 ] Top5: 87.83% +[ Thu Sep 15 11:07:33 2022 ] Training epoch: 58 +[ Thu Sep 15 11:08:42 2022 ] Batch(88/123) done. Loss: 0.3303 lr:0.100000 network_time: 0.0269 +[ Thu Sep 15 11:09:07 2022 ] Eval epoch: 58 +[ Thu Sep 15 11:09:44 2022 ] Mean test loss of 296 batches: 1.760401964187622. +[ Thu Sep 15 11:09:44 2022 ] Top1: 60.98% +[ Thu Sep 15 11:09:44 2022 ] Top5: 90.10% +[ Thu Sep 15 11:09:44 2022 ] Training epoch: 59 +[ Thu Sep 15 11:10:35 2022 ] Batch(65/123) done. Loss: 0.2060 lr:0.100000 network_time: 0.0273 +[ Thu Sep 15 11:11:17 2022 ] Eval epoch: 59 +[ Thu Sep 15 11:11:53 2022 ] Mean test loss of 296 batches: 1.578343391418457. +[ Thu Sep 15 11:11:53 2022 ] Top1: 65.24% +[ Thu Sep 15 11:11:54 2022 ] Top5: 91.88% +[ Thu Sep 15 11:11:54 2022 ] Training epoch: 60 +[ Thu Sep 15 11:12:28 2022 ] Batch(42/123) done. Loss: 0.1515 lr:0.100000 network_time: 0.0277 +[ Thu Sep 15 11:13:27 2022 ] Eval epoch: 60 +[ Thu Sep 15 11:14:04 2022 ] Mean test loss of 296 batches: 1.587172269821167. +[ Thu Sep 15 11:14:04 2022 ] Top1: 62.81% +[ Thu Sep 15 11:14:04 2022 ] Top5: 90.99% +[ Thu Sep 15 11:14:04 2022 ] Training epoch: 61 +[ Thu Sep 15 11:14:22 2022 ] Batch(19/123) done. Loss: 0.1495 lr:0.010000 network_time: 0.0282 +[ Thu Sep 15 11:15:35 2022 ] Batch(119/123) done. Loss: 0.0730 lr:0.010000 network_time: 0.0275 +[ Thu Sep 15 11:15:37 2022 ] Eval epoch: 61 +[ Thu Sep 15 11:16:14 2022 ] Mean test loss of 296 batches: 1.233351707458496. 
+[ Thu Sep 15 11:16:15 2022 ] Top1: 70.57% +[ Thu Sep 15 11:16:15 2022 ] Top5: 93.47% +[ Thu Sep 15 11:16:15 2022 ] Training epoch: 62 +[ Thu Sep 15 11:17:29 2022 ] Batch(96/123) done. Loss: 0.0536 lr:0.010000 network_time: 0.0282 +[ Thu Sep 15 11:17:48 2022 ] Eval epoch: 62 +[ Thu Sep 15 11:18:25 2022 ] Mean test loss of 296 batches: 1.2128609418869019. +[ Thu Sep 15 11:18:25 2022 ] Top1: 71.18% +[ Thu Sep 15 11:18:25 2022 ] Top5: 93.84% +[ Thu Sep 15 11:18:25 2022 ] Training epoch: 63 +[ Thu Sep 15 11:19:22 2022 ] Batch(73/123) done. Loss: 0.0494 lr:0.010000 network_time: 0.0472 +[ Thu Sep 15 11:19:58 2022 ] Eval epoch: 63 +[ Thu Sep 15 11:20:35 2022 ] Mean test loss of 296 batches: 1.2522099018096924. +[ Thu Sep 15 11:20:35 2022 ] Top1: 70.76% +[ Thu Sep 15 11:20:35 2022 ] Top5: 93.66% +[ Thu Sep 15 11:20:35 2022 ] Training epoch: 64 +[ Thu Sep 15 11:21:16 2022 ] Batch(50/123) done. Loss: 0.0088 lr:0.010000 network_time: 0.0261 +[ Thu Sep 15 11:22:09 2022 ] Eval epoch: 64 +[ Thu Sep 15 11:22:45 2022 ] Mean test loss of 296 batches: 1.250813603401184. +[ Thu Sep 15 11:22:45 2022 ] Top1: 70.68% +[ Thu Sep 15 11:22:46 2022 ] Top5: 93.65% +[ Thu Sep 15 11:22:46 2022 ] Training epoch: 65 +[ Thu Sep 15 11:23:09 2022 ] Batch(27/123) done. Loss: 0.0179 lr:0.010000 network_time: 0.0273 +[ Thu Sep 15 11:24:19 2022 ] Eval epoch: 65 +[ Thu Sep 15 11:24:56 2022 ] Mean test loss of 296 batches: 1.2324490547180176. +[ Thu Sep 15 11:24:56 2022 ] Top1: 71.20% +[ Thu Sep 15 11:24:56 2022 ] Top5: 93.75% +[ Thu Sep 15 11:24:56 2022 ] Training epoch: 66 +[ Thu Sep 15 11:25:03 2022 ] Batch(4/123) done. Loss: 0.0108 lr:0.010000 network_time: 0.0320 +[ Thu Sep 15 11:26:16 2022 ] Batch(104/123) done. Loss: 0.0098 lr:0.010000 network_time: 0.0267 +[ Thu Sep 15 11:26:29 2022 ] Eval epoch: 66 +[ Thu Sep 15 11:27:06 2022 ] Mean test loss of 296 batches: 1.2438300848007202. 
+[ Thu Sep 15 11:27:06 2022 ] Top1: 71.20% +[ Thu Sep 15 11:27:06 2022 ] Top5: 93.96% +[ Thu Sep 15 11:27:06 2022 ] Training epoch: 67 +[ Thu Sep 15 11:28:09 2022 ] Batch(81/123) done. Loss: 0.0170 lr:0.010000 network_time: 0.0320 +[ Thu Sep 15 11:28:39 2022 ] Eval epoch: 67 +[ Thu Sep 15 11:29:16 2022 ] Mean test loss of 296 batches: 1.2351856231689453. +[ Thu Sep 15 11:29:16 2022 ] Top1: 71.39% +[ Thu Sep 15 11:29:16 2022 ] Top5: 94.06% +[ Thu Sep 15 11:29:16 2022 ] Training epoch: 68 +[ Thu Sep 15 11:30:03 2022 ] Batch(58/123) done. Loss: 0.0142 lr:0.010000 network_time: 0.0317 +[ Thu Sep 15 11:30:50 2022 ] Eval epoch: 68 +[ Thu Sep 15 11:31:27 2022 ] Mean test loss of 296 batches: 1.2234604358673096. +[ Thu Sep 15 11:31:27 2022 ] Top1: 71.66% +[ Thu Sep 15 11:31:27 2022 ] Top5: 93.80% +[ Thu Sep 15 11:31:27 2022 ] Training epoch: 69 +[ Thu Sep 15 11:31:57 2022 ] Batch(35/123) done. Loss: 0.0385 lr:0.010000 network_time: 0.0336 +[ Thu Sep 15 11:33:00 2022 ] Eval epoch: 69 +[ Thu Sep 15 11:33:37 2022 ] Mean test loss of 296 batches: 1.250203251838684. +[ Thu Sep 15 11:33:38 2022 ] Top1: 71.37% +[ Thu Sep 15 11:33:38 2022 ] Top5: 93.77% +[ Thu Sep 15 11:33:38 2022 ] Training epoch: 70 +[ Thu Sep 15 11:33:50 2022 ] Batch(12/123) done. Loss: 0.0445 lr:0.010000 network_time: 0.0276 +[ Thu Sep 15 11:35:04 2022 ] Batch(112/123) done. Loss: 0.0085 lr:0.010000 network_time: 0.0322 +[ Thu Sep 15 11:35:11 2022 ] Eval epoch: 70 +[ Thu Sep 15 11:35:48 2022 ] Mean test loss of 296 batches: 1.2665828466415405. +[ Thu Sep 15 11:35:48 2022 ] Top1: 71.21% +[ Thu Sep 15 11:35:48 2022 ] Top5: 93.64% +[ Thu Sep 15 11:35:48 2022 ] Training epoch: 71 +[ Thu Sep 15 11:36:57 2022 ] Batch(89/123) done. Loss: 0.0117 lr:0.010000 network_time: 0.0270 +[ Thu Sep 15 11:37:21 2022 ] Eval epoch: 71 +[ Thu Sep 15 11:37:58 2022 ] Mean test loss of 296 batches: 1.2283488512039185. 
+[ Thu Sep 15 11:37:58 2022 ] Top1: 71.98% +[ Thu Sep 15 11:37:58 2022 ] Top5: 94.13% +[ Thu Sep 15 11:37:58 2022 ] Training epoch: 72 +[ Thu Sep 15 11:38:50 2022 ] Batch(66/123) done. Loss: 0.0065 lr:0.010000 network_time: 0.0267 +[ Thu Sep 15 11:39:31 2022 ] Eval epoch: 72 +[ Thu Sep 15 11:40:08 2022 ] Mean test loss of 296 batches: 1.2462166547775269. +[ Thu Sep 15 11:40:08 2022 ] Top1: 71.76% +[ Thu Sep 15 11:40:08 2022 ] Top5: 93.84% +[ Thu Sep 15 11:40:08 2022 ] Training epoch: 73 +[ Thu Sep 15 11:40:43 2022 ] Batch(43/123) done. Loss: 0.0100 lr:0.010000 network_time: 0.0232 +[ Thu Sep 15 11:41:41 2022 ] Eval epoch: 73 +[ Thu Sep 15 11:42:18 2022 ] Mean test loss of 296 batches: 1.2400442361831665. +[ Thu Sep 15 11:42:18 2022 ] Top1: 71.86% +[ Thu Sep 15 11:42:18 2022 ] Top5: 93.97% +[ Thu Sep 15 11:42:18 2022 ] Training epoch: 74 +[ Thu Sep 15 11:42:37 2022 ] Batch(20/123) done. Loss: 0.0098 lr:0.010000 network_time: 0.0278 +[ Thu Sep 15 11:43:50 2022 ] Batch(120/123) done. Loss: 0.0151 lr:0.010000 network_time: 0.0320 +[ Thu Sep 15 11:43:52 2022 ] Eval epoch: 74 +[ Thu Sep 15 11:44:28 2022 ] Mean test loss of 296 batches: 1.2737401723861694. +[ Thu Sep 15 11:44:28 2022 ] Top1: 71.29% +[ Thu Sep 15 11:44:28 2022 ] Top5: 93.80% +[ Thu Sep 15 11:44:28 2022 ] Training epoch: 75 +[ Thu Sep 15 11:45:43 2022 ] Batch(97/123) done. Loss: 0.0124 lr:0.010000 network_time: 0.0306 +[ Thu Sep 15 11:46:02 2022 ] Eval epoch: 75 +[ Thu Sep 15 11:46:38 2022 ] Mean test loss of 296 batches: 1.2346240282058716. +[ Thu Sep 15 11:46:38 2022 ] Top1: 72.24% +[ Thu Sep 15 11:46:39 2022 ] Top5: 94.04% +[ Thu Sep 15 11:46:39 2022 ] Training epoch: 76 +[ Thu Sep 15 11:47:37 2022 ] Batch(74/123) done. Loss: 0.0095 lr:0.010000 network_time: 0.0317 +[ Thu Sep 15 11:48:12 2022 ] Eval epoch: 76 +[ Thu Sep 15 11:48:49 2022 ] Mean test loss of 296 batches: 1.2648327350616455. 
+[ Thu Sep 15 11:48:49 2022 ] Top1: 71.50% +[ Thu Sep 15 11:48:49 2022 ] Top5: 93.76% +[ Thu Sep 15 11:48:49 2022 ] Training epoch: 77 +[ Thu Sep 15 11:49:30 2022 ] Batch(51/123) done. Loss: 0.0062 lr:0.010000 network_time: 0.0316 +[ Thu Sep 15 11:50:22 2022 ] Eval epoch: 77 +[ Thu Sep 15 11:50:59 2022 ] Mean test loss of 296 batches: 1.243092656135559. +[ Thu Sep 15 11:50:59 2022 ] Top1: 72.25% +[ Thu Sep 15 11:50:59 2022 ] Top5: 93.98% +[ Thu Sep 15 11:50:59 2022 ] Training epoch: 78 +[ Thu Sep 15 11:51:24 2022 ] Batch(28/123) done. Loss: 0.0054 lr:0.010000 network_time: 0.0328 +[ Thu Sep 15 11:52:33 2022 ] Eval epoch: 78 +[ Thu Sep 15 11:53:10 2022 ] Mean test loss of 296 batches: 1.2705637216567993. +[ Thu Sep 15 11:53:10 2022 ] Top1: 71.57% +[ Thu Sep 15 11:53:10 2022 ] Top5: 93.72% +[ Thu Sep 15 11:53:10 2022 ] Training epoch: 79 +[ Thu Sep 15 11:53:17 2022 ] Batch(5/123) done. Loss: 0.0362 lr:0.010000 network_time: 0.0251 +[ Thu Sep 15 11:54:31 2022 ] Batch(105/123) done. Loss: 0.0053 lr:0.010000 network_time: 0.0279 +[ Thu Sep 15 11:54:43 2022 ] Eval epoch: 79 +[ Thu Sep 15 11:55:20 2022 ] Mean test loss of 296 batches: 1.2619526386260986. +[ Thu Sep 15 11:55:20 2022 ] Top1: 71.74% +[ Thu Sep 15 11:55:20 2022 ] Top5: 93.84% +[ Thu Sep 15 11:55:20 2022 ] Training epoch: 80 +[ Thu Sep 15 11:56:24 2022 ] Batch(82/123) done. Loss: 0.0363 lr:0.010000 network_time: 0.0272 +[ Thu Sep 15 11:56:54 2022 ] Eval epoch: 80 +[ Thu Sep 15 11:57:30 2022 ] Mean test loss of 296 batches: 1.2848737239837646. +[ Thu Sep 15 11:57:30 2022 ] Top1: 71.46% +[ Thu Sep 15 11:57:31 2022 ] Top5: 93.67% +[ Thu Sep 15 11:57:31 2022 ] Training epoch: 81 +[ Thu Sep 15 11:58:18 2022 ] Batch(59/123) done. Loss: 0.0043 lr:0.001000 network_time: 0.0312 +[ Thu Sep 15 11:59:04 2022 ] Eval epoch: 81 +[ Thu Sep 15 11:59:40 2022 ] Mean test loss of 296 batches: 1.2562048435211182. 
+[ Thu Sep 15 11:59:40 2022 ] Top1: 71.59% +[ Thu Sep 15 11:59:41 2022 ] Top5: 93.88% +[ Thu Sep 15 11:59:41 2022 ] Training epoch: 82 +[ Thu Sep 15 12:00:11 2022 ] Batch(36/123) done. Loss: 0.0032 lr:0.001000 network_time: 0.0276 +[ Thu Sep 15 12:01:14 2022 ] Eval epoch: 82 +[ Thu Sep 15 12:01:51 2022 ] Mean test loss of 296 batches: 1.278260350227356. +[ Thu Sep 15 12:01:51 2022 ] Top1: 71.65% +[ Thu Sep 15 12:01:51 2022 ] Top5: 93.83% +[ Thu Sep 15 12:01:51 2022 ] Training epoch: 83 +[ Thu Sep 15 12:02:04 2022 ] Batch(13/123) done. Loss: 0.0056 lr:0.001000 network_time: 0.0274 +[ Thu Sep 15 12:03:17 2022 ] Batch(113/123) done. Loss: 0.0152 lr:0.001000 network_time: 0.0312 +[ Thu Sep 15 12:03:24 2022 ] Eval epoch: 83 +[ Thu Sep 15 12:04:01 2022 ] Mean test loss of 296 batches: 1.2579419612884521. +[ Thu Sep 15 12:04:01 2022 ] Top1: 71.88% +[ Thu Sep 15 12:04:01 2022 ] Top5: 93.87% +[ Thu Sep 15 12:04:01 2022 ] Training epoch: 84 +[ Thu Sep 15 12:05:11 2022 ] Batch(90/123) done. Loss: 0.0068 lr:0.001000 network_time: 0.0272 +[ Thu Sep 15 12:05:34 2022 ] Eval epoch: 84 +[ Thu Sep 15 12:06:11 2022 ] Mean test loss of 296 batches: 1.2505924701690674. +[ Thu Sep 15 12:06:11 2022 ] Top1: 71.98% +[ Thu Sep 15 12:06:11 2022 ] Top5: 93.90% +[ Thu Sep 15 12:06:12 2022 ] Training epoch: 85 +[ Thu Sep 15 12:07:04 2022 ] Batch(67/123) done. Loss: 0.0113 lr:0.001000 network_time: 0.0269 +[ Thu Sep 15 12:07:45 2022 ] Eval epoch: 85 +[ Thu Sep 15 12:08:21 2022 ] Mean test loss of 296 batches: 1.265346646308899. +[ Thu Sep 15 12:08:22 2022 ] Top1: 71.95% +[ Thu Sep 15 12:08:22 2022 ] Top5: 93.87% +[ Thu Sep 15 12:08:22 2022 ] Training epoch: 86 +[ Thu Sep 15 12:08:58 2022 ] Batch(44/123) done. Loss: 0.0603 lr:0.001000 network_time: 0.0313 +[ Thu Sep 15 12:09:55 2022 ] Eval epoch: 86 +[ Thu Sep 15 12:10:32 2022 ] Mean test loss of 296 batches: 1.256374478340149. 
+[ Thu Sep 15 12:10:32 2022 ] Top1: 71.99% +[ Thu Sep 15 12:10:32 2022 ] Top5: 93.92% +[ Thu Sep 15 12:10:32 2022 ] Training epoch: 87 +[ Thu Sep 15 12:10:51 2022 ] Batch(21/123) done. Loss: 0.0118 lr:0.001000 network_time: 0.0379 +[ Thu Sep 15 12:12:04 2022 ] Batch(121/123) done. Loss: 0.0058 lr:0.001000 network_time: 0.0269 +[ Thu Sep 15 12:12:05 2022 ] Eval epoch: 87 +[ Thu Sep 15 12:12:42 2022 ] Mean test loss of 296 batches: 1.2610098123550415. +[ Thu Sep 15 12:12:42 2022 ] Top1: 71.69% +[ Thu Sep 15 12:12:42 2022 ] Top5: 93.72% +[ Thu Sep 15 12:12:42 2022 ] Training epoch: 88 +[ Thu Sep 15 12:13:57 2022 ] Batch(98/123) done. Loss: 0.0042 lr:0.001000 network_time: 0.0275 +[ Thu Sep 15 12:14:15 2022 ] Eval epoch: 88 +[ Thu Sep 15 12:14:52 2022 ] Mean test loss of 296 batches: 1.2685989141464233. +[ Thu Sep 15 12:14:52 2022 ] Top1: 72.03% +[ Thu Sep 15 12:14:52 2022 ] Top5: 93.72% +[ Thu Sep 15 12:14:52 2022 ] Training epoch: 89 +[ Thu Sep 15 12:15:50 2022 ] Batch(75/123) done. Loss: 0.0288 lr:0.001000 network_time: 0.0248 +[ Thu Sep 15 12:16:25 2022 ] Eval epoch: 89 +[ Thu Sep 15 12:17:02 2022 ] Mean test loss of 296 batches: 1.2469428777694702. +[ Thu Sep 15 12:17:02 2022 ] Top1: 72.15% +[ Thu Sep 15 12:17:02 2022 ] Top5: 93.91% +[ Thu Sep 15 12:17:02 2022 ] Training epoch: 90 +[ Thu Sep 15 12:17:44 2022 ] Batch(52/123) done. Loss: 0.0064 lr:0.001000 network_time: 0.0269 +[ Thu Sep 15 12:18:35 2022 ] Eval epoch: 90 +[ Thu Sep 15 12:19:12 2022 ] Mean test loss of 296 batches: 1.2449932098388672. +[ Thu Sep 15 12:19:12 2022 ] Top1: 72.04% +[ Thu Sep 15 12:19:12 2022 ] Top5: 93.94% +[ Thu Sep 15 12:19:12 2022 ] Training epoch: 91 +[ Thu Sep 15 12:19:37 2022 ] Batch(29/123) done. Loss: 0.0048 lr:0.001000 network_time: 0.0319 +[ Thu Sep 15 12:20:46 2022 ] Eval epoch: 91 +[ Thu Sep 15 12:21:22 2022 ] Mean test loss of 296 batches: 1.2450257539749146. 
+[ Thu Sep 15 12:21:23 2022 ] Top1: 72.04% +[ Thu Sep 15 12:21:23 2022 ] Top5: 94.00% +[ Thu Sep 15 12:21:23 2022 ] Training epoch: 92 +[ Thu Sep 15 12:21:31 2022 ] Batch(6/123) done. Loss: 0.0074 lr:0.001000 network_time: 0.0283 +[ Thu Sep 15 12:22:44 2022 ] Batch(106/123) done. Loss: 0.0122 lr:0.001000 network_time: 0.0290 +[ Thu Sep 15 12:22:56 2022 ] Eval epoch: 92 +[ Thu Sep 15 12:23:33 2022 ] Mean test loss of 296 batches: 1.2521724700927734. +[ Thu Sep 15 12:23:33 2022 ] Top1: 72.14% +[ Thu Sep 15 12:23:33 2022 ] Top5: 93.93% +[ Thu Sep 15 12:23:33 2022 ] Training epoch: 93 +[ Thu Sep 15 12:24:37 2022 ] Batch(83/123) done. Loss: 0.0121 lr:0.001000 network_time: 0.0283 +[ Thu Sep 15 12:25:06 2022 ] Eval epoch: 93 +[ Thu Sep 15 12:25:43 2022 ] Mean test loss of 296 batches: 1.2700116634368896. +[ Thu Sep 15 12:25:43 2022 ] Top1: 71.63% +[ Thu Sep 15 12:25:43 2022 ] Top5: 93.81% +[ Thu Sep 15 12:25:43 2022 ] Training epoch: 94 +[ Thu Sep 15 12:26:31 2022 ] Batch(60/123) done. Loss: 0.0184 lr:0.001000 network_time: 0.0319 +[ Thu Sep 15 12:27:16 2022 ] Eval epoch: 94 +[ Thu Sep 15 12:27:52 2022 ] Mean test loss of 296 batches: 1.2664250135421753. +[ Thu Sep 15 12:27:52 2022 ] Top1: 71.77% +[ Thu Sep 15 12:27:53 2022 ] Top5: 93.86% +[ Thu Sep 15 12:27:53 2022 ] Training epoch: 95 +[ Thu Sep 15 12:28:23 2022 ] Batch(37/123) done. Loss: 0.0089 lr:0.001000 network_time: 0.0284 +[ Thu Sep 15 12:29:26 2022 ] Eval epoch: 95 +[ Thu Sep 15 12:30:02 2022 ] Mean test loss of 296 batches: 1.2467601299285889. +[ Thu Sep 15 12:30:03 2022 ] Top1: 72.03% +[ Thu Sep 15 12:30:03 2022 ] Top5: 94.04% +[ Thu Sep 15 12:30:03 2022 ] Training epoch: 96 +[ Thu Sep 15 12:30:16 2022 ] Batch(14/123) done. Loss: 0.0055 lr:0.001000 network_time: 0.0350 +[ Thu Sep 15 12:31:29 2022 ] Batch(114/123) done. Loss: 0.0036 lr:0.001000 network_time: 0.0270 +[ Thu Sep 15 12:31:35 2022 ] Eval epoch: 96 +[ Thu Sep 15 12:32:12 2022 ] Mean test loss of 296 batches: 1.268143892288208. 
+[ Thu Sep 15 12:32:12 2022 ] Top1: 71.84% +[ Thu Sep 15 12:32:12 2022 ] Top5: 93.78% +[ Thu Sep 15 12:32:13 2022 ] Training epoch: 97 +[ Thu Sep 15 12:33:23 2022 ] Batch(91/123) done. Loss: 0.0090 lr:0.001000 network_time: 0.0250 +[ Thu Sep 15 12:33:45 2022 ] Eval epoch: 97 +[ Thu Sep 15 12:34:22 2022 ] Mean test loss of 296 batches: 1.2730531692504883. +[ Thu Sep 15 12:34:22 2022 ] Top1: 71.94% +[ Thu Sep 15 12:34:22 2022 ] Top5: 93.97% +[ Thu Sep 15 12:34:22 2022 ] Training epoch: 98 +[ Thu Sep 15 12:35:15 2022 ] Batch(68/123) done. Loss: 0.0099 lr:0.001000 network_time: 0.0304 +[ Thu Sep 15 12:35:55 2022 ] Eval epoch: 98 +[ Thu Sep 15 12:36:32 2022 ] Mean test loss of 296 batches: 1.273022174835205. +[ Thu Sep 15 12:36:32 2022 ] Top1: 71.93% +[ Thu Sep 15 12:36:32 2022 ] Top5: 93.79% +[ Thu Sep 15 12:36:32 2022 ] Training epoch: 99 +[ Thu Sep 15 12:37:09 2022 ] Batch(45/123) done. Loss: 0.0079 lr:0.001000 network_time: 0.0260 +[ Thu Sep 15 12:38:05 2022 ] Eval epoch: 99 +[ Thu Sep 15 12:38:42 2022 ] Mean test loss of 296 batches: 1.2646883726119995. +[ Thu Sep 15 12:38:42 2022 ] Top1: 72.11% +[ Thu Sep 15 12:38:42 2022 ] Top5: 93.73% +[ Thu Sep 15 12:38:42 2022 ] Training epoch: 100 +[ Thu Sep 15 12:39:02 2022 ] Batch(22/123) done. Loss: 0.0084 lr:0.001000 network_time: 0.0280 +[ Thu Sep 15 12:40:15 2022 ] Batch(122/123) done. Loss: 0.0090 lr:0.001000 network_time: 0.0268 +[ Thu Sep 15 12:40:15 2022 ] Eval epoch: 100 +[ Thu Sep 15 12:40:52 2022 ] Mean test loss of 296 batches: 1.2576135396957397. +[ Thu Sep 15 12:40:52 2022 ] Top1: 72.13% +[ Thu Sep 15 12:40:52 2022 ] Top5: 93.84% +[ Thu Sep 15 12:40:52 2022 ] Training epoch: 101 +[ Thu Sep 15 12:42:08 2022 ] Batch(99/123) done. Loss: 0.0108 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 12:42:25 2022 ] Eval epoch: 101 +[ Thu Sep 15 12:43:02 2022 ] Mean test loss of 296 batches: 1.2688430547714233. 
+[ Thu Sep 15 12:43:02 2022 ] Top1: 71.78% +[ Thu Sep 15 12:43:02 2022 ] Top5: 93.72% +[ Thu Sep 15 12:43:02 2022 ] Training epoch: 102 +[ Thu Sep 15 12:44:02 2022 ] Batch(76/123) done. Loss: 0.0053 lr:0.000100 network_time: 0.0274 +[ Thu Sep 15 12:44:36 2022 ] Eval epoch: 102 +[ Thu Sep 15 12:45:13 2022 ] Mean test loss of 296 batches: 1.2590831518173218. +[ Thu Sep 15 12:45:13 2022 ] Top1: 71.84% +[ Thu Sep 15 12:45:13 2022 ] Top5: 93.91% +[ Thu Sep 15 12:45:13 2022 ] Training epoch: 103 +[ Thu Sep 15 12:45:55 2022 ] Batch(53/123) done. Loss: 0.0079 lr:0.000100 network_time: 0.0276 +[ Thu Sep 15 12:46:46 2022 ] Eval epoch: 103 +[ Thu Sep 15 12:47:23 2022 ] Mean test loss of 296 batches: 1.2855056524276733. +[ Thu Sep 15 12:47:23 2022 ] Top1: 71.57% +[ Thu Sep 15 12:47:23 2022 ] Top5: 93.87% +[ Thu Sep 15 12:47:23 2022 ] Training epoch: 104 +[ Thu Sep 15 12:47:49 2022 ] Batch(30/123) done. Loss: 0.0342 lr:0.000100 network_time: 0.0320 +[ Thu Sep 15 12:48:56 2022 ] Eval epoch: 104 +[ Thu Sep 15 12:49:33 2022 ] Mean test loss of 296 batches: 1.2522485256195068. +[ Thu Sep 15 12:49:33 2022 ] Top1: 71.99% +[ Thu Sep 15 12:49:33 2022 ] Top5: 93.93% +[ Thu Sep 15 12:49:34 2022 ] Training epoch: 105 +[ Thu Sep 15 12:49:42 2022 ] Batch(7/123) done. Loss: 0.0103 lr:0.000100 network_time: 0.0281 +[ Thu Sep 15 12:50:56 2022 ] Batch(107/123) done. Loss: 0.0062 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 12:51:07 2022 ] Eval epoch: 105 +[ Thu Sep 15 12:51:44 2022 ] Mean test loss of 296 batches: 1.2807689905166626. +[ Thu Sep 15 12:51:44 2022 ] Top1: 71.62% +[ Thu Sep 15 12:51:44 2022 ] Top5: 93.87% +[ Thu Sep 15 12:51:44 2022 ] Training epoch: 106 +[ Thu Sep 15 12:52:49 2022 ] Batch(84/123) done. Loss: 0.0211 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 12:53:17 2022 ] Eval epoch: 106 +[ Thu Sep 15 12:53:54 2022 ] Mean test loss of 296 batches: 1.2796167135238647. 
+[ Thu Sep 15 12:53:54 2022 ] Top1: 71.97% +[ Thu Sep 15 12:53:54 2022 ] Top5: 93.98% +[ Thu Sep 15 12:53:54 2022 ] Training epoch: 107 +[ Thu Sep 15 12:54:42 2022 ] Batch(61/123) done. Loss: 0.0039 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 12:55:27 2022 ] Eval epoch: 107 +[ Thu Sep 15 12:56:03 2022 ] Mean test loss of 296 batches: 1.2681522369384766. +[ Thu Sep 15 12:56:03 2022 ] Top1: 71.85% +[ Thu Sep 15 12:56:03 2022 ] Top5: 93.87% +[ Thu Sep 15 12:56:04 2022 ] Training epoch: 108 +[ Thu Sep 15 12:56:35 2022 ] Batch(38/123) done. Loss: 0.0063 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 12:57:36 2022 ] Eval epoch: 108 +[ Thu Sep 15 12:58:13 2022 ] Mean test loss of 296 batches: 1.2923107147216797. +[ Thu Sep 15 12:58:13 2022 ] Top1: 71.58% +[ Thu Sep 15 12:58:13 2022 ] Top5: 93.75% +[ Thu Sep 15 12:58:13 2022 ] Training epoch: 109 +[ Thu Sep 15 12:58:28 2022 ] Batch(15/123) done. Loss: 0.0079 lr:0.000100 network_time: 0.0300 +[ Thu Sep 15 12:59:41 2022 ] Batch(115/123) done. Loss: 0.0106 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 12:59:46 2022 ] Eval epoch: 109 +[ Thu Sep 15 13:00:23 2022 ] Mean test loss of 296 batches: 1.2762378454208374. +[ Thu Sep 15 13:00:23 2022 ] Top1: 71.64% +[ Thu Sep 15 13:00:23 2022 ] Top5: 93.77% +[ Thu Sep 15 13:00:23 2022 ] Training epoch: 110 +[ Thu Sep 15 13:01:34 2022 ] Batch(92/123) done. Loss: 0.0051 lr:0.000100 network_time: 0.0332 +[ Thu Sep 15 13:01:57 2022 ] Eval epoch: 110 +[ Thu Sep 15 13:02:33 2022 ] Mean test loss of 296 batches: 1.2684098482131958. +[ Thu Sep 15 13:02:33 2022 ] Top1: 71.82% +[ Thu Sep 15 13:02:33 2022 ] Top5: 93.94% +[ Thu Sep 15 13:02:33 2022 ] Training epoch: 111 +[ Thu Sep 15 13:03:27 2022 ] Batch(69/123) done. Loss: 0.0050 lr:0.000100 network_time: 0.0316 +[ Thu Sep 15 13:04:06 2022 ] Eval epoch: 111 +[ Thu Sep 15 13:04:43 2022 ] Mean test loss of 296 batches: 1.2667440176010132. 
+[ Thu Sep 15 13:04:43 2022 ] Top1: 71.96% +[ Thu Sep 15 13:04:43 2022 ] Top5: 93.89% +[ Thu Sep 15 13:04:44 2022 ] Training epoch: 112 +[ Thu Sep 15 13:05:21 2022 ] Batch(46/123) done. Loss: 0.0051 lr:0.000100 network_time: 0.0290 +[ Thu Sep 15 13:06:17 2022 ] Eval epoch: 112 +[ Thu Sep 15 13:06:53 2022 ] Mean test loss of 296 batches: 1.2608120441436768. +[ Thu Sep 15 13:06:53 2022 ] Top1: 71.88% +[ Thu Sep 15 13:06:53 2022 ] Top5: 93.94% +[ Thu Sep 15 13:06:53 2022 ] Training epoch: 113 +[ Thu Sep 15 13:07:14 2022 ] Batch(23/123) done. Loss: 0.0197 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 13:08:26 2022 ] Eval epoch: 113 +[ Thu Sep 15 13:09:03 2022 ] Mean test loss of 296 batches: 1.2844964265823364. +[ Thu Sep 15 13:09:03 2022 ] Top1: 71.66% +[ Thu Sep 15 13:09:03 2022 ] Top5: 93.80% +[ Thu Sep 15 13:09:04 2022 ] Training epoch: 114 +[ Thu Sep 15 13:09:07 2022 ] Batch(0/123) done. Loss: 0.0103 lr:0.000100 network_time: 0.0623 +[ Thu Sep 15 13:10:20 2022 ] Batch(100/123) done. Loss: 0.0071 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 13:10:37 2022 ] Eval epoch: 114 +[ Thu Sep 15 13:11:13 2022 ] Mean test loss of 296 batches: 1.2554805278778076. +[ Thu Sep 15 13:11:13 2022 ] Top1: 72.02% +[ Thu Sep 15 13:11:14 2022 ] Top5: 93.95% +[ Thu Sep 15 13:11:14 2022 ] Training epoch: 115 +[ Thu Sep 15 13:12:13 2022 ] Batch(77/123) done. Loss: 0.0040 lr:0.000100 network_time: 0.0268 +[ Thu Sep 15 13:12:46 2022 ] Eval epoch: 115 +[ Thu Sep 15 13:13:23 2022 ] Mean test loss of 296 batches: 1.2704654932022095. +[ Thu Sep 15 13:13:23 2022 ] Top1: 71.71% +[ Thu Sep 15 13:13:23 2022 ] Top5: 93.87% +[ Thu Sep 15 13:13:23 2022 ] Training epoch: 116 +[ Thu Sep 15 13:14:07 2022 ] Batch(54/123) done. Loss: 0.0034 lr:0.000100 network_time: 0.0271 +[ Thu Sep 15 13:14:56 2022 ] Eval epoch: 116 +[ Thu Sep 15 13:15:33 2022 ] Mean test loss of 296 batches: 1.2611382007598877. 
+[ Thu Sep 15 13:15:33 2022 ] Top1: 71.88% +[ Thu Sep 15 13:15:33 2022 ] Top5: 93.84% +[ Thu Sep 15 13:15:33 2022 ] Training epoch: 117 +[ Thu Sep 15 13:15:59 2022 ] Batch(31/123) done. Loss: 0.0107 lr:0.000100 network_time: 0.0280 +[ Thu Sep 15 13:17:06 2022 ] Eval epoch: 117 +[ Thu Sep 15 13:17:43 2022 ] Mean test loss of 296 batches: 1.2616456747055054. +[ Thu Sep 15 13:17:43 2022 ] Top1: 72.14% +[ Thu Sep 15 13:17:43 2022 ] Top5: 93.88% +[ Thu Sep 15 13:17:43 2022 ] Training epoch: 118 +[ Thu Sep 15 13:17:53 2022 ] Batch(8/123) done. Loss: 0.0129 lr:0.000100 network_time: 0.0354 +[ Thu Sep 15 13:19:06 2022 ] Batch(108/123) done. Loss: 0.0166 lr:0.000100 network_time: 0.0285 +[ Thu Sep 15 13:19:16 2022 ] Eval epoch: 118 +[ Thu Sep 15 13:19:53 2022 ] Mean test loss of 296 batches: 1.2641900777816772. +[ Thu Sep 15 13:19:53 2022 ] Top1: 72.16% +[ Thu Sep 15 13:19:53 2022 ] Top5: 93.96% +[ Thu Sep 15 13:19:53 2022 ] Training epoch: 119 +[ Thu Sep 15 13:20:59 2022 ] Batch(85/123) done. Loss: 0.0095 lr:0.000100 network_time: 0.0278 +[ Thu Sep 15 13:21:26 2022 ] Eval epoch: 119 +[ Thu Sep 15 13:22:04 2022 ] Mean test loss of 296 batches: 1.2731181383132935. +[ Thu Sep 15 13:22:04 2022 ] Top1: 71.87% +[ Thu Sep 15 13:22:04 2022 ] Top5: 93.74% +[ Thu Sep 15 13:22:04 2022 ] Training epoch: 120 +[ Thu Sep 15 13:22:53 2022 ] Batch(62/123) done. Loss: 0.0057 lr:0.000100 network_time: 0.0309 +[ Thu Sep 15 13:23:37 2022 ] Eval epoch: 120 +[ Thu Sep 15 13:24:13 2022 ] Mean test loss of 296 batches: 1.278167724609375. +[ Thu Sep 15 13:24:13 2022 ] Top1: 71.71% +[ Thu Sep 15 13:24:14 2022 ] Top5: 93.76% +[ Thu Sep 15 13:24:14 2022 ] Training epoch: 121 +[ Thu Sep 15 13:24:46 2022 ] Batch(39/123) done. Loss: 0.0078 lr:0.000100 network_time: 0.0307 +[ Thu Sep 15 13:25:47 2022 ] Eval epoch: 121 +[ Thu Sep 15 13:26:23 2022 ] Mean test loss of 296 batches: 1.2689993381500244. 
+[ Thu Sep 15 13:26:23 2022 ] Top1: 71.73% +[ Thu Sep 15 13:26:23 2022 ] Top5: 93.85% +[ Thu Sep 15 13:26:23 2022 ] Training epoch: 122 +[ Thu Sep 15 13:26:38 2022 ] Batch(16/123) done. Loss: 0.0293 lr:0.000100 network_time: 0.0300 +[ Thu Sep 15 13:27:51 2022 ] Batch(116/123) done. Loss: 0.0049 lr:0.000100 network_time: 0.0306 +[ Thu Sep 15 13:27:56 2022 ] Eval epoch: 122 +[ Thu Sep 15 13:28:32 2022 ] Mean test loss of 296 batches: 1.2402263879776. +[ Thu Sep 15 13:28:33 2022 ] Top1: 72.45% +[ Thu Sep 15 13:28:33 2022 ] Top5: 93.97% +[ Thu Sep 15 13:28:33 2022 ] Training epoch: 123 +[ Thu Sep 15 13:29:44 2022 ] Batch(93/123) done. Loss: 0.0121 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 13:30:06 2022 ] Eval epoch: 123 +[ Thu Sep 15 13:30:42 2022 ] Mean test loss of 296 batches: 1.2765028476715088. +[ Thu Sep 15 13:30:43 2022 ] Top1: 71.89% +[ Thu Sep 15 13:30:43 2022 ] Top5: 93.75% +[ Thu Sep 15 13:30:43 2022 ] Training epoch: 124 +[ Thu Sep 15 13:31:37 2022 ] Batch(70/123) done. Loss: 0.0124 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 13:32:15 2022 ] Eval epoch: 124 +[ Thu Sep 15 13:32:52 2022 ] Mean test loss of 296 batches: 1.2397212982177734. +[ Thu Sep 15 13:32:52 2022 ] Top1: 72.42% +[ Thu Sep 15 13:32:52 2022 ] Top5: 93.93% +[ Thu Sep 15 13:32:52 2022 ] Training epoch: 125 +[ Thu Sep 15 13:33:30 2022 ] Batch(47/123) done. Loss: 0.0028 lr:0.000100 network_time: 0.0262 +[ Thu Sep 15 13:34:25 2022 ] Eval epoch: 125 +[ Thu Sep 15 13:35:02 2022 ] Mean test loss of 296 batches: 1.248683214187622. +[ Thu Sep 15 13:35:02 2022 ] Top1: 72.09% +[ Thu Sep 15 13:35:02 2022 ] Top5: 93.90% +[ Thu Sep 15 13:35:02 2022 ] Training epoch: 126 +[ Thu Sep 15 13:35:23 2022 ] Batch(24/123) done. Loss: 0.0036 lr:0.000100 network_time: 0.0287 +[ Thu Sep 15 13:36:35 2022 ] Eval epoch: 126 +[ Thu Sep 15 13:37:12 2022 ] Mean test loss of 296 batches: 1.2729686498641968. 
+[ Thu Sep 15 13:37:12 2022 ] Top1: 72.02% +[ Thu Sep 15 13:37:12 2022 ] Top5: 93.87% +[ Thu Sep 15 13:37:12 2022 ] Training epoch: 127 +[ Thu Sep 15 13:37:16 2022 ] Batch(1/123) done. Loss: 0.0077 lr:0.000100 network_time: 0.0272 +[ Thu Sep 15 13:38:30 2022 ] Batch(101/123) done. Loss: 0.0106 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 13:38:45 2022 ] Eval epoch: 127 +[ Thu Sep 15 13:39:22 2022 ] Mean test loss of 296 batches: 1.2486753463745117. +[ Thu Sep 15 13:39:22 2022 ] Top1: 72.26% +[ Thu Sep 15 13:39:22 2022 ] Top5: 94.02% +[ Thu Sep 15 13:39:22 2022 ] Training epoch: 128 +[ Thu Sep 15 13:40:22 2022 ] Batch(78/123) done. Loss: 0.0103 lr:0.000100 network_time: 0.0295 +[ Thu Sep 15 13:40:55 2022 ] Eval epoch: 128 +[ Thu Sep 15 13:41:31 2022 ] Mean test loss of 296 batches: 1.2673088312149048. +[ Thu Sep 15 13:41:31 2022 ] Top1: 72.02% +[ Thu Sep 15 13:41:32 2022 ] Top5: 93.98% +[ Thu Sep 15 13:41:32 2022 ] Training epoch: 129 +[ Thu Sep 15 13:42:15 2022 ] Batch(55/123) done. Loss: 0.0062 lr:0.000100 network_time: 0.0306 +[ Thu Sep 15 13:43:05 2022 ] Eval epoch: 129 +[ Thu Sep 15 13:43:41 2022 ] Mean test loss of 296 batches: 1.247158169746399. +[ Thu Sep 15 13:43:41 2022 ] Top1: 72.13% +[ Thu Sep 15 13:43:41 2022 ] Top5: 94.06% +[ Thu Sep 15 13:43:41 2022 ] Training epoch: 130 +[ Thu Sep 15 13:44:08 2022 ] Batch(32/123) done. Loss: 0.0134 lr:0.000100 network_time: 0.0270 +[ Thu Sep 15 13:45:14 2022 ] Eval epoch: 130 +[ Thu Sep 15 13:45:51 2022 ] Mean test loss of 296 batches: 1.2655749320983887. +[ Thu Sep 15 13:45:51 2022 ] Top1: 71.96% +[ Thu Sep 15 13:45:51 2022 ] Top5: 93.81% +[ Thu Sep 15 13:45:51 2022 ] Training epoch: 131 +[ Thu Sep 15 13:46:01 2022 ] Batch(9/123) done. Loss: 0.0024 lr:0.000100 network_time: 0.0261 +[ Thu Sep 15 13:47:14 2022 ] Batch(109/123) done. Loss: 0.0065 lr:0.000100 network_time: 0.0260 +[ Thu Sep 15 13:47:24 2022 ] Eval epoch: 131 +[ Thu Sep 15 13:48:00 2022 ] Mean test loss of 296 batches: 1.228884220123291. 
+[ Thu Sep 15 13:48:00 2022 ] Top1: 72.56% +[ Thu Sep 15 13:48:00 2022 ] Top5: 93.99% +[ Thu Sep 15 13:48:00 2022 ] Training epoch: 132 +[ Thu Sep 15 13:49:07 2022 ] Batch(86/123) done. Loss: 0.0105 lr:0.000100 network_time: 0.0328 +[ Thu Sep 15 13:49:33 2022 ] Eval epoch: 132 +[ Thu Sep 15 13:50:10 2022 ] Mean test loss of 296 batches: 1.2533588409423828. +[ Thu Sep 15 13:50:10 2022 ] Top1: 72.03% +[ Thu Sep 15 13:50:10 2022 ] Top5: 93.96% +[ Thu Sep 15 13:50:10 2022 ] Training epoch: 133 +[ Thu Sep 15 13:51:00 2022 ] Batch(63/123) done. Loss: 0.0074 lr:0.000100 network_time: 0.0269 +[ Thu Sep 15 13:51:43 2022 ] Eval epoch: 133 +[ Thu Sep 15 13:52:20 2022 ] Mean test loss of 296 batches: 1.2805049419403076. +[ Thu Sep 15 13:52:20 2022 ] Top1: 71.68% +[ Thu Sep 15 13:52:20 2022 ] Top5: 93.84% +[ Thu Sep 15 13:52:20 2022 ] Training epoch: 134 +[ Thu Sep 15 13:52:53 2022 ] Batch(40/123) done. Loss: 0.0075 lr:0.000100 network_time: 0.0285 +[ Thu Sep 15 13:53:53 2022 ] Eval epoch: 134 +[ Thu Sep 15 13:54:30 2022 ] Mean test loss of 296 batches: 1.262468695640564. +[ Thu Sep 15 13:54:30 2022 ] Top1: 71.97% +[ Thu Sep 15 13:54:30 2022 ] Top5: 93.91% +[ Thu Sep 15 13:54:30 2022 ] Training epoch: 135 +[ Thu Sep 15 13:54:46 2022 ] Batch(17/123) done. Loss: 0.0028 lr:0.000100 network_time: 0.0349 +[ Thu Sep 15 13:55:59 2022 ] Batch(117/123) done. Loss: 0.0111 lr:0.000100 network_time: 0.0261 +[ Thu Sep 15 13:56:03 2022 ] Eval epoch: 135 +[ Thu Sep 15 13:56:40 2022 ] Mean test loss of 296 batches: 1.234968900680542. +[ Thu Sep 15 13:56:40 2022 ] Top1: 72.27% +[ Thu Sep 15 13:56:40 2022 ] Top5: 94.14% +[ Thu Sep 15 13:56:40 2022 ] Training epoch: 136 +[ Thu Sep 15 13:57:52 2022 ] Batch(94/123) done. Loss: 0.0138 lr:0.000100 network_time: 0.0332 +[ Thu Sep 15 13:58:13 2022 ] Eval epoch: 136 +[ Thu Sep 15 13:58:50 2022 ] Mean test loss of 296 batches: 1.2445356845855713. 
+[ Thu Sep 15 13:58:50 2022 ] Top1: 72.50% +[ Thu Sep 15 13:58:50 2022 ] Top5: 94.01% +[ Thu Sep 15 13:58:50 2022 ] Training epoch: 137 +[ Thu Sep 15 13:59:45 2022 ] Batch(71/123) done. Loss: 0.0117 lr:0.000100 network_time: 0.0275 +[ Thu Sep 15 14:00:23 2022 ] Eval epoch: 137 +[ Thu Sep 15 14:01:00 2022 ] Mean test loss of 296 batches: 1.2410402297973633. +[ Thu Sep 15 14:01:00 2022 ] Top1: 72.16% +[ Thu Sep 15 14:01:00 2022 ] Top5: 94.05% +[ Thu Sep 15 14:01:00 2022 ] Training epoch: 138 +[ Thu Sep 15 14:01:39 2022 ] Batch(48/123) done. Loss: 0.0055 lr:0.000100 network_time: 0.0286 +[ Thu Sep 15 14:02:33 2022 ] Eval epoch: 138 +[ Thu Sep 15 14:03:10 2022 ] Mean test loss of 296 batches: 1.2681639194488525. +[ Thu Sep 15 14:03:10 2022 ] Top1: 71.83% +[ Thu Sep 15 14:03:10 2022 ] Top5: 93.81% +[ Thu Sep 15 14:03:10 2022 ] Training epoch: 139 +[ Thu Sep 15 14:03:31 2022 ] Batch(25/123) done. Loss: 0.0064 lr:0.000100 network_time: 0.0273 +[ Thu Sep 15 14:04:43 2022 ] Eval epoch: 139 +[ Thu Sep 15 14:05:19 2022 ] Mean test loss of 296 batches: 1.2698543071746826. +[ Thu Sep 15 14:05:19 2022 ] Top1: 71.85% +[ Thu Sep 15 14:05:19 2022 ] Top5: 93.88% +[ Thu Sep 15 14:05:19 2022 ] Training epoch: 140 +[ Thu Sep 15 14:05:24 2022 ] Batch(2/123) done. Loss: 0.0064 lr:0.000100 network_time: 0.0292 +[ Thu Sep 15 14:06:37 2022 ] Batch(102/123) done. Loss: 0.0059 lr:0.000100 network_time: 0.0279 +[ Thu Sep 15 14:06:52 2022 ] Eval epoch: 140 +[ Thu Sep 15 14:07:29 2022 ] Mean test loss of 296 batches: 1.273708462715149. 
# NOTE(review): this span of the original dump also carried the tail of
# ntu120_bone_motion_xset/log.txt (final eval: Top1 71.73%, Top5 93.88%) and the
# git-diff header for
# ckpt/Others/Shift-GCN/ntu60_xview/ntu_ShiftGCN_joint_xview/shift_gcn.py.
# The module below is that file, reconstructed and cleaned up.
#
# Shift-GCN skeleton-action-recognition model: spatial shift-graph convolutions
# (Shift_gcn) interleaved with temporal shift convolutions (Shift_tcn).
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import math

import sys
sys.path.append("./model/Temporal_shift/")

# Project-local CUDA extension providing the learnable temporal Shift op.
from cuda.shift import Shift


def import_class(name):
    """Import and return an attribute from a dotted path, e.g. 'graph.ntu_rgb_d.Graph'."""
    components = name.split('.')
    mod = __import__(components[0])
    for comp in components[1:]:
        mod = getattr(mod, comp)
    return mod


def conv_init(conv):
    """Kaiming-initialize a conv layer's weight and zero its bias (if any).

    Fix: nn.init.kaiming_normal / nn.init.constant were deprecated and later
    removed from PyTorch; the in-place trailing-underscore variants are the
    supported API and compute the same initialization.
    """
    nn.init.kaiming_normal_(conv.weight, mode='fan_out')
    if conv.bias is not None:  # guard: bias-free convs would crash here
        nn.init.constant_(conv.bias, 0)


def bn_init(bn, scale):
    """Initialize a BatchNorm layer to weight=scale, bias=0."""
    nn.init.constant_(bn.weight, scale)
    nn.init.constant_(bn.bias, 0)


class tcn(nn.Module):
    """Plain temporal convolution (k x 1 over time) + BatchNorm, no activation.

    Used as the strided residual projection inside TCN_GCN_unit.
    """

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(tcn, self).__init__()
        pad = int((kernel_size - 1) / 2)  # 'same' padding along time
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1),
                              padding=(pad, 0), stride=(stride, 1))

        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()  # kept for parity with the original module (unused in forward)
        conv_init(self.conv)
        bn_init(self.bn, 1)

    def forward(self, x):
        x = self.bn(self.conv(x))
        return x


class Shift_tcn(nn.Module):
    """Temporal shift block: BN -> shift -> 1x1 conv -> ReLU -> shift -> BN."""

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(Shift_tcn, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.bn = nn.BatchNorm2d(in_channels)
        # Fix: bn2 normalizes the output of temporal_linear, which has
        # out_channels features; the original built it with in_channels and only
        # worked because every caller passes in_channels == out_channels.
        self.bn2 = nn.BatchNorm2d(out_channels)
        bn_init(self.bn2, 1)
        self.relu = nn.ReLU(inplace=True)
        # Learnable temporal shifts from the project's CUDA extension.
        self.shift_in = Shift(channel=in_channels, stride=1, init_scale=1)
        self.shift_out = Shift(channel=out_channels, stride=stride, init_scale=1)

        self.temporal_linear = nn.Conv2d(in_channels, out_channels, 1)
        nn.init.kaiming_normal_(self.temporal_linear.weight, mode='fan_out')

    def forward(self, x):
        x = self.bn(x)
        # shift1
        x = self.shift_in(x)
        x = self.temporal_linear(x)
        x = self.relu(x)
        # shift2 (applies the temporal stride)
        x = self.shift_out(x)
        x = self.bn2(x)
        return x


class Shift_gcn(nn.Module):
    """Spatial shift graph convolution over the skeleton joints.

    Implements the shift operation with precomputed channel-permutation index
    tensors (shift_in / shift_out) applied via torch.index_select, a learnable
    per-joint feature mask, and a single shared linear projection.
    """

    def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3):
        super(Shift_gcn, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Generalization: derive the joint count from the adjacency tensor
        # (shape (..., V, V)) instead of hard-coding 25; falls back to 25 for
        # backward compatibility with array-less graph objects.
        num_point = A.shape[-1] if hasattr(A, 'shape') else 25
        self.num_point = num_point

        if in_channels != out_channels:
            self.down = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.down = lambda x: x

        # Fix: parameters are no longer pinned to device='cuda' at construction
        # time (that broke CPU-only instantiation); .cuda()/.to() moves them as
        # usual, so GPU training behavior is unchanged.
        self.Linear_weight = nn.Parameter(torch.zeros(in_channels, out_channels))
        nn.init.normal_(self.Linear_weight, 0, math.sqrt(1.0 / out_channels))

        self.Linear_bias = nn.Parameter(torch.zeros(1, 1, out_channels))
        nn.init.constant_(self.Linear_bias, 0)

        # Mask starts at 0 so tanh(0)+1 == 1, i.e. an identity mask at init.
        self.Feature_Mask = nn.Parameter(torch.ones(1, num_point, in_channels))
        nn.init.constant_(self.Feature_Mask, 0)

        self.bn = nn.BatchNorm1d(num_point * out_channels)
        self.relu = nn.ReLU()

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)

        # Fix: np.int was removed in NumPy 1.24; int64 gives a LongTensor,
        # which index_select requires.
        index_array = np.empty(num_point * in_channels, dtype=np.int64)
        for i in range(num_point):
            for j in range(in_channels):
                index_array[i * in_channels + j] = \
                    (i * in_channels + j + j * in_channels) % (in_channels * num_point)
        self.shift_in = nn.Parameter(torch.from_numpy(index_array), requires_grad=False)

        index_array = np.empty(num_point * out_channels, dtype=np.int64)
        for i in range(num_point):
            for j in range(out_channels):
                index_array[i * out_channels + j] = \
                    (i * out_channels + j - j * out_channels) % (out_channels * num_point)
        self.shift_out = nn.Parameter(torch.from_numpy(index_array), requires_grad=False)

    def forward(self, x0):
        """x0: (N, C, T, V) -> (N, out_channels, T, V)."""
        n, c, t, v = x0.size()
        x = x0.permute(0, 2, 3, 1).contiguous()  # (N, T, V, C)

        # shift1: permute channels across joints, then apply the soft mask
        x = x.view(n * t, v * c)
        x = torch.index_select(x, 1, self.shift_in)
        x = x.view(n * t, v, c)
        x = x * (torch.tanh(self.Feature_Mask) + 1)

        # shared linear projection over channels: (nt, v, c) @ (c, d) -> (nt, v, d)
        x = torch.einsum('nwc,cd->nwd', (x, self.Linear_weight)).contiguous()
        x = x + self.Linear_bias

        # shift2: inverse-style permutation, then joint-wise BatchNorm
        x = x.view(n * t, -1)
        x = torch.index_select(x, 1, self.shift_out)
        x = self.bn(x)
        x = x.view(n, t, v, self.out_channels).permute(0, 3, 1, 2)  # (N, C', T, V)

        x = x + self.down(x0)  # residual (1x1-projected when channels differ)
        x = self.relu(x)
        return x


class TCN_GCN_unit(nn.Module):
    """One Shift-GCN layer: spatial Shift_gcn followed by temporal Shift_tcn,
    with a residual connection around the pair."""

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = Shift_gcn(in_channels, out_channels, A)
        self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU()

        if not residual:
            self.residual = lambda x: 0
        elif (in_channels == out_channels) and (stride == 1):
            self.residual = lambda x: x
        else:
            # channel/stride mismatch: project the residual with a 1x1 tcn
            self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        x = self.tcn1(self.gcn1(x)) + self.residual(x)
        return self.relu(x)


class Model(nn.Module):
    """Full Shift-GCN classifier.

    Input: (N, C, T, V, M) skeleton sequences — batch, channels, frames,
    joints, persons. Output: (N, num_class) logits.
    """

    def __init__(self, num_class=60, num_point=25, num_person=2, graph=None,
                 graph_args=dict(), in_channels=3):
        super(Model, self).__init__()

        if graph is None:
            raise ValueError("Model requires a 'graph' class path, e.g. 'graph.ntu_rgb_d.Graph'")
        else:
            Graph = import_class(graph)
            self.graph = Graph(**graph_args)

        A = self.graph.A
        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)

        # Fix: first layer now respects the in_channels argument instead of a
        # hard-coded 3 (default is still 3, so existing configs are unchanged).
        self.l1 = TCN_GCN_unit(in_channels, 64, A, residual=False)
        self.l2 = TCN_GCN_unit(64, 64, A)
        self.l3 = TCN_GCN_unit(64, 64, A)
        self.l4 = TCN_GCN_unit(64, 64, A)
        self.l5 = TCN_GCN_unit(64, 128, A, stride=2)
        self.l6 = TCN_GCN_unit(128, 128, A)
        self.l7 = TCN_GCN_unit(128, 128, A)
        self.l8 = TCN_GCN_unit(128, 256, A, stride=2)
        self.l9 = TCN_GCN_unit(256, 256, A)
        self.l10 = TCN_GCN_unit(256, 256, A)

        self.fc = nn.Linear(256, num_class)
        nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
        bn_init(self.data_bn, 1)

    def forward(self, x):
        N, C, T, V, M = x.size()

        # Fold persons/joints/channels together for the input BatchNorm,
        # then unfold persons into the batch dimension.
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        x = self.data_bn(x)
        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)

        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = self.l5(x)
        x = self.l6(x)
        x = self.l7(x)
        x = self.l8(x)
        x = self.l9(x)
        x = self.l10(x)

        # (N*M, C', T', V): global average pool over time+joints, mean over persons
        c_new = x.size(1)
        x = x.view(N, M, c_new, -1)
        x = x.mean(3).mean(1)

        return self.fc(x)