firework8 committed on
Commit fd62532 · verified · 1 Parent(s): 815ebee

Upload 41 files

Files changed (41)
  1. ntu60_xsub/b_2/20231207_201701.log +0 -0
  2. ntu60_xsub/b_2/20231207_201701.log.json +0 -0
  3. ntu60_xsub/b_2/b_2.py +95 -0
  4. ntu60_xsub/b_2/best_pred.pkl +3 -0
  5. ntu60_xsub/b_2/best_top1_acc_epoch_150.pth +3 -0
  6. ntu60_xsub/bm/20231223_130330.log +0 -0
  7. ntu60_xsub/bm/20231223_130330.log.json +0 -0
  8. ntu60_xsub/bm/best_pred.pkl +3 -0
  9. ntu60_xsub/bm/best_top1_acc_epoch_150.pth +3 -0
  10. ntu60_xsub/bm/bm.py +95 -0
  11. ntu60_xsub/j_1/20231220_213426.log +0 -0
  12. ntu60_xsub/j_1/20231220_213426.log.json +0 -0
  13. ntu60_xsub/j_1/best_pred.pkl +3 -0
  14. ntu60_xsub/j_1/best_top1_acc_epoch_149.pth +3 -0
  15. ntu60_xsub/j_1/j_1.py +93 -0
  16. ntu60_xsub/j_2/20231226_092400.log +0 -0
  17. ntu60_xsub/j_2/20231226_092400.log.json +0 -0
  18. ntu60_xsub/j_2/best_pred.pkl +3 -0
  19. ntu60_xsub/j_2/best_top1_acc_epoch_142.pth +3 -0
  20. ntu60_xsub/j_2/j_2.py +93 -0
  21. ntu60_xsub/jm/20231223_130342.log +0 -0
  22. ntu60_xsub/jm/20231223_130342.log.json +0 -0
  23. ntu60_xsub/jm/best_pred.pkl +3 -0
  24. ntu60_xsub/jm/best_top1_acc_epoch_147.pth +3 -0
  25. ntu60_xsub/jm/jm.py +93 -0
  26. ntu60_xsub/k_1/20231226_092345.log +0 -0
  27. ntu60_xsub/k_1/20231226_092345.log.json +0 -0
  28. ntu60_xsub/k_1/best_pred.pkl +3 -0
  29. ntu60_xsub/k_1/best_top1_acc_epoch_150.pth +3 -0
  30. ntu60_xsub/k_1/k_1.py +95 -0
  31. ntu60_xsub/k_2/20231223_130318.log +0 -0
  32. ntu60_xsub/k_2/20231223_130318.log.json +0 -0
  33. ntu60_xsub/k_2/best_pred.pkl +3 -0
  34. ntu60_xsub/k_2/best_top1_acc_epoch_150.pth +3 -0
  35. ntu60_xsub/k_2/k_2.py +95 -0
  36. ntu60_xsub/km/20231223_130410.log +0 -0
  37. ntu60_xsub/km/20231223_130410.log.json +0 -0
  38. ntu60_xsub/km/best_pred.pkl +3 -0
  39. ntu60_xsub/km/best_top1_acc_epoch_147.pth +3 -0
  40. ntu60_xsub/km/km.py +95 -0
  41. ntu60_xsub/ntu60_xsub_ensemble.py +65 -0
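The upload groups, for each input stream (j, b, k and their motion variants jm, bm, km, plus the _1/_2 backbone variants), a training config (*.py), the training logs, the validation score file (best_pred.pkl) and the best top-1 checkpoint (*.pth). A minimal sketch for pulling the folder locally with huggingface_hub; the repo id below is a placeholder, not taken from this page:

from huggingface_hub import snapshot_download

# '<user>/<repo>' is a hypothetical repo id; substitute the actual repository.
local_dir = snapshot_download(repo_id='<user>/<repo>', allow_patterns=['ntu60_xsub/*'])
print(local_dir)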
ntu60_xsub/b_2/20231207_201701.log ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/b_2/20231207_201701.log.json ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/b_2/b_2.py ADDED
@@ -0,0 +1,95 @@
modality = 'b'
graph = 'nturgb+d'
work_dir = './work_dirs/test_prototype/ntu60_xsub/b_2'
model = dict(
    type='RecognizerGCN_7_1_1',
    backbone=dict(
        type='GCN_7_1_1',
        tcn_ms_cfg=[(3, 1), (3, 2), (3, 3), (3, 4), ('max', 3), '1x1'],
        graph_cfg=dict(
            layout='nturgb+d',
            mode='random',
            num_filter=8,
            init_off=0.04,
            init_std=0.02)),
    cls_head=dict(type='SimpleHead_7_4_3', num_classes=60, in_channels=384))
dataset_type = 'PoseDataset'
ann_file = '/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl'
train_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='RandomRot', theta=0.2),
    dict(type='Spatial_Flip', dataset='nturgb+d', p=0.5),
    dict(type='GenSkeFeat', feats=['b']),
    dict(type='UniformSampleDecode', clip_len=100),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
val_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['b']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
test_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['b']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    test_dataloader=dict(videos_per_gpu=1),
    train=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='RandomRot', theta=0.2),
            dict(type='Spatial_Flip', dataset='nturgb+d', p=0.5),
            dict(type='GenSkeFeat', feats=['b']),
            dict(type='UniformSampleDecode', clip_len=100),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_train'),
    val=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['b']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'),
    test=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['b']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'))
optimizer = dict(
    type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0005, nesterov=True)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
total_epochs = 150
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metrics=['top_k_accuracy'])
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
gpu_ids = range(0, 1)
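The *.py files above and below are mmcv-style Python configs (pyskl-flavoured). A minimal sketch for loading and inspecting one, assuming an mmcv 1.x environment as used by such code bases; it only reads the config and does not start training:

from mmcv import Config

cfg = Config.fromfile('ntu60_xsub/b_2/b_2.py')
print(cfg.model.backbone.type)   # 'GCN_7_1_1'
print(cfg.data.train.split)      # 'xsub_train'
print(cfg.total_epochs)          # 150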
ntu60_xsub/b_2/best_pred.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e189bfbc993f4600347b97a7d4f7f14ebc9e3048bd0035b585ddb728d923e872
size 6636301
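These three-line entries are Git LFS pointer stubs; the actual binaries must be fetched (for example with Git LFS) before use. Once fetched, best_pred.pkl can be read with mmcv. The assumption that it stores one class-score array per validation sample follows from how the ensemble script at the end of this commit consumes it:

from mmcv import load

scores = load('ntu60_xsub/b_2/best_pred.pkl')
print(len(scores))      # number of xsub_val samples
print(scores[0].shape)  # expected (60,) per-class scores, if stored as numpy arrays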
ntu60_xsub/b_2/best_top1_acc_epoch_150.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:057c434c0ae26e9045af62905e81531be88b16d3d1758bd629e1acab06137cba
size 33472486
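The *.pth files are the best-top-1 checkpoints. A minimal sketch for peeking inside one with PyTorch; the 'meta'/'state_dict' layout is the usual mmcv checkpoint convention and is an assumption here:

import torch

ckpt = torch.load('ntu60_xsub/b_2/best_top1_acc_epoch_150.pth', map_location='cpu')
print(list(ckpt.keys()))        # typically ['meta', 'state_dict', ...]
print(len(ckpt['state_dict']))  # number of parameter tensors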
ntu60_xsub/bm/20231223_130330.log ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/bm/20231223_130330.log.json ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/bm/best_pred.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:95e56d00a9b50bd241bb59fed4cd1138b5dde74361abb93d631c2d8f8eec554a
size 6638098
ntu60_xsub/bm/best_top1_acc_epoch_150.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2db5a0292c4aca801c35698f8451184939e22d95ab7529ee99d43c119db125c5
size 33472486
ntu60_xsub/bm/bm.py ADDED
@@ -0,0 +1,95 @@
modality = 'bm'
graph = 'nturgb+d'
work_dir = './work_dirs/test_prototype/ntu60_xsub/bm'
model = dict(
    type='RecognizerGCN_7_1_1',
    backbone=dict(
        type='GCN_7_1_1',
        tcn_ms_cfg=[(3, 1), (3, 2), (3, 3), (3, 4), ('max', 3), '1x1'],
        graph_cfg=dict(
            layout='nturgb+d',
            mode='random',
            num_filter=8,
            init_off=0.04,
            init_std=0.02)),
    cls_head=dict(type='SimpleHead_7_4_3', num_classes=60, in_channels=384))
dataset_type = 'PoseDataset'
ann_file = '/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl'
train_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='RandomRot', theta=0.2),
    dict(type='Spatial_Flip', dataset='nturgb+d', p=0.5),
    dict(type='GenSkeFeat', feats=['bm']),
    dict(type='UniformSampleDecode', clip_len=100),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
val_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['bm']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
test_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['bm']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    test_dataloader=dict(videos_per_gpu=1),
    train=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='RandomRot', theta=0.2),
            dict(type='Spatial_Flip', dataset='nturgb+d', p=0.5),
            dict(type='GenSkeFeat', feats=['bm']),
            dict(type='UniformSampleDecode', clip_len=100),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_train'),
    val=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['bm']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'),
    test=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['bm']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'))
optimizer = dict(
    type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0005, nesterov=True)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
total_epochs = 150
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metrics=['top_k_accuracy'])
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
gpu_ids = range(0, 1)
ntu60_xsub/j_1/20231220_213426.log ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/j_1/20231220_213426.log.json ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/j_1/best_pred.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:03499b0dc1823e5c3612736cece778cb1e0d0ee4e4f9b07ce33295ad4f7db597
size 6639657
ntu60_xsub/j_1/best_top1_acc_epoch_149.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fdfb3cebf628c997921518f6ff897d8195a1e1310152c7ebd26613dff623dc5e
size 35315686
ntu60_xsub/j_1/j_1.py ADDED
@@ -0,0 +1,93 @@
modality = 'j'
graph = 'nturgb+d'
work_dir = './work_dirs/test_prototype/ntu60_xsub/j_1'
model = dict(
    type='RecognizerGCN_7_1_1',
    backbone=dict(
        type='GCN_7_1_2',
        tcn_ms_cfg=[(3, 1), (3, 2), (3, 3), (3, 4), ('max', 3), '1x1'],
        graph_cfg=dict(
            layout='nturgb+d',
            mode='random',
            num_filter=8,
            init_off=0.04,
            init_std=0.02)),
    cls_head=dict(type='SimpleHead_7_4_2', num_classes=60, in_channels=384))
dataset_type = 'PoseDataset'
ann_file = '/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl'
train_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='RandomRot', theta=0.2),
    dict(type='GenSkeFeat', feats=['j']),
    dict(type='UniformSampleDecode', clip_len=100),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
val_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['j']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
test_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['j']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    test_dataloader=dict(videos_per_gpu=1),
    train=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='RandomRot', theta=0.2),
            dict(type='GenSkeFeat', feats=['j']),
            dict(type='UniformSampleDecode', clip_len=100),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_train'),
    val=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['j']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'),
    test=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['j']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'))
optimizer = dict(
    type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0005, nesterov=True)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
total_epochs = 150
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metrics=['top_k_accuracy'])
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
gpu_ids = range(0, 1)
ntu60_xsub/j_2/20231226_092400.log ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/j_2/20231226_092400.log.json ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/j_2/best_pred.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:31c61d3aa107d57ebbf2f47a01e4e6a3ea64d5f57da42194d502f399e5d5c443
size 6634538
ntu60_xsub/j_2/best_top1_acc_epoch_142.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:00cb9b188ab8d6a0e66021762e1c3c400f04029b146ce55356bd9cdc9c5b6657
size 35315686
ntu60_xsub/j_2/j_2.py ADDED
@@ -0,0 +1,93 @@
modality = 'j'
graph = 'nturgb+d'
work_dir = './work_dirs/test_prototype/ntu60_xsub/j_2'
model = dict(
    type='RecognizerGCN_7_1_1',
    backbone=dict(
        type='GCN_7_1_2',
        tcn_ms_cfg=[(3, 1), (3, 2), (3, 3), (3, 4), ('max', 3), '1x1'],
        graph_cfg=dict(
            layout='nturgb+d',
            mode='random',
            num_filter=8,
            init_off=0.04,
            init_std=0.02)),
    cls_head=dict(type='SimpleHead_7_4_2', num_classes=60, in_channels=384))
dataset_type = 'PoseDataset'
ann_file = '/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl'
train_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='RandomRot', theta=0.2),
    dict(type='GenSkeFeat', feats=['j']),
    dict(type='UniformSampleDecode', clip_len=100),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
val_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['j']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
test_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['j']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    test_dataloader=dict(videos_per_gpu=1),
    train=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='RandomRot', theta=0.2),
            dict(type='GenSkeFeat', feats=['j']),
            dict(type='UniformSampleDecode', clip_len=100),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_train'),
    val=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['j']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'),
    test=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['j']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'))
optimizer = dict(
    type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0005, nesterov=True)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
total_epochs = 150
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metrics=['top_k_accuracy'])
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
gpu_ids = range(0, 1)
ntu60_xsub/jm/20231223_130342.log ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/jm/20231223_130342.log.json ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/jm/best_pred.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:126dcb53123cdc8dd74798f1266a68f9d827e4793285241917935c16c6e07cca
size 6639033
ntu60_xsub/jm/best_top1_acc_epoch_147.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:01a10133fc05e8f77519bb20279b4652a3a1c5643a59e3c06b2a95a5da326ff3
size 35315686
ntu60_xsub/jm/jm.py ADDED
@@ -0,0 +1,93 @@
modality = 'jm'
graph = 'nturgb+d'
work_dir = './work_dirs/test_prototype/ntu60_xsub/jm'
model = dict(
    type='RecognizerGCN_7_1_1',
    backbone=dict(
        type='GCN_7_1_2',
        tcn_ms_cfg=[(3, 1), (3, 2), (3, 3), (3, 4), ('max', 3), '1x1'],
        graph_cfg=dict(
            layout='nturgb+d',
            mode='random',
            num_filter=8,
            init_off=0.04,
            init_std=0.02)),
    cls_head=dict(type='SimpleHead_7_4_2', num_classes=60, in_channels=384))
dataset_type = 'PoseDataset'
ann_file = '/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl'
train_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='RandomRot', theta=0.2),
    dict(type='GenSkeFeat', feats=['jm']),
    dict(type='UniformSampleDecode', clip_len=100),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
val_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['jm']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
test_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['jm']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    test_dataloader=dict(videos_per_gpu=1),
    train=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='RandomRot', theta=0.2),
            dict(type='GenSkeFeat', feats=['jm']),
            dict(type='UniformSampleDecode', clip_len=100),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_train'),
    val=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['jm']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'),
    test=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['jm']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'))
optimizer = dict(
    type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0005, nesterov=True)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
total_epochs = 150
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metrics=['top_k_accuracy'])
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
gpu_ids = range(0, 1)
ntu60_xsub/k_1/20231226_092345.log ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/k_1/20231226_092345.log.json ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/k_1/best_pred.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5d8866bcbda5640066221bb17ae67dfd76344717952c5e72b03e026671754491
size 6638194
ntu60_xsub/k_1/best_top1_acc_epoch_150.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:acff47d51c01a74d1c612d9881c1c950f676eb15999a4548e1f4a7fc2684aaf4
size 33472486
ntu60_xsub/k_1/k_1.py ADDED
@@ -0,0 +1,95 @@
modality = 'k'
graph = 'nturgb+d'
work_dir = './work_dirs/test_prototype/ntu60_xsub/k_1'
model = dict(
    type='RecognizerGCN_7_1_1',
    backbone=dict(
        type='GCN_7_1_1',
        tcn_ms_cfg=[(3, 1), (3, 2), (3, 3), (3, 4), ('max', 3), '1x1'],
        graph_cfg=dict(
            layout='nturgb+d',
            mode='random',
            num_filter=8,
            init_off=0.04,
            init_std=0.02)),
    cls_head=dict(type='SimpleHead_7_4_1', num_classes=60, in_channels=384))
dataset_type = 'PoseDataset'
ann_file = '/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl'
train_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='RandomRot', theta=0.2),
    dict(type='Spatial_Flip', dataset='nturgb+d', p=0.5),
    dict(type='GenSkeFeat', feats=['k']),
    dict(type='UniformSampleDecode', clip_len=100),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
val_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['k']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
test_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['k']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    test_dataloader=dict(videos_per_gpu=1),
    train=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='RandomRot', theta=0.2),
            dict(type='Spatial_Flip', dataset='nturgb+d', p=0.5),
            dict(type='GenSkeFeat', feats=['k']),
            dict(type='UniformSampleDecode', clip_len=100),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_train'),
    val=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['k']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'),
    test=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['k']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'))
optimizer = dict(
    type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0005, nesterov=True)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
total_epochs = 150
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metrics=['top_k_accuracy'])
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
gpu_ids = range(0, 1)
ntu60_xsub/k_2/20231223_130318.log ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/k_2/20231223_130318.log.json ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/k_2/best_pred.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:357ba533d32664dbff563a10f48fac584b674bad805acfffbb1befc0007aa8c3
size 6639038
ntu60_xsub/k_2/best_top1_acc_epoch_150.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ba8b38496a25924d347aec21e4e7006f6ed19c0e4abc06443ee791da8aebf5fb
size 33472486
ntu60_xsub/k_2/k_2.py ADDED
@@ -0,0 +1,95 @@
modality = 'k'
graph = 'nturgb+d'
work_dir = './work_dirs/test_prototype/ntu60_xsub/k_2'
model = dict(
    type='RecognizerGCN_7_1_1',
    backbone=dict(
        type='GCN_7_1_1',
        tcn_ms_cfg=[(3, 1), (3, 2), (3, 3), (3, 4), ('max', 3), '1x1'],
        graph_cfg=dict(
            layout='nturgb+d',
            mode='random',
            num_filter=8,
            init_off=0.04,
            init_std=0.02)),
    cls_head=dict(type='SimpleHead_7_4_1', num_classes=60, in_channels=384))
dataset_type = 'PoseDataset'
ann_file = '/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl'
train_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='RandomRot', theta=0.2),
    dict(type='Spatial_Flip', dataset='nturgb+d', p=0.5),
    dict(type='GenSkeFeat', feats=['k']),
    dict(type='UniformSampleDecode', clip_len=100),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
val_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['k']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
test_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['k']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    test_dataloader=dict(videos_per_gpu=1),
    train=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='RandomRot', theta=0.2),
            dict(type='Spatial_Flip', dataset='nturgb+d', p=0.5),
            dict(type='GenSkeFeat', feats=['k']),
            dict(type='UniformSampleDecode', clip_len=100),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_train'),
    val=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['k']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'),
    test=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['k']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'))
optimizer = dict(
    type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0005, nesterov=True)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
total_epochs = 150
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metrics=['top_k_accuracy'])
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
gpu_ids = range(0, 1)
ntu60_xsub/km/20231223_130410.log ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/km/20231223_130410.log.json ADDED
The diff for this file is too large to render. See raw diff
 
ntu60_xsub/km/best_pred.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3c69dfb5cee3f4e88b2fba2d54b53a5036d08c730d85e7ece32e380de53fad80
size 6635960
ntu60_xsub/km/best_top1_acc_epoch_147.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e140e9b4e56001408804f205b9e65405d0deb738936f6d56ed8a58ad75971847
size 33472486
ntu60_xsub/km/km.py ADDED
@@ -0,0 +1,95 @@
modality = 'km'
graph = 'nturgb+d'
work_dir = './work_dirs/test_prototype/ntu60_xsub/km'
model = dict(
    type='RecognizerGCN_7_1_1',
    backbone=dict(
        type='GCN_7_1_1',
        tcn_ms_cfg=[(3, 1), (3, 2), (3, 3), (3, 4), ('max', 3), '1x1'],
        graph_cfg=dict(
            layout='nturgb+d',
            mode='random',
            num_filter=8,
            init_off=0.04,
            init_std=0.02)),
    cls_head=dict(type='SimpleHead_7_4_1', num_classes=60, in_channels=384))
dataset_type = 'PoseDataset'
ann_file = '/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl'
train_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='RandomRot', theta=0.2),
    dict(type='Spatial_Flip', dataset='nturgb+d', p=0.5),
    dict(type='GenSkeFeat', feats=['km']),
    dict(type='UniformSampleDecode', clip_len=100),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
val_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['km']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
test_pipeline = [
    dict(type='PreNormalize3D', align_spine=False),
    dict(type='GenSkeFeat', feats=['km']),
    dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
    dict(type='FormatGCNInput'),
    dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['keypoint'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    test_dataloader=dict(videos_per_gpu=1),
    train=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='RandomRot', theta=0.2),
            dict(type='Spatial_Flip', dataset='nturgb+d', p=0.5),
            dict(type='GenSkeFeat', feats=['km']),
            dict(type='UniformSampleDecode', clip_len=100),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_train'),
    val=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['km']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=1),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'),
    test=dict(
        type='PoseDataset',
        ann_file='/data/lhd/pyskl_data/nturgbd/ntu60_3danno.pkl',
        pipeline=[
            dict(type='PreNormalize3D', align_spine=False),
            dict(type='GenSkeFeat', feats=['km']),
            dict(type='UniformSampleDecode', clip_len=100, num_clips=10),
            dict(type='FormatGCNInput'),
            dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
            dict(type='ToTensor', keys=['keypoint'])
        ],
        split='xsub_val'))
optimizer = dict(
    type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0005, nesterov=True)
optimizer_config = dict(grad_clip=None)
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
total_epochs = 150
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metrics=['top_k_accuracy'])
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])
dist_params = dict(backend='nccl')
gpu_ids = range(0, 1)
ntu60_xsub/ntu60_xsub_ensemble.py ADDED
@@ -0,0 +1,65 @@
from mmcv import load
import sys
# Note: please adjust the relative path according to the actual situation.
sys.path.append('../..')
from protogcn.smp import *


j_1 = load('j_1/best_pred.pkl')
b_1 = load('b_1/best_pred.pkl')
k_1 = load('k_1/best_pred.pkl')
j_2 = load('j_2/best_pred.pkl')
b_2 = load('b_2/best_pred.pkl')
k_2 = load('k_2/best_pred.pkl')
jm = load('jm/best_pred.pkl')
bm = load('bm/best_pred.pkl')
km = load('km/best_pred.pkl')
label = load_label('/data/nturgbd/ntu60_3danno.pkl', 'xsub_val')


"""
***************
InfoGCN v0:
j jm b bm k km
2S: 92.96
4S: 93.27
6S: 93.51
***************
"""
print('InfoGCN v0:')
print('j jm b bm k km')
print('2S')
fused = comb([j_1, b_1], [1, 1])
print('Top-1', top1(fused, label))

print('4S')
fused = comb([j_1, b_1, jm, bm], [2, 2, 1, 1])
print('Top-1', top1(fused, label))

print('6S')
fused = comb([j_1, b_1, k_1, jm, bm, km], [2, 2, 2, 1, 1, 1])
print('Top-1', top1(fused, label))


"""
***************
InfoGCN v1:
j j b b k k
2S: 92.96
4S: 93.53
6S: 93.81
***************
"""
print('InfoGCN v1:')
print('j j b b k k')
print('2S')
fused = comb([j_1, b_1], [1, 1])
print('Top-1', top1(fused, label))

print('4S')
fused = comb([j_1, b_1, j_2, b_2], [1, 1, 1, 1])
print('Top-1', top1(fused, label))

print('6S')
fused = comb([j_1, j_2, b_1, b_2, k_1, k_2], [3, 3, 4, 4, 1, 1])
print('Top-1', top1(fused, label))
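The helpers comb, top1 and load_label are imported from protogcn.smp, which is not part of this commit. A hypothetical sketch of what they amount to, written only to make the weighted late-fusion logic above self-explanatory; the real implementations in protogcn.smp may differ:

import numpy as np
from mmcv import load

def comb(score_lists, weights):
    # Weighted sum of per-sample score vectors across streams.
    return [sum(w * np.asarray(s) for w, s in zip(weights, per_sample))
            for per_sample in zip(*score_lists)]

def top1(scores, labels):
    # Top-1 accuracy of the fused scores.
    hits = sum(int(np.argmax(s) == l) for s, l in zip(scores, labels))
    return hits / len(labels)

def load_label(ann_file, split):
    # Ground-truth labels for one split of a pyskl-style annotation pickle.
    anno = load(ann_file)
    names = set(anno['split'][split])
    return [a['label'] for a in anno['annotations'] if a['frame_dir'] in names]

Note that the script also loads b_1/best_pred.pkl, which is not among the 41 files in this upload.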