yuanxuewei committed
Commit 1a2f2af · verified · 1 Parent(s): e8d18ef

Add files using upload-large-folder tool

checkpoints/steps_30000_pytorch_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a78d7dc2ac635e77afdf08c54ce3dd738364e47a087b6bc66d5c31c0c5ffc43
+ size 8935435793
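
The .pt file itself is a Git LFS pointer: only the sha256 oid and the size (~8.9 GB) are committed, and the actual weights are resolved at checkout. A minimal sketch of fetching and loading the checkpoint, assuming the usual huggingface_hub workflow; the repo id is a placeholder and the state-dict layout is an assumption, not confirmed by this diff:

from huggingface_hub import hf_hub_download
import torch

# repo_id is a placeholder -- substitute the Hub repo this commit lives in.
path = hf_hub_download(
    repo_id="<user>/<repo>",
    filename="checkpoints/steps_30000_pytorch_model.pt",
)
state = torch.load(path, map_location="cpu")  # assumed to be a plain state dict
print(type(state))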
config.json ADDED
@@ -0,0 +1,115 @@
+ {
+   "run_id": "0903_libero_spatial_augsteps_0_wo_flash_attention_wo_augsteps_two_view_action_chunk_16_pretrained_vlm",
+   "run_root_dir": "./playground/Checkpoints",
+   "seed": 42,
+   "trackers": [
+     "jsonl",
+     "wandb"
+   ],
+   "wandb_entity": "michaelyu-1101-fudanuniversity",
+   "wandb_project": "Internvla",
+   "is_debug": false,
+   "framework": {
+     "framework_py": "DinoQFormerACT",
+     "qwenvl": {
+       "base_vlm": "/mnt/phwfile/efm_t/zhuyangkun_tmp_need_del/exp/exp_08_09/manip_sys2_qwen25_3b_onevision_molmo_a0all_refsp20/checkpoint-20000",
+       "attn_implementation": "flash_attention_2",
+       "vl_hidden_dim": 2048
+     },
+     "dino": {
+       "dino_backbone": "dinov2_vitl14"
+     },
+     "layer_qformer": {
+       "qformer_end_layer": 37,
+       "qformer_start_layer": 36,
+       "num_query_tokens": 64,
+       "grad_scale": 0.5
+     },
+     "action_model": {
+       "action_model_type": "DiT-B",
+       "action_hidden_dim": 768,
+       "action_dim": 7,
+       "use_ema": false,
+       "future_action_window_size": 7,
+       "past_action_window_size": 0,
+       "repeated_diffusion_steps": 8
+     },
+     "reduce_in_full_precision": true
+   },
+   "datasets": {
+     "vlm_data": {
+       "dataformat": "llava_json",
+       "dataset_use": "asv2_conversation_en,asv2_detailed_description_en,asv2_region_captioning_en,coco_internvl_longcap_en,coco_karpathy_train_567_en,coco_negative_gpt4o_en,coco_poetry_zh,coco_rem_en_zh,cocorem_exist_yorn_en,cocotextv2_en,cocotextv2_gpt4o_en,okvqa_en,refcoco_grounding_aug_en,refcoco_grounding_en,tallyqa_coco_en,toloka_grounding_aug_en,vqav2_en,vsr_en",
+       "eval_dataset": "aokvqa_cauldron_llava_format",
+       "data_flatten": false,
+       "base_interval": 2,
+       "max_pixels": 50176,
+       "min_pixels": 784,
+       "fix_image_size": [
+         224,
+         224
+       ],
+       "model_max_length": 1024,
+       "model_type": "qwen2.5vl",
+       "per_device_batch_size": 4
+     },
+     "vla_data": {
+       "dataset_py": "lerobot_libero",
+       "data_root_dir": "playground/Datasets/LEROBOT_LIBERO_DATA",
+       "data_mix": "libero_spatial",
+       "action_type": "delta_qpos",
+       "CoT_prompt": "Your task is {instruction}. To identify the key objects for your task. Locate their bounding boxes in [x1,y1,x2,y2] format.",
+       "CoT_answer": "bbox",
+       "default_image_resolution": [
+         3,
+         224,
+         224
+       ],
+       "per_device_batch_size": 16,
+       "load_all_data_for_training": true,
+       "obs": [
+         "image_0"
+       ]
+     }
+   },
+   "trainer": {
+     "epochs": 100,
+     "max_train_steps": 100000,
+     "num_warmup_steps": 5000,
+     "save_interval": 10000,
+     "eval_interval": 1000,
+     "learning_rate": {
+       "base": 2.5e-05
+     },
+     "lr_scheduler_type": "cosine_with_min_lr",
+     "scheduler_specific_kwargs": {
+       "min_lr": 1e-06
+     },
+     "freeze_modules": "",
+     "loss_scale": {
+       "vla": 1.0,
+       "vlm": 0.1
+     },
+     "max_grad_norm": 1.0,
+     "warmup_ratio": 0.1,
+     "weight_decay": 0.0,
+     "logging_frequency": 10,
+     "gradient_clipping": 1.0,
+     "gradient_accumulation_steps": 1,
+     "optimizer": {
+       "name": "AdamW",
+       "betas": [
+         0.9,
+         0.95
+       ],
+       "eps": 1e-08,
+       "weight_decay": 1e-08
+     },
+     "is_resume": false,
+     "resume_epoch": null,
+     "resume_step": null,
+     "enable_gradient_checkpointing": true,
+     "enable_mixed_precision_training": true
+   },
+   "output_dir": "./playground/Checkpoints/0903_libero_spatial_augsteps_0_wo_flash_attention_wo_augsteps_two_view_action_chunk_16_pretrained_vlm"
+ }
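
A few numbers implied by this config, as a sketch. It loads config.json and derives the effective VLA batch per optimizer step; num_processes=8 is taken from the launch script later in this commit, and treating the action chunk as future_action_window_size plus the current step is an assumption about how the framework interprets that field:

import json

with open("config.json") as f:
    cfg = json.load(f)

trainer = cfg["trainer"]
vla = cfg["datasets"]["vla_data"]

num_processes = 8  # from run_lerobot_datasets.sh below
global_vla_batch = (
    vla["per_device_batch_size"]              # 16
    * num_processes                           # 8 GPUs
    * trainer["gradient_accumulation_steps"]  # 1
)
chunk_len = cfg["framework"]["action_model"]["future_action_window_size"] + 1
print(global_vla_batch, chunk_len)  # 128 8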
config.yaml ADDED
@@ -0,0 +1,96 @@
+ run_id: 0903_libero_spatial_augsteps_0_wo_flash_attention_wo_augsteps_two_view_action_chunk_16_pretrained_vlm
+ run_root_dir: ./playground/Checkpoints
+ seed: 42
+ trackers:
+ - jsonl
+ - wandb
+ wandb_entity: michaelyu-1101-fudanuniversity
+ wandb_project: Internvla
+ is_debug: false
+ framework:
+   framework_py: DinoQFormerACT
+   qwenvl:
+     base_vlm: /mnt/phwfile/efm_t/zhuyangkun_tmp_need_del/exp/exp_08_09/manip_sys2_qwen25_3b_onevision_molmo_a0all_refsp20/checkpoint-20000
+     attn_implementation: flash_attention_2
+     vl_hidden_dim: 2048
+   dino:
+     dino_backbone: dinov2_vitl14
+   layer_qformer:
+     qformer_end_layer: 37
+     qformer_start_layer: 36
+     num_query_tokens: 64
+     grad_scale: 0.5
+   action_model:
+     action_model_type: DiT-B
+     action_hidden_dim: 768
+     action_dim: 7
+     use_ema: false
+     future_action_window_size: 7
+     past_action_window_size: 0
+     repeated_diffusion_steps: 8
+   reduce_in_full_precision: true
+ datasets:
+   vlm_data:
+     dataformat: llava_json
+     dataset_use: asv2_conversation_en,asv2_detailed_description_en,asv2_region_captioning_en,coco_internvl_longcap_en,coco_karpathy_train_567_en,coco_negative_gpt4o_en,coco_poetry_zh,coco_rem_en_zh,cocorem_exist_yorn_en,cocotextv2_en,cocotextv2_gpt4o_en,okvqa_en,refcoco_grounding_aug_en,refcoco_grounding_en,tallyqa_coco_en,toloka_grounding_aug_en,vqav2_en,vsr_en
+     eval_dataset: aokvqa_cauldron_llava_format
+     data_flatten: false
+     base_interval: 2
+     max_pixels: 50176
+     min_pixels: 784
+     fix_image_size:
+     - 224
+     - 224
+     model_max_length: 1024
+     model_type: qwen2.5vl
+     per_device_batch_size: 4
+   vla_data:
+     dataset_py: lerobot_libero
+     data_root_dir: playground/Datasets/LEROBOT_LIBERO_DATA
+     data_mix: libero_spatial
+     action_type: delta_qpos
+     CoT_prompt: Your task is {instruction}. To identify the key objects for your task.
+       Locate their bounding boxes in [x1,y1,x2,y2] format.
+     CoT_answer: bbox
+     default_image_resolution:
+     - 3
+     - 224
+     - 224
+     per_device_batch_size: 16
+     load_all_data_for_training: true
+     obs:
+     - image_0
+ trainer:
+   epochs: 100
+   max_train_steps: 100000
+   num_warmup_steps: 5000
+   save_interval: 10000
+   eval_interval: 1000
+   learning_rate:
+     base: 2.5e-05
+   lr_scheduler_type: cosine_with_min_lr
+   scheduler_specific_kwargs:
+     min_lr: 1.0e-06
+   freeze_modules: ''
+   loss_scale:
+     vla: 1.0
+     vlm: 0.1
+   max_grad_norm: 1.0
+   warmup_ratio: 0.1
+   weight_decay: 0.0
+   logging_frequency: 10
+   gradient_clipping: 1.0
+   gradient_accumulation_steps: 1
+   optimizer:
+     name: AdamW
+     betas:
+     - 0.9
+     - 0.95
+     eps: 1.0e-08
+     weight_decay: 1.0e-08
+   is_resume: false
+   resume_epoch: null
+   resume_step: null
+   enable_gradient_checkpointing: true
+   enable_mixed_precision_training: true
+ output_dir: ./playground/Checkpoints/0903_libero_spatial_augsteps_0_wo_flash_attention_wo_augsteps_two_view_action_chunk_16_pretrained_vlm
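
config.yaml is the same configuration serialized as YAML. A quick consistency check, assuming both files sit in the working directory and PyYAML is available; scalar spellings such as 1e-06 vs 1.0e-06 parse to the same float, so the two trees should compare equal:

import json
import yaml  # PyYAML

with open("config.json") as f:
    as_json = json.load(f)
with open("config.yaml") as f:
    as_yaml = yaml.safe_load(f)

# Would fail only if the two serializations of the run config drifted apart.
assert as_json == as_yaml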
dataset_statistics.json ADDED
@@ -0,0 +1,133 @@
+ {
+   "franka": {
+     "action": {
+       "mean": [
+         0.15312479436397552,
+         0.13707277178764343,
+         -0.15526802837848663,
+         -0.005176450591534376,
+         -0.01120874285697937,
+         -0.020194264128804207,
+         0.4578818082809448
+       ],
+       "std": [
+         0.41272708773612976,
+         0.34724321961402893,
+         0.50869220495224,
+         0.037266165018081665,
+         0.07244449853897095,
+         0.05762382969260216,
+         0.49827873706817627
+       ],
+       "max": [
+         0.9375,
+         0.9375,
+         0.9375,
+         0.1971428543329239,
+         0.33642858266830444,
+         0.375,
+         1.0
+       ],
+       "min": [
+         -0.9375,
+         -0.9375,
+         -0.9375,
+         -0.1875,
+         -0.3675000071525574,
+         -0.36000001430511475,
+         0.0
+       ],
+       "q01": [
+         -0.7454732114076613,
+         -0.6616071462631226,
+         -0.9375,
+         -0.1071428582072258,
+         -0.20678570866584778,
+         -0.1842857152223587,
+         0.0
+       ],
+       "q99": [
+         0.9375,
+         0.8758928775787354,
+         0.9321428537368774,
+         0.1039285734295845,
+         0.17678570747375488,
+         0.14571428298950195,
+         1.0
+       ],
+       "mask": [
+         true,
+         true,
+         true,
+         true,
+         true,
+         true,
+         false
+       ]
+     },
+     "state": {
+       "mean": [
+         -0.024462558329105377,
+         0.106529600918293,
+         1.0580483675003052,
+         3.0628468990325928,
+         -0.10464039444923401,
+         0.08307311683893204,
+         0.01995457336306572,
+         -0.020162804052233696
+       ],
+       "std": [
+         0.1101478561758995,
+         0.13784688711166382,
+         0.10442823916673634,
+         0.10451053828000996,
+         0.4112098217010498,
+         0.2176690548658371,
+         0.017260896041989326,
+         0.0171116404235363
+       ],
+       "max": [
+         0.1759040206670761,
+         0.3904820382595062,
+         1.3290715217590332,
+         3.4566118717193604,
+         1.2268599271774292,
+         1.0429412126541138,
+         0.041053611785173416,
+         0.000775813648942858
+       ],
+       "min": [
+         -0.3095473051071167,
+         -0.29250794649124146,
+         0.9095591306686401,
+         2.497488260269165,
+         -1.8006486892700195,
+         -0.7207611203193665,
+         -0.0004703797458205372,
+         -0.041536275297403336
+       ],
+       "q01": [
+         -0.2727657300233841,
+         -0.23721413239836692,
+         0.9160063165426254,
+         2.77949666261673,
+         -1.3187511622905732,
+         -0.41989982962608335,
+         0.001503719249740243,
+         -0.03989770736545324
+       ],
+       "q99": [
+         0.13529365032911292,
+         0.3629165390133857,
+         1.2862326657772063,
+         3.2829698753356933,
+         0.9332760351896285,
+         0.6325724506378171,
+         0.039933966137468815,
+         -0.001671919699292631
+       ]
+     },
+     "num_transitions": 52970,
+     "num_trajectories": 432
+   }
+ }
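
These per-dimension statistics are the kind a VLA dataloader consumes to normalize actions; the mask marks which dimensions to normalize (the gripper dimension is excluded). A sketch of the common q01/q99 to [-1, 1] scheme; whether this repo's lerobot_libero loader uses exactly this mapping is an assumption:

import json

import numpy as np

stats = json.load(open("dataset_statistics.json"))["franka"]["action"]
q01, q99 = np.array(stats["q01"]), np.array(stats["q99"])
mask = np.array(stats["mask"])  # False on the gripper dim: left unnormalized

def normalize(action: np.ndarray) -> np.ndarray:
    """Map each masked dimension from [q01, q99] to [-1, 1], clipping outliers."""
    scaled = 2.0 * (action - q01) / (q99 - q01 + 1e-8) - 1.0
    return np.where(mask, np.clip(scaled, -1.0, 1.0), action)

print(normalize(np.array(stats["mean"])))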
run_lerobot_datasets.sh ADDED
@@ -0,0 +1,64 @@
+ export HF_HOME=/mnt/petrelfs/share/yejinhui/Models/huggingface_cache
+
+ export NCCL_SOCKET_IFNAME=bond0
+ export NCCL_IB_HCA=mlx5_2,mlx5_3
+
+ # communication settings for the checks performed when saving checkpoints
+ export NCCL_BLOCKING_WAIT=1
+ export NCCL_ASYNC_ERROR_HANDLING=1
+ export NCCL_TIMEOUT=1000 # timeout in seconds
+
+ cd /mnt/petrelfs/yujunqiu/code/vla-baseline/llavavla-00hf1
+
+ # MODEL_PATH=/mnt/petrelfs/yejinhui/Projects/llavavla/playground/Pretrained_models/Qwen2.5-VL-3B-Instruct # must be a local path, since evaluation may run elsewhere
+ # data_root_dir=./playground/Datasets/OXE_LEROBOT_DATASET
+ run_root_dir=./playground/Checkpoints
+ task_name=libero_spatial
+ run_id=0903_${task_name}_augsteps_0_wo_flash_attention_wo_augsteps_two_view_action_chunk_16_pretrained_vlm
+
+
+ export WANDB_MODE=disabled
+
+ output_dir=${run_root_dir}/${run_id}
+ mkdir -p ${output_dir}
+ # copy this script to the output dir
+ cp $0 ${output_dir}/
+
+ # --pretrained_checkpoint ${MODEL_PATH} \
+ # export CUDA_VISIBLE_DEVICES=4,5,6,7
+
+ # --datasets.vla_data.data_mix libero_goal \
+ # --framework.framework_py qwenpi \
+
+ DEBUG=False
+ # DEBUG=True
+
+ if [ "$DEBUG" = True ]; then
+     num_processes=1
+     run_id=debug
+ else
+     num_processes=8
+ fi
+
+
+ accelerate launch \
+     --config_file scripts/run_scripts/deepspeed_zero2.yaml \
+     --num_processes ${num_processes} \
+     llavavla/training/train_qwenvla.py \
+     --config_yaml ./llavavla/config/lerobot_data/qwenvla_cotrain_libero.yaml \
+     --datasets.vla_data.per_device_batch_size 16 \
+     --datasets.vla_data.data_mix ${task_name} \
+     --framework.action_model.future_action_window_size 7 \
+     --trainer.max_train_steps 100_000 \
+     --trainer.save_interval 10_000 \
+     --run_root_dir ${run_root_dir} \
+     --run_id ${run_id} \
+     --wandb_project Internvla \
+     --wandb_entity michaelyu-1101-fudanuniversity \
+     --is_debug ${DEBUG} \
+     --framework.qwenvl.base_vlm /mnt/phwfile/efm_t/zhuyangkun_tmp_need_del/exp/exp_08_09/manip_sys2_qwen25_3b_onevision_molmo_a0all_refsp20/checkpoint-20000
+
+ # --framework.qwenvl.base_vlm ${MODEL_PATH} \
+ # --data_root_dir ${data_root_dir} \
+
+ # --is_debug True
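
The dotted flags (--datasets.vla_data.data_mix and friends) override nested keys in the YAML passed via --config_yaml. How train_qwenvla.py actually parses them is not shown in this diff; a sketch of the usual dotted-key override pattern, with apply_override as a hypothetical helper:

from typing import Any

def apply_override(cfg: dict, dotted_key: str, value: Any) -> None:
    """Walk nested dicts along 'a.b.c' and set the leaf value."""
    *parents, leaf = dotted_key.split(".")
    node = cfg
    for key in parents:
        node = node.setdefault(key, {})
    node[leaf] = value

cfg = {"datasets": {"vla_data": {"data_mix": "libero_goal"}}}
apply_override(cfg, "datasets.vla_data.data_mix", "libero_spatial")
assert cfg["datasets"]["vla_data"]["data_mix"] == "libero_spatial"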
summary.jsonl ADDED
@@ -0,0 +1,3 @@
+ {"steps": 10000}
+ {"steps": 20000}
+ {"steps": 30000}
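
Each line records one saved checkpoint, so the last entry names the newest step. A tiny sketch of reading it:

import json

with open("summary.jsonl") as f:
    steps = [json.loads(line)["steps"] for line in f if line.strip()]
print(max(steps))  # 30000 -- matches steps_30000_pytorch_model.pt above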