slz1 committed on
Commit
62efd8f
·
verified ·
1 Parent(s): 26aa799

Add files using upload-large-folder tool

Browse files
Files changed (50) hide show
  1. condition/example/c2i/depth/48901.png +3 -0
  2. condition/example/t2i/multi_resolution/bird.jpg +3 -0
  3. condition/example/t2i/multi_resolution/car.jpg +3 -0
  4. condition/example/t2i/multigen/bird.jpg +3 -0
  5. condition/example/t2i/multigen/draw.png +3 -0
  6. condition/example/t2i/multigen/eye.png +3 -0
  7. condition/example/t2i/multigen/girl.jpg +3 -0
  8. condition/example/t2i/multigen/house.jpg +3 -0
  9. condition/example/t2i/multigen/landscape.jpg +3 -0
  10. condition/example/t2i/multigen/sofa.png +3 -0
  11. data/Captioned_ADE20K/data/train-00002-of-00023.parquet +3 -0
  12. data/Captioned_ADE20K/data/train-00004-of-00023.parquet +3 -0
  13. data/Captioned_ADE20K/data/train-00005-of-00023.parquet +3 -0
  14. data/Captioned_ADE20K/data/train-00006-of-00023.parquet +3 -0
  15. data/Captioned_ADE20K/data/train-00008-of-00023.parquet +3 -0
  16. data/Captioned_ADE20K/data/train-00013-of-00023.parquet +3 -0
  17. language/__pycache__/__init__.cpython-310.pyc +0 -0
  18. scripts/autoregressive/extract_file_ade.sh +11 -0
  19. scripts/autoregressive/extract_file_cocostuff.sh +7 -0
  20. scripts/autoregressive/extract_file_imagenet.sh +7 -0
  21. scripts/autoregressive/extract_file_multigen.sh +10 -0
  22. scripts/autoregressive/sample_c2i.sh +9 -0
  23. scripts/autoregressive/sample_t2i_coco.sh +11 -0
  24. scripts/autoregressive/sample_t2i_parti.sh +11 -0
  25. scripts/autoregressive/test_c2i.sh +8 -0
  26. scripts/autoregressive/test_t2i.sh +8 -0
  27. scripts/autoregressive/train_c2i.sh +7 -0
  28. scripts/autoregressive/train_c2i_canny.sh +8 -0
  29. scripts/autoregressive/train_c2i_depth.sh +8 -0
  30. scripts/autoregressive/train_c2i_fsdp.sh +7 -0
  31. scripts/autoregressive/train_t2i_canny.sh +14 -0
  32. scripts/autoregressive/train_t2i_depth.sh +15 -0
  33. scripts/autoregressive/train_t2i_depth_multiscale.sh +17 -0
  34. scripts/autoregressive/train_t2i_hed.sh +16 -0
  35. scripts/autoregressive/train_t2i_hed_multiscale.sh +17 -0
  36. scripts/autoregressive/train_t2i_lineart.sh +15 -0
  37. scripts/autoregressive/train_t2i_lineart_multiscale.sh +17 -0
  38. scripts/autoregressive/train_t2i_seg.sh +15 -0
  39. scripts/autoregressive/train_t2i_seg_multiscale.sh +17 -0
  40. scripts/autoregressive/train_t2i_stage1.sh +13 -0
  41. scripts/autoregressive/train_t2i_stage2.sh +14 -0
  42. scripts/language/extract_flan_t5_feat_laion_coco_stage1.sh +11 -0
  43. scripts/language/extract_flan_t5_feat_stage2.sh +10 -0
  44. scripts/language/extract_flan_t5_feat_trunc_stage2.sh +11 -0
  45. scripts/tokenizer/reconstruction_consistency_decoder.sh +8 -0
  46. scripts/tokenizer/reconstruction_vae.sh +8 -0
  47. scripts/tokenizer/reconstruction_vq.sh +8 -0
  48. scripts/tokenizer/reconstruction_vqgan.sh +8 -0
  49. scripts/tokenizer/train_vq.sh +7 -0
  50. tokenizer/consistencydecoder/cd_demo.py +57 -0
condition/example/c2i/depth/48901.png ADDED

Git LFS Details

  • SHA256: 533dc5ee8d30943d9315824f8302492d688a6b8d2076944bc733b4aa20c5fb0b
  • Pointer size: 129 Bytes
  • Size of remote file: 5.94 kB
condition/example/t2i/multi_resolution/bird.jpg ADDED

Git LFS Details

  • SHA256: 9dc8d56bb056b8061147bf5cf5eadd43743792b5688657baa5c01e729280d08b
  • Pointer size: 131 Bytes
  • Size of remote file: 102 kB
condition/example/t2i/multi_resolution/car.jpg ADDED

Git LFS Details

  • SHA256: 8f9d9b5feb6a5e206b11195a1e034351a311ba4ae0b46227b872c713ffc1c36c
  • Pointer size: 130 Bytes
  • Size of remote file: 49.3 kB
condition/example/t2i/multigen/bird.jpg ADDED

Git LFS Details

  • SHA256: f49df68f93ef71f0a64eef3f1bcbaa28b79d146c9892e04b22b8ac28d05852f8
  • Pointer size: 130 Bytes
  • Size of remote file: 23.6 kB
condition/example/t2i/multigen/draw.png ADDED

Git LFS Details

  • SHA256: 7cdf134009dcbc0e50970b83b04f27b2e209ffc1319a09bd930a5e697657ca18
  • Pointer size: 131 Bytes
  • Size of remote file: 579 kB
condition/example/t2i/multigen/eye.png ADDED

Git LFS Details

  • SHA256: a20a11bc5dc6f4bb5f0b7ce165a61e3d3f2bfde34fb83507119db16e3a4ac383
  • Pointer size: 131 Bytes
  • Size of remote file: 426 kB
condition/example/t2i/multigen/girl.jpg ADDED

Git LFS Details

  • SHA256: 2499a4e5b0e762d7d8b544c38abbdda8fe6ff2561ba2d35cb90f17bf2e1365a1
  • Pointer size: 130 Bytes
  • Size of remote file: 30 kB
condition/example/t2i/multigen/house.jpg ADDED

Git LFS Details

  • SHA256: 53c4513c0e00fdbda20340be7083ddcbe0ab3041f5de3830bf332156a9073904
  • Pointer size: 130 Bytes
  • Size of remote file: 56.9 kB
condition/example/t2i/multigen/landscape.jpg ADDED

Git LFS Details

  • SHA256: 9835b2dbf4875238f766e2e131ef41f4a745c3bd70f23ed52e706c04a66f3e49
  • Pointer size: 130 Bytes
  • Size of remote file: 69.7 kB
condition/example/t2i/multigen/sofa.png ADDED

Git LFS Details

  • SHA256: bf99f782173809eaf945eebf97eb37c67e497a1f61b776592a7df961a199e7cb
  • Pointer size: 131 Bytes
  • Size of remote file: 429 kB
data/Captioned_ADE20K/data/train-00002-of-00023.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe7f700386860048a404ce32a7a363997574c1c8c89d74653f907577f313724e
3
+ size 266022402
data/Captioned_ADE20K/data/train-00004-of-00023.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:638e4a4417fbd5d0cc890b3ee7181ecd4b529756dcb96f002c5f17cacef3abe3
3
+ size 250129375
data/Captioned_ADE20K/data/train-00005-of-00023.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0952be30f4a381e78eb668d6fbe7bca3b935cda1f9e0db9a990808fa11dc22c9
3
+ size 244873040
data/Captioned_ADE20K/data/train-00006-of-00023.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ba3b1a46e4a860279418caffaa67c7fcea30a2b580b0b7222427b86ae8a546c
3
+ size 249662709
data/Captioned_ADE20K/data/train-00008-of-00023.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e3eecc3fdf180df896458bfe8f51ba1b39f90d2272e9948cc6e0e2e426f2f20
3
+ size 286389435
data/Captioned_ADE20K/data/train-00013-of-00023.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d99acc7159ca18248c31627673c88589921b357c9c0a7bd9b367cb57960c11a7
3
+ size 260730749
language/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (159 Bytes). View file
 
scripts/autoregressive/extract_file_ade.sh ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ # torchrun \
5
+ # --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ # --master_port=12336 \
7
+ # autoregressive/train/extract_file_ade.py "$@"
8
+ torchrun \
9
+ --nnodes=1 --nproc_per_node=2 --node_rank=0 \
10
+ --master_port=12336 \
11
+ autoregressive/train/extract_file_ade.py "$@"
scripts/autoregressive/extract_file_cocostuff.sh ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_port=12336 \
7
+ autoregressive/train/extract_file_cocostuff.py "$@"
scripts/autoregressive/extract_file_imagenet.sh ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_port=12336 \
7
+ autoregressive/train/extract_file_imagenet.py "$@"
scripts/autoregressive/extract_file_multigen.sh ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+
3
+ # sleep 21600
4
+
5
+ set -x
6
+
7
+ torchrun \
8
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
9
+ --master_port=12336 \
10
+ autoregressive/train/extract_file_multigen.py "$@"
scripts/autoregressive/sample_c2i.sh ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=4 --node_rank=0 \
6
+ --master_port=12346 \
7
+ autoregressive/sample/sample_c2i_ddp.py \
8
+ --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt \
9
+ "$@"
scripts/autoregressive/sample_t2i_coco.sh ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_port=12346 \
7
+ autoregressive/sample/sample_t2i_ddp.py \
8
+ --prompt-csv evaluations/t2i/coco_captions.csv \
9
+ --sample-dir samples_coco \
10
+ --vq-ckpt ./pretrained_models/vq_ds16_t2i.pt \
11
+ "$@"
scripts/autoregressive/sample_t2i_parti.sh ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_port=12347 \
7
+ autoregressive/sample/sample_t2i_ddp.py \
8
+ --prompt-csv evaluations/t2i/PartiPrompts.tsv \
9
+ --sample-dir samples_parti \
10
+ --vq-ckpt ./pretrained_models/vq_ds16_t2i.pt \
11
+ "$@"
scripts/autoregressive/test_c2i.sh ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_port=12349 \
7
+ autoregressive/test/test_c2i.py \
8
+ "$@"
scripts/autoregressive/test_t2i.sh ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_port=12349 \
7
+ autoregressive/test/test_t2i.py \
8
+ "$@"
scripts/autoregressive/train_c2i.sh ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=4 --node_rank=0 \
6
+ --master_addr=127.0.0.1 --master_port=12345 \
7
+ autoregressive/train/train_c2i.py "$@"
scripts/autoregressive/train_c2i_canny.sh ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ # sleep 43200
3
+ set -x
4
+
5
+ torchrun \
6
+ --nnodes=1 --nproc_per_node=4 --node_rank=0 \
7
+ --master_addr=127.0.0.1 --master_port=12345 \
8
+ autoregressive/train/train_c2i_canny.py "$@"
scripts/autoregressive/train_c2i_depth.sh ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ # sleep 39600
3
+ set -x
4
+
5
+ torchrun \
6
+ --nnodes=1 --nproc_per_node=4 --node_rank=0 \
7
+ --master_addr=127.0.0.1 --master_port=12345 \
8
+ autoregressive/train/train_c2i_depth.py "$@"
scripts/autoregressive/train_c2i_fsdp.sh ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=$nnodes --nproc_per_node=$nproc_per_node --node_rank=$node_rank \
6
+ --master_addr=$master_addr --master_port=$master_port \
7
+ autoregressive/train/train_c2i_fsdp.py "$@"
scripts/autoregressive/train_t2i_canny.sh ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+ export TOKENIZERS_PARALLELISM=true
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_addr=127.0.0.1 --master_port=12346 \
7
+ autoregressive/train/train_t2i_canny.py \
8
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
9
+ --gpt-ckpt checkpoints/llamagen/t2i_XL_stage2_512.pt \
10
+ --dataset t2i_control \
11
+ --image-size 512 \
12
+ --cloud-save-path output \
13
+ --code-path data/MultiGen20M/train \
14
+ "$@"
scripts/autoregressive/train_t2i_depth.sh ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ # sleep 36000
3
+ set -x
4
+ export TOKENIZERS_PARALLELISM=true
5
+ torchrun \
6
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
7
+ --master_addr=127.0.0.1 --master_port=12346 \
8
+ autoregressive/train/train_t2i_depth.py \
9
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
10
+ --gpt-ckpt checkpoints/llamagen/t2i_XL_stage2_512.pt \
11
+ --dataset t2i_control \
12
+ --image-size 512 \
13
+ --cloud-save-path output \
14
+ --code-path data/MultiGen20M/train \
15
+ "$@"
scripts/autoregressive/train_t2i_depth_multiscale.sh ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ # sleep 36000
3
+ set -x
4
+ export TOKENIZERS_PARALLELISM=true
5
+ torchrun \
6
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
7
+ --master_addr=127.0.0.1 --master_port=12346 \
8
+ autoregressive/train/train_t2i_depth_multiscale.py \
9
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
10
+ --gpt-ckpt checkpoints/llamagen/t2i_XL_stage2_512.pt \
11
+ --dataset t2i_control \
12
+ --image-size 768 \
13
+ --cloud-save-path output \
14
+ --code-path data/MultiGen20M/train \
15
+ --no-compile
16
+ # --adapter-size base \
17
+ "$@"
scripts/autoregressive/train_t2i_hed.sh ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ # sleep 36000
3
+ set -x
4
+ export TOKENIZERS_PARALLELISM=true
5
+ torchrun \
6
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
7
+ --master_addr=127.0.0.1 --master_port=12346 \
8
+ autoregressive/train/train_t2i_hed.py \
9
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
10
+ --gpt-ckpt checkpoints/llamagen/t2i_XL_stage2_512.pt \
11
+ --dataset t2i_control \
12
+ --image-size 512 \
13
+ --cloud-save-path output \
14
+ --code-path data/MultiGen20M/train \
15
+ --adapter-size base \
16
+ "$@"
scripts/autoregressive/train_t2i_hed_multiscale.sh ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ # sleep 36000
3
+ set -x
4
+ export TOKENIZERS_PARALLELISM=true
5
+ torchrun \
6
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
7
+ --master_addr=127.0.0.1 --master_port=12346 \
8
+ autoregressive/train/train_t2i_hed_multiscale.py \
9
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
10
+ --gpt-ckpt checkpoints/llamagen/t2i_XL_stage2_512.pt \
11
+ --dataset t2i_control \
12
+ --image-size 768 \
13
+ --cloud-save-path output \
14
+ --code-path data/MultiGen20M/train \
15
+ --no-compile
16
+ # --adapter-size base \
17
+ "$@"
scripts/autoregressive/train_t2i_lineart.sh ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ # sleep 36000
3
+ set -x
4
+ export TOKENIZERS_PARALLELISM=true
5
+ torchrun \
6
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
7
+ --master_addr=127.0.0.1 --master_port=12346 \
8
+ autoregressive/train/train_t2i_lineart.py \
9
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
10
+ --gpt-ckpt checkpoints/llamagen/t2i_XL_stage2_512.pt \
11
+ --dataset t2i_control \
12
+ --image-size 512 \
13
+ --cloud-save-path output \
14
+ --code-path data/MultiGen20M/train \
15
+ "$@"
scripts/autoregressive/train_t2i_lineart_multiscale.sh ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ # sleep 36000
3
+ set -x
4
+ export TOKENIZERS_PARALLELISM=true
5
+ torchrun \
6
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
7
+ --master_addr=127.0.0.1 --master_port=12346 \
8
+ autoregressive/train/train_t2i_lineart_multiscale.py \
9
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
10
+ --gpt-ckpt checkpoints/llamagen/t2i_XL_stage2_512.pt \
11
+ --dataset t2i_control \
12
+ --image-size 768 \
13
+ --cloud-save-path output \
14
+ --code-path data/MultiGen20M/train \
15
+ --no-compile
16
+ # --adapter-size base \
17
+ "$@"
scripts/autoregressive/train_t2i_seg.sh ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+ export TOKENIZERS_PARALLELISM=true
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_addr=127.0.0.1 --master_port=12346 \
7
+ autoregressive/train/train_t2i_seg.py \
8
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
9
+ --gpt-ckpt checkpoints/llamagen/t2i_XL_stage2_512.pt \
10
+ --dataset t2i_control \
11
+ --image-size 512 \
12
+ --cloud-save-path output \
13
+ --code-path data/Captioned_ADE20K/train \
14
+ --code-path2 data/Captioned_COCOStuff/train \
15
+ "$@"
scripts/autoregressive/train_t2i_seg_multiscale.sh ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+ export TOKENIZERS_PARALLELISM=true
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_addr=127.0.0.1 --master_port=12346 \
7
+ autoregressive/train/train_t2i_seg_multiscale.py \
8
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
9
+ --gpt-ckpt checkpoints/llamagen/t2i_XL_stage2_512.pt \
10
+ --data-path /path/to/high_aesthetic_10M \
11
+ --dataset t2i_control \
12
+ --image-size 512 \
13
+ --cloud-save-path output \
14
+ --code-path data/Captioned_COCOStuff/train \
15
+ --code-path2 data/Captioned_ADE20K/train \
16
+ --no-compile
17
+ "$@"
scripts/autoregressive/train_t2i_stage1.sh ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=$nnodes --nproc_per_node=$nproc_per_node --node_rank=$node_rank \
6
+ --master_addr=$master_addr --master_port=$master_port \
7
+ autoregressive/train/train_t2i.py \
8
+ --vq-ckpt ./pretrained_models/vq_ds16_t2i.pt \
9
+ --data-path /path/to/laion_coco50M \
10
+ --t5-feat-path /path/to/laion_coco50M_flan_t5_xl \
11
+ --dataset t2i \
12
+ --image-size 256 \
13
+ "$@"
scripts/autoregressive/train_t2i_stage2.sh ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=$nnodes --nproc_per_node=$nproc_per_node --node_rank=$node_rank \
6
+ --master_addr=$master_addr --master_port=$master_port \
7
+ autoregressive/train/train_t2i.py \
8
+ --vq-ckpt ./pretrained_models/vq_ds16_t2i.pt \
9
+ --data-path /path/to/high_aesthetic_10M \
10
+ --t5-feat-path /path/to/high_aesthetic_10M_flan_t5_xl \
11
+ --short-t5-feat-path /path/to/high_aesthetic_10M_trunc_flan_t5_xl \
12
+ --dataset t2i \
13
+ --image-size 512 \
14
+ "$@"
scripts/language/extract_flan_t5_feat_laion_coco_stage1.sh ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_port=12337 \
7
+ language/extract_t5_feature.py \
8
+ --data-path /path/to/laion_coco50M \
9
+ --t5-path /path/to/laion_coco50M_flan_t5_xl \
10
+ --caption-key blip \
11
+ "$@"
scripts/language/extract_flan_t5_feat_stage2.sh ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_port=12337 \
7
+ language/extract_t5_feature.py \
8
+ --data-path /path/to/high_aesthetic_10M \
9
+ --t5-path /path/to/high_aesthetic_10M_flan_t5_xl \
10
+ "$@"
scripts/language/extract_flan_t5_feat_trunc_stage2.sh ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_port=12337 \
7
+ language/extract_t5_feature.py \
8
+ --data-path /path/to/high_aesthetic_10M \
9
+ --t5-path /path/to/high_aesthetic_10M_trunc_flan_t5_xl \
10
+ --trunc-caption \
11
+ "$@"
scripts/tokenizer/reconstruction_consistency_decoder.sh ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_port=12344 \
7
+ tokenizer/consistencydecoder/reconstruction_cd_ddp.py \
8
+ "$@"
scripts/tokenizer/reconstruction_vae.sh ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_port=12344 \
7
+ tokenizer/vae/reconstruction_vae_ddp.py \
8
+ "$@"
scripts/tokenizer/reconstruction_vq.sh ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=1 --node_rank=0 \
6
+ --master_port=12344 \
7
+ tokenizer/tokenizer_image/reconstruction_vq_ddp.py \
8
+ "$@"
scripts/tokenizer/reconstruction_vqgan.sh ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=8 --node_rank=0 \
6
+ --master_port=12344 \
7
+ tokenizer/vqgan/reconstruction_vqgan_ddp.py \
8
+ "$@"
scripts/tokenizer/train_vq.sh ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # !/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=4 --node_rank=0 \
6
+ --master_addr=127.0.0.1 --master_port=12345 \
7
+ tokenizer/tokenizer_image/vq_train.py "$@"
tokenizer/consistencydecoder/cd_demo.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import torch
3
+ import torch.nn.functional as F
4
+ import numpy as np
5
+ from PIL import Image
6
+ from diffusers import ConsistencyDecoderVAE
7
+
8
+
9
+ def main(args):
10
+ # Setup PyTorch:
11
+ torch.manual_seed(args.seed)
12
+ torch.set_grad_enabled(False)
13
+ device = "cuda" if torch.cuda.is_available() else "cpu"
14
+
15
+ # create and load model
16
+ vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16).to(device)
17
+
18
+ # load image
19
+ img_path = args.image_path
20
+ out_path = args.image_path.replace('.jpg', '_cd.jpg').replace('.jpeg', '_cd.jpeg').replace('.png', '_cd.png')
21
+ input_size = args.image_size
22
+ img = Image.open(img_path).convert("RGB")
23
+
24
+ # preprocess
25
+ size_org = img.size
26
+ img = img.resize((input_size, input_size))
27
+ img = np.array(img) / 255.
28
+ x = 2.0 * img - 1.0 # x value is between [-1, 1]
29
+ x = torch.tensor(x)
30
+ x = x.unsqueeze(dim=0)
31
+ x = torch.einsum('nhwc->nchw', x)
32
+ x_input = x.half().to(device)
33
+
34
+ # inference
35
+ with torch.no_grad():
36
+ # Map input images to latent space + normalize latents:
37
+ latent = vae.encode(x_input).latent_dist.sample().mul_(0.18215)
38
+ # reconstruct:
39
+ output = vae.decode(latent / 0.18215).sample # output value is between [-1, 1]
40
+
41
+ # postprocess
42
+ output = F.interpolate(output, size=[size_org[1], size_org[0]], mode='bilinear').permute(0, 2, 3, 1)[0]
43
+ sample = torch.clamp(127.5 * output + 128.0, 0, 255).to("cpu", dtype=torch.uint8).numpy()
44
+
45
+ # save
46
+ Image.fromarray(sample).save(out_path)
47
+ print("Reconstructed image is saved to {}".format(out_path))
48
+
49
+
50
+
51
+ if __name__ == "__main__":
52
+ parser = argparse.ArgumentParser()
53
+ parser.add_argument("--image-path", type=str, default="assets/example.jpg")
54
+ parser.add_argument("--image-size", type=int, choices=[256, 512, 1024], default=512)
55
+ parser.add_argument("--seed", type=int, default=0)
56
+ args = parser.parse_args()
57
+ main(args)