slz1 committed on
Commit 0f586c0 · verified · 1 Parent(s): 33b03a3

Add files using upload-large-folder tool

Files changed (50)
  1. autoregressive/train/train_t2i.py +268 -0
  2. autoregressive/train/train_t2i_canny.py +283 -0
  3. autoregressive/train/train_t2i_depth.py +286 -0
  4. autoregressive/train/train_t2i_depth_multiscale.py +357 -0
  5. autoregressive/train/train_t2i_hed.py +291 -0
  6. autoregressive/train/train_t2i_hed_multiscale.py +342 -0
  7. autoregressive/train/train_t2i_lineart.py +290 -0
  8. autoregressive/train/train_t2i_lineart_multiscale.py +343 -0
  9. autoregressive/train/train_t2i_seg.py +298 -0
  10. autoregressive/train/train_t2i_seg_multiscale.py +338 -0
  11. condition/README.md +23 -0
  12. condition/__pycache__/canny.cpython-310.pyc +0 -0
  13. condition/__pycache__/hed.cpython-310.pyc +0 -0
  14. condition/__pycache__/utils.cpython-310.pyc +0 -0
  15. condition/canny.py +25 -0
  16. condition/depth.py +47 -0
  17. condition/example/c2i/canny/15000.npy +3 -0
  18. condition/example/c2i/canny/15000.png +3 -0
  19. condition/example/c2i/canny/2312.npy +3 -0
  20. condition/example/c2i/canny/2312.png +3 -0
  21. condition/example/c2i/canny/48850.npy +3 -0
  22. condition/example/c2i/canny/48850.png +3 -0
  23. condition/example/c2i/canny/650.npy +3 -0
  24. condition/example/c2i/canny/650.png +3 -0
  25. condition/example/c2i/depth/101.npy +3 -0
  26. condition/example/c2i/depth/101.png +3 -0
  27. condition/example/c2i/depth/10601.npy +3 -0
  28. condition/example/c2i/depth/10601.png +3 -0
  29. condition/example/c2i/depth/48901.npy +3 -0
  30. condition/example/t2i/.DS_Store +0 -0
  31. condition/example/t2i/cocostuff/doll.png +3 -0
  32. condition/hed.py +117 -0
  33. condition/lineart.py +98 -0
  34. condition/midas/depth.py +221 -0
  35. condition/midas/midas/__init__.py +0 -0
  36. condition/midas/midas/base_model.py +16 -0
  37. condition/midas/midas/blocks.py +341 -0
  38. condition/midas/midas/dpt_depth.py +108 -0
  39. condition/midas/midas/midas_net.py +76 -0
  40. condition/midas/midas/midas_net_custom.py +128 -0
  41. condition/midas/midas/transforms.py +234 -0
  42. condition/midas/midas/vit.py +491 -0
  43. condition/utils.py +38 -0
  44. data/Captioned_ADE20K/.gitattributes +55 -0
  45. data/Captioned_ADE20K/.hfd/last_download_command +1 -0
  46. data/Captioned_ADE20K/.hfd/repo_metadata.json +1 -0
  47. data/Captioned_ADE20K/.hfd/wget_urls.txt +28 -0
  48. data/Captioned_ADE20K/README.md +33 -0
  49. data/Captioned_ADE20K/see_parquet.py +13 -0
  50. data/Captioned_ADE20K/train.jsonl +880 -0
autoregressive/train/train_t2i.py ADDED
@@ -0,0 +1,268 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT
4
+ import torch
5
+ torch.backends.cuda.matmul.allow_tf32 = True
6
+ torch.backends.cudnn.allow_tf32 = True
7
+ import torch.distributed as dist
8
+ from torch.nn.parallel import DistributedDataParallel as DDP
9
+ from torch.utils.data import DataLoader
10
+ from torch.utils.data.distributed import DistributedSampler
11
+ from torchvision import transforms
12
+ from glob import glob
13
+ import time
14
+ import argparse
15
+ import os
16
+
17
+ from utils.distributed import init_distributed_mode
18
+ from utils.logger import create_logger
19
+ from dataset.build import build_dataset
20
+ from dataset.augmentation import center_crop_arr
21
+ from autoregressive.train.train_c2i import creat_optimizer
22
+ from autoregressive.models.gpt import GPT_models
23
+ from tokenizer.tokenizer_image.vq_model import VQ_models
24
+
25
+
26
+
27
+ def main(args):
28
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
29
+
30
+ # Setup DDP:
31
+ init_distributed_mode(args)
32
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
33
+ rank = dist.get_rank()
34
+ device = rank % torch.cuda.device_count()
35
+ seed = args.global_seed * dist.get_world_size() + rank
36
+ torch.manual_seed(seed)
37
+ torch.cuda.set_device(device)
38
+
39
+ # Setup an experiment folder:
40
+ if rank == 0:
41
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
42
+ experiment_index = len(glob(f"{args.results_dir}/*"))
43
+ model_string_name = args.gpt_model.replace("/", "-")
44
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"
45
+ checkpoint_dir = f"{experiment_dir}/checkpoints"
46
+ os.makedirs(checkpoint_dir, exist_ok=True)
47
+ logger = create_logger(experiment_dir)
48
+ logger.info(f"Experiment directory created at {experiment_dir}")
49
+
50
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
51
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
52
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
53
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
54
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
55
+
56
+ else:
57
+ logger = create_logger(None)
58
+
59
+ # training args
60
+ logger.info(f"{args}")
61
+
62
+ # training env
63
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
64
+
65
+
66
+ # Setup model
67
+ latent_size = args.image_size // args.downsample_size
68
+ model = GPT_models[args.gpt_model](
69
+ vocab_size=args.vocab_size,
70
+ block_size=latent_size ** 2,
71
+ num_classes=args.num_classes,
72
+ cls_token_num=args.cls_token_num,
73
+ model_type=args.gpt_type,
74
+ resid_dropout_p=args.dropout_p,
75
+ ffn_dropout_p=args.dropout_p,
76
+ token_dropout_p=args.token_dropout_p,
77
+ ).to(device)
78
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
79
+
80
+ # Setup optimizer
81
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
82
+
83
+ # Setup data:
84
+ if args.dataset == 't2i': # create and load model
85
+ vq_model = VQ_models[args.vq_model](
86
+ codebook_size=args.codebook_size,
87
+ codebook_embed_dim=args.codebook_embed_dim)
88
+ vq_model.to(device)
89
+ vq_model.eval()
90
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
91
+ vq_model.load_state_dict(checkpoint["model"])
92
+ del checkpoint
93
+ transform = transforms.Compose([
94
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
95
+ transforms.ToTensor(),
96
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
97
+ ])
98
+
99
+ # Load the dataset and initialize the dataset object
100
+ dataset = build_dataset(args, transform=transform)
101
+ sampler = DistributedSampler(
102
+ dataset,
103
+ num_replicas=dist.get_world_size(),
104
+ rank=rank,
105
+ shuffle=True,
106
+ seed=args.global_seed
107
+ )
108
+ # Wrap the dataset in a DataLoader
109
+ loader = DataLoader(
110
+ dataset,
111
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
112
+ shuffle=False,
113
+ sampler=sampler,
114
+ num_workers=args.num_workers,
115
+ pin_memory=True,
116
+ drop_last=True
117
+ )
118
+ logger.info(f"Dataset contains {len(dataset):,} images")
119
+
120
+ # Prepare models for training:
121
+ if args.gpt_ckpt:
122
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
123
+ model.load_state_dict(checkpoint["model"], strict=True)
124
+ optimizer.load_state_dict(checkpoint["optimizer"])
125
+ train_steps = checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
126
+ start_epoch = int(train_steps / int(len(dataset) / args.global_batch_size))
127
+ train_steps = int(start_epoch * int(len(dataset) / args.global_batch_size))
128
+ del checkpoint
129
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
130
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
131
+ else:
132
+ train_steps = 0
133
+ start_epoch = 0
134
+
135
+ if not args.no_compile:
136
+ logger.info("compiling the model... (may take several minutes)")
137
+ model = torch.compile(model) # requires PyTorch 2.0
138
+
139
+ model = DDP(model.to(device), device_ids=[args.gpu])
140
+ model.train() # important! This enables embedding dropout for classifier-free guidance
141
+
142
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
143
+ # initialize a GradScaler. If enabled=False scaler is a no-op
144
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
145
+ # Variables for monitoring/logging purposes:
146
+ log_steps = 0
147
+ running_loss = 0
148
+ start_time = time.time()
149
+
150
+ logger.info(f"Training for {args.epochs} epochs...")
151
+ for epoch in range(start_epoch, args.epochs):
152
+ sampler.set_epoch(epoch)
153
+ logger.info(f"Beginning epoch {epoch}...")
154
+ for x, y, attn_mask, valid in loader:
155
+ x = x.to(device, non_blocking=True)
156
+ y = y.to(device, non_blocking=True)
157
+ if args.dataset == 't2i':
158
+ img = x
159
+ with torch.no_grad():
160
+ _, _, [_, _, indices] = vq_model.encode(img)  # encode the image into VQ token indices
161
+ x = indices.reshape(img.shape[0], -1)
162
+ z_indices = x.reshape(x.shape[0], -1)  # image token indices
163
+ c_indices = y.reshape(y.shape[0], y.shape[-2], y.shape[-1])  # text condition, already encoded by T5
164
+ assert z_indices.shape[0] == c_indices.shape[0]
165
+ attn_mask = attn_mask.reshape(attn_mask.shape[0], 1, attn_mask.shape[-2], attn_mask.shape[-1]) # (bs, n_head, seq_len, seq_len)
166
+ with torch.cuda.amp.autocast(dtype=ptdtype):
167
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices, mask=attn_mask[:, :, :-1,:-1], valid=valid)
168
+ # backward pass, with gradient scaling if training in fp16
169
+ scaler.scale(loss).backward()
170
+ if args.max_grad_norm != 0.0:
171
+ scaler.unscale_(optimizer)
172
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
173
+ # step the optimizer and scaler if training in fp16
174
+ scaler.step(optimizer)
175
+ scaler.update()
176
+ # flush the gradients as soon as we can, no need for this memory anymore
177
+ optimizer.zero_grad(set_to_none=True)
178
+
179
+ # Log loss values:
180
+ running_loss += loss.item()
181
+ log_steps += 1
182
+ train_steps += 1
183
+ if train_steps % args.log_every == 0:
184
+ # Measure training speed:
185
+ torch.cuda.synchronize()
186
+ end_time = time.time()
187
+ steps_per_sec = log_steps / (end_time - start_time)
188
+ # Reduce loss history over all processes:
189
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
190
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
191
+ avg_loss = avg_loss.item() / dist.get_world_size()
192
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
193
+ # Reset monitoring variables:
194
+ running_loss = 0
195
+ log_steps = 0
196
+ start_time = time.time()
197
+
198
+ # Save checkpoint:
199
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
200
+ if rank == 0:
201
+ if not args.no_compile:
202
+ model_weight = model.module._orig_mod.state_dict()
203
+ else:
204
+ model_weight = model.module.state_dict()
205
+ checkpoint = {
206
+ "model": model_weight,
207
+ "optimizer": optimizer.state_dict(),
208
+ "steps": train_steps,
209
+ "args": args
210
+ }
211
+ if not args.no_local_save:
212
+ checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
213
+ torch.save(checkpoint, checkpoint_path)
214
+ logger.info(f"Saved checkpoint to {checkpoint_path}")
215
+
216
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
217
+ torch.save(checkpoint, cloud_checkpoint_path)
218
+ logger.info(f"Saved checkpoint in cloud to {cloud_checkpoint_path}")
219
+ dist.barrier()
220
+
221
+ model.eval() # important! This disables randomized embedding dropout
222
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
223
+
224
+ logger.info("Done!")
225
+ dist.destroy_process_group()
226
+
227
+
228
+
229
+ if __name__ == "__main__":
230
+ parser = argparse.ArgumentParser()
231
+ parser.add_argument("--data-path", type=str, required=True, help='包含 .jsonl 文件的路径')
232
+ parser.add_argument("--t5-feat-path", type=str, required=True, help='T5 .npy 文件路径')
233
+ parser.add_argument("--short-t5-feat-path", type=str, default=None, help="short caption of t5_feat_path")
234
+ parser.add_argument("--cloud-save-path", type=str, required=True, help='please specify a cloud disk path, if not, local path')
235
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
236
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
237
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
238
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
239
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
240
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-B")
241
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
242
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i")
243
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
244
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
245
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
246
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
247
+ parser.add_argument("--drop-path", type=float, default=0.0, help="drop_path_rate of attention and ffn")
248
+ parser.add_argument("--no-compile", action='store_true')
249
+ parser.add_argument("--results-dir", type=str, default="results")
250
+ parser.add_argument("--dataset", type=str, default='t2i')
251
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=384)
252
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
253
+ parser.add_argument("--num-classes", type=int, default=1000)
254
+ parser.add_argument("--epochs", type=int, default=300)
255
+ parser.add_argument("--lr", type=float, default=1e-4)
256
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use.")
257
+ parser.add_argument("--beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
258
+ parser.add_argument("--beta2", type=float, default=0.95, help="The beta2 parameter for the Adam optimizer.")
259
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
260
+ parser.add_argument("--global-batch-size", type=int, default=256)
261
+ parser.add_argument("--global-seed", type=int, default=0)
262
+ parser.add_argument("--num-workers", type=int, default=24)
263
+ parser.add_argument("--log-every", type=int, default=100)
264
+ parser.add_argument("--ckpt-every", type=int, default=5000)
265
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
266
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
267
+ args = parser.parse_args()
268
+ main(args)
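
The loss computed in the loop above is a plain next-token objective: the script feeds idx=z_indices[:, :-1] and targets=z_indices, and crops the attention mask to the shortened sequence. The sketch below only illustrates how those tensors line up; the shapes are hypothetical and the random-logits line is a stand-in for the repo's GPT_models forward pass, which also prepends the T5 condition embeddings.

import torch
import torch.nn.functional as F

# Toy shapes (hypothetical): 120 T5 condition tokens + 576 image tokens, vocab 16384.
batch, cond_len, img_tokens, vocab = 2, 120, 576, 16384
z_indices = torch.randint(0, vocab, (batch, img_tokens))         # VQ image token ids
seq_len = cond_len + img_tokens
attn_mask = torch.tril(torch.ones(batch, 1, seq_len, seq_len))   # causal mask over condition + image tokens

input_ids = z_indices[:, :-1]               # model input: all image tokens but the last
mask = attn_mask[:, :, :-1, :-1]            # mask cropped to match the shortened sequence
targets = z_indices                         # every image token is a prediction target

# Stand-in for the GPT forward pass: one logit vector per image-token position.
logits = torch.randn(batch, img_tokens, vocab)
loss = F.cross_entropy(logits.reshape(-1, vocab), targets.reshape(-1))
print(input_ids.shape, mask.shape, float(loss))
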
autoregressive/train/train_t2i_canny.py ADDED
@@ -0,0 +1,283 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT
4
+ from PIL import PngImagePlugin
5
+ MaximumDecompressedSize = 1024
6
+ MegaByte = 2**20
7
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
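+ # raise Pillow's per-chunk text limit so PNGs with very large metadata chunks load instead of failing with 'Decompressed Data Too Large'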
8
+ import torch
9
+ torch.backends.cuda.matmul.allow_tf32 = True
10
+ torch.backends.cudnn.allow_tf32 = True
11
+ import torch.distributed as dist
12
+ from torch.nn.parallel import DistributedDataParallel as DDP
13
+ from torch.utils.data import DataLoader
14
+ from torch.utils.data.distributed import DistributedSampler
15
+ from torchvision import transforms
16
+ from glob import glob
17
+ import time
18
+ import argparse
19
+ import os
20
+ import sys
21
+ current_directory = os.getcwd()
22
+ sys.path.append(current_directory)
23
+ from utils.distributed import init_distributed_mode
24
+ from utils.logger import create_logger
25
+ from dataset.build import build_dataset
26
+ from dataset.augmentation import center_crop_arr
27
+ from autoregressive.train.train_c2i import creat_optimizer
28
+
29
+ from autoregressive.models.gpt_t2i import GPT_models
30
+ from tokenizer.tokenizer_image.vq_model import VQ_models
31
+ from accelerate.utils import ProjectConfiguration, set_seed
32
+ from pathlib import Path
33
+ from accelerate import Accelerator
34
+ from language.t5 import T5Embedder
35
+ from dataset.t2i_control import build_t2i_control_code
36
+ import torch._dynamo
37
+ torch._dynamo.config.suppress_errors = True
38
+ def main(args):
39
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
40
+
41
+ # Setup DDP:
42
+ init_distributed_mode(args)
43
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
44
+ rank = dist.get_rank()
45
+ device = rank % torch.cuda.device_count()
46
+ seed = args.global_seed * dist.get_world_size() + rank
47
+ torch.manual_seed(seed)
48
+ torch.cuda.set_device(device)
49
+
50
+ # Setup an experiment folder:
51
+ if rank == 0:
52
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
53
+ experiment_index = len(glob(f"{args.results_dir}/*"))
54
+ model_string_name = args.gpt_model.replace("/", "-")
55
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"
56
+ checkpoint_dir = f"{experiment_dir}/checkpoints"
57
+ os.makedirs(checkpoint_dir, exist_ok=True)
58
+ logger = create_logger(experiment_dir)
59
+ logger.info(f"Experiment directory created at {experiment_dir}")
60
+
61
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
62
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
63
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
64
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
65
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
66
+
67
+ else:
68
+ logger = create_logger(None)
69
+
70
+ # training args
71
+ logger.info(f"{args}")
72
+
73
+ # training env
74
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
75
+
76
+
77
+ # Setup model
78
+ latent_size = args.image_size // args.downsample_size
79
+ model = GPT_models[args.gpt_model](
80
+ vocab_size=args.vocab_size,
81
+ block_size=latent_size ** 2,
82
+ num_classes=args.num_classes,
83
+ cls_token_num=args.cls_token_num,
84
+ model_type=args.gpt_type,
85
+ resid_dropout_p=args.dropout_p,
86
+ ffn_dropout_p=args.dropout_p,
87
+ token_dropout_p=args.token_dropout_p,
88
+ adapter_size=args.adapter_size,
89
+ condition_type=args.condition_type,
90
+ ).to(device)
91
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
92
+
93
+ # Setup optimizer
94
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
95
+
96
+ train_dataset = build_t2i_control_code(args)
97
+ sampler = DistributedSampler(
98
+ train_dataset,
99
+ num_replicas=dist.get_world_size(),
100
+ rank=rank,
101
+ shuffle=True,
102
+ seed=args.global_seed
103
+ )
104
+
105
+ loader = torch.utils.data.DataLoader(
106
+ train_dataset,
107
+ shuffle=False,
108
+ collate_fn=train_dataset.collate_fn,
109
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
110
+ num_workers=args.num_workers,
111
+ pin_memory=True,
112
+ sampler=sampler,
113
+ drop_last=True
114
+ )
115
+ logger.info(f"Dataset contains {len(train_dataset):,} images")
116
+
117
+ # Prepare models for training:
118
+ if args.gpt_ckpt:
119
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
120
+ model.load_state_dict(checkpoint["model"], strict=False)
121
+ train_steps = 0#checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
122
+ start_epoch = 0#int(train_steps / int(len(dataset) / args.global_batch_size))
123
+ train_steps = 0#int(start_epoch * int(len(dataset) / args.global_batch_size))
124
+ del checkpoint
125
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
126
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
127
+ else:
128
+ train_steps = 0
129
+ start_epoch = 0
130
+
131
+ if not args.no_compile:
132
+ logger.info("compiling the model... (may take several minutes)")
133
+ model = torch.compile(model) # requires PyTorch 2.0
134
+ # model.zero_init_mlp()
135
+ model = DDP(model.to(device), device_ids=[args.gpu], find_unused_parameters=True)
136
+ model.train() # important! This enables embedding dropout for classifier-free guidance
137
+
138
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
139
+ # initialize a GradScaler. If enabled=False scaler is a no-op
140
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
141
+ # Variables for monitoring/logging purposes:
142
+ log_steps = 0
143
+ running_loss = 0
144
+ start_time = time.time()
145
+
146
+ logger.info(f"Training for {args.epochs} epochs...")
147
+ for epoch in range(start_epoch, args.epochs):
148
+ sampler.set_epoch(epoch)
149
+ logger.info(f"Beginning epoch {epoch}...")
150
+ for batch in loader:
151
+ x = batch['code']
152
+ caption_emb = batch['caption_emb']
153
+ condition_img = batch['control']
154
+
155
+ attn_mask = batch['attn_mask']
156
+ valid = batch['valid']
157
+ y = caption_emb
158
+ x = x.to(device, non_blocking=True)
159
+ y = y.to(device, non_blocking=True)
160
+ condition_img = condition_img.to(device, non_blocking=True)
161
+
162
+ z_indices = x.reshape(x.shape[0], -1)
163
+ c_indices = y.reshape(y.shape[0], y.shape[-2], y.shape[-1])
164
+ assert z_indices.shape[0] == c_indices.shape[0]
165
+ attn_mask = attn_mask.reshape(attn_mask.shape[0], 1, attn_mask.shape[-2], attn_mask.shape[-1]) # (bs, n_head, seq_len, seq_len)
166
+ with torch.cuda.amp.autocast(dtype=ptdtype):
167
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices, mask=attn_mask[:, :, :-1,:-1], valid=valid, condition=condition_img.to(ptdtype))
168
+ # backward pass, with gradient scaling if training in fp16
169
+ scaler.scale(loss).backward()
170
+ if args.max_grad_norm != 0.0:
171
+ scaler.unscale_(optimizer)
172
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
173
+ # step the optimizer and scaler if training in fp16
174
+ scaler.step(optimizer)
175
+ scaler.update()
176
+ # flush the gradients as soon as we can, no need for this memory anymore
177
+ optimizer.zero_grad(set_to_none=True)
178
+
179
+ # Log loss values:
180
+ running_loss += loss.item()
181
+ log_steps += 1
182
+ train_steps += 1
183
+ if train_steps % args.log_every == 0:
184
+ # Measure training speed:
185
+ torch.cuda.synchronize()
186
+ end_time = time.time()
187
+ steps_per_sec = log_steps / (end_time - start_time)
188
+ # Reduce loss history over all processes:
189
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
190
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
191
+ avg_loss = avg_loss.item() / dist.get_world_size()
192
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
193
+ # Reset monitoring variables:
194
+ running_loss = 0
195
+ log_steps = 0
196
+ start_time = time.time()
197
+
198
+ # Save checkpoint:
199
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
200
+ if rank == 0:
201
+ if not args.no_compile:
202
+ model_weight = model.module._orig_mod.state_dict()
203
+ else:
204
+ model_weight = model.module.state_dict()
205
+ checkpoint = {
206
+ "model": model_weight,
207
+ "steps": train_steps,
208
+ "args": args
209
+ }
210
+ if not args.no_local_save:
211
+ checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
212
+ torch.save(checkpoint, checkpoint_path)
213
+ logger.info(f"Saved checkpoint to {checkpoint_path}")
214
+
215
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
216
+ torch.save(checkpoint, cloud_checkpoint_path)
217
+ logger.info(f"Saved checkpoint in cloud to {cloud_checkpoint_path}")
218
+ dist.barrier()
219
+
220
+ model.eval() # important! This disables randomized embedding dropout
221
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
222
+
223
+ logger.info("Done!")
224
+ dist.destroy_process_group()
225
+
226
+
227
+
228
+ if __name__ == "__main__":
229
+ parser = argparse.ArgumentParser()
230
+ parser.add_argument("--data-path", type=str, required=False)
231
+ parser.add_argument("--t5-feat-path", type=str, required=False)
232
+ parser.add_argument("--short-t5-feat-path", type=str, default=None, help="short caption of t5_feat_path")
233
+ parser.add_argument("--cloud-save-path", type=str, required=False, help='please specify a cloud disk path, if not, local path')
234
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
235
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
236
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
237
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
238
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
239
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-XL")
240
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
241
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i")
242
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
243
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
244
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
245
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
246
+ parser.add_argument("--drop-path", type=float, default=0.0, help="drop_path_rate of attention and ffn")
247
+ parser.add_argument("--no-compile", action='store_true')
248
+ parser.add_argument("--results-dir", type=str, default="results")
249
+ parser.add_argument("--dataset", type=str, default='t2i_control')
250
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=512)
251
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
252
+ parser.add_argument("--num-classes", type=int, default=1000)
253
+ parser.add_argument("--epochs", type=int, default=6)
254
+ parser.add_argument("--lr", type=float, default=5e-5)
255
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use.")
256
+ parser.add_argument("--beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
257
+ parser.add_argument("--beta2", type=float, default=0.95, help="The beta2 parameter for the Adam optimizer.")
258
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
259
+ parser.add_argument("--global-batch-size", type=int, default=96)
260
+ parser.add_argument("--global-seed", type=int, default=0)
261
+ parser.add_argument("--num-workers", type=int, default=24)
262
+ parser.add_argument("--log-every", type=int, default=100)
263
+ parser.add_argument("--ckpt-every", type=int, default=10000)
264
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
265
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
266
+
267
+ parser.add_argument("--condition-type", type=str, choices=['canny', 'hed', 'lineart', 'depth'], default="canny")
268
+ parser.add_argument("--code-path", type=str, required=True)
269
+ parser.add_argument("--code-path2", type=str, default=None)
270
+ parser.add_argument("--adapter-size", type=str, default='small')
271
+ parser.add_argument("--get-image", type=bool, default=True)
272
+ parser.add_argument("--get-prompt", type=bool, default=False)
273
+ parser.add_argument("--get-label", type=bool, default=False)
274
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
275
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
276
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
277
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
278
+ parser.add_argument("--keep_in_memory",type=bool,default=False)
279
+ parser.add_argument("--wrong_ids_file",type=str,default=None)
280
+ parser.add_argument("--logging_dir",type=str,default="logs")
281
+
282
+ args = parser.parse_args()
283
+ main(args)
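
train_t2i.py and train_t2i_canny.py share the same mixed-precision update: the forward pass runs under autocast in the chosen dtype, a GradScaler that is only enabled for fp16 scales the loss, gradients are unscaled and clipped when --max-grad-norm is non-zero, and then step/update/zero_grad follow. A minimal self-contained sketch of that pattern (a toy linear layer in place of the GPT, one GPU assumed):

import torch

model = torch.nn.Linear(16, 16).cuda()
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5, betas=(0.9, 0.95), weight_decay=5e-2)
ptdtype = torch.bfloat16                                   # as with --mixed-precision bf16
scaler = torch.cuda.amp.GradScaler(enabled=False)          # only enabled for fp16 in the scripts
x = torch.randn(4, 16, device="cuda")

with torch.cuda.amp.autocast(dtype=ptdtype):
    loss = model(x).pow(2).mean()                          # stand-in for the GPT loss
scaler.scale(loss).backward()                              # no-op scaling when the scaler is disabled
scaler.unscale_(optimizer)                                 # return grads to fp32 scale before clipping
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)    # --max-grad-norm
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad(set_to_none=True)
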
autoregressive/train/train_t2i_depth.py ADDED
@@ -0,0 +1,286 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT
4
+ from PIL import PngImagePlugin
5
+ MaximumDecompressedSize = 1024
6
+ MegaByte = 2**20
7
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
8
+ import torch
9
+ torch.backends.cuda.matmul.allow_tf32 = True
10
+ torch.backends.cudnn.allow_tf32 = True
11
+ import torch.distributed as dist
12
+ from torch.nn.parallel import DistributedDataParallel as DDP
13
+ from torch.utils.data import DataLoader
14
+ from torch.utils.data.distributed import DistributedSampler
15
+ from torchvision import transforms
16
+ from glob import glob
17
+ import time
18
+ import argparse
19
+ import os
20
+ import sys
21
+ current_directory = os.getcwd()
22
+ sys.path.append(current_directory)
23
+ from utils.distributed import init_distributed_mode
24
+ from utils.logger import create_logger
25
+ from dataset.build import build_dataset
26
+ from dataset.augmentation import center_crop_arr
27
+ from autoregressive.train.train_c2i import creat_optimizer
28
+ from torch.optim.lr_scheduler import StepLR
29
+ from autoregressive.models.gpt_t2i import GPT_models
30
+ from tokenizer.tokenizer_image.vq_model import VQ_models
31
+ from accelerate.utils import ProjectConfiguration, set_seed
32
+ from pathlib import Path
33
+ from accelerate import Accelerator
34
+ from language.t5 import T5Embedder
35
+ from dataset.t2i_control import build_t2i_control_code
36
+ import torch._dynamo
37
+ torch._dynamo.config.suppress_errors = True
38
+ def main(args):
39
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
40
+
41
+ # Setup DDP:
42
+ init_distributed_mode(args)
43
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
44
+ rank = dist.get_rank()
45
+ device = rank % torch.cuda.device_count()
46
+ seed = args.global_seed * dist.get_world_size() + rank
47
+ torch.manual_seed(seed)
48
+ torch.cuda.set_device(device)
49
+
50
+
51
+ # Setup an experiment folder:
52
+ if rank == 0:
53
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
54
+ experiment_index = len(glob(f"{args.results_dir}/*"))
55
+ model_string_name = args.gpt_model.replace("/", "-")
56
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"
57
+ checkpoint_dir = f"{experiment_dir}/checkpoints"
58
+ os.makedirs(checkpoint_dir, exist_ok=True)
59
+ logger = create_logger(experiment_dir)
60
+ logger.info(f"Experiment directory created at {experiment_dir}")
61
+
62
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
63
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
64
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
65
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
66
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
67
+
68
+ else:
69
+ logger = create_logger(None)
70
+
71
+ # training args
72
+ logger.info(f"{args}")
73
+
74
+ # training env
75
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
76
+
77
+
78
+ # Setup model
79
+ latent_size = args.image_size // args.downsample_size
80
+ model = GPT_models[args.gpt_model](
81
+ vocab_size=args.vocab_size,
82
+ block_size=latent_size ** 2,
83
+ num_classes=args.num_classes,
84
+ cls_token_num=args.cls_token_num,
85
+ model_type=args.gpt_type,
86
+ resid_dropout_p=args.dropout_p,
87
+ ffn_dropout_p=args.dropout_p,
88
+ token_dropout_p=args.token_dropout_p,
89
+ condition_type=args.condition_type,
90
+ ).to(device)
91
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
92
+
93
+
94
+ # Setup optimizer
95
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
96
+ lr_scheduler = StepLR(optimizer, step_size=1, gamma=0.5)  # halve the learning rate after every epoch
97
+ train_dataset = build_t2i_control_code(args)
98
+ sampler = DistributedSampler(
99
+ train_dataset,
100
+ num_replicas=dist.get_world_size(),
101
+ rank=rank,
102
+ shuffle=True,
103
+ seed=args.global_seed
104
+ )
105
+
106
+ loader = torch.utils.data.DataLoader(
107
+ train_dataset,
108
+ shuffle=False,
109
+ collate_fn=train_dataset.collate_fn,
110
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
111
+ num_workers=args.num_workers,
112
+ pin_memory=True,
113
+ sampler=sampler,
114
+ drop_last=True
115
+ )
116
+ logger.info(f"Dataset contains {len(train_dataset):,} images")
117
+
118
+ # Prepare models for training:
119
+ if args.gpt_ckpt:
120
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
121
+ model.load_state_dict(checkpoint["model"], strict=False)
122
+ # optimizer.load_state_dict(checkpoint["optimizer"])
123
+ train_steps = 0#checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
124
+ start_epoch = 0#int(train_steps / int(len(dataset) / args.global_batch_size))
125
+ train_steps = 0#int(start_epoch * int(len(dataset) / args.global_batch_size))
126
+ del checkpoint
127
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
128
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
129
+ else:
130
+ train_steps = 0
131
+ start_epoch = 0
132
+
133
+ if not args.no_compile:
134
+ logger.info("compiling the model... (may take several minutes)")
135
+ model = torch.compile(model) # requires PyTorch 2.0
136
+ # model.zero_init_mlp()
137
+ model = DDP(model.to(device), device_ids=[args.gpu], find_unused_parameters=True)
138
+ model.train() # important! This enables embedding dropout for classifier-free guidance
139
+
140
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
141
+ # initialize a GradScaler. If enabled=False scaler is a no-op
142
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
143
+ # Variables for monitoring/logging purposes:
144
+ log_steps = 0
145
+ running_loss = 0
146
+ start_time = time.time()
147
+
148
+ logger.info(f"Training for {args.epochs} epochs...")
149
+ for epoch in range(start_epoch, args.epochs):
150
+ sampler.set_epoch(epoch)
151
+ logger.info(f"Beginning epoch {epoch}...")
152
+ # for x, y, attn_mask, valid in loader:
153
+ for batch in loader:
154
+
155
+ x = batch['code']
156
+ caption_emb = batch['caption_emb']
157
+ condition_img = batch['control']
158
+
159
+ attn_mask = batch['attn_mask']
160
+ valid = batch['valid']
161
+ y = caption_emb
162
+ x = x.to(device, non_blocking=True)
163
+ y = y.to(device, non_blocking=True)
164
+ condition_img = condition_img.to(device, non_blocking=True)
165
+
166
+ z_indices = x.reshape(x.shape[0], -1)
167
+ c_indices = y.reshape(y.shape[0], y.shape[-2], y.shape[-1])
168
+ assert z_indices.shape[0] == c_indices.shape[0]
169
+ attn_mask = attn_mask.reshape(attn_mask.shape[0], 1, attn_mask.shape[-2], attn_mask.shape[-1]) # (bs, n_head, seq_len, seq_len)
170
+ with torch.cuda.amp.autocast(dtype=ptdtype):
171
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices, mask=attn_mask[:, :, :-1,:-1], valid=valid, condition=condition_img.to(ptdtype))
172
+ # backward pass, with gradient scaling if training in fp16
173
+ scaler.scale(loss).backward()
174
+ if args.max_grad_norm != 0.0:
175
+ scaler.unscale_(optimizer)
176
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
177
+ # step the optimizer and scaler if training in fp16
178
+ scaler.step(optimizer)
179
+ scaler.update()
180
+ # flush the gradients as soon as we can, no need for this memory anymore
181
+ optimizer.zero_grad(set_to_none=True)
182
+
183
+ # Log loss values:
184
+ running_loss += loss.item()
185
+ log_steps += 1
186
+ train_steps += 1
187
+ if train_steps % args.log_every == 0:
188
+ # Measure training speed:
189
+ torch.cuda.synchronize()
190
+ end_time = time.time()
191
+ steps_per_sec = log_steps / (end_time - start_time)
192
+ # Reduce loss history over all processes:
193
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
194
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
195
+ avg_loss = avg_loss.item() / dist.get_world_size()
196
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
197
+ # Reset monitoring variables:
198
+ running_loss = 0
199
+ log_steps = 0
200
+ start_time = time.time()
201
+
202
+ # Save checkpoint:
203
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
204
+ if rank == 0:
205
+ if not args.no_compile:
206
+ model_weight = model.module._orig_mod.state_dict()
207
+ else:
208
+ model_weight = model.module.state_dict()
209
+ checkpoint = {
210
+ "model": model_weight,
211
+ "steps": train_steps,
212
+ "args": args
213
+ }
214
+ if not args.no_local_save:
215
+ checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
216
+ torch.save(checkpoint, checkpoint_path)
217
+ logger.info(f"Saved checkpoint to {checkpoint_path}")
218
+
219
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
220
+ torch.save(checkpoint, cloud_checkpoint_path)
221
+ logger.info(f"Saved checkpoint in cloud to {cloud_checkpoint_path}")
222
+ dist.barrier()
223
+ lr_scheduler.step()
224
+ model.eval() # important! This disables randomized embedding dropout
225
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
226
+
227
+ logger.info("Done!")
228
+ dist.destroy_process_group()
229
+
230
+
231
+
232
+ if __name__ == "__main__":
233
+ parser = argparse.ArgumentParser()
234
+ parser.add_argument("--data-path", type=str, required=False)
235
+ parser.add_argument("--t5-feat-path", type=str, required=False)
236
+ parser.add_argument("--short-t5-feat-path", type=str, default=None, help="short caption of t5_feat_path")
237
+ parser.add_argument("--cloud-save-path", type=str, required=False, help='please specify a cloud disk path, if not, local path')
238
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
239
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
240
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
241
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
242
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
243
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-XL")
244
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
245
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i")
246
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
247
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
248
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
249
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
250
+ parser.add_argument("--drop-path", type=float, default=0.0, help="drop_path_rate of attention and ffn")
251
+ parser.add_argument("--no-compile", action='store_true')
252
+ parser.add_argument("--results-dir", type=str, default="results")
253
+ parser.add_argument("--dataset", type=str, default='t2i_control')
254
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=512)
255
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
256
+ parser.add_argument("--num-classes", type=int, default=1000)
257
+ parser.add_argument("--epochs", type=int, default=7)
258
+ parser.add_argument("--lr", type=float, default=5e-5)
259
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use.")
260
+ parser.add_argument("--beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
261
+ parser.add_argument("--beta2", type=float, default=0.95, help="The beta2 parameter for the Adam optimizer.")
262
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
263
+ parser.add_argument("--global-batch-size", type=int, default=96)
264
+ parser.add_argument("--global-seed", type=int, default=0)
265
+ parser.add_argument("--num-workers", type=int, default=24)
266
+ parser.add_argument("--log-every", type=int, default=100)
267
+ parser.add_argument("--ckpt-every", type=int, default=10000)
268
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
269
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
270
+
271
+ parser.add_argument("--condition-type", type=str, choices=['canny', 'hed', 'lineart', 'depth'], default="depth")
272
+ parser.add_argument("--code-path", type=str, required=True)
273
+ parser.add_argument("--code-path2", type=str, default=None)
274
+ parser.add_argument("--get-image", type=bool, default=True)
275
+ parser.add_argument("--get-prompt", type=bool, default=False)
276
+ parser.add_argument("--get-label", type=bool, default=False)
277
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
278
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
279
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
280
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
281
+ parser.add_argument("--keep_in_memory",type=bool,default=False)
282
+ parser.add_argument("--wrong_ids_file",type=str,default=None)
283
+ parser.add_argument("--logging_dir",type=str,default="logs")
284
+
285
+ args = parser.parse_args()
286
+ main(args)
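
Compared with the canny script, train_t2i_depth.py adds a per-epoch learning-rate schedule: lr_scheduler.step() is called once at the end of each epoch, and StepLR(step_size=1, gamma=0.5) halves the learning rate each time. A small sketch of the resulting decay (toy parameter, starting from that script's default lr of 5e-5):

import torch
from torch.optim.lr_scheduler import StepLR

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW([param], lr=5e-5)
scheduler = StepLR(optimizer, step_size=1, gamma=0.5)      # halve the lr on every scheduler.step()

for epoch in range(7):                                     # --epochs defaults to 7 in that script
    # ... one full pass over the dataloader would run here ...
    scheduler.step()
    print(epoch, optimizer.param_groups[0]["lr"])          # 2.5e-05, 1.25e-05, 6.25e-06, ...
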
autoregressive/train/train_t2i_depth_multiscale.py ADDED
@@ -0,0 +1,357 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT
4
+ import warnings
5
+ warnings.filterwarnings("ignore")
6
+ from PIL import PngImagePlugin
7
+ MaximumDecompressedSize = 1024
8
+ MegaByte = 2**20
9
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
10
+ import torch
11
+ torch.backends.cuda.matmul.allow_tf32 = True
12
+ torch.backends.cudnn.allow_tf32 = True
13
+ import torch.distributed as dist
14
+ from torch.nn.parallel import DistributedDataParallel as DDP
15
+ from torch.utils.data import DataLoader
16
+ from torch.utils.data.distributed import DistributedSampler
17
+ from torchvision import transforms
18
+ from glob import glob
19
+ import time
20
+ import argparse
21
+ import os
22
+ import sys
23
+ current_directory = os.getcwd()
24
+ sys.path.append(current_directory)
25
+ from utils.distributed import init_distributed_mode
26
+ from utils.logger import create_logger
27
+ from dataset.build import build_dataset
28
+ from dataset.augmentation import center_crop_arr
29
+ from autoregressive.train.train_c2i import creat_optimizer
30
+ from autoregressive.models.gpt_t2i import GPT_models
31
+ from tokenizer.tokenizer_image.vq_model import VQ_models
32
+ from accelerate.utils import ProjectConfiguration, set_seed
33
+ from pathlib import Path
34
+ from accelerate import Accelerator
35
+ from language.t5 import T5Embedder
36
+ from dataset.t2i_control import build_t2i_control_code
37
+ import torch._dynamo
38
+ torch._dynamo.config.suppress_errors = True
39
+ import random
40
+ import torch.nn.functional as F
41
+ from condition.hed import HEDdetector
42
+ from condition.lineart import LineArt
43
+ import numpy as np
44
+ def random_sample_scale(image, condition=None):
45
+
46
+ H = np.arange(384, 1024+16, 16)
47
+ W = np.arange(384, 1024+16, 16)
48
+ resolution = [1024,1024]
49
+ while resolution[0]//16 * resolution[1]//16 > 2304:  # resample until the latent token count fits the budget
50
+ resolution = [random.choice(H), random.choice(W)]
51
+ assert resolution[0]//16 * resolution[1]//16 <= 2304
52
+ image = F.interpolate(image.to(torch.float32), size=resolution, mode='bilinear', align_corners=False, antialias=True)
53
+ if condition is not None:
54
+ condition = F.interpolate(condition.to(torch.float32), size=resolution, mode='bilinear', align_corners=False, antialias=True)
55
+ return image, condition
56
+ return image
57
+
58
+
59
+ def main(args):
60
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
61
+
62
+ # Setup DDP:
63
+ init_distributed_mode(args)
64
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
65
+ rank = dist.get_rank()
66
+ device = rank % torch.cuda.device_count()
67
+ seed = args.global_seed * dist.get_world_size() + rank
68
+ torch.manual_seed(seed)
69
+ torch.cuda.set_device(device)
70
+
71
+ # Setup an experiment folder:
72
+ if rank == 0:
73
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
74
+ experiment_index = len(glob(f"{args.results_dir}/*"))
75
+ model_string_name = args.gpt_model.replace("/", "-")
76
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"
77
+ checkpoint_dir = f"{experiment_dir}/checkpoints"
78
+ os.makedirs(checkpoint_dir, exist_ok=True)
79
+ logger = create_logger(experiment_dir)
80
+ logger.info(f"Experiment directory created at {experiment_dir}")
81
+
82
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
83
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
84
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
85
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
86
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
87
+
88
+ else:
89
+ logger = create_logger(None)
90
+
91
+ # training args
92
+ logger.info(f"{args}")
93
+
94
+ # training env
95
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
96
+
97
+
98
+ # Setup model
99
+ latent_size = args.image_size // args.downsample_size
100
+ model = GPT_models[args.gpt_model](
101
+ vocab_size=args.vocab_size,
102
+ block_size=latent_size ** 2,
103
+ num_classes=args.num_classes,
104
+ cls_token_num=args.cls_token_num,
105
+ model_type=args.gpt_type,
106
+ resid_dropout_p=args.dropout_p,
107
+ ffn_dropout_p=args.dropout_p,
108
+ token_dropout_p=args.token_dropout_p,
109
+ condition_type=args.condition_type,
110
+ ).to(device)
111
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
112
+ # # Freeze all the layers
113
+ # for param in model.parameters():
114
+ # param.requires_grad = False
115
+ # for name, param in model.named_parameters():
116
+ # if 'condition' in name:
117
+ # param.requires_grad = True
118
+ # print(name)
119
+ # if 'adapter' in name:
120
+ # param.requires_grad = True
121
+ # print(name)
122
+ # if 'layers.0.' in name:
123
+ # param.requires_grad = True
124
+ # print(name)
125
+ # if 'layers.12.' in name:
126
+ # param.requires_grad = True
127
+ # print(name)
128
+ # if 'layers.24.' in name:
129
+ # param.requires_grad = True
130
+ # print(name)
131
+
132
+ # Setup optimizer
133
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
134
+
135
+ # Setup data:
136
+ if args.dataset == 't2i_control': # create and load model
137
+ vq_model = VQ_models[args.vq_model](
138
+ codebook_size=args.codebook_size,
139
+ codebook_embed_dim=args.codebook_embed_dim)
140
+ vq_model.to(device)
141
+ vq_model.eval()
142
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
143
+ vq_model.load_state_dict(checkpoint["model"])
144
+ del checkpoint
145
+
146
+ train_dataset = build_t2i_control_code(args)
147
+ sampler = DistributedSampler(
148
+ train_dataset,
149
+ num_replicas=dist.get_world_size(),
150
+ rank=rank,
151
+ shuffle=True,
152
+ seed=args.global_seed
153
+ )
154
+
155
+ loader = torch.utils.data.DataLoader(
156
+ train_dataset,
157
+ shuffle=False,
158
+ collate_fn=train_dataset.collate_fn,
159
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
160
+ num_workers=args.num_workers,
161
+ pin_memory=True,
162
+ sampler=sampler,
163
+ drop_last=True
164
+ )
165
+ logger.info(f"Dataset contains {len(train_dataset):,} images")
166
+
167
+ # Prepare models for training:
168
+ if args.gpt_ckpt:
169
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
170
+ model.load_state_dict(checkpoint["model"], strict=False)
171
+ # optimizer.load_state_dict(checkpoint["optimizer"])
172
+ train_steps = 0#checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
173
+ start_epoch = 0#int(train_steps / int(len(dataset) / args.global_batch_size))
174
+ train_steps = 0#int(start_epoch * int(len(dataset) / args.global_batch_size))
175
+ del checkpoint
176
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
177
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
178
+ else:
179
+ train_steps = 0
180
+ start_epoch = 0
181
+
182
+ if not args.no_compile:
183
+ logger.info("compiling the model... (may take several minutes)")
184
+ model = torch.compile(model) # requires PyTorch 2.0
185
+
186
+ model = DDP(model.to(device), device_ids=[args.gpu], find_unused_parameters=True)
187
+ model.train() # important! This enables embedding dropout for classifier-free guidance
188
+
189
+
190
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
191
+ # initialize a GradScaler. If enabled=False scaler is a no-op
192
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
193
+ # Variables for monitoring/logging purposes:
194
+ log_steps = 0
195
+ running_loss = 0
196
+ start_time = time.time()
197
+ # get_condition = HEDdetector().to(device).eval()
198
+ logger.info(f"Training for {args.epochs} epochs...")
199
+ for epoch in range(start_epoch, args.epochs):
200
+ sampler.set_epoch(epoch)
201
+ logger.info(f"Beginning epoch {epoch}...")
202
+ for batch in loader:
203
+
204
+ x = batch['code']
205
+ image = batch['image']
206
+ caption_emb = batch['caption_emb']
207
+ condition_img = batch['control']
208
+
209
+ attn_mask = batch['attn_mask']
210
+ valid = batch['valid']
211
+ y = caption_emb
212
+ x = x.to(device, non_blocking=True)
213
+ image = image.to(device, non_blocking=True)
214
+ y = y.to(device, non_blocking=True)
215
+ condition_img = condition_img.to(device, non_blocking=True)
216
+ image, condition_img = random_sample_scale(image, condition_img)
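+ # jointly resize the image and its control map to a randomly sampled resolution for multiscale training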
217
+
218
+ if args.dataset == 't2i_control':
219
+ img = 2*(image/255 - 0.5)
220
+
221
+ with torch.no_grad():
222
+ _, _, [_, _, indices] = vq_model.encode(img)
223
+ x = indices.reshape(img.shape[0], -1)
224
+ z_indices = x.reshape(x.shape[0], -1)
225
+ c_indices = y.reshape(y.shape[0], y.shape[-2], y.shape[-1])
226
+ assert z_indices.shape[0] == c_indices.shape[0]
227
+ attn_mask = attn_mask.reshape(attn_mask.shape[0], 1, attn_mask.shape[-2], attn_mask.shape[-1]) # (bs, n_head, seq_len, seq_len)
228
+ with torch.cuda.amp.autocast(dtype=ptdtype):
229
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices, mask=attn_mask[:, :, :x.shape[1]+120-1,:x.shape[1]+120-1], valid=valid, condition=condition_img.to(ptdtype))
230
+ # backward pass, with gradient scaling if training in fp16
231
+ scaler.scale(loss).backward()
232
+ if args.max_grad_norm != 0.0:
233
+ scaler.unscale_(optimizer)
234
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
235
+ # step the optimizer and scaler if training in fp16
236
+ scaler.step(optimizer)
237
+ scaler.update()
238
+ # flush the gradients as soon as we can, no need for this memory anymore
239
+ optimizer.zero_grad(set_to_none=True)
240
+
241
+ # Log loss values:
242
+ running_loss += loss.item()
243
+ log_steps += 1
244
+ train_steps += 1
245
+ if train_steps % args.log_every == 0:
246
+ # Measure training speed:
247
+ torch.cuda.synchronize()
248
+ end_time = time.time()
249
+ steps_per_sec = log_steps / (end_time - start_time)
250
+ # Reduce loss history over all processes:
251
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
252
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
253
+ avg_loss = avg_loss.item() / dist.get_world_size()
254
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
255
+ # Reset monitoring variables:
256
+ running_loss = 0
257
+ log_steps = 0
258
+ start_time = time.time()
259
+
260
+ # Save checkpoint:
261
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
262
+ if rank == 0:
263
+ if not args.no_compile:
264
+ model_weight = model.module._orig_mod.state_dict()
265
+ else:
266
+ model_weight = model.module.state_dict()
267
+ checkpoint = {
268
+ "model": model_weight,
269
+ "steps": train_steps,
270
+ "args": args
271
+ }
272
+ if not args.no_local_save:
273
+ checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
274
+ torch.save(checkpoint, checkpoint_path)
275
+ logger.info(f"Saved checkpoint to {checkpoint_path}")
276
+
277
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
278
+ torch.save(checkpoint, cloud_checkpoint_path)
279
+ logger.info(f"Saved checkpoint in cloud to {cloud_checkpoint_path}")
280
+ dist.barrier()
281
+
282
+ model.eval() # important! This disables randomized embedding dropout
283
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
284
+
285
+ logger.info("Done!")
286
+ dist.destroy_process_group()
287
+
288
+
289
+
290
+ if __name__ == "__main__":
291
+ parser = argparse.ArgumentParser()
292
+ parser.add_argument("--data-path", type=str, required=False)
293
+ parser.add_argument("--t5-feat-path", type=str, required=False)
294
+ parser.add_argument("--short-t5-feat-path", type=str, default=None, help="short caption of t5_feat_path")
295
+ parser.add_argument("--cloud-save-path", type=str, required=False, help='please specify a cloud disk path, if not, local path')
296
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
297
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
298
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
299
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
300
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
301
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-XL")
302
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
303
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i")
304
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
305
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
306
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
307
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
308
+ parser.add_argument("--drop-path", type=float, default=0.0, help="drop_path_rate of attention and ffn")
309
+ parser.add_argument("--no-compile", action='store_true')
310
+ parser.add_argument("--results-dir", type=str, default="results")
311
+ parser.add_argument("--dataset", type=str, default='t2i_control')
312
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512, 768, 832, 896, 960], default=384)
313
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
314
+ parser.add_argument("--num-classes", type=int, default=1000)
315
+ parser.add_argument("--epochs", type=int, default=15)
316
+ parser.add_argument("--lr", type=float, default=1e-5)
317
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use.")
318
+ parser.add_argument("--beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
319
+ parser.add_argument("--beta2", type=float, default=0.95, help="The beta2 parameter for the Adam optimizer.")
320
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
321
+ parser.add_argument("--global-batch-size", type=int, default=64)
322
+ parser.add_argument("--global-seed", type=int, default=0)
323
+ parser.add_argument("--num-workers", type=int, default=24)
324
+ parser.add_argument("--log-every", type=int, default=100)
325
+ parser.add_argument("--ckpt-every", type=int, default=30000)
326
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
327
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
328
+
329
+ parser.add_argument("--code-path", type=str, required=True)
330
+ parser.add_argument("--code-path2", type=str, default=None)
331
+ parser.add_argument("--condition-type", type=str, choices=['segmentation', 'canny', 'hed', 'lineart', 'depth'], default="depth")
332
+ parser.add_argument("--get-image", type=bool, default=True)
333
+ parser.add_argument("--get-prompt", type=bool, default=False)
334
+ parser.add_argument("--get-label", type=bool, default=False)
335
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
336
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
337
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
338
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
339
+ parser.add_argument("--keep_in_memory",type=bool,default=False)
340
+ parser.add_argument("--wrong_ids_file",type=str,default=None)
341
+ parser.add_argument("--logging_dir",type=str,default="logs")
342
+ parser.add_argument("--report_to",type=str,default="wandb")
343
+ parser.add_argument("--task_name",type=str,default='segmentation')
344
+ parser.add_argument("--dataset_name",type=str,default=None)
345
+ parser.add_argument("--dataset_config_name",type=str,default=None)
346
+
347
+ parser.add_argument("--image_column", type=str, default="image", help="The column of the dataset containing the target image.")
348
+ parser.add_argument("--conditioning_image_column",type=str,default="control_seg",help="The column of the dataset containing the controlnet conditioning image.")
349
+ parser.add_argument("--caption_column",type=str,default="prompt",help="The column of the dataset containing a caption or a list of captions.")
350
+ parser.add_argument("--label_column",type=str,default=None,help="The column of the dataset containing the original labels. `seg_map` for ADE20K; `panoptic_seg_map` for COCO-Stuff.")
351
+ parser.add_argument("--max_train_samples",type=int,default=None)
352
+ parser.add_argument("--image_condition_dropout",type=float,default=0)
353
+ parser.add_argument("--text_condition_dropout",type=float,default=0)
354
+ parser.add_argument("--all_condition_dropout",type=float,default=0)
355
+
356
+ args = parser.parse_args()
357
+ main(args)
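+
+ # A minimal (hypothetical) launch example; the torchrun launcher and all paths below are assumptions, adjust to your setup:
+ #   torchrun --nproc_per_node=8 autoregressive/train/train_t2i.py \
+ #       --code-path /path/to/control_codes --vq-ckpt /path/to/vq_model.pt \
+ #       --gpt-ckpt /path/to/t2i_gpt.pt --condition-type depth --image-size 384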
autoregressive/train/train_t2i_hed.py ADDED
@@ -0,0 +1,291 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT
4
+ from PIL import PngImagePlugin
5
+ MaximumDecompressedSize = 1024
6
+ MegaByte = 2**20
7
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
8
+ import torch
9
+ torch.backends.cuda.matmul.allow_tf32 = True
10
+ torch.backends.cudnn.allow_tf32 = True
11
+ import torch.distributed as dist
12
+ from torch.nn.parallel import DistributedDataParallel as DDP
13
+ from torch.utils.data import DataLoader
14
+ from torch.utils.data.distributed import DistributedSampler
15
+ from torchvision import transforms
16
+ from glob import glob
17
+ import time
18
+ import argparse
19
+ import os
20
+ import sys
21
+ current_directory = os.getcwd()
22
+ sys.path.append(current_directory)
23
+ from utils.distributed import init_distributed_mode
24
+ from utils.logger import create_logger
25
+ from dataset.build import build_dataset
26
+ from dataset.augmentation import center_crop_arr
27
+ from autoregressive.train.train_c2i import creat_optimizer
28
+ from autoregressive.models.gpt_t2i import GPT_models
29
+ from tokenizer.tokenizer_image.vq_model import VQ_models
30
+ from accelerate.utils import ProjectConfiguration, set_seed
31
+ from pathlib import Path
32
+ from accelerate import Accelerator
33
+ from language.t5 import T5Embedder
34
+ from dataset.t2i_control import build_t2i_control_code
35
+ import torch._dynamo
36
+ from condition.hed import HEDdetector
37
+ torch._dynamo.config.suppress_errors = True
38
+ def main(args):
39
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
40
+
41
+ # Setup DDP:
42
+ init_distributed_mode(args)
43
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
44
+ rank = dist.get_rank()
45
+ device = rank % torch.cuda.device_count()
46
+ seed = args.global_seed * dist.get_world_size() + rank
47
+ torch.manual_seed(seed)
48
+ torch.cuda.set_device(device)
49
+
50
+ # Setup an experiment folder:
51
+ if rank == 0:
52
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
53
+ experiment_index = len(glob(f"{args.results_dir}/*"))
54
+ model_string_name = args.gpt_model.replace("/", "-")
55
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"
56
+ checkpoint_dir = f"{experiment_dir}/checkpoints"
57
+ os.makedirs(checkpoint_dir, exist_ok=True)
58
+ logger = create_logger(experiment_dir)
59
+ logger.info(f"Experiment directory created at {experiment_dir}")
60
+
61
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
62
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
63
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
64
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
65
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
66
+
67
+ else:
68
+ logger = create_logger(None)
69
+
70
+ # training args
71
+ logger.info(f"{args}")
72
+
73
+ # training env
74
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
75
+
76
+
77
+ # Setup model
78
+ latent_size = args.image_size // args.downsample_size
79
+ model = GPT_models[args.gpt_model](
80
+ vocab_size=args.vocab_size,
81
+ block_size=latent_size ** 2,
82
+ num_classes=args.num_classes,
83
+ cls_token_num=args.cls_token_num,
84
+ model_type=args.gpt_type,
85
+ resid_dropout_p=args.dropout_p,
86
+ ffn_dropout_p=args.dropout_p,
87
+ token_dropout_p=args.token_dropout_p,
88
+ adapter_size=args.adapter_size,
89
+ condition_type=args.condition_type,
90
+ ).to(device)
91
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
92
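+ # frozen HED detector produces soft edge maps that serve as the control condition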
+ get_condition = HEDdetector().to(device).eval()
93
+
94
+
95
+ # Setup optimizer
96
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
97
+
98
+ train_dataset = build_t2i_control_code(args)
99
+ sampler = DistributedSampler(
100
+ train_dataset,
101
+ num_replicas=dist.get_world_size(),
102
+ rank=rank,
103
+ shuffle=True,
104
+ seed=args.global_seed
105
+ )
106
+
107
+ loader = torch.utils.data.DataLoader(
108
+ train_dataset,
109
+ shuffle=False,
110
+ collate_fn=train_dataset.collate_fn,
111
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
112
+ num_workers=args.num_workers,
113
+ pin_memory=True,
114
+ sampler=sampler,
115
+ drop_last=True
116
+ )
117
+ logger.info(f"Dataset contains {len(train_dataset):,} images")
118
+
119
+ # Prepare models for training:
120
+ if args.gpt_ckpt:
121
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
122
+ model.load_state_dict(checkpoint["model"], strict=False)
123
+ # optimizer.load_state_dict(checkpoint["optimizer"])
124
+ train_steps = 0#checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
125
+ start_epoch = 0#int(train_steps / int(len(dataset) / args.global_batch_size))
126
+ train_steps = 0#int(start_epoch * int(len(dataset) / args.global_batch_size))
127
+ del checkpoint
128
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
129
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
130
+ else:
131
+ train_steps = 0
132
+ start_epoch = 0
133
+
134
+ if not args.no_compile:
135
+ logger.info("compiling the model... (may take several minutes)")
136
+ model = torch.compile(model) # requires PyTorch 2.0
137
+ # model.zero_init_mlp()
138
+ model = DDP(model.to(device), device_ids=[args.gpu], find_unused_parameters=True)
139
+ model.train() # important! This enables embedding dropout for classifier-free guidance
140
+
141
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
142
+ # initialize a GradScaler. If enabled=False scaler is a no-op
143
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
144
+ # Variables for monitoring/logging purposes:
145
+ log_steps = 0
146
+ running_loss = 0
147
+ start_time = time.time()
148
+
149
+ logger.info(f"Training for {args.epochs} epochs...")
150
+ for epoch in range(start_epoch, args.epochs):
151
+ sampler.set_epoch(epoch)
152
+ logger.info(f"Beginning epoch {epoch}...")
153
+ # for x, y, attn_mask, valid in loader:
154
+ for batch in loader:
155
+
156
+ x = batch['code']
157
+ caption_emb = batch['caption_emb']
158
+ condition_img = batch['control']
159
+
160
+ attn_mask = batch['attn_mask']
161
+ valid = batch['valid']
162
+ y = caption_emb
163
+ x = x.to(device, non_blocking=True)
164
+ y = y.to(device, non_blocking=True)
165
+ condition_img = condition_img.to(device, non_blocking=True)
166
+
167
+ with torch.no_grad():
168
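+ # HED returns a single-channel edge map in [0, 255]; repeat it to 3 channels and rescale to [-1, 1]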
+ condition_img = get_condition(condition_img).unsqueeze(1).repeat(1,3,1,1)
169
+ condition_img = 2*(condition_img/255 - 0.5)
170
+
171
+ z_indices = x.reshape(x.shape[0], -1)
172
+ c_indices = y.reshape(y.shape[0], y.shape[-2], y.shape[-1])
173
+ assert z_indices.shape[0] == c_indices.shape[0]
174
+ attn_mask = attn_mask.reshape(attn_mask.shape[0], 1, attn_mask.shape[-2], attn_mask.shape[-1]) # (bs, n_head, seq_len, seq_len)
175
+ with torch.cuda.amp.autocast(dtype=ptdtype):
176
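+ # teacher forcing on precomputed codes: inputs are the codes shifted by one, so the mask is cropped by one position on each axis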
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices, mask=attn_mask[:, :, :-1,:-1], valid=valid, condition=condition_img.to(ptdtype))
177
+ # backward pass, with gradient scaling if training in fp16
178
+ scaler.scale(loss).backward()
179
+ if args.max_grad_norm != 0.0:
180
+ scaler.unscale_(optimizer)
181
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
182
+ # step the optimizer and scaler if training in fp16
183
+ scaler.step(optimizer)
184
+ scaler.update()
185
+ # flush the gradients as soon as we can, no need for this memory anymore
186
+ optimizer.zero_grad(set_to_none=True)
187
+
188
+ # Log loss values:
189
+ running_loss += loss.item()
190
+ log_steps += 1
191
+ train_steps += 1
192
+ if train_steps % args.log_every == 0:
193
+ # Measure training speed:
194
+ torch.cuda.synchronize()
195
+ end_time = time.time()
196
+ steps_per_sec = log_steps / (end_time - start_time)
197
+ # Reduce loss history over all processes:
198
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
199
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
200
+ avg_loss = avg_loss.item() / dist.get_world_size()
201
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
202
+ # Reset monitoring variables:
203
+ running_loss = 0
204
+ log_steps = 0
205
+ start_time = time.time()
206
+
207
+ # Save checkpoint:
208
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
209
+ if rank == 0:
210
+ if not args.no_compile:
211
+ model_weight = model.module._orig_mod.state_dict()
212
+ else:
213
+ model_weight = model.module.state_dict()
214
+ checkpoint = {
215
+ "model": model_weight,
216
+ "steps": train_steps,
217
+ "args": args
218
+ }
219
+ if not args.no_local_save:
220
+ checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
221
+ torch.save(checkpoint, checkpoint_path)
222
+ logger.info(f"Saved checkpoint to {checkpoint_path}")
223
+
224
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
225
+ torch.save(checkpoint, cloud_checkpoint_path)
226
+ logger.info(f"Saved checkpoint in cloud to {cloud_checkpoint_path}")
227
+ dist.barrier()
228
+
229
+ model.eval() # important! This disables randomized embedding dropout
230
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
231
+
232
+ logger.info("Done!")
233
+ dist.destroy_process_group()
234
+
235
+
236
+
237
+ if __name__ == "__main__":
238
+ parser = argparse.ArgumentParser()
239
+ parser.add_argument("--data-path", type=str, required=False)
240
+ parser.add_argument("--t5-feat-path", type=str, required=False)
241
+ parser.add_argument("--short-t5-feat-path", type=str, default=None, help="short caption of t5_feat_path")
242
+ parser.add_argument("--cloud-save-path", type=str, required=False, help='please specify a cloud disk path, if not, local path')
243
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
244
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
245
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
246
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
247
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
248
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-XL")
249
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
250
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i")
251
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
252
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
253
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
254
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
255
+ parser.add_argument("--drop-path", type=float, default=0.0, help="drop_path_rate of attention and ffn")
256
+ parser.add_argument("--no-compile", action='store_true')
257
+ parser.add_argument("--results-dir", type=str, default="results")
258
+ parser.add_argument("--dataset", type=str, default='t2i_control')
259
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=512)
260
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
261
+ parser.add_argument("--num-classes", type=int, default=1000)
262
+ parser.add_argument("--epochs", type=int, default=3)
263
+ parser.add_argument("--lr", type=float, default=5e-5)
264
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use.")
265
+ parser.add_argument("--beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
266
+ parser.add_argument("--beta2", type=float, default=0.95, help="The beta2 parameter for the Adam optimizer.")
267
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
268
+ parser.add_argument("--global-batch-size", type=int, default=88)
269
+ parser.add_argument("--global-seed", type=int, default=0)
270
+ parser.add_argument("--num-workers", type=int, default=24)
271
+ parser.add_argument("--log-every", type=int, default=100)
272
+ parser.add_argument("--ckpt-every", type=int, default=10000)
273
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
274
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
275
+
276
+ parser.add_argument("--condition-type", type=str, choices=['canny', 'hed', 'lineart', 'depth'], default="hed")
277
+ parser.add_argument("--code-path", type=str, required=True)
278
+ parser.add_argument("--code-path2", type=str, default=None)
279
+ parser.add_argument("--get-image", type=bool, default=True)
280
+ parser.add_argument("--get-prompt", type=bool, default=False)
281
+ parser.add_argument("--get-label", type=bool, default=False)
282
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
283
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
284
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
285
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
286
+ parser.add_argument("--keep_in_memory",type=bool,default=False)
287
+ parser.add_argument("--wrong_ids_file",type=str,default=None)
288
+ parser.add_argument("--logging_dir",type=str,default="logs")
289
+ parser.add_argument("--adapter-size", type=str, default='small')
290
+ args = parser.parse_args()
291
+ main(args)
autoregressive/train/train_t2i_hed_multiscale.py ADDED
@@ -0,0 +1,342 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT
4
+ import warnings
5
+ warnings.filterwarnings("ignore")
6
+ from PIL import PngImagePlugin
7
+ MaximumDecompressedSize = 1024
8
+ MegaByte = 2**20
9
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
10
+ import torch
11
+ torch.backends.cuda.matmul.allow_tf32 = True
12
+ torch.backends.cudnn.allow_tf32 = True
13
+ import torch.distributed as dist
14
+ from torch.nn.parallel import DistributedDataParallel as DDP
15
+ from torch.utils.data import DataLoader
16
+ from torch.utils.data.distributed import DistributedSampler
17
+ from torchvision import transforms
18
+ from glob import glob
19
+ import time
20
+ import argparse
21
+ import os
22
+ import sys
23
+ current_directory = os.getcwd()
24
+ sys.path.append(current_directory)
25
+ from utils.distributed import init_distributed_mode
26
+ from utils.logger import create_logger
27
+ from dataset.build import build_dataset
28
+ from dataset.augmentation import center_crop_arr
29
+ from autoregressive.train.train_c2i import creat_optimizer
30
+ from autoregressive.models.gpt_t2i import GPT_models
31
+ from tokenizer.tokenizer_image.vq_model import VQ_models
32
+ from accelerate.utils import ProjectConfiguration, set_seed
33
+ from pathlib import Path
34
+ from accelerate import Accelerator
35
+ from language.t5 import T5Embedder
36
+ from dataset.t2i_control import build_t2i_control_code
37
+ import torch._dynamo
38
+ torch._dynamo.config.suppress_errors = True
39
+ import random
+ import numpy as np
40
+ import torch.nn.functional as F
41
+ from condition.hed import HEDdetector
42
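+ # pick a training resolution between 384 and 1024 (multiples of 16, the VQ downsample factor) under a fixed latent-token budget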
+ def random_sample_scale(image, condition=None):
43
+
44
+ H = np.arange(384, 1024+16, 16)
45
+ W = np.arange(384, 1024+16, 16)
46
+ resolution = [1024,1024]
47
+ while (resolution[0]//16) * (resolution[1]//16) > 2304:  # resample until the latent token count (H/16 x W/16) fits the 2304-token budget
48
+ resolution = [random.choice(H), random.choice(W)]
49
+ assert (resolution[0]//16) * (resolution[1]//16) <= 2304
50
+ image = F.interpolate(image.to(torch.float32), size=resolution, mode='bilinear', align_corners=False, antialias=True)
51
+ if condition is not None:
52
+ condition = F.interpolate(condition.to(torch.float32), size=resolution, mode='bilinear', align_corners=False, antialias=True)
53
+ return image, condition
54
+ return image
55
+
56
+
57
+ def main(args):
58
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
59
+
60
+ # Setup DDP:
61
+ init_distributed_mode(args)
62
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
63
+ rank = dist.get_rank()
64
+ device = rank % torch.cuda.device_count()
65
+ seed = args.global_seed * dist.get_world_size() + rank
66
+ torch.manual_seed(seed)
67
+ torch.cuda.set_device(device)
68
+
69
+
70
+ # Setup an experiment folder:
71
+ if rank == 0:
72
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
73
+ experiment_index = len(glob(f"{args.results_dir}/*"))
74
+ model_string_name = args.gpt_model.replace("/", "-")
75
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"
76
+ checkpoint_dir = f"{experiment_dir}/checkpoints"
77
+ os.makedirs(checkpoint_dir, exist_ok=True)
78
+ logger = create_logger(experiment_dir)
79
+ logger.info(f"Experiment directory created at {experiment_dir}")
80
+
81
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
82
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
83
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
84
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
85
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
86
+
87
+ else:
88
+ logger = create_logger(None)
89
+
90
+ # training args
91
+ logger.info(f"{args}")
92
+
93
+ # training env
94
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
95
+
96
+
97
+ # Setup model
98
+ latent_size = args.image_size // args.downsample_size
99
+ model = GPT_models[args.gpt_model](
100
+ vocab_size=args.vocab_size,
101
+ block_size=latent_size ** 2,
102
+ num_classes=args.num_classes,
103
+ cls_token_num=args.cls_token_num,
104
+ model_type=args.gpt_type,
105
+ resid_dropout_p=args.dropout_p,
106
+ ffn_dropout_p=args.dropout_p,
107
+ token_dropout_p=args.token_dropout_p,
108
+ condition_type=args.condition_type,
109
+ ).to(device)
110
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
111
+
112
+
113
+ # Setup optimizer
114
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
115
+
116
+ # Setup data:
117
+ if args.dataset == 't2i_control': # create and load model
118
+ vq_model = VQ_models[args.vq_model](
119
+ codebook_size=args.codebook_size,
120
+ codebook_embed_dim=args.codebook_embed_dim)
121
+ vq_model.to(device)
122
+ vq_model.eval()
123
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
124
+ vq_model.load_state_dict(checkpoint["model"])
125
+ del checkpoint
126
+
127
+ # train_dataset,val_dataset = build_dataset(args, tokenizer=None, accelerator=accelerator)
128
+ train_dataset = build_t2i_control_code(args)
129
+ sampler = DistributedSampler(
130
+ train_dataset,
131
+ num_replicas=dist.get_world_size(),
132
+ rank=rank,
133
+ shuffle=True,
134
+ seed=args.global_seed
135
+ )
136
+
137
+ loader = torch.utils.data.DataLoader(
138
+ train_dataset,
139
+ shuffle=False,
140
+ collate_fn=train_dataset.collate_fn,
141
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
142
+ num_workers=args.num_workers,
143
+ pin_memory=True,
144
+ sampler=sampler,
145
+ drop_last=True
146
+ )
147
+ logger.info(f"Dataset contains {len(train_dataset):,} images")
148
+
149
+ # Prepare models for training:
150
+ if args.gpt_ckpt:
151
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
152
+ model.load_state_dict(checkpoint["model"], strict=False)
153
+ # optimizer.load_state_dict(checkpoint["optimizer"])
154
+ train_steps = 0#checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
155
+ start_epoch = 0#int(train_steps / int(len(dataset) / args.global_batch_size))
156
+ train_steps = 0#int(start_epoch * int(len(dataset) / args.global_batch_size))
157
+ del checkpoint
158
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
159
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
160
+ else:
161
+ train_steps = 0
162
+ start_epoch = 0
163
+
164
+ if not args.no_compile:
165
+ logger.info("compiling the model... (may take several minutes)")
166
+ model = torch.compile(model) # requires PyTorch 2.0
167
+ # model.zero_init_mlp()
168
+ model = DDP(model.to(device), device_ids=[args.gpu], find_unused_parameters=True)
169
+ model.train() # important! This enables embedding dropout for classifier-free guidance
170
+
171
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
172
+ # initialize a GradScaler. If enabled=False scaler is a no-op
173
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
174
+ # Variables for monitoring/logging purposes:
175
+ log_steps = 0
176
+ running_loss = 0
177
+ start_time = time.time()
178
+ get_condition = HEDdetector().to(device).eval()
179
+ logger.info(f"Training for {args.epochs} epochs...")
180
+ for epoch in range(start_epoch, args.epochs):
181
+ sampler.set_epoch(epoch)
182
+ logger.info(f"Beginning epoch {epoch}...")
183
+
184
+ for batch in loader:
185
+
186
+ x = batch['code']
187
+ image = batch['image']
188
+ caption_emb = batch['caption_emb']
189
+ condition_img = batch['control']
190
+ condition_img = 2*(condition_img - 0.5)
191
+ attn_mask = batch['attn_mask']
192
+ valid = batch['valid']
193
+ y = caption_emb
194
+ x = x.to(device, non_blocking=True)
195
+ image = image.to(device, non_blocking=True)
196
+ y = y.to(device, non_blocking=True)
197
+ condition_img = condition_img.to(device, non_blocking=True)
198
+ image = random_sample_scale(image)
199
+ with torch.no_grad():
200
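+ # recompute the HED edge map from the resized image so the control map matches the sampled resolution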
+ condition_img = get_condition(image).unsqueeze(1).repeat(1,3,1,1)
201
+ condition_img = 2*(condition_img/255 - 0.5)
202
+
203
+ if args.dataset == 't2i_control':
204
+ img = 2*(image/255 - 0.5)
205
+
206
+ with torch.no_grad():
207
+ _, _, [_, _, indices] = vq_model.encode(img)
208
+ x = indices.reshape(img.shape[0], -1)
209
+ z_indices = x.reshape(x.shape[0], -1)
210
+ c_indices = y.reshape(y.shape[0], y.shape[-2], y.shape[-1])
211
+ assert z_indices.shape[0] == c_indices.shape[0]
212
+ attn_mask = attn_mask.reshape(attn_mask.shape[0], 1, attn_mask.shape[-2], attn_mask.shape[-1]) # (bs, n_head, seq_len, seq_len)
213
+ with torch.cuda.amp.autocast(dtype=ptdtype):
214
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices, mask=attn_mask[:, :, :x.shape[1]+120-1,:x.shape[1]+120-1], valid=valid, condition=condition_img.to(ptdtype))
215
+ # backward pass, with gradient scaling if training in fp16
216
+ scaler.scale(loss).backward()
217
+ if args.max_grad_norm != 0.0:
218
+ scaler.unscale_(optimizer)
219
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
220
+ # step the optimizer and scaler if training in fp16
221
+ scaler.step(optimizer)
222
+ scaler.update()
223
+ # flush the gradients as soon as we can, no need for this memory anymore
224
+ optimizer.zero_grad(set_to_none=True)
225
+
226
+ # Log loss values:
227
+ running_loss += loss.item()
228
+ log_steps += 1
229
+ train_steps += 1
230
+ if train_steps % args.log_every == 0:
231
+ # Measure training speed:
232
+ torch.cuda.synchronize()
233
+ end_time = time.time()
234
+ steps_per_sec = log_steps / (end_time - start_time)
235
+ # Reduce loss history over all processes:
236
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
237
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
238
+ avg_loss = avg_loss.item() / dist.get_world_size()
239
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
240
+ # Reset monitoring variables:
241
+ running_loss = 0
242
+ log_steps = 0
243
+ start_time = time.time()
244
+
245
+ # Save checkpoint:
246
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
247
+ if rank == 0:
248
+ if not args.no_compile:
249
+ model_weight = model.module._orig_mod.state_dict()
250
+ else:
251
+ model_weight = model.module.state_dict()
252
+ checkpoint = {
253
+ "model": model_weight,
254
+ "steps": train_steps,
255
+ "args": args
256
+ }
257
+ if not args.no_local_save:
258
+ checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
259
+ torch.save(checkpoint, checkpoint_path)
260
+ logger.info(f"Saved checkpoint to {checkpoint_path}")
261
+
262
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
263
+ torch.save(checkpoint, cloud_checkpoint_path)
264
+ logger.info(f"Saved checkpoint in cloud to {cloud_checkpoint_path}")
265
+ dist.barrier()
266
+
267
+ model.eval() # important! This disables randomized embedding dropout
268
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
269
+
270
+ logger.info("Done!")
271
+ dist.destroy_process_group()
272
+
273
+
274
+
275
+ if __name__ == "__main__":
276
+ parser = argparse.ArgumentParser()
277
+ parser.add_argument("--data-path", type=str, required=False)
278
+ parser.add_argument("--t5-feat-path", type=str, required=False)
279
+ parser.add_argument("--short-t5-feat-path", type=str, default=None, help="short caption of t5_feat_path")
280
+ parser.add_argument("--cloud-save-path", type=str, required=False, help='please specify a cloud disk path, if not, local path')
281
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
282
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
283
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
284
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
285
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
286
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-XL")
287
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
288
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i")
289
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
290
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
291
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
292
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
293
+ parser.add_argument("--drop-path", type=float, default=0.0, help="drop_path_rate of attention and ffn")
294
+ parser.add_argument("--no-compile", action='store_true')
295
+ parser.add_argument("--results-dir", type=str, default="results")
296
+ parser.add_argument("--dataset", type=str, default='t2i_control')
297
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512, 768, 832, 896, 960], default=384)
298
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
299
+ parser.add_argument("--num-classes", type=int, default=1000)
300
+ parser.add_argument("--epochs", type=int, default=15)
301
+ parser.add_argument("--lr", type=float, default=1e-5)
302
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use.")
303
+ parser.add_argument("--beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
304
+ parser.add_argument("--beta2", type=float, default=0.95, help="The beta2 parameter for the Adam optimizer.")
305
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
306
+ parser.add_argument("--global-batch-size", type=int, default=16)
307
+ parser.add_argument("--global-seed", type=int, default=0)
308
+ parser.add_argument("--num-workers", type=int, default=24)
309
+ parser.add_argument("--log-every", type=int, default=100)
310
+ parser.add_argument("--ckpt-every", type=int, default=10000)
311
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
312
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
313
+
314
+ parser.add_argument("--code-path", type=str, required=True)
315
+ parser.add_argument("--code-path2", type=str, default=None)
316
+ parser.add_argument("--condition-type", type=str, choices=['segmentation', 'canny', 'hed', 'lineart', 'depth'], default="hed")
317
+ parser.add_argument("--get-image", type=bool, default=True)
318
+ parser.add_argument("--get-prompt", type=bool, default=False)
319
+ parser.add_argument("--get-label", type=bool, default=False)
320
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
321
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
322
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
323
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
324
+ parser.add_argument("--keep_in_memory",type=bool,default=False)
325
+ parser.add_argument("--wrong_ids_file",type=str,default=None)
326
+ parser.add_argument("--logging_dir",type=str,default="logs")
327
+ parser.add_argument("--report_to",type=str,default="wandb")
328
+ parser.add_argument("--task_name",type=str,default='segmentation')
329
+ parser.add_argument("--dataset_name",type=str,default=None)
330
+ parser.add_argument("--dataset_config_name",type=str,default=None)
331
+
332
+ parser.add_argument("--image_column", type=str, default="image", help="The column of the dataset containing the target image.")
333
+ parser.add_argument("--conditioning_image_column",type=str,default="control_seg",help="The column of the dataset containing the controlnet conditioning image.")
334
+ parser.add_argument("--caption_column",type=str,default="prompt",help="The column of the dataset containing a caption or a list of captions.")
335
+ parser.add_argument("--label_column",type=str,default=None,help="The column of the dataset containing the original labels. `seg_map` for ADE20K; `panoptic_seg_map` for COCO-Stuff.")
336
+ parser.add_argument("--max_train_samples",type=int,default=None)
337
+ parser.add_argument("--image_condition_dropout",type=float,default=0)
338
+ parser.add_argument("--text_condition_dropout",type=float,default=0)
339
+ parser.add_argument("--all_condition_dropout",type=float,default=0)
340
+
341
+ args = parser.parse_args()
342
+ main(args)
autoregressive/train/train_t2i_lineart.py ADDED
@@ -0,0 +1,290 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT
4
+ from PIL import PngImagePlugin
5
+ MaximumDecompressedSize = 1024
6
+ MegaByte = 2**20
7
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
8
+ import torch
9
+ torch.backends.cuda.matmul.allow_tf32 = True
10
+ torch.backends.cudnn.allow_tf32 = True
11
+ import torch.distributed as dist
12
+ from torch.nn.parallel import DistributedDataParallel as DDP
13
+ from torch.utils.data import DataLoader
14
+ from torch.utils.data.distributed import DistributedSampler
15
+ from torchvision import transforms
16
+ from glob import glob
17
+ import time
18
+ import argparse
19
+ import os
20
+ import sys
21
+ current_directory = os.getcwd()
22
+ sys.path.append(current_directory)
23
+ from utils.distributed import init_distributed_mode
24
+ from utils.logger import create_logger
25
+ from dataset.build import build_dataset
26
+ from dataset.augmentation import center_crop_arr
27
+ from autoregressive.train.train_c2i import creat_optimizer
28
+ from autoregressive.models.gpt_t2i import GPT_models
29
+ from tokenizer.tokenizer_image.vq_model import VQ_models
30
+ from accelerate.utils import ProjectConfiguration, set_seed
31
+ from pathlib import Path
32
+ from accelerate import Accelerator
33
+ from language.t5 import T5Embedder
34
+ from dataset.t2i_control import build_t2i_control_code
35
+ import torch._dynamo
36
+ from condition.hed import HEDdetector
37
+ from condition.lineart import LineArt
38
+ torch._dynamo.config.suppress_errors = True
39
+ def main(args):
40
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
41
+
42
+ # Setup DDP:
43
+ init_distributed_mode(args)
44
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
45
+ rank = dist.get_rank()
46
+ device = rank % torch.cuda.device_count()
47
+ seed = args.global_seed * dist.get_world_size() + rank
48
+ torch.manual_seed(seed)
49
+ torch.cuda.set_device(device)
50
+
51
+
52
+ # Setup an experiment folder:
53
+ if rank == 0:
54
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
55
+ experiment_index = len(glob(f"{args.results_dir}/*"))
56
+ model_string_name = args.gpt_model.replace("/", "-")
57
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"
58
+ checkpoint_dir = f"{experiment_dir}/checkpoints"
59
+ os.makedirs(checkpoint_dir, exist_ok=True)
60
+ logger = create_logger(experiment_dir)
61
+ logger.info(f"Experiment directory created at {experiment_dir}")
62
+
63
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
64
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
65
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
66
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
67
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
68
+
69
+ else:
70
+ logger = create_logger(None)
71
+
72
+ # training args
73
+ logger.info(f"{args}")
74
+
75
+ # training env
76
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
77
+
78
+
79
+ # Setup model
80
+ latent_size = args.image_size // args.downsample_size
81
+ model = GPT_models[args.gpt_model](
82
+ vocab_size=args.vocab_size,
83
+ block_size=latent_size ** 2,
84
+ num_classes=args.num_classes,
85
+ cls_token_num=args.cls_token_num,
86
+ model_type=args.gpt_type,
87
+ resid_dropout_p=args.dropout_p,
88
+ ffn_dropout_p=args.dropout_p,
89
+ token_dropout_p=args.token_dropout_p,
90
+ condition_type=args.condition_type,
91
+ ).to(device)
92
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
93
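+ # learned line-art extractor (weights loaded from a hard-coded local path); used only under torch.no_grad() to derive control maps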
+ get_condition = LineArt()
94
+ get_condition.load_state_dict(torch.load('/data/vjuicefs_sz_cv_v2/11171709/ControlAR/condition/ckpts/model.pth', map_location=torch.device('cpu')))
95
+ get_condition.to(device)
96
+
97
+ # Setup optimizer
98
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
99
+
100
+ train_dataset = build_t2i_control_code(args)
101
+ sampler = DistributedSampler(
102
+ train_dataset,
103
+ num_replicas=dist.get_world_size(),
104
+ rank=rank,
105
+ shuffle=True,
106
+ seed=args.global_seed
107
+ )
108
+
109
+ loader = torch.utils.data.DataLoader(
110
+ train_dataset,
111
+ shuffle=False,
112
+ collate_fn=train_dataset.collate_fn,
113
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
114
+ num_workers=args.num_workers,
115
+ pin_memory=True,
116
+ sampler=sampler,
117
+ drop_last=True
118
+ )
119
+ logger.info(f"Dataset contains {len(train_dataset):,} images")
120
+
121
+ # Prepare models for training:
122
+ if args.gpt_ckpt:
123
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
124
+ model.load_state_dict(checkpoint["model"], strict=False)
125
+ # optimizer.load_state_dict(checkpoint["optimizer"])
126
+ train_steps = 0#checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
127
+ start_epoch = 0#int(train_steps / int(len(dataset) / args.global_batch_size))
128
+ train_steps = 0#int(start_epoch * int(len(dataset) / args.global_batch_size))
129
+ del checkpoint
130
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
131
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
132
+ else:
133
+ train_steps = 0
134
+ start_epoch = 0
135
+
136
+ if not args.no_compile:
137
+ logger.info("compiling the model... (may take several minutes)")
138
+ model = torch.compile(model) # requires PyTorch 2.0
139
+ # model.zero_init_mlp()
140
+ model = DDP(model.to(device), device_ids=[args.gpu], find_unused_parameters=True)
141
+ model.train() # important! This enables embedding dropout for classifier-free guidance
142
+
143
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
144
+ # initialize a GradScaler. If enabled=False scaler is a no-op
145
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
146
+ # Variables for monitoring/logging purposes:
147
+ log_steps = 0
148
+ running_loss = 0
149
+ start_time = time.time()
150
+
151
+ logger.info(f"Training for {args.epochs} epochs...")
152
+ for epoch in range(start_epoch, args.epochs):
153
+ sampler.set_epoch(epoch)
154
+ logger.info(f"Beginning epoch {epoch}...")
155
+ for batch in loader:
156
+
157
+ x = batch['code']
158
+ caption_emb = batch['caption_emb']
159
+ condition_img = batch['control']
160
+
161
+ attn_mask = batch['attn_mask']
162
+ valid = batch['valid']
163
+ y = caption_emb
164
+ x = x.to(device, non_blocking=True)
165
+ y = y.to(device, non_blocking=True)
166
+ condition_img = condition_img.to(device, non_blocking=True)
167
+ with torch.no_grad():
168
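+ # LineArt outputs a single-channel map in [0, 1]; repeat it to 3 channels and rescale to [-1, 1]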
+ condition_img = get_condition(condition_img.float()).repeat(1,3,1,1)
169
+ condition_img = 2*(condition_img - 0.5)
170
+
171
+ z_indices = x.reshape(x.shape[0], -1)
172
+ c_indices = y.reshape(y.shape[0], y.shape[-2], y.shape[-1])
173
+ assert z_indices.shape[0] == c_indices.shape[0]
174
+ attn_mask = attn_mask.reshape(attn_mask.shape[0], 1, attn_mask.shape[-2], attn_mask.shape[-1]) # (bs, n_head, seq_len, seq_len)
175
+ with torch.cuda.amp.autocast(dtype=ptdtype):
176
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices, mask=attn_mask[:, :, :-1,:-1], valid=valid, condition=condition_img.to(ptdtype))
177
+ # backward pass, with gradient scaling if training in fp16
178
+ scaler.scale(loss).backward()
179
+ if args.max_grad_norm != 0.0:
180
+ scaler.unscale_(optimizer)
181
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
182
+ # step the optimizer and scaler if training in fp16
183
+ scaler.step(optimizer)
184
+ scaler.update()
185
+ # flush the gradients as soon as we can, no need for this memory anymore
186
+ optimizer.zero_grad(set_to_none=True)
187
+
188
+ # Log loss values:
189
+ running_loss += loss.item()
190
+ log_steps += 1
191
+ train_steps += 1
192
+ if train_steps % args.log_every == 0:
193
+ # Measure training speed:
194
+ torch.cuda.synchronize()
195
+ end_time = time.time()
196
+ steps_per_sec = log_steps / (end_time - start_time)
197
+ # Reduce loss history over all processes:
198
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
199
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
200
+ avg_loss = avg_loss.item() / dist.get_world_size()
201
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
202
+ # Reset monitoring variables:
203
+ running_loss = 0
204
+ log_steps = 0
205
+ start_time = time.time()
206
+
207
+ # Save checkpoint:
208
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
209
+ if rank == 0:
210
+ if not args.no_compile:
211
+ model_weight = model.module._orig_mod.state_dict()
212
+ else:
213
+ model_weight = model.module.state_dict()
214
+ checkpoint = {
215
+ "model": model_weight,
216
+ "steps": train_steps,
217
+ "args": args
218
+ }
219
+ if not args.no_local_save:
220
+ checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
221
+ torch.save(checkpoint, checkpoint_path)
222
+ logger.info(f"Saved checkpoint to {checkpoint_path}")
223
+
224
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
225
+ torch.save(checkpoint, cloud_checkpoint_path)
226
+ logger.info(f"Saved checkpoint in cloud to {cloud_checkpoint_path}")
227
+ dist.barrier()
228
+
229
+ model.eval() # important! This disables randomized embedding dropout
230
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
231
+
232
+ logger.info("Done!")
233
+ dist.destroy_process_group()
234
+
235
+
236
+
237
+ if __name__ == "__main__":
238
+ parser = argparse.ArgumentParser()
239
+ parser.add_argument("--data-path", type=str, required=False)
240
+ parser.add_argument("--t5-feat-path", type=str, required=False)
241
+ parser.add_argument("--short-t5-feat-path", type=str, default=None, help="short caption of t5_feat_path")
242
+ parser.add_argument("--cloud-save-path", type=str, required=False, help='please specify a cloud disk path, if not, local path')
243
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
244
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
245
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
246
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
247
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
248
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-XL")
249
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
250
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i")
251
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
252
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
253
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
254
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
255
+ parser.add_argument("--drop-path", type=float, default=0.0, help="drop_path_rate of attention and ffn")
256
+ parser.add_argument("--no-compile", action='store_true')
257
+ parser.add_argument("--results-dir", type=str, default="results")
258
+ parser.add_argument("--dataset", type=str, default='t2i_control')
259
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=512)
260
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
261
+ parser.add_argument("--num-classes", type=int, default=1000)
262
+ parser.add_argument("--epochs", type=int, default=3)
263
+ parser.add_argument("--lr", type=float, default=5e-5)
264
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use.")
265
+ parser.add_argument("--beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
266
+ parser.add_argument("--beta2", type=float, default=0.95, help="The beta2 parameter for the Adam optimizer.")
267
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
268
+ parser.add_argument("--global-batch-size", type=int, default=88)
269
+ parser.add_argument("--global-seed", type=int, default=0)
270
+ parser.add_argument("--num-workers", type=int, default=24)
271
+ parser.add_argument("--log-every", type=int, default=100)
272
+ parser.add_argument("--ckpt-every", type=int, default=10000)
273
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
274
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
275
+
276
+ parser.add_argument("--condition-type", type=str, choices=['canny', 'hed', 'lineart', 'depth'], default="lineart")
277
+ parser.add_argument("--code-path", type=str, required=True)
278
+ parser.add_argument("--code-path2", type=str, default=None)
279
+ parser.add_argument("--get-image", type=bool, default=True)
280
+ parser.add_argument("--get-prompt", type=bool, default=False)
281
+ parser.add_argument("--get-label", type=bool, default=False)
282
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
283
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
284
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
285
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
286
+ parser.add_argument("--keep_in_memory",type=bool,default=False)
287
+ parser.add_argument("--wrong_ids_file",type=str,default=None)
288
+ parser.add_argument("--logging_dir",type=str,default="logs")
289
+ args = parser.parse_args()
290
+ main(args)
autoregressive/train/train_t2i_lineart_multiscale.py ADDED
@@ -0,0 +1,343 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT
4
+ import warnings
5
+ warnings.filterwarnings("ignore")
6
+ from PIL import PngImagePlugin
7
+ MaximumDecompressedSize = 1024
8
+ MegaByte = 2**20
9
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
10
+ import torch
11
+ torch.backends.cuda.matmul.allow_tf32 = True
12
+ torch.backends.cudnn.allow_tf32 = True
13
+ import torch.distributed as dist
14
+ from torch.nn.parallel import DistributedDataParallel as DDP
15
+ from torch.utils.data import DataLoader
16
+ from torch.utils.data.distributed import DistributedSampler
17
+ from torchvision import transforms
18
+ from glob import glob
19
+ import time
20
+ import argparse
21
+ import os
22
+ import sys
23
+ current_directory = os.getcwd()
24
+ sys.path.append(current_directory)
25
+ from utils.distributed import init_distributed_mode
26
+ from utils.logger import create_logger
27
+ from dataset.build import build_dataset
28
+ from dataset.augmentation import center_crop_arr
29
+ from autoregressive.train.train_c2i import creat_optimizer
30
+
31
+ from autoregressive.models.gpt_t2i import GPT_models
32
+ from tokenizer.tokenizer_image.vq_model import VQ_models
33
+ from accelerate.utils import ProjectConfiguration, set_seed
34
+ from pathlib import Path
35
+ from accelerate import Accelerator
36
+ from language.t5 import T5Embedder
37
+ from dataset.t2i_control import build_t2i_control_code
38
+ import torch._dynamo
39
+ torch._dynamo.config.suppress_errors = True
40
+ import random
+ import numpy as np
41
+ import torch.nn.functional as F
42
+ from condition.hed import HEDdetector
43
+ from condition.lineart import LineArt
44
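+ # same multi-scale resolution sampling as the HED multi-scale trainer: H and W drawn from 384..1024 in steps of 16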
+ def random_sample_scale(image, condition=None):
45
+ H = np.arange(384, 1024+16, 16)
46
+ W = np.arange(384, 1024+16, 16)
47
+ resolution = [1024,1024]
48
+ while (resolution[0]//16) * (resolution[1]//16) > 2304:  # resample until the latent token count (H/16 x W/16) fits the 2304-token budget
49
+ resolution = [random.choice(H), random.choice(W)]
50
+ assert (resolution[0]//16) * (resolution[1]//16) <= 2304
51
+ image = F.interpolate(image.to(torch.float32), size=resolution, mode='bilinear', align_corners=False, antialias=True)
52
+ if condition is not None:
53
+ condition = F.interpolate(condition.to(torch.float32), size=resolution, mode='bilinear', align_corners=False, antialias=True)
54
+ return image, condition
55
+ return image
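+ # NOTE (editorial, illustrative comment, not in the original file): this helper is intended to
+ # resize each batch to a randomly sampled resolution whose sides are multiples of 16. As written,
+ # the initial [1024, 1024] already satisfies the sum-based guard (1024//16 + 1024//16 = 128 <= 2304),
+ # so the loop body never runs and images are always resized to 1024x1024; a token-count bound of
+ # (H//16)*(W//16) <= 2304 was presumably intended for true multi-scale sampling.
+ # Illustrative usage: imgs_rs, conds_rs = random_sample_scale(imgs, conds)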
56
+
57
+
58
+ def main(args):
59
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
60
+
61
+ # Setup DDP:
62
+ init_distributed_mode(args)
63
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
64
+ rank = dist.get_rank()
65
+ device = rank % torch.cuda.device_count()
66
+ seed = args.global_seed * dist.get_world_size() + rank
67
+ torch.manual_seed(seed)
68
+ torch.cuda.set_device(device)
69
+
70
+
71
+ # Setup an experiment folder:
72
+ if rank == 0:
73
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
74
+ experiment_index = len(glob(f"{args.results_dir}/*"))
75
+ model_string_name = args.gpt_model.replace("/", "-")
76
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"
77
+ checkpoint_dir = f"{experiment_dir}/checkpoints"
78
+ os.makedirs(checkpoint_dir, exist_ok=True)
79
+ logger = create_logger(experiment_dir)
80
+ logger.info(f"Experiment directory created at {experiment_dir}")
81
+
82
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
83
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
84
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
85
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
86
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
87
+
88
+ else:
89
+ logger = create_logger(None)
90
+
91
+ # training args
92
+ logger.info(f"{args}")
93
+
94
+ # training env
95
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
96
+
97
+
98
+ # Setup model
99
+ latent_size = args.image_size // args.downsample_size
100
+ model = GPT_models[args.gpt_model](
101
+ vocab_size=args.vocab_size,
102
+ block_size=latent_size ** 2,
103
+ num_classes=args.num_classes,
104
+ cls_token_num=args.cls_token_num,
105
+ model_type=args.gpt_type,
106
+ resid_dropout_p=args.dropout_p,
107
+ ffn_dropout_p=args.dropout_p,
108
+ token_dropout_p=args.token_dropout_p,
109
+ condition_type=args.condition_type,
110
+ ).to(device)
111
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
112
+ get_condition = LineArt()
113
+ get_condition.load_state_dict(torch.load('/data/vjuicefs_sz_cv_v2/11171709/ControlAR/condition/ckpts/model.pth', map_location=torch.device('cpu')))
114
+ get_condition.to(device)
115
+
116
+ # Setup optimizer
117
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
118
+
119
+ # Setup data:
120
+ if args.dataset == 't2i_control': # create and load model
121
+ vq_model = VQ_models[args.vq_model](
122
+ codebook_size=args.codebook_size,
123
+ codebook_embed_dim=args.codebook_embed_dim)
124
+ vq_model.to(device)
125
+ vq_model.eval()
126
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
127
+ vq_model.load_state_dict(checkpoint["model"])
128
+ del checkpoint
129
+
130
+ train_dataset = build_t2i_control_code(args)
131
+ sampler = DistributedSampler(
132
+ train_dataset,
133
+ num_replicas=dist.get_world_size(),
134
+ rank=rank,
135
+ shuffle=True,
136
+ seed=args.global_seed
137
+ )
138
+
139
+ loader = torch.utils.data.DataLoader(
140
+ train_dataset,
141
+ shuffle=False,
142
+ collate_fn=train_dataset.collate_fn,
143
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
144
+ num_workers=args.num_workers,
145
+ pin_memory=True,
146
+ sampler=sampler,
147
+ drop_last=True
148
+ )
149
+ logger.info(f"Dataset contains {len(train_dataset):,} images")
150
+
151
+ # Prepare models for training:
152
+ if args.gpt_ckpt:
153
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
154
+ model.load_state_dict(checkpoint["model"], strict=False)
155
+ # optimizer.load_state_dict(checkpoint["optimizer"])
156
+ train_steps = 0#checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
157
+ start_epoch = 0#int(train_steps / int(len(dataset) / args.global_batch_size))
158
+ train_steps = 0#int(start_epoch * int(len(dataset) / args.global_batch_size))
159
+ del checkpoint
160
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
161
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
162
+ else:
163
+ train_steps = 0
164
+ start_epoch = 0
165
+
166
+ if not args.no_compile:
167
+ logger.info("compiling the model... (may take several minutes)")
168
+ model = torch.compile(model) # requires PyTorch 2.0
169
+ # model.zero_init_mlp()
170
+ model = DDP(model.to(device), device_ids=[args.gpu], find_unused_parameters=True)
171
+ model.train() # important! This enables embedding dropout for classifier-free guidance
172
+
173
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
174
+ # initialize a GradScaler. If enabled=False scaler is a no-op
175
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
176
+ # Variables for monitoring/logging purposes:
177
+ log_steps = 0
178
+ running_loss = 0
179
+ start_time = time.time()
180
+ # get_condition = HEDdetector().to(device).eval()
181
+ logger.info(f"Training for {args.epochs} epochs...")
182
+ for epoch in range(start_epoch, args.epochs):
183
+ sampler.set_epoch(epoch)
184
+ logger.info(f"Beginning epoch {epoch}...")
185
+ for batch in loader:
186
+
187
+ x = batch['code']
188
+ image = batch['image']
189
+ caption_emb = batch['caption_emb']
190
+ condition_img = batch['control']
191
+ condition_img = 2*(condition_img - 0.5)
192
+ attn_mask = batch['attn_mask']
193
+ valid = batch['valid']
194
+ y = caption_emb
195
+ x = x.to(device, non_blocking=True)
196
+ image = image.to(device, non_blocking=True)
197
+ y = y.to(device, non_blocking=True)
198
+ condition_img = condition_img.to(device, non_blocking=True)
199
+ image = random_sample_scale(image)
200
+ with torch.no_grad():
201
+ condition_img = get_condition(image.float()).repeat(1,3,1,1)
202
+ condition_img = 2*(condition_img - 0.5)
203
+
204
+ if args.dataset == 't2i_control':
205
+ img = 2*(image/255 - 0.5)
206
+
207
+ with torch.no_grad():
208
+ _, _, [_, _, indices] = vq_model.encode(img)
209
+ x = indices.reshape(img.shape[0], -1)
210
+ z_indices = x.reshape(x.shape[0], -1)
211
+ c_indices = y.reshape(y.shape[0], y.shape[-2], y.shape[-1])
212
+ assert z_indices.shape[0] == c_indices.shape[0]
213
+ attn_mask = attn_mask.reshape(attn_mask.shape[0], 1, attn_mask.shape[-2], attn_mask.shape[-1]) # (bs, n_head, seq_len, seq_len)
214
+ with torch.cuda.amp.autocast(dtype=ptdtype):
215
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices, mask=attn_mask[:, :, :x.shape[1]+120-1,:x.shape[1]+120-1], valid=valid, condition=condition_img.to(ptdtype))
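+ # (editorial note) the mask is cropped to 120 T5 text tokens plus the image tokens, minus one,
+ # because idx is the target token sequence shifted right by one position.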
216
+ # backward pass, with gradient scaling if training in fp16
217
+ scaler.scale(loss).backward()
218
+ if args.max_grad_norm != 0.0:
219
+ scaler.unscale_(optimizer)
220
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
221
+ # step the optimizer and scaler if training in fp16
222
+ scaler.step(optimizer)
223
+ scaler.update()
224
+ # flush the gradients as soon as we can, no need for this memory anymore
225
+ optimizer.zero_grad(set_to_none=True)
226
+
227
+ # Log loss values:
228
+ running_loss += loss.item()
229
+ log_steps += 1
230
+ train_steps += 1
231
+ if train_steps % args.log_every == 0:
232
+ # Measure training speed:
233
+ torch.cuda.synchronize()
234
+ end_time = time.time()
235
+ steps_per_sec = log_steps / (end_time - start_time)
236
+ # Reduce loss history over all processes:
237
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
238
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
239
+ avg_loss = avg_loss.item() / dist.get_world_size()
240
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
241
+ # Reset monitoring variables:
242
+ running_loss = 0
243
+ log_steps = 0
244
+ start_time = time.time()
245
+
246
+ # Save checkpoint:
247
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
248
+ if rank == 0:
249
+ if not args.no_compile:
250
+ model_weight = model.module._orig_mod.state_dict()
251
+ else:
252
+ model_weight = model.module.state_dict()
253
+ checkpoint = {
254
+ "model": model_weight,
255
+ "steps": train_steps,
256
+ "args": args
257
+ }
258
+ if not args.no_local_save:
259
+ checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
260
+ torch.save(checkpoint, checkpoint_path)
261
+ logger.info(f"Saved checkpoint to {checkpoint_path}")
262
+
263
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
264
+ torch.save(checkpoint, cloud_checkpoint_path)
265
+ logger.info(f"Saved checkpoint in cloud to {cloud_checkpoint_path}")
266
+ dist.barrier()
267
+
268
+ model.eval() # important! This disables randomized embedding dropout
269
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
270
+
271
+ logger.info("Done!")
272
+ dist.destroy_process_group()
273
+
274
+
275
+
276
+ if __name__ == "__main__":
277
+ parser = argparse.ArgumentParser()
278
+ parser.add_argument("--data-path", type=str, required=False)
279
+ parser.add_argument("--t5-feat-path", type=str, required=False)
280
+ parser.add_argument("--short-t5-feat-path", type=str, default=None, help="short caption of t5_feat_path")
281
+ parser.add_argument("--cloud-save-path", type=str, required=False, help='please specify a cloud disk path, if not, local path')
282
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
283
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
284
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
285
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
286
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
287
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-XL")
288
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
289
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i")
290
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
291
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
292
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
293
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
294
+ parser.add_argument("--drop-path", type=float, default=0.0, help="drop_path_rate of attention and ffn")
295
+ parser.add_argument("--no-compile", action='store_true')
296
+ parser.add_argument("--results-dir", type=str, default="results")
297
+ parser.add_argument("--dataset", type=str, default='t2i_control')
298
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512, 768, 832, 896, 960], default=384)
299
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
300
+ parser.add_argument("--num-classes", type=int, default=1000)
301
+ parser.add_argument("--epochs", type=int, default=15)
302
+ parser.add_argument("--lr", type=float, default=1e-5)
303
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use.")
304
+ parser.add_argument("--beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
305
+ parser.add_argument("--beta2", type=float, default=0.95, help="The beta2 parameter for the Adam optimizer.")
306
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
307
+ parser.add_argument("--global-batch-size", type=int, default=16)
308
+ parser.add_argument("--global-seed", type=int, default=0)
309
+ parser.add_argument("--num-workers", type=int, default=24)
310
+ parser.add_argument("--log-every", type=int, default=100)
311
+ parser.add_argument("--ckpt-every", type=int, default=30000)
312
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
313
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
314
+
315
+ parser.add_argument("--code-path", type=str, required=True)
316
+ parser.add_argument("--code-path2", type=str, default=None)
317
+ parser.add_argument("--condition-type", type=str, choices=['segmentation', 'canny', 'hed', 'lineart', 'depth'], default="lineart")
318
+ parser.add_argument("--get-image", type=bool, default=True)
319
+ parser.add_argument("--get-prompt", type=bool, default=False)
320
+ parser.add_argument("--get-label", type=bool, default=False)
321
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
322
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
323
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
324
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
325
+ parser.add_argument("--keep_in_memory",type=bool,default=False)
326
+ parser.add_argument("--wrong_ids_file",type=str,default=None)
327
+ parser.add_argument("--logging_dir",type=str,default="logs")
328
+ parser.add_argument("--report_to",type=str,default="wandb")
329
+ parser.add_argument("--task_name",type=str,default='segmentation')
330
+ parser.add_argument("--dataset_name",type=str,default=None)
331
+ parser.add_argument("--dataset_config_name",type=str,default=None)
332
+
333
+ parser.add_argument("--image_column", type=str, default="image", help="The column of the dataset containing the target image.")
334
+ parser.add_argument("--conditioning_image_column",type=str,default="control_seg",help="The column of the dataset containing the controlnet conditioning image.")
335
+ parser.add_argument("--caption_column",type=str,default="prompt",help="The column of the dataset containing a caption or a list of captions.")
336
+ parser.add_argument("--label_column",type=str,default=None,help="The column of the dataset containing the original labels. `seg_map` for ADE20K; `panoptic_seg_map` for COCO-Stuff.")
337
+ parser.add_argument("--max_train_samples",type=int,default=None)
338
+ parser.add_argument("--image_condition_dropout",type=float,default=0)
339
+ parser.add_argument("--text_condition_dropout",type=float,default=0)
340
+ parser.add_argument("--all_condition_dropout",type=float,default=0)
341
+
342
+ args = parser.parse_args()
343
+ main(args)
autoregressive/train/train_t2i_seg.py ADDED
@@ -0,0 +1,298 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT
4
+ from PIL import PngImagePlugin
5
+ MaximumDecompressedSize = 1024
6
+ MegaByte = 2**20
7
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
8
+ import torch
9
+ torch.backends.cuda.matmul.allow_tf32 = True
10
+ torch.backends.cudnn.allow_tf32 = True
11
+ import torch.distributed as dist
12
+ from torch.nn.parallel import DistributedDataParallel as DDP
13
+ from torch.utils.data import DataLoader
14
+ from torch.utils.data.distributed import DistributedSampler
15
+ from torchvision import transforms
16
+ from glob import glob
17
+ import time
18
+ import argparse
19
+ import os
20
+ import sys
21
+ current_directory = os.getcwd()
22
+ sys.path.append(current_directory)
23
+ from utils.distributed import init_distributed_mode
24
+ from utils.logger import create_logger
25
+ from dataset.build import build_dataset
26
+ from dataset.augmentation import center_crop_arr
27
+ from autoregressive.train.train_c2i import creat_optimizer
28
+ from torch.optim.lr_scheduler import StepLR
29
+
30
+ from autoregressive.models.gpt_t2i import GPT_models
31
+ from tokenizer.tokenizer_image.vq_model import VQ_models
32
+ from accelerate.utils import ProjectConfiguration, set_seed
33
+ from pathlib import Path
34
+ from accelerate import Accelerator
35
+ from language.t5 import T5Embedder
36
+ from dataset.t2i_control import build_t2i_control_code
37
+ import torch._dynamo
38
+ torch._dynamo.config.suppress_errors = True
39
+ def main(args):
40
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
41
+
42
+ # Setup DDP:
43
+ init_distributed_mode(args)
44
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
45
+ rank = dist.get_rank()
46
+ device = rank % torch.cuda.device_count()
47
+ seed = args.global_seed * dist.get_world_size() + rank
48
+ torch.manual_seed(seed)
49
+ torch.cuda.set_device(device)
50
+
51
+
52
+ # Setup an experiment folder:
53
+ if rank == 0:
54
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
55
+ experiment_index = len(glob(f"{args.results_dir}/*"))
56
+ model_string_name = args.gpt_model.replace("/", "-")
57
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"
58
+ checkpoint_dir = f"{experiment_dir}/checkpoints"
59
+ os.makedirs(checkpoint_dir, exist_ok=True)
60
+ logger = create_logger(experiment_dir)
61
+ logger.info(f"Experiment directory created at {experiment_dir}")
62
+
63
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
64
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
65
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
66
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
67
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
68
+
69
+ else:
70
+ logger = create_logger(None)
71
+
72
+ # training args
73
+ logger.info(f"{args}")
74
+
75
+ # training env
76
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
77
+
78
+
79
+ # Setup model
80
+ latent_size = args.image_size // args.downsample_size
81
+ model = GPT_models[args.gpt_model](
82
+ vocab_size=args.vocab_size,
83
+ block_size=latent_size ** 2,
84
+ num_classes=args.num_classes,
85
+ cls_token_num=args.cls_token_num,
86
+ model_type=args.gpt_type,
87
+ resid_dropout_p=args.dropout_p,
88
+ ffn_dropout_p=args.dropout_p,
89
+ token_dropout_p=args.token_dropout_p,
90
+ adapter_size=args.adapter_size,
91
+ condition_type=args.condition_type,
92
+ ).to(device)
93
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
94
+
95
+ # Setup optimizer
96
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
97
+ lr_scheduler = StepLR(optimizer, step_size=1, gamma=0.9) # multiply the learning rate by 0.9 after every epoch
98
+
99
+ # Load the dataset
100
+ train_dataset = build_t2i_control_code(args)
101
+ sampler = DistributedSampler(
102
+ train_dataset,
103
+ num_replicas=dist.get_world_size(),
104
+ rank=rank,
105
+ shuffle=True,
106
+ seed=args.global_seed
107
+ )
108
+
109
+ loader = torch.utils.data.DataLoader(
110
+ train_dataset,
111
+ shuffle=False,
112
+ collate_fn=train_dataset.collate_fn,
113
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
114
+ num_workers=args.num_workers,
115
+ pin_memory=True,
116
+ sampler=sampler,
117
+ drop_last=True
118
+ )
119
+ logger.info(f"Dataset contains {len(train_dataset):,} images")
120
+
121
+ # Prepare models for training:
122
+ if args.gpt_ckpt:
123
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
124
+ model.load_state_dict(checkpoint["model"], strict=False)
125
+ # optimizer.load_state_dict(checkpoint["optimizer"])
126
+ train_steps = 0#checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
127
+ start_epoch = 0#int(train_steps / int(len(dataset) / args.global_batch_size))
128
+ train_steps = 0#int(start_epoch * int(len(dataset) / args.global_batch_size))
129
+ del checkpoint
130
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
131
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
132
+ else:
133
+ train_steps = 0
134
+ start_epoch = 0
135
+
136
+ if not args.no_compile:
137
+ logger.info("compiling the model... (may take several minutes)")
138
+ model = torch.compile(model) # requires PyTorch 2.0
139
+ model = DDP(model.to(device), device_ids=[args.gpu], find_unused_parameters=True)
140
+ model.train() # important! This enables embedding dropout for classifier-free guidance
141
+
142
+
143
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
144
+ # initialize a GradScaler. If enabled=False scaler is a no-op
145
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
146
+ # Variables for monitoring/logging purposes:
147
+ log_steps = 0
148
+ running_loss = 0
149
+ start_time = time.time()
150
+
151
+ logger.info(f"Training for {args.epochs} epochs...")
152
+ for epoch in range(start_epoch, args.epochs):
153
+ sampler.set_epoch(epoch) # set the sampler's epoch for proper shuffling
154
+ logger.info(f"Beginning epoch {epoch}...")
155
+ # Iterate over the training data
156
+ for batch in loader:
157
+
158
+ x = batch['code'] # VQ codes of the target image
159
+ caption_emb = batch['caption_emb'] # T5 text embeddings
160
+ condition_img = batch['control'] # condition image (e.g. Canny edge)
161
+
162
+ attn_mask = batch['attn_mask'] # attention mask
163
+ valid = batch['valid'] # validity flags
164
+ y = caption_emb # `y` serves as the condition input to the GPT
165
+
166
+ # Move data to the GPU
167
+ x = x.to(device, non_blocking=True)
168
+ y = y.to(device, non_blocking=True)
169
+ condition_img = condition_img.to(device, non_blocking=True)
170
+
171
+ # Reshape (flatten) the data
172
+ z_indices = x.reshape(x.shape[0], -1)
173
+ c_indices = y.reshape(y.shape[0], y.shape[-2], y.shape[-1])
174
+ assert z_indices.shape[0] == c_indices.shape[0]
175
+ attn_mask = attn_mask.reshape(attn_mask.shape[0], 1, attn_mask.shape[-2], attn_mask.shape[-1]) # (bs, n_head, seq_len, seq_len)
176
+
177
+ # Forward pass
178
+ with torch.cuda.amp.autocast(dtype=ptdtype):
179
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices, mask=attn_mask[:, :, :-1,:-1], valid=valid, condition=condition_img.to(ptdtype))
180
+
181
+ # Backward pass, with gradient scaling if training in fp16
182
+ scaler.scale(loss).backward() # compute gradients
183
+ if args.max_grad_norm != 0.0:
184
+ scaler.unscale_(optimizer)
185
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
186
+
187
+ # Update model parameters: step the optimizer and scaler if training in fp16
188
+ scaler.step(optimizer)
189
+ scaler.update()
190
+ # flush the gradients as soon as we can, no need for this memory anymore
191
+ optimizer.zero_grad(set_to_none=True)
192
+
193
+ # Log loss values:
194
+ running_loss += loss.item()
195
+ log_steps += 1
196
+ train_steps += 1
197
+ if train_steps % args.log_every == 0:
198
+ # Measure training speed:
199
+ torch.cuda.synchronize()
200
+ end_time = time.time()
201
+ steps_per_sec = log_steps / (end_time - start_time)
202
+ # Reduce loss history over all processes:
203
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
204
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
205
+ avg_loss = avg_loss.item() / dist.get_world_size()
206
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
207
+ # Reset monitoring variables:
208
+ running_loss = 0
209
+ log_steps = 0
210
+ start_time = time.time()
211
+
212
+ # Save checkpoint:
213
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
214
+ if rank == 0:
215
+ if not args.no_compile:
216
+ model_weight = model.module._orig_mod.state_dict()
217
+ else:
218
+ model_weight = model.module.state_dict()
219
+ checkpoint = {
220
+ "model": model_weight,
221
+ "steps": train_steps,
222
+ "args": args
223
+ }
224
+ if not args.no_local_save:
225
+ checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
226
+ torch.save(checkpoint, checkpoint_path)
227
+ logger.info(f"Saved checkpoint to {checkpoint_path}")
228
+
229
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
230
+ torch.save(checkpoint, cloud_checkpoint_path)
231
+ logger.info(f"Saved checkpoint in cloud to {cloud_checkpoint_path}")
232
+ dist.barrier()
233
+ lr_scheduler.step()
234
+
235
+ for param_group in optimizer.param_groups:
236
+ print(f"Epoch {epoch + 1}, LR: {param_group['lr']}")
237
+ model.eval() # important! This disables randomized embedding dropout
238
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
239
+
240
+ logger.info("Done!")
241
+ dist.destroy_process_group()
242
+
243
+
244
+
245
+ if __name__ == "__main__":
246
+ parser = argparse.ArgumentParser()
247
+ parser.add_argument("--data-path", type=str, required=False)
248
+ parser.add_argument("--t5-feat-path", type=str, required=False)
249
+ parser.add_argument("--short-t5-feat-path", type=str, default=None, help="short caption of t5_feat_path")
250
+ parser.add_argument("--cloud-save-path", type=str, required=False, help='please specify a cloud disk path, if not, local path')
251
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
252
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
253
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
254
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
255
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
256
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-XL")
257
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
258
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i")
259
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
260
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
261
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
262
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
263
+ parser.add_argument("--drop-path", type=float, default=0.0, help="drop_path_rate of attention and ffn")
264
+ parser.add_argument("--no-compile", action='store_true')
265
+ parser.add_argument("--results-dir", type=str, default="results")
266
+ parser.add_argument("--dataset", type=str, default='t2i_control')
267
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=512)
268
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
269
+ parser.add_argument("--num-classes", type=int, default=1000)
270
+ parser.add_argument("--epochs", type=int, default=20)
271
+ parser.add_argument("--lr", type=float, default=5e-5)
272
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use.")
273
+ parser.add_argument("--beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
274
+ parser.add_argument("--beta2", type=float, default=0.95, help="The beta2 parameter for the Adam optimizer.")
275
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
276
+ parser.add_argument("--global-batch-size", type=int, default=96)
277
+ parser.add_argument("--global-seed", type=int, default=0)
278
+ parser.add_argument("--num-workers", type=int, default=24)
279
+ parser.add_argument("--log-every", type=int, default=100)
280
+ parser.add_argument("--ckpt-every", type=int, default=1443)
281
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
282
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
283
+
284
+
285
+ parser.add_argument("--condition-type", type=str, choices=['segmentation', 'canny', 'hed', 'lineart', 'depth'], default="segmentation")
286
+ parser.add_argument("--code-path", type=str, required=True)
287
+ parser.add_argument("--code-path2", type=str, default=None)
288
+ parser.add_argument("--adapter-size", type=str, default='small')
289
+ parser.add_argument("--get-image", type=bool, default=False)
290
+ parser.add_argument("--get-prompt", type=bool, default=False)
291
+ parser.add_argument("--get-label", type=bool, default=False)
292
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
293
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
294
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
295
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
296
+
297
+ args = parser.parse_args()
298
+ main(args)
autoregressive/train/train_t2i_seg_multiscale.py ADDED
@@ -0,0 +1,338 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT
4
+ import warnings
5
+ warnings.filterwarnings("ignore")
6
+ from PIL import PngImagePlugin
7
+ MaximumDecompressedSize = 1024
8
+ MegaByte = 2**20
9
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
10
+ import torch
11
+ torch.backends.cuda.matmul.allow_tf32 = True
12
+ torch.backends.cudnn.allow_tf32 = True
13
+ import torch.distributed as dist
14
+ from torch.nn.parallel import DistributedDataParallel as DDP
15
+ from torch.utils.data import DataLoader
16
+ from torch.utils.data.distributed import DistributedSampler
17
+ from torchvision import transforms
18
+ from glob import glob
19
+ import time
20
+ import argparse
21
+ import os
22
+ import sys
23
+ current_directory = os.getcwd()
24
+ sys.path.append(current_directory)
25
+ from utils.distributed import init_distributed_mode
26
+ from utils.logger import create_logger
27
+ from dataset.build import build_dataset
28
+ from dataset.augmentation import center_crop_arr
29
+ from autoregressive.train.train_c2i import creat_optimizer
30
+
31
+ from autoregressive.models.gpt_t2i import GPT_models
32
+ from tokenizer.tokenizer_image.vq_model import VQ_models
33
+ from accelerate.utils import ProjectConfiguration, set_seed
34
+ from pathlib import Path
35
+ from accelerate import Accelerator
36
+ from language.t5 import T5Embedder
37
+ from dataset.t2i_control import build_t2i_control_code
38
+ import torch._dynamo
39
+ torch._dynamo.config.suppress_errors = True
40
+ import random
41
+ import torch.nn.functional as F
+ import numpy as np
42
+ from condition.hed import HEDdetector
43
+ def random_sample_scale(image, condition=None):
44
+ H = np.arange(384, 1024+16, 16)
45
+ W = np.arange(384, 1024+16, 16)
46
+ resolution = [1024,1024]
47
+ while resolution[0]//16+resolution[1]//16 > 2304:
48
+ resolution = [random.choice(H), random.choice(W)]
49
+ assert resolution[0]//16+resolution[1]//16 <= 2304
50
+ image = F.interpolate(image.to(torch.float32), size=resolution, mode='bilinear', align_corners=False, antialias=True)
51
+ if condition is not None:
52
+ condition = F.interpolate(condition.to(torch.float32), size=resolution, mode='bilinear', align_corners=False, antialias=True)
53
+ return image, condition
54
+ return image
55
+
56
+
57
+ def main(args):
58
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
59
+
60
+ # Setup DDP:
61
+ init_distributed_mode(args)
62
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
63
+ rank = dist.get_rank()
64
+ device = rank % torch.cuda.device_count()
65
+ seed = args.global_seed * dist.get_world_size() + rank
66
+ torch.manual_seed(seed)
67
+ torch.cuda.set_device(device)
68
+
69
+ # Setup an experiment folder:
70
+ if rank == 0:
71
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
72
+ experiment_index = len(glob(f"{args.results_dir}/*"))
73
+ model_string_name = args.gpt_model.replace("/", "-")
74
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"
75
+ checkpoint_dir = f"{experiment_dir}/checkpoints"
76
+ os.makedirs(checkpoint_dir, exist_ok=True)
77
+ logger = create_logger(experiment_dir)
78
+ logger.info(f"Experiment directory created at {experiment_dir}")
79
+
80
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
81
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
82
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
83
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
84
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
85
+
86
+ else:
87
+ logger = create_logger(None)
88
+
89
+ # training args
90
+ logger.info(f"{args}")
91
+
92
+ # training env
93
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
94
+
95
+
96
+ # Setup model
97
+ latent_size = args.image_size // args.downsample_size
98
+ model = GPT_models[args.gpt_model](
99
+ vocab_size=args.vocab_size,
100
+ block_size=latent_size ** 2,
101
+ num_classes=args.num_classes,
102
+ cls_token_num=args.cls_token_num,
103
+ model_type=args.gpt_type,
104
+ resid_dropout_p=args.dropout_p,
105
+ ffn_dropout_p=args.dropout_p,
106
+ token_dropout_p=args.token_dropout_p,
107
+ condition_type=args.condition_type,
108
+ ).to(device)
109
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
110
+
111
+
112
+ # Setup optimizer
113
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
114
+
115
+ # Setup data:
116
+ if args.dataset == 't2i_control': # create and load model
117
+ vq_model = VQ_models[args.vq_model](
118
+ codebook_size=args.codebook_size,
119
+ codebook_embed_dim=args.codebook_embed_dim)
120
+ vq_model.to(device)
121
+ vq_model.eval()
122
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
123
+ vq_model.load_state_dict(checkpoint["model"])
124
+ del checkpoint
125
+ train_dataset = build_t2i_control_code(args)
126
+ sampler = DistributedSampler(
127
+ train_dataset,
128
+ num_replicas=dist.get_world_size(),
129
+ rank=rank,
130
+ shuffle=True,
131
+ seed=args.global_seed
132
+ )
133
+
134
+ loader = torch.utils.data.DataLoader(
135
+ train_dataset,
136
+ shuffle=False,
137
+ collate_fn=train_dataset.collate_fn,
138
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
139
+ num_workers=args.num_workers,
140
+ pin_memory=True,
141
+ sampler=sampler,
142
+ drop_last=True
143
+ )
144
+ logger.info(f"Dataset contains {len(train_dataset):,} images")
145
+
146
+ # Prepare models for training:
147
+ if args.gpt_ckpt:
148
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
149
+ model.load_state_dict(checkpoint["model"], strict=False)
150
+ # optimizer.load_state_dict(checkpoint["optimizer"])
151
+ train_steps = 0#checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
152
+ start_epoch = 0#int(train_steps / int(len(dataset) / args.global_batch_size))
153
+ train_steps = 0#int(start_epoch * int(len(dataset) / args.global_batch_size))
154
+ del checkpoint
155
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
156
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
157
+ else:
158
+ train_steps = 0
159
+ start_epoch = 0
160
+
161
+ if not args.no_compile:
162
+ logger.info("compiling the model... (may take several minutes)")
163
+ model = torch.compile(model) # requires PyTorch 2.0
164
+ model = DDP(model.to(device), device_ids=[args.gpu], find_unused_parameters=True)
165
+ model.train() # important! This enables embedding dropout for classifier-free guidance
166
+
167
+
168
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
169
+ # initialize a GradScaler. If enabled=False scaler is a no-op
170
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
171
+ # Variables for monitoring/logging purposes:
172
+ log_steps = 0
173
+ running_loss = 0
174
+ start_time = time.time()
175
+ get_condition = HEDdetector().to(device).eval()
176
+ logger.info(f"Training for {args.epochs} epochs...")
177
+ for epoch in range(start_epoch, args.epochs):
178
+ sampler.set_epoch(epoch)
179
+ logger.info(f"Beginning epoch {epoch}...")
180
+ for batch in loader:
181
+
182
+ x = batch['code']
183
+ image = batch['image']
184
+ caption_emb = batch['caption_emb']
185
+ condition_img = batch['control']
186
+ condition_img = 2*(condition_img - 0.5)
187
+ attn_mask = batch['attn_mask']
188
+ valid = batch['valid']
189
+ y = caption_emb
190
+ x = x.to(device, non_blocking=True)
191
+ image = image.to(device, non_blocking=True)
192
+ y = y.to(device, non_blocking=True)
193
+ condition_img = condition_img.to(device, non_blocking=True)
194
+ with torch.no_grad():
195
+ condition_img = get_condition(image).unsqueeze(1).repeat(1,3,1,1)
196
+ condition_img = 2*(condition_img/255 - 0.5)
197
+ image, condition_img = random_sample_scale(image, condition_img)
198
+
199
+ if args.dataset == 't2i_control':
200
+ img = 2*(image/255 - 0.5)
201
+
202
+ with torch.no_grad():
203
+ _, _, [_, _, indices] = vq_model.encode(img)
204
+ x = indices.reshape(img.shape[0], -1)
205
+ z_indices = x.reshape(x.shape[0], -1)
206
+ c_indices = y.reshape(y.shape[0], y.shape[-2], y.shape[-1])
207
+ assert z_indices.shape[0] == c_indices.shape[0]
208
+ attn_mask = attn_mask.reshape(attn_mask.shape[0], 1, attn_mask.shape[-2], attn_mask.shape[-1]) # (bs, n_head, seq_len, seq_len)
209
+ with torch.cuda.amp.autocast(dtype=ptdtype):
210
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices, mask=attn_mask[:, :, :x.shape[1]+120-1,:x.shape[1]+120-1], valid=valid, condition=condition_img.to(ptdtype))
211
+ # backward pass, with gradient scaling if training in fp16
212
+ scaler.scale(loss).backward()
213
+ if args.max_grad_norm != 0.0:
214
+ scaler.unscale_(optimizer)
215
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
216
+ # step the optimizer and scaler if training in fp16
217
+ scaler.step(optimizer)
218
+ scaler.update()
219
+ # flush the gradients as soon as we can, no need for this memory anymore
220
+ optimizer.zero_grad(set_to_none=True)
221
+
222
+ # Log loss values:
223
+ running_loss += loss.item()
224
+ log_steps += 1
225
+ train_steps += 1
226
+ if train_steps % args.log_every == 0:
227
+ # Measure training speed:
228
+ torch.cuda.synchronize()
229
+ end_time = time.time()
230
+ steps_per_sec = log_steps / (end_time - start_time)
231
+ # Reduce loss history over all processes:
232
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
233
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
234
+ avg_loss = avg_loss.item() / dist.get_world_size()
235
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
236
+ # Reset monitoring variables:
237
+ running_loss = 0
238
+ log_steps = 0
239
+ start_time = time.time()
240
+
241
+ # Save checkpoint:
242
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
243
+ if rank == 0:
244
+ if not args.no_compile:
245
+ model_weight = model.module._orig_mod.state_dict()
246
+ else:
247
+ model_weight = model.module.state_dict()
248
+ checkpoint = {
249
+ "model": model_weight,
250
+ "steps": train_steps,
251
+ "args": args
252
+ }
253
+ if not args.no_local_save:
254
+ checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
255
+ torch.save(checkpoint, checkpoint_path)
256
+ logger.info(f"Saved checkpoint to {checkpoint_path}")
257
+
258
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
259
+ torch.save(checkpoint, cloud_checkpoint_path)
260
+ logger.info(f"Saved checkpoint in cloud to {cloud_checkpoint_path}")
261
+ dist.barrier()
262
+
263
+ model.eval() # important! This disables randomized embedding dropout
264
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
265
+
266
+ logger.info("Done!")
267
+ dist.destroy_process_group()
268
+
269
+
270
+
271
+ if __name__ == "__main__":
272
+ parser = argparse.ArgumentParser()
273
+ parser.add_argument("--data-path", type=str, required=False)
274
+ parser.add_argument("--t5-feat-path", type=str, required=False)
275
+ parser.add_argument("--short-t5-feat-path", type=str, default=None, help="short caption of t5_feat_path")
276
+ parser.add_argument("--cloud-save-path", type=str, required=False, help='please specify a cloud disk path, if not, local path')
277
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
278
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
279
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
280
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
281
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
282
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-XL")
283
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
284
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i")
285
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
286
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
287
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
288
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
289
+ parser.add_argument("--drop-path", type=float, default=0.0, help="drop_path_rate of attention and ffn")
290
+ parser.add_argument("--no-compile", action='store_true')
291
+ parser.add_argument("--results-dir", type=str, default="results")
292
+ parser.add_argument("--dataset", type=str, default='t2i_control')
293
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=384)
294
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
295
+ parser.add_argument("--num-classes", type=int, default=1000)
296
+ parser.add_argument("--epochs", type=int, default=50)
297
+ parser.add_argument("--lr", type=float, default=5e-5)
298
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use.")
299
+ parser.add_argument("--beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
300
+ parser.add_argument("--beta2", type=float, default=0.95, help="The beta2 parameter for the Adam optimizer.")
301
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
302
+ parser.add_argument("--global-batch-size", type=int, default=64)
303
+ parser.add_argument("--global-seed", type=int, default=0)
304
+ parser.add_argument("--num-workers", type=int, default=24)
305
+ parser.add_argument("--log-every", type=int, default=100)
306
+ parser.add_argument("--ckpt-every", type=int, default=2000)
307
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
308
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
309
+
310
+ parser.add_argument("--code-path", type=str, required=True)
311
+ parser.add_argument("--code-path2", type=str, default=None)
312
+ parser.add_argument("--condition-type", type=str, choices=['segmentation', 'canny', 'hed', 'lineart', 'depth'], default="segmentation")
313
+ parser.add_argument("--get-image", type=bool, default=True)
314
+ parser.add_argument("--get-prompt", type=bool, default=False)
315
+ parser.add_argument("--get-label", type=bool, default=False)
316
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
317
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
318
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
319
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
320
+ parser.add_argument("--keep_in_memory",type=bool,default=False)
321
+ parser.add_argument("--wrong_ids_file",type=str,default=None)
322
+ parser.add_argument("--logging_dir",type=str,default="logs")
323
+ parser.add_argument("--report_to",type=str,default="wandb")
324
+ parser.add_argument("--task_name",type=str,default='segmentation')
325
+ parser.add_argument("--dataset_name",type=str,default=None)
326
+ parser.add_argument("--dataset_config_name",type=str,default=None)
327
+
328
+ parser.add_argument("--image_column", type=str, default="image", help="The column of the dataset containing the target image.")
329
+ parser.add_argument("--conditioning_image_column",type=str,default="control_seg",help="The column of the dataset containing the controlnet conditioning image.")
330
+ parser.add_argument("--caption_column",type=str,default="prompt",help="The column of the dataset containing a caption or a list of captions.")
331
+ parser.add_argument("--label_column",type=str,default=None,help="The column of the dataset containing the original labels. `seg_map` for ADE20K; `panoptic_seg_map` for COCO-Stuff.")
332
+ parser.add_argument("--max_train_samples",type=int,default=None)
333
+ parser.add_argument("--image_condition_dropout",type=float,default=0)
334
+ parser.add_argument("--text_condition_dropout",type=float,default=0)
335
+ parser.add_argument("--all_condition_dropout",type=float,default=0)
336
+
337
+ args = parser.parse_args()
338
+ main(args)
condition/README.md ADDED
@@ -0,0 +1,23 @@
1
+ Prepare the preprocessing models
2
+
3
+ HED: https://huggingface.co/lllyasviel/Annotators/blob/main/ControlNetHED.pth\
4
+ Lineart: https://huggingface.co/spaces/awacke1/Image-to-Line-Drawings/resolve/main/model.pth\
5
+ Depth: https://huggingface.co/lllyasviel/Annotators/blob/main/dpt_hybrid-midas-501f0c75.pt (hybrid, for inference)\
6
+ https://huggingface.co/Intel/dpt-large (large, used to evaluate conditional consistency and FID)\
7
+
8
+ We recommend storing them in the following paths:
9
+
10
+ |---condition
11
+ |---ckpts
12
+ |---dpt_large
13
+ |---config.json
14
+ |---preprocessor_config.json
15
+ |---pytorch_model.bin
16
+ |---ControlNetHED.pth
17
+ |---dpt_hybrid-midas-501f0c75.pt
18
+ |---model.pth
19
+ |---example
20
+ |---midas
21
+ .
22
+ .
23
+ .
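A minimal loading sketch for these preprocessors (illustrative only; it assumes the checkpoint layout above and the HEDdetector, LineArt, and DPT classes used elsewhere in this repo):

    import torch
    from condition.hed import HEDdetector              # looks for condition/ckpts/ControlNetHED.pth
    from condition.lineart import LineArt
    from transformers import DPTImageProcessor, DPTForDepthEstimation

    device = torch.device('cuda')
    hed = HEDdetector().to(device).eval()              # HED edge detector
    lineart = LineArt()                                 # line-art detector
    lineart.load_state_dict(torch.load('condition/ckpts/model.pth', map_location='cpu'))
    lineart.to(device).eval()
    depth_processor = DPTImageProcessor.from_pretrained('condition/ckpts/dpt_large')
    depth_model = DPTForDepthEstimation.from_pretrained('condition/ckpts/dpt_large')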
condition/__pycache__/canny.cpython-310.pyc ADDED
Binary file (1.06 kB). View file
 
condition/__pycache__/hed.cpython-310.pyc ADDED
Binary file (5.07 kB). View file
 
condition/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.26 kB). View file
 
condition/canny.py ADDED
@@ -0,0 +1,25 @@
1
+ import cv2
2
+ import torch
3
+ import numpy as np
4
+
5
+
6
+ class CannyDetector:
7
+ def __call__(self, img, low_threshold=100, high_threshold=200):
8
+ """
9
+ input: array or tensor (H,W,3)
10
+ output: array (H,W)
11
+ """
12
+ if torch.is_tensor(img):
13
+ img = img.cpu().detach().numpy().astype(np.uint8)
14
+ return cv2.Canny(img, low_threshold, high_threshold)
15
+
16
+
17
+ if __name__ == '__main__':
18
+ apply_canny = CannyDetector()
19
+ img = cv2.imread('condition/dragon_resize.png')
20
+ import numpy as np
21
+ print(img.max())
22
+ detected_map = apply_canny(img, 100, 200)
23
+ print(detected_map.shape, detected_map.max(), detected_map.min())
24
+ cv2.imwrite('condition/example_canny.jpg', detected_map)
25
+ np.save('condition/example_canny.npy', detected_map[None,None])
condition/depth.py ADDED
@@ -0,0 +1,47 @@
1
+ from controlnet_aux import LineartDetector
2
+ import torch
3
+ import cv2
4
+ import numpy as np
5
+ from transformers import DPTImageProcessor, DPTForDepthEstimation
6
+ class Depth:
7
+ def __init__(self, device):
8
+ self.model = DPTForDepthEstimation.from_pretrained("condition/ckpts/dpt_large")
9
+
10
+ def __call__(self, input_image):
11
+ """
12
+ input: tensor()
13
+ """
14
+ control_image = self.model(input_image)
15
+ return np.array(control_image)
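+ # NOTE (editorial, hedged): as written, this __call__ passes raw pixels straight to
+ # DPTForDepthEstimation and wraps the returned ModelOutput in np.array, which does not
+ # produce a usable depth map. The working recipe is the one in the __main__ block below:
+ # preprocess with DPTImageProcessor, take outputs.predicted_depth, upsample bicubically to
+ # the input size, then rescale to uint8.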
16
+
17
+ if __name__ == '__main__':
18
+ import matplotlib.pyplot as plt
19
+ from tqdm import tqdm
20
+ from transformers import DPTImageProcessor, DPTForDepthEstimation
21
+ from PIL import Image
22
+
23
+ image = Image.open('condition/example/t2i/depth/depth.png')
24
+ img = cv2.imread('condition/example/t2i/depth/depth.png')
25
+ processor = DPTImageProcessor.from_pretrained("condition/ckpts/dpt_large")
26
+ model = DPTForDepthEstimation.from_pretrained("condition/ckpts/dpt_large")
27
+
28
+ inputs = torch.from_numpy(np.array(img)).permute(2,0,1).unsqueeze(0).float()#
29
+ inputs = 2*(inputs/255 - 0.5)
30
+ inputs = processor(images=image, return_tensors="pt", size=(512,512))
31
+ print(inputs)
32
+ with torch.no_grad():
33
+ outputs = model(**inputs)
34
+ predicted_depth = outputs.predicted_depth
35
+ print(predicted_depth.shape)
36
+ prediction = torch.nn.functional.interpolate(
37
+ predicted_depth.unsqueeze(1),
38
+ size=image.size[::-1],
39
+ mode="bicubic",
40
+ align_corners=False,
41
+ )
42
+
43
+ output = prediction.squeeze().cpu().numpy()
44
+ formatted = (output * 255 / np.max(output)).astype("uint8")
45
+
46
+ depth = Image.fromarray(formatted)
47
+ depth.save('condition/example/t2i/depth/example_depth.jpg')
condition/example/c2i/canny/15000.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d73b63bea2625d40ae6cbf5d7ba2056b3225fc9fc7f46969431f5ac901a00dd
3
+ size 136
condition/example/c2i/canny/15000.png ADDED

Git LFS Details

  • SHA256: 3c603906c2427e5cb27edbfc331ba4472a007037cf7ba42f3dea381eb7428aac
  • Pointer size: 129 Bytes
  • Size of remote file: 9.4 kB
condition/example/c2i/canny/2312.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e0ca5d0dbfe7b1dcbca161952309b6ca147e223c6cd8739a3741f298b0a40e6
3
+ size 136
condition/example/c2i/canny/2312.png ADDED

Git LFS Details

  • SHA256: 55ee80561d5652e386cd6437313026ff437ae3d06b0748455b99c657d5877911
  • Pointer size: 129 Bytes
  • Size of remote file: 8.41 kB
condition/example/c2i/canny/48850.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e9d5def4771893335a960027ae82df99811a8c7c0ac275b4a4023c20f52f1ba
3
+ size 136
condition/example/c2i/canny/48850.png ADDED

Git LFS Details

  • SHA256: 178e2c8a3667ff12bcf16cfe357d1b3dfe40c55603e47e50d21d988b1d5069f3
  • Pointer size: 129 Bytes
  • Size of remote file: 2.9 kB
condition/example/c2i/canny/650.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0be2231a3b7f713e607234d06e9dde4a8782ee12126dd72c84e1084e36e4a5ec
3
+ size 136
condition/example/c2i/canny/650.png ADDED

Git LFS Details

  • SHA256: 1ef84beaf0de298ad48ffda74cb42138134326134b9a431c30d561036e6c0b01
  • Pointer size: 129 Bytes
  • Size of remote file: 5.6 kB
condition/example/c2i/depth/101.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79a3ec9e5013300721fe2cc296c3d7016af2098b51ea46190de859dc1de8fb8e
3
+ size 136
condition/example/c2i/depth/101.png ADDED

Git LFS Details

  • SHA256: ac604b0a1578bf8dcbcb45c95156721bc810d9a486599df55e9dfd69b194d119
  • Pointer size: 130 Bytes
  • Size of remote file: 15 kB
condition/example/c2i/depth/10601.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7dde42a4540e351057d10ea11940498a1100083e42892e8f7e243a0b72ef8d11
3
+ size 136
condition/example/c2i/depth/10601.png ADDED

Git LFS Details

  • SHA256: fa05bef55c97445c13ca5853b51195657a41754a63e3330dab2267bdf5e99906
  • Pointer size: 130 Bytes
  • Size of remote file: 15.3 kB
condition/example/c2i/depth/48901.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1cb9b4e92e134821310ed5521e61279624930444f169c85cfb9d0b5f555436f7
3
+ size 136
condition/example/t2i/.DS_Store ADDED
Binary file (6.15 kB). View file
 
condition/example/t2i/cocostuff/doll.png ADDED

Git LFS Details

  • SHA256: af6dc57fe8bfd7b6079b0ca0613161a2b9f30afdbe55e818883e76420e5e7e56
  • Pointer size: 130 Bytes
  • Size of remote file: 18 kB
condition/hed.py ADDED
@@ -0,0 +1,117 @@
1
+ # This is an improved version and model of HED edge detection with Apache License, Version 2.0.
2
+ # Please use this implementation in your products
3
+ # This implementation may produce slightly different results from Saining Xie's official implementations,
4
+ # but it generates smoother edges and is more suitable for ControlNet as well as other image-to-image translations.
5
+ # Different from official models and other implementations, this is an RGB-input model (rather than BGR)
6
+ # and in this way it works better for gradio's RGB protocol
7
+
8
+ import os
9
+ import cv2
10
+ import torch
11
+ import numpy as np
12
+ from torch.nn.parallel import DataParallel
13
+ from einops import rearrange
14
+ from condition.utils import annotator_ckpts_path
15
+ import torch.nn.functional as F
16
+
17
+ class DoubleConvBlock(torch.nn.Module):
18
+ def __init__(self, input_channel, output_channel, layer_number):
19
+ super().__init__()
20
+ self.convs = torch.nn.Sequential()
21
+ self.convs.append(torch.nn.Conv2d(in_channels=input_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1))
22
+ for i in range(1, layer_number):
23
+ self.convs.append(torch.nn.Conv2d(in_channels=output_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1))
24
+ self.projection = torch.nn.Conv2d(in_channels=output_channel, out_channels=1, kernel_size=(1, 1), stride=(1, 1), padding=0)
25
+
26
+ def __call__(self, x, down_sampling=False):
27
+ h = x
28
+ if down_sampling:
29
+ h = torch.nn.functional.max_pool2d(h, kernel_size=(2, 2), stride=(2, 2))
30
+ for conv in self.convs:
31
+ h = conv(h)
32
+ h = torch.nn.functional.relu(h)
33
+ return h, self.projection(h)
34
+
35
+
36
+ class ControlNetHED_Apache2(torch.nn.Module):
37
+ def __init__(self):
38
+ super().__init__()
39
+ self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1)))
40
+ self.block1 = DoubleConvBlock(input_channel=3, output_channel=64, layer_number=2)
41
+ self.block2 = DoubleConvBlock(input_channel=64, output_channel=128, layer_number=2)
42
+ self.block3 = DoubleConvBlock(input_channel=128, output_channel=256, layer_number=3)
43
+ self.block4 = DoubleConvBlock(input_channel=256, output_channel=512, layer_number=3)
44
+ self.block5 = DoubleConvBlock(input_channel=512, output_channel=512, layer_number=3)
45
+
46
+ def __call__(self, x):
47
+ h = x - self.norm
48
+ h, projection1 = self.block1(h)
49
+ h, projection2 = self.block2(h, down_sampling=True)
50
+ h, projection3 = self.block3(h, down_sampling=True)
51
+ h, projection4 = self.block4(h, down_sampling=True)
52
+ h, projection5 = self.block5(h, down_sampling=True)
53
+ return projection1, projection2, projection3, projection4, projection5
54
+
55
+
56
+ class HEDdetector(torch.nn.Module):
57
+ def __init__(self):
58
+ super().__init__()
59
+ remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetHED.pth"
60
+ modelpath = os.path.join(annotator_ckpts_path, "ControlNetHED.pth")
61
+ if not os.path.exists(modelpath):
62
+ from basicsr.utils.download_util import load_file_from_url
63
+ load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
64
+ self.netNetwork = ControlNetHED_Apache2().float()#.to(self.device).eval()
65
+ self.netNetwork.load_state_dict(torch.load(modelpath))
66
+
67
+ def __call__(self, input_image):
68
+ """
69
+ input: tensor (B,C,H,W)
70
+ output: tensor (B,H,W)
71
+ """
72
+ B, C, H, W = input_image.shape
73
+ image_hed = input_image
74
+
75
+ edges = self.netNetwork(image_hed)
76
+ edges = [F.interpolate(e, size=(H, W), mode='bilinear', align_corners=False).squeeze(1) for e in edges]
77
+ edges = torch.stack(edges, dim=1)
78
+ edge = 1 / (1 + torch.exp(-torch.mean(edges, dim=1)))
79
+ edge = (edge * 255.0).clamp(0, 255)
80
+
81
+ return edge
82
+
83
+
84
+ def nms(x, t, s):
85
+ x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)
86
+
87
+ f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
88
+ f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
89
+ f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
90
+ f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
91
+
92
+ y = np.zeros_like(x)
93
+
94
+ for f in [f1, f2, f3, f4]:
95
+ np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
96
+
97
+ z = np.zeros_like(y, dtype=np.uint8)
98
+ z[y > t] = 255
99
+ return z
100
+
101
+ if __name__ == '__main__':
102
+ import matplotlib.pyplot as plt
103
+ from tqdm import tqdm
104
+ import torch.nn.functional as F
105
+ device = torch.device('cuda')
106
+ apply_hed = HEDdetector().to(device).eval()
107
+ img = cv2.imread('condition/dragon_1024_512.jpg')
108
+ H,W = img.shape[:2]
109
+ resize_img = cv2.resize(img,(512,1024))
110
+ detected_map = apply_hed(torch.from_numpy(img).permute(2,0,1).unsqueeze(0).cuda())
111
+ resize_detected_map = apply_hed(torch.from_numpy(resize_img).permute(2,0,1).unsqueeze(0).cuda())
112
+ cv2.imwrite('condition/example_hed_resize.jpg', resize_detected_map[0].cpu().detach().numpy())
113
+ resize_detected_map = F.interpolate(resize_detected_map.unsqueeze(0).to(torch.float32), size=(H,W), mode='bilinear', align_corners=False, antialias=True)
114
+ print(abs(detected_map - resize_detected_map).sum())
115
+ print(img.shape, img.max(),img.min(),detected_map.shape, detected_map.max(),detected_map.min())
116
+ cv2.imwrite('condition/example_hed.jpg', detected_map[0].cpu().detach().numpy())
117
+ cv2.imwrite('condition/example_hed_resized.jpg', resize_detected_map[0,0].cpu().detach().numpy())
condition/lineart.py ADDED
@@ -0,0 +1,98 @@
1
+ from controlnet_aux import LineartDetector
2
+ import torch
3
+ import cv2
4
+ import numpy as np
5
+ import torch.nn as nn
6
+
7
+
8
+ norm_layer = nn.InstanceNorm2d
9
+ class ResidualBlock(nn.Module):
10
+ def __init__(self, in_features):
11
+ super(ResidualBlock, self).__init__()
12
+
13
+ conv_block = [ nn.ReflectionPad2d(1),
14
+ nn.Conv2d(in_features, in_features, 3),
15
+ norm_layer(in_features),
16
+ nn.ReLU(inplace=True),
17
+ nn.ReflectionPad2d(1),
18
+ nn.Conv2d(in_features, in_features, 3),
19
+ norm_layer(in_features)
20
+ ]
21
+
22
+ self.conv_block = nn.Sequential(*conv_block)
23
+
24
+ def forward(self, x):
25
+ return x + self.conv_block(x)
26
+ class LineArt(nn.Module):
27
+ def __init__(self, input_nc=3, output_nc=1, n_residual_blocks=3, sigmoid=True):
28
+ super(LineArt, self).__init__()
29
+
30
+ # Initial convolution block
31
+ model0 = [ nn.ReflectionPad2d(3),
32
+ nn.Conv2d(input_nc, 64, 7),
33
+ norm_layer(64),
34
+ nn.ReLU(inplace=True) ]
35
+ self.model0 = nn.Sequential(*model0)
36
+
37
+ # Downsampling
38
+ model1 = []
39
+ in_features = 64
40
+ out_features = in_features*2
41
+ for _ in range(2):
42
+ model1 += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
43
+ norm_layer(out_features),
44
+ nn.ReLU(inplace=True) ]
45
+ in_features = out_features
46
+ out_features = in_features*2
47
+ self.model1 = nn.Sequential(*model1)
48
+
49
+ model2 = []
50
+ # Residual blocks
51
+ for _ in range(n_residual_blocks):
52
+ model2 += [ResidualBlock(in_features)]
53
+ self.model2 = nn.Sequential(*model2)
54
+
55
+ # Upsampling
56
+ model3 = []
57
+ out_features = in_features//2
58
+ for _ in range(2):
59
+ model3 += [ nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),
60
+ norm_layer(out_features),
61
+ nn.ReLU(inplace=True) ]
62
+ in_features = out_features
63
+ out_features = in_features//2
64
+ self.model3 = nn.Sequential(*model3)
65
+
66
+ # Output layer
67
+ model4 = [ nn.ReflectionPad2d(3),
68
+ nn.Conv2d(64, output_nc, 7)]
69
+ if sigmoid:
70
+ model4 += [nn.Sigmoid()]
71
+
72
+ self.model4 = nn.Sequential(*model4)
73
+
74
+ def forward(self, x, cond=None):
75
+ """
76
+ input: tensor (B,C,H,W)
77
+ output: tensor (B,1,H,W) 0~1
78
+ """
79
+
80
+ out = self.model0(x)
81
+ out = self.model1(out)
82
+ out = self.model2(out)
83
+ out = self.model3(out)
84
+ out = self.model4(out)
85
+
86
+ return out
87
+
88
+
89
+ if __name__ == '__main__':
90
+ import matplotlib.pyplot as plt
91
+ from tqdm import tqdm
92
+ apply_lineart = LineArt()
93
+ apply_lineart.load_state_dict(torch.load('condition/ckpts/model.pth', map_location=torch.device('cpu')))
94
+ img = cv2.imread('condition/car_448_768.jpg')
95
+ img = torch.from_numpy(img).permute(2,0,1).unsqueeze(0).repeat(8,1,1,1).float()
96
+ detected_map = apply_lineart(img)
97
+ print(img.shape, img.max(),img.min(),detected_map.shape, detected_map.max(),detected_map.min())
98
+ cv2.imwrite('condition/example_lineart.jpg', 255*detected_map[0,0].cpu().detach().numpy())
condition/midas/depth.py ADDED
@@ -0,0 +1,221 @@
1
+ # Midas Depth Estimation
2
+ # From https://github.com/isl-org/MiDaS
3
+ # MIT LICENSE
4
+
5
+ import cv2
6
+ import numpy as np
7
+ import torch
8
+ import sys
9
+ sys.path.append('/data/vjuicefs_sz_cv_v2/11171709/ControlAR')
10
+ from einops import rearrange
11
+ # from .api import MiDaSInference
12
+ from condition.utils import annotator_ckpts_path
13
+ from condition.midas.midas.dpt_depth import DPTDepthModel
14
+ from condition.midas.midas.midas_net import MidasNet
15
+ from condition.midas.midas.midas_net_custom import MidasNet_small
16
+ from condition.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet
17
+ import os
18
+ import torch.nn as nn
19
+ from torchvision.transforms import Compose
20
+
21
+ ISL_PATHS = {
22
+ "dpt_large": os.path.join(annotator_ckpts_path, "dpt_large-midas-2f21e586.pt"),
23
+ "dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"),
24
+ "midas_v21": "",
25
+ "midas_v21_small": "",
26
+ }
27
+
28
+ remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt"
29
+
30
+
31
+ def disabled_train(self, mode=True):
32
+ """Overwrite model.train with this function to make sure train/eval mode
33
+ does not change anymore."""
34
+ return self
35
+
36
+
37
+ def load_midas_transform(model_type):
38
+ # https://github.com/isl-org/MiDaS/blob/master/run.py
39
+ # load transform only
40
+ if model_type == "dpt_large": # DPT-Large
41
+ net_w, net_h = 384, 384
42
+ resize_mode = "minimal"
43
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
44
+
45
+ elif model_type == "dpt_hybrid": # DPT-Hybrid
46
+ net_w, net_h = 384, 384
47
+ resize_mode = "minimal"
48
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
49
+
50
+ elif model_type == "midas_v21":
51
+ net_w, net_h = 384, 384
52
+ resize_mode = "upper_bound"
53
+ normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
54
+
55
+ elif model_type == "midas_v21_small":
56
+ net_w, net_h = 256, 256
57
+ resize_mode = "upper_bound"
58
+ normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
59
+
60
+ else:
61
+ assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
62
+
63
+ transform = Compose(
64
+ [
65
+ Resize(
66
+ net_w,
67
+ net_h,
68
+ resize_target=None,
69
+ keep_aspect_ratio=True,
70
+ ensure_multiple_of=32,
71
+ resize_method=resize_mode,
72
+ image_interpolation_method=cv2.INTER_CUBIC,
73
+ ),
74
+ normalization,
75
+ PrepareForNet(),
76
+ ]
77
+ )
78
+
79
+ return transform
80
+
81
+
82
+ def load_model(model_type):
83
+ # https://github.com/isl-org/MiDaS/blob/master/run.py
84
+ # load network
85
+ model_path = ISL_PATHS[model_type]
86
+ if model_type == "dpt_large": # DPT-Large
87
+ model = DPTDepthModel(
88
+ path=model_path,
89
+ backbone="vitl16_384",
90
+ non_negative=True,
91
+ )
92
+ net_w, net_h = 384, 384
93
+ resize_mode = "minimal"
94
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
95
+
96
+ elif model_type == "dpt_hybrid": # DPT-Hybrid
97
+ if not os.path.exists(model_path):
98
+ from basicsr.utils.download_util import load_file_from_url
99
+ load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
100
+
101
+ model = DPTDepthModel(
102
+ path=model_path,
103
+ backbone="vitb_rn50_384",
104
+ non_negative=True,
105
+ )
106
+ net_w, net_h = 384, 384
107
+ resize_mode = "minimal"
108
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
109
+
110
+ elif model_type == "midas_v21":
111
+ model = MidasNet(model_path, non_negative=True)
112
+ net_w, net_h = 384, 384
113
+ resize_mode = "upper_bound"
114
+ normalization = NormalizeImage(
115
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
116
+ )
117
+
118
+ elif model_type == "midas_v21_small":
119
+ model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
120
+ non_negative=True, blocks={'expand': True})
121
+ net_w, net_h = 256, 256
122
+ resize_mode = "upper_bound"
123
+ normalization = NormalizeImage(
124
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
125
+ )
126
+
127
+ else:
128
+ print(f"model_type '{model_type}' not implemented, use: --model_type large")
129
+ assert False
130
+
131
+ transform = Compose(
132
+ [
133
+ Resize(
134
+ net_w,
135
+ net_h,
136
+ resize_target=None,
137
+ keep_aspect_ratio=True,
138
+ ensure_multiple_of=32,
139
+ resize_method=resize_mode,
140
+ image_interpolation_method=cv2.INTER_CUBIC,
141
+ ),
142
+ normalization,
143
+ PrepareForNet(),
144
+ ]
145
+ )
146
+
147
+ return model.eval(), transform
148
+
149
+
150
+ class MiDaSInference(nn.Module):
151
+ MODEL_TYPES_TORCH_HUB = [
152
+ "DPT_Large",
153
+ "DPT_Hybrid",
154
+ "MiDaS_small"
155
+ ]
156
+ MODEL_TYPES_ISL = [
157
+ "dpt_large",
158
+ "dpt_hybrid",
159
+ "midas_v21",
160
+ "midas_v21_small",
161
+ ]
162
+
163
+ def __init__(self, model_type):
164
+ super().__init__()
165
+ assert (model_type in self.MODEL_TYPES_ISL)
166
+ model, _ = load_model(model_type)
167
+ self.model = model
168
+ self.model.train = disabled_train
169
+
170
+ def forward(self, x):
171
+ with torch.no_grad():
172
+ prediction = self.model(x)
173
+ return prediction
174
+
175
+
176
+ class MidasDetector:
177
+ def __init__(self,device=torch.device('cuda:0'), model_type="dpt_hybrid"):
178
+ self.device = device
179
+ self.model = MiDaSInference(model_type=model_type).to(device)
180
+
181
+ def __call__(self, input_image, a=np.pi * 2.0, bg_th=0.1):
182
+ assert input_image.ndim == 3
183
+ image_depth = input_image
184
+ with torch.no_grad():
185
+ image_depth = image_depth
186
+ image_depth = image_depth / 127.5 - 1.0
187
+ image_depth = rearrange(image_depth, 'h w c -> 1 c h w')
188
+ depth = self.model(image_depth)[0]
189
+
190
+ depth_pt = depth.clone()
191
+ depth_pt -= torch.min(depth_pt)
192
+ depth_pt /= torch.max(depth_pt)
193
+ depth_pt = depth_pt.cpu().numpy()
194
+ depth_image = (depth_pt * 255.0).clip(0, 255).astype(np.uint8)
195
+
196
+ depth_np = depth.cpu().numpy()
197
+ x = cv2.Sobel(depth_np, cv2.CV_32F, 1, 0, ksize=3)
198
+ y = cv2.Sobel(depth_np, cv2.CV_32F, 0, 1, ksize=3)
199
+ z = np.ones_like(x) * a
200
+ x[depth_pt < bg_th] = 0
201
+ y[depth_pt < bg_th] = 0
202
+ # normal = np.stack([x, y, z], axis=2)
203
+ # normal /= np.sum(normal ** 2.0, axis=2, keepdims=True) ** 0.5
204
+ # normal_image = (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
205
+
206
+ return depth_image#, normal_image
207
+
208
+ if __name__ == '__main__':
209
+ import matplotlib.pyplot as plt
210
+ from tqdm import tqdm
211
+ from PIL import Image
212
+ import torchvision.transforms.functional as F
213
+ apply_depth = MidasDetector(device=torch.device('cuda:0'))
214
+ img = cv2.imread('/data/vjuicefs_sz_cv_v2/11171709/ControlAR_github/condition/example/t2i/multi_resolution/car_1_448_768.jpg')
215
+ img = cv2.resize(img,(768,448))
216
+ detected_map = apply_depth(torch.from_numpy(img).cuda().float())
217
+ print(img.shape, img.max(),img.min(),detected_map.shape, detected_map.max(),detected_map.min())
218
+ plt.imshow(detected_map, cmap='gray')
219
+ plt.show()
220
+ cv2.imwrite('condition/example_depth.jpg', detected_map)
221
+ # cv2.imwrite('condition/example_normal.jpg', normal_map)
condition/midas/midas/__init__.py ADDED
File without changes
condition/midas/midas/base_model.py ADDED
@@ -0,0 +1,16 @@
+ import torch
+
+
+ class BaseModel(torch.nn.Module):
+     def load(self, path):
+         """Load model from file.
+
+         Args:
+             path (str): file path
+         """
+         parameters = torch.load(path, map_location=torch.device('cpu'))
+
+         if "optimizer" in parameters:
+             parameters = parameters["model"]
+
+         self.load_state_dict(parameters)
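A minimal usage sketch (not part of the uploaded file): BaseModel.load accepts either a bare state_dict or a training checkpoint that stores the weights under "model" next to an "optimizer" entry, and strips the wrapper before calling load_state_dict. TinyHead and the /tmp path below are hypothetical, for illustration only.

import torch
import torch.nn as nn
from condition.midas.midas.base_model import BaseModel

class TinyHead(BaseModel):  # hypothetical subclass used only for this sketch
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 1, kernel_size=1)

net = TinyHead()
# A training-style checkpoint: load() detects the "optimizer" key and unwraps "model".
torch.save({"model": net.state_dict(), "optimizer": {}}, "/tmp/tiny_head.pt")
net.load("/tmp/tiny_head.pt")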
condition/midas/midas/blocks.py ADDED
@@ -0,0 +1,341 @@
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from .vit import (
5
+ _make_pretrained_vitb_rn50_384,
6
+ _make_pretrained_vitl16_384,
7
+ _make_pretrained_vitb16_384,
8
+ forward_vit,
9
+ )
10
+
11
+ def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
12
+ if backbone == "vitl16_384":
13
+ pretrained = _make_pretrained_vitl16_384(
14
+ use_pretrained, hooks=hooks, use_readout=use_readout
15
+ )
16
+ scratch = _make_scratch(
17
+ [256, 512, 1024, 1024], features, groups=groups, expand=expand
18
+ ) # ViT-L/16 - 85.0% Top1 (backbone)
19
+ elif backbone == "vitb_rn50_384":
20
+ pretrained = _make_pretrained_vitb_rn50_384(
21
+ use_pretrained,
22
+ hooks=hooks,
23
+ use_vit_only=use_vit_only,
24
+ use_readout=use_readout,
25
+ )
26
+ scratch = _make_scratch(
27
+ [256, 512, 768, 768], features, groups=groups, expand=expand
28
+ ) # ViT-H/16 - 85.0% Top1 (backbone)
29
+ elif backbone == "vitb16_384":
30
+ pretrained = _make_pretrained_vitb16_384(
31
+ use_pretrained, hooks=hooks, use_readout=use_readout
32
+ )
33
+ scratch = _make_scratch(
34
+ [96, 192, 384, 768], features, groups=groups, expand=expand
35
+ ) # ViT-B/16 - 84.6% Top1 (backbone)
36
+ elif backbone == "resnext101_wsl":
37
+ pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
38
+ scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # resnext101_wsl
39
+ elif backbone == "efficientnet_lite3":
40
+ pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
41
+ scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3
42
+ else:
43
+ print(f"Backbone '{backbone}' not implemented")
44
+ assert False
45
+
46
+ return pretrained, scratch
47
+
48
+
49
+ def _make_scratch(in_shape, out_shape, groups=1, expand=False):
50
+ scratch = nn.Module()
51
+
52
+ out_shape1 = out_shape
53
+ out_shape2 = out_shape
54
+ out_shape3 = out_shape
55
+ out_shape4 = out_shape
56
+ if expand==True:
57
+ out_shape1 = out_shape
58
+ out_shape2 = out_shape*2
59
+ out_shape3 = out_shape*4
60
+ out_shape4 = out_shape*8
61
+
62
+ scratch.layer1_rn = nn.Conv2d(
63
+ in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
64
+ )
65
+ scratch.layer2_rn = nn.Conv2d(
66
+ in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
67
+ )
68
+ scratch.layer3_rn = nn.Conv2d(
69
+ in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
70
+ )
71
+ scratch.layer4_rn = nn.Conv2d(
72
+ in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
73
+ )
74
+
75
+ return scratch
76
+
77
+
78
+ def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
79
+ efficientnet = torch.hub.load(
80
+ "rwightman/gen-efficientnet-pytorch",
81
+ "tf_efficientnet_lite3",
82
+ pretrained=use_pretrained,
83
+ exportable=exportable
84
+ )
85
+ return _make_efficientnet_backbone(efficientnet)
86
+
87
+
88
+ def _make_efficientnet_backbone(effnet):
89
+ pretrained = nn.Module()
90
+
91
+ pretrained.layer1 = nn.Sequential(
92
+ effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
93
+ )
94
+ pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
95
+ pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
96
+ pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
97
+
98
+ return pretrained
99
+
100
+
101
+ def _make_resnet_backbone(resnet):
102
+ pretrained = nn.Module()
103
+ pretrained.layer1 = nn.Sequential(
104
+ resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
105
+ )
106
+
107
+ pretrained.layer2 = resnet.layer2
108
+ pretrained.layer3 = resnet.layer3
109
+ pretrained.layer4 = resnet.layer4
110
+
111
+ return pretrained
112
+
113
+
114
+ def _make_pretrained_resnext101_wsl(use_pretrained):
115
+ resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
116
+ return _make_resnet_backbone(resnet)
117
+
118
+
119
+
120
+ class Interpolate(nn.Module):
121
+ """Interpolation module.
122
+ """
123
+
124
+ def __init__(self, scale_factor, mode, align_corners=False):
125
+ """Init.
126
+
127
+ Args:
128
+ scale_factor (float): scaling
129
+ mode (str): interpolation mode
130
+ """
131
+ super(Interpolate, self).__init__()
132
+
133
+ self.interp = nn.functional.interpolate
134
+ self.scale_factor = scale_factor
135
+ self.mode = mode
136
+ self.align_corners = align_corners
137
+
138
+ def forward(self, x):
139
+ """Forward pass.
140
+
141
+ Args:
142
+ x (tensor): input
143
+
144
+ Returns:
145
+ tensor: interpolated data
146
+ """
147
+
148
+ x = self.interp(
149
+ x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
150
+ )
151
+
152
+ return x
153
+
154
+
155
+ class ResidualConvUnit(nn.Module):
156
+ """Residual convolution module.
157
+ """
158
+
159
+ def __init__(self, features):
160
+ """Init.
161
+
162
+ Args:
163
+ features (int): number of features
164
+ """
165
+ super().__init__()
166
+
167
+ self.conv1 = nn.Conv2d(
168
+ features, features, kernel_size=3, stride=1, padding=1, bias=True
169
+ )
170
+
171
+ self.conv2 = nn.Conv2d(
172
+ features, features, kernel_size=3, stride=1, padding=1, bias=True
173
+ )
174
+
175
+ self.relu = nn.ReLU(inplace=True)
176
+
177
+ def forward(self, x):
178
+ """Forward pass.
179
+
180
+ Args:
181
+ x (tensor): input
182
+
183
+ Returns:
184
+ tensor: output
185
+ """
186
+ out = self.relu(x)
187
+ out = self.conv1(out)
188
+ out = self.relu(out)
189
+ out = self.conv2(out)
190
+
191
+ return out + x
192
+
193
+
194
+ class FeatureFusionBlock(nn.Module):
195
+ """Feature fusion block.
196
+ """
197
+
198
+ def __init__(self, features):
199
+ """Init.
200
+
201
+ Args:
202
+ features (int): number of features
203
+ """
204
+ super(FeatureFusionBlock, self).__init__()
205
+
206
+ self.resConfUnit1 = ResidualConvUnit(features)
207
+ self.resConfUnit2 = ResidualConvUnit(features)
208
+
209
+ def forward(self, *xs):
210
+ """Forward pass.
211
+
212
+ Returns:
213
+ tensor: output
214
+ """
215
+ output = xs[0]
216
+
217
+ if len(xs) == 2:
218
+ output += self.resConfUnit1(xs[1])
219
+
220
+ output = self.resConfUnit2(output)
221
+
222
+ output = nn.functional.interpolate(
223
+ output, scale_factor=2, mode="bilinear", align_corners=True
224
+ )
225
+
226
+ return output
227
+
228
+
229
+
230
+
231
+ class ResidualConvUnit_custom(nn.Module):
232
+ """Residual convolution module.
233
+ """
234
+
235
+ def __init__(self, features, activation, bn):
236
+ """Init.
237
+
238
+ Args:
239
+ features (int): number of features
240
+ """
241
+ super().__init__()
242
+
243
+ self.bn = bn
244
+
245
+ self.groups=1
246
+
247
+ self.conv1 = nn.Conv2d(
248
+ features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
249
+ )
250
+
251
+ self.conv2 = nn.Conv2d(
252
+ features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
253
+ )
254
+
255
+ if self.bn==True:
256
+ self.bn1 = nn.BatchNorm2d(features)
257
+ self.bn2 = nn.BatchNorm2d(features)
258
+
259
+ self.activation = activation
260
+
261
+ self.skip_add = nn.quantized.FloatFunctional()
262
+
263
+ def forward(self, x):
264
+ """Forward pass.
265
+
266
+ Args:
267
+ x (tensor): input
268
+
269
+ Returns:
270
+ tensor: output
271
+ """
272
+
273
+ out = self.activation(x)
274
+ out = self.conv1(out)
275
+ if self.bn==True:
276
+ out = self.bn1(out)
277
+
278
+ out = self.activation(out)
279
+ out = self.conv2(out)
280
+ if self.bn==True:
281
+ out = self.bn2(out)
282
+
283
+ if self.groups > 1:
284
+ out = self.conv_merge(out)
285
+
286
+ return self.skip_add.add(out, x)
287
+
288
+ # return out + x
289
+
290
+
291
+ class FeatureFusionBlock_custom(nn.Module):
292
+ """Feature fusion block.
293
+ """
294
+
295
+ def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
296
+ """Init.
297
+
298
+ Args:
299
+ features (int): number of features
300
+ """
301
+ super(FeatureFusionBlock_custom, self).__init__()
302
+
303
+ self.deconv = deconv
304
+ self.align_corners = align_corners
305
+
306
+ self.groups=1
307
+
308
+ self.expand = expand
309
+ out_features = features
310
+ if self.expand==True:
311
+ out_features = features//2
312
+
313
+ self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
314
+
315
+ self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
316
+ self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
317
+
318
+ self.skip_add = nn.quantized.FloatFunctional()
319
+
320
+ def forward(self, *xs):
321
+ """Forward pass.
322
+
323
+ Returns:
324
+ tensor: output
325
+ """
326
+ output = xs[0]
327
+
328
+ if len(xs) == 2:
329
+ res = self.resConfUnit1(xs[1])
330
+ output = self.skip_add.add(output, res)
331
+ # output += res
332
+
333
+ output = self.resConfUnit2(output)
334
+
335
+ output = nn.functional.interpolate(
336
+ output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
337
+ )
338
+
339
+ output = self.out_conv(output)
340
+
341
+ return output
condition/midas/midas/dpt_depth.py ADDED
@@ -0,0 +1,108 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+ from .base_model import BaseModel
6
+ from .blocks import (
7
+ FeatureFusionBlock,
8
+ FeatureFusionBlock_custom,
9
+ Interpolate,
10
+ _make_encoder,
11
+ forward_vit,
12
+ )
13
+
14
+
15
+ def _make_fusion_block(features, use_bn):
16
+ return FeatureFusionBlock_custom(
17
+ features,
18
+ nn.ReLU(False),
19
+ deconv=False,
20
+ bn=use_bn,
21
+ expand=False,
22
+ align_corners=True,
23
+ )
24
+
25
+
26
+ class DPT(BaseModel):
27
+ def __init__(
28
+ self,
29
+ head,
30
+ features=256,
31
+ backbone="vitb_rn50_384",
32
+ readout="project",
33
+ channels_last=False,
34
+ use_bn=False,
35
+ ):
36
+
37
+ super(DPT, self).__init__()
38
+
39
+ self.channels_last = channels_last
40
+
41
+ hooks = {
42
+ "vitb_rn50_384": [0, 1, 8, 11],
43
+ "vitb16_384": [2, 5, 8, 11],
44
+ "vitl16_384": [5, 11, 17, 23],
45
+ }
46
+
47
+ # Instantiate backbone and reassemble blocks
48
+ self.pretrained, self.scratch = _make_encoder(
49
+ backbone,
50
+ features,
51
+ False, # Set to True if you want to train from scratch, uses ImageNet weights
52
+ groups=1,
53
+ expand=False,
54
+ exportable=False,
55
+ hooks=hooks[backbone],
56
+ use_readout=readout,
57
+ )
58
+
59
+ self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
60
+ self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
61
+ self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
62
+ self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
63
+
64
+ self.scratch.output_conv = head
65
+
66
+
67
+ def forward(self, x):
68
+ if self.channels_last == True:
69
+ x.contiguous(memory_format=torch.channels_last)
70
+
71
+ layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
72
+
73
+ layer_1_rn = self.scratch.layer1_rn(layer_1)
74
+ layer_2_rn = self.scratch.layer2_rn(layer_2)
75
+ layer_3_rn = self.scratch.layer3_rn(layer_3)
76
+ layer_4_rn = self.scratch.layer4_rn(layer_4)
77
+
78
+ path_4 = self.scratch.refinenet4(layer_4_rn)
79
+ path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
80
+ path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
81
+ path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
82
+
83
+ out = self.scratch.output_conv(path_1)
84
+
85
+ return out
86
+
87
+
88
+ class DPTDepthModel(DPT):
89
+ def __init__(self, path=None, non_negative=True, **kwargs):
90
+ features = kwargs["features"] if "features" in kwargs else 256
91
+
92
+ head = nn.Sequential(
93
+ nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
94
+ Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
95
+ nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
96
+ nn.ReLU(True),
97
+ nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
98
+ nn.ReLU(True) if non_negative else nn.Identity(),
99
+ nn.Identity(),
100
+ )
101
+
102
+ super().__init__(head, **kwargs)
103
+
104
+ if path is not None:
105
+ self.load(path)
106
+
107
+ def forward(self, x):
108
+ return super().forward(x).squeeze(dim=1)
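A quick smoke test for DPTDepthModel (not part of the uploaded file), assuming the installed timm still resolves the "vit_base_resnet50_384" hybrid requested by the "vitb_rn50_384" backbone; it builds the model without a checkpoint and checks that the predicted depth map matches the input resolution.

import torch
from condition.midas.midas.dpt_depth import DPTDepthModel

model = DPTDepthModel(path=None, backbone="vitb_rn50_384", non_negative=True).eval()
x = torch.randn(1, 3, 384, 384)  # dpt_hybrid operates on 384x384 inputs
with torch.no_grad():
    depth = model(x)
print(depth.shape)  # expected torch.Size([1, 384, 384]); forward() squeezes the channel dim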
condition/midas/midas/midas_net.py ADDED
@@ -0,0 +1,76 @@
1
+ """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
2
+ This file contains code that is adapted from
3
+ https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
4
+ """
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from .base_model import BaseModel
9
+ from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
10
+
11
+
12
+ class MidasNet(BaseModel):
13
+ """Network for monocular depth estimation.
14
+ """
15
+
16
+ def __init__(self, path=None, features=256, non_negative=True):
17
+ """Init.
18
+
19
+ Args:
20
+ path (str, optional): Path to saved model. Defaults to None.
21
+ features (int, optional): Number of features. Defaults to 256.
22
+ backbone: the encoder is fixed to resnext101_wsl in this implementation
23
+ """
24
+ print("Loading weights: ", path)
25
+
26
+ super(MidasNet, self).__init__()
27
+
28
+ use_pretrained = False if path is None else True
29
+
30
+ self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)
31
+
32
+ self.scratch.refinenet4 = FeatureFusionBlock(features)
33
+ self.scratch.refinenet3 = FeatureFusionBlock(features)
34
+ self.scratch.refinenet2 = FeatureFusionBlock(features)
35
+ self.scratch.refinenet1 = FeatureFusionBlock(features)
36
+
37
+ self.scratch.output_conv = nn.Sequential(
38
+ nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
39
+ Interpolate(scale_factor=2, mode="bilinear"),
40
+ nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
41
+ nn.ReLU(True),
42
+ nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
43
+ nn.ReLU(True) if non_negative else nn.Identity(),
44
+ )
45
+
46
+ if path:
47
+ self.load(path)
48
+
49
+ def forward(self, x):
50
+ """Forward pass.
51
+
52
+ Args:
53
+ x (tensor): input data (image)
54
+
55
+ Returns:
56
+ tensor: depth
57
+ """
58
+
59
+ layer_1 = self.pretrained.layer1(x)
60
+ layer_2 = self.pretrained.layer2(layer_1)
61
+ layer_3 = self.pretrained.layer3(layer_2)
62
+ layer_4 = self.pretrained.layer4(layer_3)
63
+
64
+ layer_1_rn = self.scratch.layer1_rn(layer_1)
65
+ layer_2_rn = self.scratch.layer2_rn(layer_2)
66
+ layer_3_rn = self.scratch.layer3_rn(layer_3)
67
+ layer_4_rn = self.scratch.layer4_rn(layer_4)
68
+
69
+ path_4 = self.scratch.refinenet4(layer_4_rn)
70
+ path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
71
+ path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
72
+ path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
73
+
74
+ out = self.scratch.output_conv(path_1)
75
+
76
+ return torch.squeeze(out, dim=1)
condition/midas/midas/midas_net_custom.py ADDED
@@ -0,0 +1,128 @@
1
+ """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
2
+ This file contains code that is adapted from
3
+ https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
4
+ """
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from .base_model import BaseModel
9
+ from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
10
+
11
+
12
+ class MidasNet_small(BaseModel):
13
+ """Network for monocular depth estimation.
14
+ """
15
+
16
+ def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
17
+ blocks={'expand': True}):
18
+ """Init.
19
+
20
+ Args:
21
+ path (str, optional): Path to saved model. Defaults to None.
22
+ features (int, optional): Number of features. Defaults to 64.
23
+ backbone (str, optional): Backbone network for encoder. Defaults to efficientnet_lite3
24
+ """
25
+ print("Loading weights: ", path)
26
+
27
+ super(MidasNet_small, self).__init__()
28
+
29
+ use_pretrained = False if path else True
30
+
31
+ self.channels_last = channels_last
32
+ self.blocks = blocks
33
+ self.backbone = backbone
34
+
35
+ self.groups = 1
36
+
37
+ features1=features
38
+ features2=features
39
+ features3=features
40
+ features4=features
41
+ self.expand = False
42
+ if "expand" in self.blocks and self.blocks['expand'] == True:
43
+ self.expand = True
44
+ features1=features
45
+ features2=features*2
46
+ features3=features*4
47
+ features4=features*8
48
+
49
+ self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
50
+
51
+ self.scratch.activation = nn.ReLU(False)
52
+
53
+ self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
54
+ self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
55
+ self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
56
+ self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
57
+
58
+
59
+ self.scratch.output_conv = nn.Sequential(
60
+ nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),
61
+ Interpolate(scale_factor=2, mode="bilinear"),
62
+ nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),
63
+ self.scratch.activation,
64
+ nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
65
+ nn.ReLU(True) if non_negative else nn.Identity(),
66
+ nn.Identity(),
67
+ )
68
+
69
+ if path:
70
+ self.load(path)
71
+
72
+
73
+ def forward(self, x):
74
+ """Forward pass.
75
+
76
+ Args:
77
+ x (tensor): input data (image)
78
+
79
+ Returns:
80
+ tensor: depth
81
+ """
82
+ if self.channels_last==True:
83
+ print("self.channels_last = ", self.channels_last)
84
+ x.contiguous(memory_format=torch.channels_last)
85
+
86
+
87
+ layer_1 = self.pretrained.layer1(x)
88
+ layer_2 = self.pretrained.layer2(layer_1)
89
+ layer_3 = self.pretrained.layer3(layer_2)
90
+ layer_4 = self.pretrained.layer4(layer_3)
91
+
92
+ layer_1_rn = self.scratch.layer1_rn(layer_1)
93
+ layer_2_rn = self.scratch.layer2_rn(layer_2)
94
+ layer_3_rn = self.scratch.layer3_rn(layer_3)
95
+ layer_4_rn = self.scratch.layer4_rn(layer_4)
96
+
97
+
98
+ path_4 = self.scratch.refinenet4(layer_4_rn)
99
+ path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
100
+ path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
101
+ path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
102
+
103
+ out = self.scratch.output_conv(path_1)
104
+
105
+ return torch.squeeze(out, dim=1)
106
+
107
+
108
+
109
+ def fuse_model(m):
110
+ prev_previous_type = nn.Identity()
111
+ prev_previous_name = ''
112
+ previous_type = nn.Identity()
113
+ previous_name = ''
114
+ for name, module in m.named_modules():
115
+ if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
116
+ # print("FUSED ", prev_previous_name, previous_name, name)
117
+ torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
118
+ elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
119
+ # print("FUSED ", prev_previous_name, previous_name)
120
+ torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
121
+ # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
122
+ # print("FUSED ", previous_name, name)
123
+ # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
124
+
125
+ prev_previous_type = previous_type
126
+ prev_previous_name = previous_name
127
+ previous_type = type(module)
128
+ previous_name = name
condition/midas/midas/transforms.py ADDED
@@ -0,0 +1,234 @@
1
+ import numpy as np
2
+ import cv2
3
+ import math
4
+
5
+
6
+ def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
7
+ """Rezise the sample to ensure the given size. Keeps aspect ratio.
8
+
9
+ Args:
10
+ sample (dict): sample
11
+ size (tuple): image size
12
+
13
+ Returns:
14
+ tuple: new size
15
+ """
16
+ shape = list(sample["disparity"].shape)
17
+
18
+ if shape[0] >= size[0] and shape[1] >= size[1]:
19
+ return sample
20
+
21
+ scale = [0, 0]
22
+ scale[0] = size[0] / shape[0]
23
+ scale[1] = size[1] / shape[1]
24
+
25
+ scale = max(scale)
26
+
27
+ shape[0] = math.ceil(scale * shape[0])
28
+ shape[1] = math.ceil(scale * shape[1])
29
+
30
+ # resize
31
+ sample["image"] = cv2.resize(
32
+ sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
33
+ )
34
+
35
+ sample["disparity"] = cv2.resize(
36
+ sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
37
+ )
38
+ sample["mask"] = cv2.resize(
39
+ sample["mask"].astype(np.float32),
40
+ tuple(shape[::-1]),
41
+ interpolation=cv2.INTER_NEAREST,
42
+ )
43
+ sample["mask"] = sample["mask"].astype(bool)
44
+
45
+ return tuple(shape)
46
+
47
+
48
+ class Resize(object):
49
+ """Resize sample to given size (width, height).
50
+ """
51
+
52
+ def __init__(
53
+ self,
54
+ width,
55
+ height,
56
+ resize_target=True,
57
+ keep_aspect_ratio=False,
58
+ ensure_multiple_of=1,
59
+ resize_method="lower_bound",
60
+ image_interpolation_method=cv2.INTER_AREA,
61
+ ):
62
+ """Init.
63
+
64
+ Args:
65
+ width (int): desired output width
66
+ height (int): desired output height
67
+ resize_target (bool, optional):
68
+ True: Resize the full sample (image, mask, target).
69
+ False: Resize image only.
70
+ Defaults to True.
71
+ keep_aspect_ratio (bool, optional):
72
+ True: Keep the aspect ratio of the input sample.
73
+ Output sample might not have the given width and height, and
74
+ resize behaviour depends on the parameter 'resize_method'.
75
+ Defaults to False.
76
+ ensure_multiple_of (int, optional):
77
+ Output width and height is constrained to be multiple of this parameter.
78
+ Defaults to 1.
79
+ resize_method (str, optional):
80
+ "lower_bound": Output will be at least as large as the given size.
81
+ "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
82
+ "minimal": Scale as least as possible. (Output size might be smaller than given size.)
83
+ Defaults to "lower_bound".
84
+ """
85
+ self.__width = width
86
+ self.__height = height
87
+
88
+ self.__resize_target = resize_target
89
+ self.__keep_aspect_ratio = keep_aspect_ratio
90
+ self.__multiple_of = ensure_multiple_of
91
+ self.__resize_method = resize_method
92
+ self.__image_interpolation_method = image_interpolation_method
93
+
94
+ def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
95
+ y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
96
+
97
+ if max_val is not None and y > max_val:
98
+ y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
99
+
100
+ if y < min_val:
101
+ y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
102
+
103
+ return y
104
+
105
+ def get_size(self, width, height):
106
+ # determine new height and width
107
+ scale_height = self.__height / height
108
+ scale_width = self.__width / width
109
+
110
+ if self.__keep_aspect_ratio:
111
+ if self.__resize_method == "lower_bound":
112
+ # scale such that output size is lower bound
113
+ if scale_width > scale_height:
114
+ # fit width
115
+ scale_height = scale_width
116
+ else:
117
+ # fit height
118
+ scale_width = scale_height
119
+ elif self.__resize_method == "upper_bound":
120
+ # scale such that output size is upper bound
121
+ if scale_width < scale_height:
122
+ # fit width
123
+ scale_height = scale_width
124
+ else:
125
+ # fit height
126
+ scale_width = scale_height
127
+ elif self.__resize_method == "minimal":
128
+ # scale as little as possible
129
+ if abs(1 - scale_width) < abs(1 - scale_height):
130
+ # fit width
131
+ scale_height = scale_width
132
+ else:
133
+ # fit height
134
+ scale_width = scale_height
135
+ else:
136
+ raise ValueError(
137
+ f"resize_method {self.__resize_method} not implemented"
138
+ )
139
+
140
+ if self.__resize_method == "lower_bound":
141
+ new_height = self.constrain_to_multiple_of(
142
+ scale_height * height, min_val=self.__height
143
+ )
144
+ new_width = self.constrain_to_multiple_of(
145
+ scale_width * width, min_val=self.__width
146
+ )
147
+ elif self.__resize_method == "upper_bound":
148
+ new_height = self.constrain_to_multiple_of(
149
+ scale_height * height, max_val=self.__height
150
+ )
151
+ new_width = self.constrain_to_multiple_of(
152
+ scale_width * width, max_val=self.__width
153
+ )
154
+ elif self.__resize_method == "minimal":
155
+ new_height = self.constrain_to_multiple_of(scale_height * height)
156
+ new_width = self.constrain_to_multiple_of(scale_width * width)
157
+ else:
158
+ raise ValueError(f"resize_method {self.__resize_method} not implemented")
159
+
160
+ return (new_width, new_height)
161
+
162
+ def __call__(self, sample):
163
+ width, height = self.get_size(
164
+ sample["image"].shape[1], sample["image"].shape[0]
165
+ )
166
+
167
+ # resize sample
168
+ sample["image"] = cv2.resize(
169
+ sample["image"],
170
+ (width, height),
171
+ interpolation=self.__image_interpolation_method,
172
+ )
173
+
174
+ if self.__resize_target:
175
+ if "disparity" in sample:
176
+ sample["disparity"] = cv2.resize(
177
+ sample["disparity"],
178
+ (width, height),
179
+ interpolation=cv2.INTER_NEAREST,
180
+ )
181
+
182
+ if "depth" in sample:
183
+ sample["depth"] = cv2.resize(
184
+ sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
185
+ )
186
+
187
+ sample["mask"] = cv2.resize(
188
+ sample["mask"].astype(np.float32),
189
+ (width, height),
190
+ interpolation=cv2.INTER_NEAREST,
191
+ )
192
+ sample["mask"] = sample["mask"].astype(bool)
193
+
194
+ return sample
195
+
196
+
197
+ class NormalizeImage(object):
198
+ """Normlize image by given mean and std.
199
+ """
200
+
201
+ def __init__(self, mean, std):
202
+ self.__mean = mean
203
+ self.__std = std
204
+
205
+ def __call__(self, sample):
206
+ sample["image"] = (sample["image"] - self.__mean) / self.__std
207
+
208
+ return sample
209
+
210
+
211
+ class PrepareForNet(object):
212
+ """Prepare sample for usage as network input.
213
+ """
214
+
215
+ def __init__(self):
216
+ pass
217
+
218
+ def __call__(self, sample):
219
+ image = np.transpose(sample["image"], (2, 0, 1))
220
+ sample["image"] = np.ascontiguousarray(image).astype(np.float32)
221
+
222
+ if "mask" in sample:
223
+ sample["mask"] = sample["mask"].astype(np.float32)
224
+ sample["mask"] = np.ascontiguousarray(sample["mask"])
225
+
226
+ if "disparity" in sample:
227
+ disparity = sample["disparity"].astype(np.float32)
228
+ sample["disparity"] = np.ascontiguousarray(disparity)
229
+
230
+ if "depth" in sample:
231
+ depth = sample["depth"].astype(np.float32)
232
+ sample["depth"] = np.ascontiguousarray(depth)
233
+
234
+ return sample
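A small usage sketch (not part of the uploaded file) mirroring how condition/midas/depth.py composes these transforms; the input array and sizes are illustrative. Resize with keep_aspect_ratio and "minimal" scales both sides by the same factor and snaps them to multiples of 32, NormalizeImage standardizes the pixels, and PrepareForNet produces a contiguous CHW float32 array.

import numpy as np
import torch
from torchvision.transforms import Compose
from condition.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet

transform = Compose([
    Resize(384, 384, resize_target=None, keep_aspect_ratio=True,
           ensure_multiple_of=32, resize_method="minimal"),
    NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    PrepareForNet(),
])
sample = {"image": np.random.rand(480, 640, 3).astype(np.float32)}  # HWC image in [0, 1]
out = transform(sample)
x = torch.from_numpy(out["image"]).unsqueeze(0)
print(x.shape)  # (1, 3, 384, 512): both sides scaled by 0.8, already multiples of 32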
condition/midas/midas/vit.py ADDED
@@ -0,0 +1,491 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import timm
4
+ import types
5
+ import math
6
+ import torch.nn.functional as F
7
+
8
+
9
+ class Slice(nn.Module):
10
+ def __init__(self, start_index=1):
11
+ super(Slice, self).__init__()
12
+ self.start_index = start_index
13
+
14
+ def forward(self, x):
15
+ return x[:, self.start_index :]
16
+
17
+
18
+ class AddReadout(nn.Module):
19
+ def __init__(self, start_index=1):
20
+ super(AddReadout, self).__init__()
21
+ self.start_index = start_index
22
+
23
+ def forward(self, x):
24
+ if self.start_index == 2:
25
+ readout = (x[:, 0] + x[:, 1]) / 2
26
+ else:
27
+ readout = x[:, 0]
28
+ return x[:, self.start_index :] + readout.unsqueeze(1)
29
+
30
+
31
+ class ProjectReadout(nn.Module):
32
+ def __init__(self, in_features, start_index=1):
33
+ super(ProjectReadout, self).__init__()
34
+ self.start_index = start_index
35
+
36
+ self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())
37
+
38
+ def forward(self, x):
39
+ readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :])
40
+ features = torch.cat((x[:, self.start_index :], readout), -1)
41
+
42
+ return self.project(features)
43
+
44
+
45
+ class Transpose(nn.Module):
46
+ def __init__(self, dim0, dim1):
47
+ super(Transpose, self).__init__()
48
+ self.dim0 = dim0
49
+ self.dim1 = dim1
50
+
51
+ def forward(self, x):
52
+ x = x.transpose(self.dim0, self.dim1)
53
+ return x
54
+
55
+
56
+ def forward_vit(pretrained, x):
57
+ b, c, h, w = x.shape
58
+
59
+ glob = pretrained.model.forward_flex(x)
60
+
61
+ layer_1 = pretrained.activations["1"]
62
+ layer_2 = pretrained.activations["2"]
63
+ layer_3 = pretrained.activations["3"]
64
+ layer_4 = pretrained.activations["4"]
65
+
66
+ layer_1 = pretrained.act_postprocess1[0:2](layer_1)
67
+ layer_2 = pretrained.act_postprocess2[0:2](layer_2)
68
+ layer_3 = pretrained.act_postprocess3[0:2](layer_3)
69
+ layer_4 = pretrained.act_postprocess4[0:2](layer_4)
70
+
71
+ unflatten = nn.Sequential(
72
+ nn.Unflatten(
73
+ 2,
74
+ torch.Size(
75
+ [
76
+ h // pretrained.model.patch_size[1],
77
+ w // pretrained.model.patch_size[0],
78
+ ]
79
+ ),
80
+ )
81
+ )
82
+
83
+ if layer_1.ndim == 3:
84
+ layer_1 = unflatten(layer_1)
85
+ if layer_2.ndim == 3:
86
+ layer_2 = unflatten(layer_2)
87
+ if layer_3.ndim == 3:
88
+ layer_3 = unflatten(layer_3)
89
+ if layer_4.ndim == 3:
90
+ layer_4 = unflatten(layer_4)
91
+
92
+ layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
93
+ layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
94
+ layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
95
+ layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)
96
+
97
+ return layer_1, layer_2, layer_3, layer_4
98
+
99
+
100
+ def _resize_pos_embed(self, posemb, gs_h, gs_w):
101
+ posemb_tok, posemb_grid = (
102
+ posemb[:, : self.start_index],
103
+ posemb[0, self.start_index :],
104
+ )
105
+
106
+ gs_old = int(math.sqrt(len(posemb_grid)))
107
+
108
+ posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
109
+ posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
110
+ posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
111
+
112
+ posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
113
+
114
+ return posemb
115
+
116
+
117
+ def forward_flex(self, x):
118
+ b, c, h, w = x.shape
119
+
120
+ pos_embed = self._resize_pos_embed(
121
+ self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
122
+ )
123
+
124
+ B = x.shape[0]
125
+
126
+ if hasattr(self.patch_embed, "backbone"):
127
+ x = self.patch_embed.backbone(x)
128
+ if isinstance(x, (list, tuple)):
129
+ x = x[-1] # last feature if backbone outputs list/tuple of features
130
+
131
+ x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
132
+
133
+ if getattr(self, "dist_token", None) is not None:
134
+ cls_tokens = self.cls_token.expand(
135
+ B, -1, -1
136
+ ) # stole cls_tokens impl from Phil Wang, thanks
137
+ dist_token = self.dist_token.expand(B, -1, -1)
138
+ x = torch.cat((cls_tokens, dist_token, x), dim=1)
139
+ else:
140
+ cls_tokens = self.cls_token.expand(
141
+ B, -1, -1
142
+ ) # stole cls_tokens impl from Phil Wang, thanks
143
+ x = torch.cat((cls_tokens, x), dim=1)
144
+
145
+ x = x + pos_embed
146
+ x = self.pos_drop(x)
147
+
148
+ for blk in self.blocks:
149
+ x = blk(x)
150
+
151
+ x = self.norm(x)
152
+
153
+ return x
154
+
155
+
156
+ activations = {}
157
+
158
+
159
+ def get_activation(name):
160
+ def hook(model, input, output):
161
+ activations[name] = output
162
+
163
+ return hook
164
+
165
+
166
+ def get_readout_oper(vit_features, features, use_readout, start_index=1):
167
+ if use_readout == "ignore":
168
+ readout_oper = [Slice(start_index)] * len(features)
169
+ elif use_readout == "add":
170
+ readout_oper = [AddReadout(start_index)] * len(features)
171
+ elif use_readout == "project":
172
+ readout_oper = [
173
+ ProjectReadout(vit_features, start_index) for out_feat in features
174
+ ]
175
+ else:
176
+ assert (
177
+ False
178
+ ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
179
+
180
+ return readout_oper
181
+
182
+
183
+ def _make_vit_b16_backbone(
184
+ model,
185
+ features=[96, 192, 384, 768],
186
+ size=[384, 384],
187
+ hooks=[2, 5, 8, 11],
188
+ vit_features=768,
189
+ use_readout="ignore",
190
+ start_index=1,
191
+ ):
192
+ pretrained = nn.Module()
193
+
194
+ pretrained.model = model
195
+ pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
196
+ pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
197
+ pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
198
+ pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
199
+
200
+ pretrained.activations = activations
201
+
202
+ readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
203
+
204
+ # 32, 48, 136, 384
205
+ pretrained.act_postprocess1 = nn.Sequential(
206
+ readout_oper[0],
207
+ Transpose(1, 2),
208
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
209
+ nn.Conv2d(
210
+ in_channels=vit_features,
211
+ out_channels=features[0],
212
+ kernel_size=1,
213
+ stride=1,
214
+ padding=0,
215
+ ),
216
+ nn.ConvTranspose2d(
217
+ in_channels=features[0],
218
+ out_channels=features[0],
219
+ kernel_size=4,
220
+ stride=4,
221
+ padding=0,
222
+ bias=True,
223
+ dilation=1,
224
+ groups=1,
225
+ ),
226
+ )
227
+
228
+ pretrained.act_postprocess2 = nn.Sequential(
229
+ readout_oper[1],
230
+ Transpose(1, 2),
231
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
232
+ nn.Conv2d(
233
+ in_channels=vit_features,
234
+ out_channels=features[1],
235
+ kernel_size=1,
236
+ stride=1,
237
+ padding=0,
238
+ ),
239
+ nn.ConvTranspose2d(
240
+ in_channels=features[1],
241
+ out_channels=features[1],
242
+ kernel_size=2,
243
+ stride=2,
244
+ padding=0,
245
+ bias=True,
246
+ dilation=1,
247
+ groups=1,
248
+ ),
249
+ )
250
+
251
+ pretrained.act_postprocess3 = nn.Sequential(
252
+ readout_oper[2],
253
+ Transpose(1, 2),
254
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
255
+ nn.Conv2d(
256
+ in_channels=vit_features,
257
+ out_channels=features[2],
258
+ kernel_size=1,
259
+ stride=1,
260
+ padding=0,
261
+ ),
262
+ )
263
+
264
+ pretrained.act_postprocess4 = nn.Sequential(
265
+ readout_oper[3],
266
+ Transpose(1, 2),
267
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
268
+ nn.Conv2d(
269
+ in_channels=vit_features,
270
+ out_channels=features[3],
271
+ kernel_size=1,
272
+ stride=1,
273
+ padding=0,
274
+ ),
275
+ nn.Conv2d(
276
+ in_channels=features[3],
277
+ out_channels=features[3],
278
+ kernel_size=3,
279
+ stride=2,
280
+ padding=1,
281
+ ),
282
+ )
283
+
284
+ pretrained.model.start_index = start_index
285
+ pretrained.model.patch_size = [16, 16]
286
+
287
+ # We inject this function into the VisionTransformer instances so that
288
+ # we can use it with interpolated position embeddings without modifying the library source.
289
+ pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
290
+ pretrained.model._resize_pos_embed = types.MethodType(
291
+ _resize_pos_embed, pretrained.model
292
+ )
293
+
294
+ return pretrained
295
+
296
+
297
+ def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
298
+ model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
299
+
300
+ hooks = [5, 11, 17, 23] if hooks == None else hooks
301
+ return _make_vit_b16_backbone(
302
+ model,
303
+ features=[256, 512, 1024, 1024],
304
+ hooks=hooks,
305
+ vit_features=1024,
306
+ use_readout=use_readout,
307
+ )
308
+
309
+
310
+ def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
311
+ model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
312
+
313
+ hooks = [2, 5, 8, 11] if hooks == None else hooks
314
+ return _make_vit_b16_backbone(
315
+ model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
316
+ )
317
+
318
+
319
+ def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
320
+ model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
321
+
322
+ hooks = [2, 5, 8, 11] if hooks == None else hooks
323
+ return _make_vit_b16_backbone(
324
+ model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
325
+ )
326
+
327
+
328
+ def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
329
+ model = timm.create_model(
330
+ "vit_deit_base_distilled_patch16_384", pretrained=pretrained
331
+ )
332
+
333
+ hooks = [2, 5, 8, 11] if hooks == None else hooks
334
+ return _make_vit_b16_backbone(
335
+ model,
336
+ features=[96, 192, 384, 768],
337
+ hooks=hooks,
338
+ use_readout=use_readout,
339
+ start_index=2,
340
+ )
341
+
342
+
343
+ def _make_vit_b_rn50_backbone(
344
+ model,
345
+ features=[256, 512, 768, 768],
346
+ size=[384, 384],
347
+ hooks=[0, 1, 8, 11],
348
+ vit_features=768,
349
+ use_vit_only=False,
350
+ use_readout="ignore",
351
+ start_index=1,
352
+ ):
353
+ pretrained = nn.Module()
354
+
355
+ pretrained.model = model
356
+
357
+ if use_vit_only == True:
358
+ pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
359
+ pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
360
+ else:
361
+ pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
362
+ get_activation("1")
363
+ )
364
+ pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
365
+ get_activation("2")
366
+ )
367
+
368
+ pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
369
+ pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
370
+
371
+ pretrained.activations = activations
372
+
373
+ readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
374
+
375
+ if use_vit_only == True:
376
+ pretrained.act_postprocess1 = nn.Sequential(
377
+ readout_oper[0],
378
+ Transpose(1, 2),
379
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
380
+ nn.Conv2d(
381
+ in_channels=vit_features,
382
+ out_channels=features[0],
383
+ kernel_size=1,
384
+ stride=1,
385
+ padding=0,
386
+ ),
387
+ nn.ConvTranspose2d(
388
+ in_channels=features[0],
389
+ out_channels=features[0],
390
+ kernel_size=4,
391
+ stride=4,
392
+ padding=0,
393
+ bias=True,
394
+ dilation=1,
395
+ groups=1,
396
+ ),
397
+ )
398
+
399
+ pretrained.act_postprocess2 = nn.Sequential(
400
+ readout_oper[1],
401
+ Transpose(1, 2),
402
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
403
+ nn.Conv2d(
404
+ in_channels=vit_features,
405
+ out_channels=features[1],
406
+ kernel_size=1,
407
+ stride=1,
408
+ padding=0,
409
+ ),
410
+ nn.ConvTranspose2d(
411
+ in_channels=features[1],
412
+ out_channels=features[1],
413
+ kernel_size=2,
414
+ stride=2,
415
+ padding=0,
416
+ bias=True,
417
+ dilation=1,
418
+ groups=1,
419
+ ),
420
+ )
421
+ else:
422
+ pretrained.act_postprocess1 = nn.Sequential(
423
+ nn.Identity(), nn.Identity(), nn.Identity()
424
+ )
425
+ pretrained.act_postprocess2 = nn.Sequential(
426
+ nn.Identity(), nn.Identity(), nn.Identity()
427
+ )
428
+
429
+ pretrained.act_postprocess3 = nn.Sequential(
430
+ readout_oper[2],
431
+ Transpose(1, 2),
432
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
433
+ nn.Conv2d(
434
+ in_channels=vit_features,
435
+ out_channels=features[2],
436
+ kernel_size=1,
437
+ stride=1,
438
+ padding=0,
439
+ ),
440
+ )
441
+
442
+ pretrained.act_postprocess4 = nn.Sequential(
443
+ readout_oper[3],
444
+ Transpose(1, 2),
445
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
446
+ nn.Conv2d(
447
+ in_channels=vit_features,
448
+ out_channels=features[3],
449
+ kernel_size=1,
450
+ stride=1,
451
+ padding=0,
452
+ ),
453
+ nn.Conv2d(
454
+ in_channels=features[3],
455
+ out_channels=features[3],
456
+ kernel_size=3,
457
+ stride=2,
458
+ padding=1,
459
+ ),
460
+ )
461
+
462
+ pretrained.model.start_index = start_index
463
+ pretrained.model.patch_size = [16, 16]
464
+
465
+ # We inject this function into the VisionTransformer instances so that
466
+ # we can use it with interpolated position embeddings without modifying the library source.
467
+ pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
468
+
469
+ # We inject this function into the VisionTransformer instances so that
470
+ # we can use it with interpolated position embeddings without modifying the library source.
471
+ pretrained.model._resize_pos_embed = types.MethodType(
472
+ _resize_pos_embed, pretrained.model
473
+ )
474
+
475
+ return pretrained
476
+
477
+
478
+ def _make_pretrained_vitb_rn50_384(
479
+ pretrained, use_readout="ignore", hooks=None, use_vit_only=False
480
+ ):
481
+ model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
482
+
483
+ hooks = [0, 1, 8, 11] if hooks is None else hooks
484
+ return _make_vit_b_rn50_backbone(
485
+ model,
486
+ features=[256, 512, 768, 768],
487
+ size=[384, 384],
488
+ hooks=hooks,
489
+ use_vit_only=use_vit_only,
490
+ use_readout=use_readout,
491
+ )
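These `_make_pretrained_*` factories wrap timm ViT / ViT-Hybrid models with forward hooks so the MiDaS DPT decoder can read four intermediate feature maps. A minimal usage sketch follows (not part of the commit; it assumes timm is installed and that this module's helpers are importable as shown):

import torch
from condition.midas.midas.vit import _make_pretrained_vitb_rn50_384

# Build the hooked ViT-Hybrid backbone (random weights here to avoid a checkpoint download).
backbone = _make_pretrained_vitb_rn50_384(pretrained=False, use_readout="ignore")
x = torch.randn(1, 3, 384, 384)         # DPT's default input resolution
_ = backbone.model.forward_flex(x)      # forward pass; the hooks fill backbone.activations
feats = [backbone.activations[k] for k in ("1", "2", "3", "4")]
print([tuple(f.shape) for f in feats])  # two ResNet stages + two transformer blocks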
condition/utils.py ADDED
@@ -0,0 +1,38 @@
1
+ import numpy as np
2
+ import cv2
3
+ import os
4
+
5
+
6
+ annotator_ckpts_path = os.path.join(os.path.dirname(__file__), 'ckpts')
7
+
8
+
9
+ def HWC3(x):
10
+ assert x.dtype == np.uint8
11
+ if x.ndim == 2:
12
+ x = x[:, :, None]
13
+ assert x.ndim == 3
14
+ H, W, C = x.shape
15
+ assert C == 1 or C == 3 or C == 4
16
+ if C == 3:
17
+ return x
18
+ if C == 1:
19
+ return np.concatenate([x, x, x], axis=2)
20
+ if C == 4:
21
+ color = x[:, :, 0:3].astype(np.float32)
22
+ alpha = x[:, :, 3:4].astype(np.float32) / 255.0
23
+ y = color * alpha + 255.0 * (1.0 - alpha)
24
+ y = y.clip(0, 255).astype(np.uint8)
25
+ return y
26
+
27
+
28
+ def resize_image(input_image, resolution):
29
+ H, W, C = input_image.shape
30
+ H = float(H)
31
+ W = float(W)
32
+ k = float(resolution) / min(H, W)
33
+ H *= k
34
+ W *= k
35
+ H = int(np.round(H / 64.0)) * 64
36
+ W = int(np.round(W / 64.0)) * 64
37
+ img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)
38
+ return img
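A quick usage sketch for these two helpers (not part of the commit; the input path is a placeholder):

import cv2
from condition.utils import HWC3, resize_image

img = cv2.imread("example.png")          # uint8 BGR image, HxWxC
img = HWC3(img)                          # force a 3-channel uint8 image (alpha composited onto white)
img = resize_image(img, resolution=512)  # short side scaled to ~512, both sides snapped to multiples of 64
print(img.shape)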
data/Captioned_ADE20K/.gitattributes ADDED
@@ -0,0 +1,55 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
+ *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
15
+ *.npy filter=lfs diff=lfs merge=lfs -text
16
+ *.npz filter=lfs diff=lfs merge=lfs -text
17
+ *.onnx filter=lfs diff=lfs merge=lfs -text
18
+ *.ot filter=lfs diff=lfs merge=lfs -text
19
+ *.parquet filter=lfs diff=lfs merge=lfs -text
20
+ *.pb filter=lfs diff=lfs merge=lfs -text
21
+ *.pickle filter=lfs diff=lfs merge=lfs -text
22
+ *.pkl filter=lfs diff=lfs merge=lfs -text
23
+ *.pt filter=lfs diff=lfs merge=lfs -text
24
+ *.pth filter=lfs diff=lfs merge=lfs -text
25
+ *.rar filter=lfs diff=lfs merge=lfs -text
26
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
27
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar filter=lfs diff=lfs merge=lfs -text
30
+ *.tflite filter=lfs diff=lfs merge=lfs -text
31
+ *.tgz filter=lfs diff=lfs merge=lfs -text
32
+ *.wasm filter=lfs diff=lfs merge=lfs -text
33
+ *.xz filter=lfs diff=lfs merge=lfs -text
34
+ *.zip filter=lfs diff=lfs merge=lfs -text
35
+ *.zst filter=lfs diff=lfs merge=lfs -text
36
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
37
+ # Audio files - uncompressed
38
+ *.pcm filter=lfs diff=lfs merge=lfs -text
39
+ *.sam filter=lfs diff=lfs merge=lfs -text
40
+ *.raw filter=lfs diff=lfs merge=lfs -text
41
+ # Audio files - compressed
42
+ *.aac filter=lfs diff=lfs merge=lfs -text
43
+ *.flac filter=lfs diff=lfs merge=lfs -text
44
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
45
+ *.ogg filter=lfs diff=lfs merge=lfs -text
46
+ *.wav filter=lfs diff=lfs merge=lfs -text
47
+ # Image files - uncompressed
48
+ *.bmp filter=lfs diff=lfs merge=lfs -text
49
+ *.gif filter=lfs diff=lfs merge=lfs -text
50
+ *.png filter=lfs diff=lfs merge=lfs -text
51
+ *.tiff filter=lfs diff=lfs merge=lfs -text
52
+ # Image files - compressed
53
+ *.jpg filter=lfs diff=lfs merge=lfs -text
54
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
55
+ *.webp filter=lfs diff=lfs merge=lfs -text
data/Captioned_ADE20K/.hfd/last_download_command ADDED
@@ -0,0 +1 @@
1
+ REPO_ID=limingcv/Captioned_ADE20K TOOL=wget INCLUDE_PATTERNS= EXCLUDE_PATTERNS= DATASET=1 HF_USERNAME= HF_TOKEN= HF_TOKEN=https://hf-mirror.com REVISION=main
data/Captioned_ADE20K/.hfd/repo_metadata.json ADDED
@@ -0,0 +1 @@
1
+ {"_id":"653f319baa1f487614013f5a","id":"limingcv/Captioned_ADE20K","author":"limingcv","sha":"1406fc53a4749e505126bd385035cd7968e76dba","lastModified":"2023-10-30T04:38:04.000Z","private":false,"gated":false,"disabled":false,"tags":["size_categories:10K<n<100K","format:parquet","modality:image","modality:text","library:datasets","library:dask","library:mlcroissant","library:polars","region:us"],"citation":null,"description":null,"downloads":396,"likes":2,"cardData":{"dataset_info":{"features":[{"name":"image","dtype":"image"},{"name":"detailed_prompt","dtype":"string"},{"name":"control_seg","dtype":"image"},{"name":"seg_map","sequence":{"sequence":"uint8"}},{"name":"image_path","dtype":"string"},{"name":"prompt","dtype":"string"}],"splits":[{"name":"train","num_bytes":11095603078.08,"num_examples":20210},{"name":"validation","num_bytes":1128604170,"num_examples":2000}],"download_size":7044514076,"dataset_size":12224207248.08},"configs":[{"config_name":"default","data_files":[{"split":"train","path":"data/train-*"},{"split":"validation","path":"data/validation-*"}]}]},"siblings":[{"rfilename":".gitattributes"},{"rfilename":"README.md"},{"rfilename":"data/train-00000-of-00023.parquet"},{"rfilename":"data/train-00001-of-00023.parquet"},{"rfilename":"data/train-00002-of-00023.parquet"},{"rfilename":"data/train-00003-of-00023.parquet"},{"rfilename":"data/train-00004-of-00023.parquet"},{"rfilename":"data/train-00005-of-00023.parquet"},{"rfilename":"data/train-00006-of-00023.parquet"},{"rfilename":"data/train-00007-of-00023.parquet"},{"rfilename":"data/train-00008-of-00023.parquet"},{"rfilename":"data/train-00009-of-00023.parquet"},{"rfilename":"data/train-00010-of-00023.parquet"},{"rfilename":"data/train-00011-of-00023.parquet"},{"rfilename":"data/train-00012-of-00023.parquet"},{"rfilename":"data/train-00013-of-00023.parquet"},{"rfilename":"data/train-00014-of-00023.parquet"},{"rfilename":"data/train-00015-of-00023.parquet"},{"rfilename":"data/train-00016-of-00023.parquet"},{"rfilename":"data/train-00017-of-00023.parquet"},{"rfilename":"data/train-00018-of-00023.parquet"},{"rfilename":"data/train-00019-of-00023.parquet"},{"rfilename":"data/train-00020-of-00023.parquet"},{"rfilename":"data/train-00021-of-00023.parquet"},{"rfilename":"data/train-00022-of-00023.parquet"},{"rfilename":"data/validation-00000-of-00003.parquet"},{"rfilename":"data/validation-00001-of-00003.parquet"},{"rfilename":"data/validation-00002-of-00003.parquet"}],"createdAt":"2023-10-30T04:31:23.000Z","usedStorage":24658841884}
data/Captioned_ADE20K/.hfd/wget_urls.txt ADDED
@@ -0,0 +1,28 @@
1
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/.gitattributes
2
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/README.md
3
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00000-of-00023.parquet
4
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00001-of-00023.parquet
5
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00002-of-00023.parquet
6
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00003-of-00023.parquet
7
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00004-of-00023.parquet
8
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00005-of-00023.parquet
9
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00006-of-00023.parquet
10
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00007-of-00023.parquet
11
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00008-of-00023.parquet
12
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00009-of-00023.parquet
13
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00010-of-00023.parquet
14
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00011-of-00023.parquet
15
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00012-of-00023.parquet
16
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00013-of-00023.parquet
17
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00014-of-00023.parquet
18
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00015-of-00023.parquet
19
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00016-of-00023.parquet
20
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00017-of-00023.parquet
21
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00018-of-00023.parquet
22
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00019-of-00023.parquet
23
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00020-of-00023.parquet
24
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00021-of-00023.parquet
25
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/train-00022-of-00023.parquet
26
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/validation-00000-of-00003.parquet
27
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/validation-00001-of-00003.parquet
28
+ https://hf-mirror.com/datasets/limingcv/Captioned_ADE20K/resolve/main/data/validation-00002-of-00003.parquet
data/Captioned_ADE20K/README.md ADDED
@@ -0,0 +1,33 @@
1
+ ---
2
+ dataset_info:
3
+ features:
4
+ - name: image
5
+ dtype: image
6
+ - name: detailed_prompt
7
+ dtype: string
8
+ - name: control_seg
9
+ dtype: image
10
+ - name: seg_map
11
+ sequence:
12
+ sequence: uint8
13
+ - name: image_path
14
+ dtype: string
15
+ - name: prompt
16
+ dtype: string
17
+ splits:
18
+ - name: train
19
+ num_bytes: 11095603078.08
20
+ num_examples: 20210
21
+ - name: validation
22
+ num_bytes: 1128604170.0
23
+ num_examples: 2000
24
+ download_size: 7044514076
25
+ dataset_size: 12224207248.08
26
+ configs:
27
+ - config_name: default
28
+ data_files:
29
+ - split: train
30
+ path: data/train-*
31
+ - split: validation
32
+ path: data/validation-*
33
+ ---
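Given this card, the splits can be loaded directly with the `datasets` library; a sketch (not part of the commit, and it assumes the parquet shards listed above are available locally or via the Hub/mirror):

from datasets import load_dataset

ds = load_dataset("limingcv/Captioned_ADE20K", split="train")
sample = ds[0]
print(sample["prompt"])
print(sample["image_path"])
# 'image' and 'control_seg' are decoded to PIL images; 'seg_map' is a nested uint8 sequence.
print(sample["image"].size, sample["control_seg"].size)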
data/Captioned_ADE20K/see_parquet.py ADDED
@@ -0,0 +1,13 @@
1
+ import pandas as pd
2
+
3
+
4
+ parquet_path = "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/data/train-00006-of-00023.parquet"
5
+
6
+ # Read the parquet file
7
+ df = pd.read_parquet(parquet_path)
8
+
9
+ # Show the column names
10
+ print("字段列名:", df.columns.tolist())
11
+
12
+ # Show the first few rows
13
+ print(df.head())
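In the raw parquet shards the image-typed columns are serialized as structs holding the encoded bytes, so a follow-on sketch for decoding one row could look like this (an assumption about the shard layout inferred from the dataset card above, not something verified in this commit):

import io
from PIL import Image

row = df.iloc[0]
img = Image.open(io.BytesIO(row["image"]["bytes"]))   # assumes the HF image struct {'bytes', 'path'}
print(img.size, row["prompt"])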
data/Captioned_ADE20K/train.jsonl ADDED
@@ -0,0 +1,880 @@
1
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/0.png"}
2
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/1.png"}
3
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/10.png"}
4
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/100.png"}
5
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/101.png"}
6
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/102.png"}
7
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/103.png"}
8
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/104.png"}
9
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/105.png"}
10
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/106.png"}
11
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/107.png"}
12
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/108.png"}
13
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/109.png"}
14
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/11.png"}
15
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/110.png"}
16
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/111.png"}
17
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/112.png"}
18
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/113.png"}
19
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/114.png"}
20
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/115.png"}
21
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/116.png"}
22
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/117.png"}
23
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/118.png"}
24
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/119.png"}
25
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/12.png"}
26
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/120.png"}
27
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/121.png"}
28
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/122.png"}
29
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/123.png"}
30
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/124.png"}
31
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/125.png"}
32
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/126.png"}
33
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/127.png"}
34
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/128.png"}
35
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/129.png"}
36
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/13.png"}
37
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/130.png"}
38
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/131.png"}
39
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/132.png"}
40
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/133.png"}
41
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/134.png"}
42
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/135.png"}
43
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/136.png"}
44
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/137.png"}
45
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/138.png"}
46
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/139.png"}
47
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/14.png"}
48
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/140.png"}
49
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/141.png"}
50
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/142.png"}
51
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/143.png"}
52
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/144.png"}
53
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/145.png"}
54
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/146.png"}
55
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/147.png"}
56
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/148.png"}
57
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/149.png"}
58
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/15.png"}
59
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/150.png"}
60
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/151.png"}
61
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/152.png"}
62
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/153.png"}
63
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/154.png"}
64
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/155.png"}
65
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/156.png"}
66
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/157.png"}
67
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/158.png"}
68
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/159.png"}
69
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/16.png"}
70
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/160.png"}
71
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/161.png"}
72
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/162.png"}
73
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/163.png"}
74
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/164.png"}
75
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/165.png"}
76
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/166.png"}
77
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/167.png"}
78
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/168.png"}
79
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/169.png"}
80
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/17.png"}
81
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/170.png"}
82
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/171.png"}
83
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/172.png"}
84
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/173.png"}
85
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/174.png"}
86
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/175.png"}
87
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/176.png"}
88
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/177.png"}
89
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/178.png"}
90
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/179.png"}
91
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/18.png"}
92
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/180.png"}
93
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/181.png"}
94
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/182.png"}
95
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/183.png"}
96
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/184.png"}
97
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/185.png"}
98
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/186.png"}
99
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/187.png"}
100
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/188.png"}
101
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/189.png"}
102
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/19.png"}
103
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/190.png"}
104
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/191.png"}
105
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/192.png"}
106
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/193.png"}
107
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/194.png"}
108
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/195.png"}
109
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/196.png"}
110
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/197.png"}
111
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/198.png"}
112
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/199.png"}
113
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/2.png"}
114
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/20.png"}
115
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/200.png"}
116
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/201.png"}
117
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/202.png"}
118
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/203.png"}
119
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/204.png"}
120
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/205.png"}
121
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/206.png"}
122
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/207.png"}
123
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/208.png"}
124
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/209.png"}
125
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/21.png"}
126
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/210.png"}
127
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/211.png"}
128
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/212.png"}
129
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/213.png"}
130
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/214.png"}
131
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/215.png"}
132
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/216.png"}
133
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/217.png"}
134
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/218.png"}
135
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/219.png"}
136
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/22.png"}
137
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/220.png"}
138
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/221.png"}
139
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/222.png"}
140
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/223.png"}
141
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/224.png"}
142
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/225.png"}
143
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/226.png"}
144
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/227.png"}
145
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/228.png"}
146
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/229.png"}
147
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/23.png"}
148
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/230.png"}
149
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/231.png"}
150
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/232.png"}
151
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/233.png"}
152
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/234.png"}
153
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/235.png"}
154
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/236.png"}
155
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/237.png"}
156
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/238.png"}
157
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/239.png"}
158
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/24.png"}
159
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/240.png"}
160
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/241.png"}
161
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/242.png"}
162
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/243.png"}
163
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/244.png"}
164
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/245.png"}
165
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/246.png"}
166
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/247.png"}
167
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/248.png"}
168
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/249.png"}
169
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/25.png"}
170
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/250.png"}
171
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/251.png"}
172
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/252.png"}
173
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/253.png"}
174
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/254.png"}
175
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/255.png"}
176
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/256.png"}
177
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/257.png"}
178
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/258.png"}
179
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/259.png"}
180
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/26.png"}
181
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/260.png"}
182
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/261.png"}
183
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/262.png"}
184
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/263.png"}
185
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/264.png"}
186
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/265.png"}
187
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/266.png"}
188
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/267.png"}
189
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/268.png"}
190
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/269.png"}
191
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/27.png"}
192
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/270.png"}
193
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/271.png"}
194
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/272.png"}
195
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/273.png"}
196
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/274.png"}
197
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/275.png"}
198
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/276.png"}
199
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/277.png"}
200
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/278.png"}
201
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/279.png"}
202
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/28.png"}
203
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/280.png"}
204
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/281.png"}
205
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/282.png"}
206
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/283.png"}
207
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/284.png"}
208
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/285.png"}
209
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/286.png"}
210
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/287.png"}
211
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/288.png"}
212
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/289.png"}
213
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/29.png"}
214
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/290.png"}
215
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/291.png"}
216
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/292.png"}
217
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/293.png"}
218
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/294.png"}
219
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/295.png"}
220
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/296.png"}
221
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/297.png"}
222
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/298.png"}
223
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/299.png"}
224
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/3.png"}
225
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/30.png"}
226
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/300.png"}
227
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/301.png"}
228
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/302.png"}
229
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/303.png"}
230
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/304.png"}
231
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/305.png"}
232
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/306.png"}
233
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/307.png"}
234
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/308.png"}
235
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/309.png"}
236
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/31.png"}
237
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/310.png"}
238
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/311.png"}
239
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/312.png"}
240
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/313.png"}
241
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/314.png"}
242
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/315.png"}
243
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/316.png"}
244
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/317.png"}
245
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/318.png"}
246
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/319.png"}
247
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/32.png"}
248
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/320.png"}
249
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/321.png"}
250
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/322.png"}
251
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/323.png"}
252
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/324.png"}
253
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/325.png"}
254
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/326.png"}
255
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/327.png"}
256
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/328.png"}
257
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/329.png"}
258
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/33.png"}
259
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/330.png"}
260
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/331.png"}
261
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/332.png"}
262
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/333.png"}
263
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/334.png"}
264
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/335.png"}
265
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/336.png"}
266
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/337.png"}
267
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/338.png"}
268
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/339.png"}
269
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/34.png"}
270
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/340.png"}
271
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/341.png"}
272
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/342.png"}
273
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/343.png"}
274
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/344.png"}
275
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/345.png"}
276
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/346.png"}
277
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/347.png"}
278
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/348.png"}
279
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/349.png"}
280
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/35.png"}
281
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/350.png"}
282
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/351.png"}
283
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/352.png"}
284
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/353.png"}
285
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/354.png"}
286
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/355.png"}
287
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/356.png"}
288
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/357.png"}
289
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/358.png"}
290
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/359.png"}
291
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/36.png"}
292
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/360.png"}
293
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/361.png"}
294
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/362.png"}
295
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/363.png"}
296
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/364.png"}
297
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/365.png"}
298
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/366.png"}
299
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/367.png"}
300
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/368.png"}
301
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/369.png"}
302
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/37.png"}
303
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/370.png"}
304
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/371.png"}
305
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/372.png"}
306
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/373.png"}
307
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/374.png"}
308
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/375.png"}
309
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/376.png"}
310
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/377.png"}
311
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/378.png"}
312
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/379.png"}
313
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/38.png"}
314
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/380.png"}
315
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/381.png"}
316
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/382.png"}
317
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/383.png"}
318
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/384.png"}
319
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/385.png"}
320
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/386.png"}
321
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/387.png"}
322
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/388.png"}
323
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/389.png"}
324
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/39.png"}
325
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/390.png"}
326
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/391.png"}
327
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/392.png"}
328
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/393.png"}
329
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/394.png"}
330
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/395.png"}
331
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/396.png"}
332
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/397.png"}
333
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/398.png"}
334
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/399.png"}
335
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/4.png"}
336
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/40.png"}
337
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/400.png"}
338
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/401.png"}
339
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/402.png"}
340
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/403.png"}
341
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/404.png"}
342
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/405.png"}
343
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/406.png"}
344
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/407.png"}
345
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/408.png"}
346
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/409.png"}
347
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/41.png"}
348
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/410.png"}
349
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/411.png"}
350
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/412.png"}
351
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/413.png"}
352
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/414.png"}
353
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/415.png"}
354
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/416.png"}
355
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/417.png"}
356
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/418.png"}
357
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/419.png"}
358
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/42.png"}
359
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/420.png"}
360
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/421.png"}
361
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/422.png"}
362
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/423.png"}
363
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/424.png"}
364
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/425.png"}
365
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/426.png"}
366
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/427.png"}
367
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/428.png"}
368
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/429.png"}
369
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/43.png"}
370
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/430.png"}
371
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/431.png"}
372
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/432.png"}
373
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/433.png"}
374
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/434.png"}
375
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/435.png"}
376
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/436.png"}
377
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/437.png"}
378
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/438.png"}
379
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/439.png"}
380
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/44.png"}
381
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/440.png"}
382
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/441.png"}
383
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/442.png"}
384
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/443.png"}
385
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/444.png"}
386
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/445.png"}
387
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/446.png"}
388
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/447.png"}
389
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/448.png"}
390
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/449.png"}
391
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/45.png"}
392
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/450.png"}
393
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/451.png"}
394
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/452.png"}
395
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/453.png"}
396
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/454.png"}
397
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/455.png"}
398
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/456.png"}
399
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/457.png"}
400
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/458.png"}
401
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/459.png"}
402
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/46.png"}
403
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/460.png"}
404
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/461.png"}
405
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/462.png"}
406
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/463.png"}
407
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/464.png"}
408
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/465.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/466.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/467.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/468.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/469.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/47.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/470.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/471.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/472.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/473.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/474.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/475.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/476.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/477.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/478.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/479.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/48.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/480.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/481.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/482.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/483.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/484.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/485.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/486.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/487.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/488.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/489.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/49.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/490.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/491.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/492.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/493.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/494.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/495.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/496.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/497.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/498.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/499.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/5.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/50.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/500.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/501.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/502.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/503.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/504.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/505.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/506.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/507.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/508.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/509.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/51.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/510.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/511.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/512.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/513.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/514.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/515.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/516.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/517.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/518.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/519.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/52.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/520.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/521.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/522.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/523.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/524.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/525.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/526.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/527.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/528.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/529.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/53.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/530.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/531.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/532.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/533.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/534.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/535.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/536.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/537.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/538.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/539.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/54.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/540.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/541.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/542.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/543.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/544.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/545.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/546.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/547.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/548.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/549.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/55.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/550.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/551.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/552.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/553.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/554.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/555.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/556.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/557.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/558.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/559.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/56.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/560.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/561.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/562.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/563.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/564.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/565.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/566.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/567.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/568.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/569.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/57.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/570.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/571.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/572.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/573.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/574.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/575.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/576.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/577.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/578.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/579.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/58.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/580.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/581.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/582.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/583.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/584.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/585.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/586.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/587.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/588.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/589.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/59.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/590.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/591.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/592.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/593.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/594.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/595.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/596.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/597.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/598.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/599.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/6.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/60.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/600.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/601.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/602.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/603.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/604.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/605.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/606.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/607.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/608.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/609.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/61.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/610.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/611.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/612.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/613.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/614.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/615.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/616.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/617.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/618.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/619.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/62.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/620.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/621.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/622.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/623.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/624.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/625.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/626.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/627.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/628.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/629.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/63.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/630.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/631.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/632.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/633.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/634.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/635.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/636.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/637.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/638.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/639.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/64.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/640.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/641.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/642.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/643.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/644.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/645.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/646.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/647.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/648.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/649.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/65.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/650.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/651.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/652.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/653.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/654.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/655.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/656.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/657.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/658.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/659.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/66.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/660.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/661.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/662.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/663.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/664.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/665.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/666.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/667.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/668.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/669.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/67.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/670.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/671.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/672.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/673.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/674.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/675.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/676.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/677.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/678.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/679.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/68.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/680.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/681.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/682.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/683.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/684.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/685.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/686.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/687.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/688.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/689.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/69.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/690.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/691.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/692.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/693.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/694.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/695.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/696.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/697.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/698.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/699.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/7.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/70.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/700.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/701.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/702.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/703.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/704.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/705.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/706.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/707.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/708.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/709.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/71.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/710.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/711.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/712.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/713.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/714.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/715.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/716.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/717.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/718.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/719.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/72.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/720.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/721.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/722.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/723.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/724.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/725.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/726.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/727.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/728.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/729.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/73.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/730.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/731.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/732.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/733.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/734.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/735.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/736.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/737.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/738.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/739.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/74.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/740.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/741.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/742.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/743.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/744.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/745.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/746.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/747.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/748.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/749.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/75.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/750.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/751.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/752.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/753.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/754.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/755.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/756.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/757.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/758.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/759.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/76.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/760.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/761.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/762.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/763.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/764.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/765.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/766.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/767.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/768.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/769.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/77.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/770.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/771.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/772.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/773.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/774.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/775.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/776.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/777.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/778.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/779.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/78.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/780.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/781.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/782.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/783.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/784.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/785.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/786.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/787.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/788.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/789.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/79.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/790.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/791.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/792.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/793.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/794.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/795.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/796.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/797.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/798.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/799.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/8.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/80.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/800.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/801.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/802.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/803.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/804.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/805.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/806.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/807.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/808.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/809.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/81.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/810.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/811.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/812.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/813.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/814.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/815.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/816.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/817.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/818.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/819.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/82.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/820.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/821.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/822.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/823.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/824.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/825.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/826.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/827.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/828.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/829.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/83.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/830.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/831.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/832.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/833.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/834.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/835.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/836.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/837.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/838.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/839.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/84.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/840.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/841.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/842.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/843.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/844.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/845.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/846.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/847.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/848.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/849.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/85.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/850.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/851.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/852.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/853.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/854.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/855.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/856.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/857.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/858.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/859.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/86.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/860.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/861.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/862.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/863.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/864.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/865.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/866.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/867.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/868.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/869.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/87.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/870.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/871.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/872.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/873.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/874.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/875.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/876.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/877.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/878.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/879.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/88.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/89.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/9.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/90.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/91.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/92.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/93.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/94.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/95.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/96.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/97.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/98.png"}
+ {"image_path": "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image/99.png"}