# Modified from:
#   fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/extract_features.py
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
import numpy as np
import argparse
import os
from tqdm import tqdm

from utils.distributed import init_distributed_mode
from dataset.augmentation import center_crop_arr
from dataset.build import build_dataset
from tokenizer.tokenizer_image.vq_model import VQ_models


#################################################################################
#                               Extraction Loop                                 #
#################################################################################

def main(args):
    assert torch.cuda.is_available(), "Code extraction currently requires at least one GPU."

    # Setup DDP:
    if not args.debug:
        init_distributed_mode(args)
        rank = dist.get_rank()
        device = rank % torch.cuda.device_count()
        seed = args.global_seed * dist.get_world_size() + rank
        torch.manual_seed(seed)
        torch.cuda.set_device(device)
        print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
    else:
        device = 'cuda'
        rank = 0

    # Setup output folders for the extracted codes and labels:
    if args.debug or rank == 0:
        os.makedirs(args.code_path, exist_ok=True)
        os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_codes'), exist_ok=True)
        os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_labels'), exist_ok=True)

    # Create and load the VQ tokenizer:
    vq_model = VQ_models[args.vq_model](
        codebook_size=args.codebook_size,
        codebook_embed_dim=args.codebook_embed_dim)
    vq_model.to(device)
    vq_model.eval()
    checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
    vq_model.load_state_dict(checkpoint["model"])
    del checkpoint

    # Setup data:
    if args.ten_crop:
        crop_size = int(args.image_size * args.crop_range)
        transform = transforms.Compose([
            transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
            transforms.TenCrop(args.image_size),  # this is a tuple of PIL Images
            transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),  # returns a 4D tensor
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
        ])
    else:
        crop_size = args.image_size
        transform = transforms.Compose([
            transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
        ])
    dataset = build_dataset(args, transform=transform)
    if not args.debug:
        sampler = DistributedSampler(
            dataset,
            num_replicas=dist.get_world_size(),
            rank=rank,
            shuffle=False,
            seed=args.global_seed
        )
    else:
        sampler = None
    loader = DataLoader(
        dataset,
        batch_size=1,  # important!
        shuffle=False,
        sampler=sampler,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False
    )

    total = 0
    for x, y in tqdm(loader):
        x = x.to(device)
        if args.ten_crop:
            x_all = x.flatten(0, 1)  # (1, 10, C, H, W) -> (10, C, H, W)
            num_aug = 10
        else:
            # keep both the original image and its horizontal flip
            x_flip = torch.flip(x, dims=[-1])
            x_all = torch.cat([x, x_flip])
            num_aug = 2
        y = y.to(device)
        with torch.no_grad():
            # the last element of the returned info is the flat codebook index per token
            _, _, [_, _, indices] = vq_model.encode(x_all)
        codes = indices.reshape(x.shape[0], num_aug, -1)

        x = codes.detach().cpu().numpy()  # (1, num_aug, args.image_size//16 * args.image_size//16)
        # unique per-rank file index: rank r writes files r, r + world_size, r + 2*world_size, ...
        train_steps = rank + total
        np.save(f'{args.code_path}/{args.dataset}{args.image_size}_codes/{train_steps}.npy', x)

        y = y.detach().cpu().numpy()  # (1,)
        np.save(f'{args.code_path}/{args.dataset}{args.image_size}_labels/{train_steps}.npy', y)

        if not args.debug:
            total += dist.get_world_size()
        else:
            total += 1
        # print(total)

    if not args.debug:
        dist.destroy_process_group()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-path", type=str, required=True)
    parser.add_argument("--code-path", type=str, required=True)
    parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
    parser.add_argument("--vq-ckpt", type=str, required=True, help="ckpt path for vq model")
    parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
    parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
    parser.add_argument("--dataset", type=str, default='imagenet')
    parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
    parser.add_argument("--ten-crop", action='store_true', help="whether to use ten-crop augmentation instead of a single center crop")
    parser.add_argument("--crop-range", type=float, default=1.1, help="expanding range of center crop")
    parser.add_argument("--global-seed", type=int, default=0)
    parser.add_argument("--num-workers", type=int, default=24)
    parser.add_argument("--debug", action='store_true')
    args = parser.parse_args()
    main(args)
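
# ------------------------------------------------------------------------------
# Usage sketch (not part of the original script). It assumes this file is saved
# as extract_codes.py and that utils.distributed.init_distributed_mode reads the
# standard torchrun environment variables; all paths below are placeholders.
#
#   torchrun --nproc_per_node=8 extract_codes.py \
#       --data-path /path/to/imagenet/train \
#       --code-path /path/to/output \
#       --vq-ckpt /path/to/vq_checkpoint.pt \
#       --ten-crop
#
# Each saved sample can then be loaded back with numpy; e.g. for a 256x256 run
# with the default VQ-16 tokenizer (16x downsample, so 16 * 16 = 256 tokens per crop):
#
#   codes = np.load('/path/to/output/imagenet256_codes/0.npy')    # (1, num_aug, 256)
#   label = np.load('/path/to/output/imagenet256_labels/0.npy')   # (1,)
# ------------------------------------------------------------------------------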