# autoregressive/train/extract_file_imagenet.py
# Modified from:
# fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/extract_features.py
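#
# Example multi-GPU launch (a sketch, assuming a torchrun-style launcher and that
# utils.distributed.init_distributed_mode reads the usual RANK/WORLD_SIZE env vars;
# all paths below are placeholders):
#   torchrun --nproc_per_node=8 autoregressive/train/extract_file_imagenet.py \
#       --data-path /path/to/imagenet/train \
#       --code-path /path/to/output \
#       --vq-ckpt /path/to/vq_model.pt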
import argparse
import os
import sys
# os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'

import cv2
import numpy as np
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
from tqdm import tqdm

current_directory = os.getcwd()
sys.path.append(current_directory)
from utils.distributed import init_distributed_mode
from dataset.augmentation import center_crop_arr
from dataset.build import build_dataset
from tokenizer.tokenizer_image.vq_model import VQ_models
from condition.canny import CannyDetector
from condition.midas.depth import MidasDetector
#################################################################################
#                              Feature Extraction                               #
#################################################################################
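# Tokenize each ImageNet image with a pretrained VQ model and cache the code
# indices, class label, Canny edge map, and MiDaS depth map as per-sample .npy
# files (plus .png previews of the first view) for later training.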
def main(args):
    assert torch.cuda.is_available(), "Feature extraction currently requires at least one GPU."

    # Setup DDP:
    if not args.debug:
        init_distributed_mode(args)
        rank = dist.get_rank()
        device = rank % torch.cuda.device_count()
        seed = args.global_seed * dist.get_world_size() + rank
        torch.manual_seed(seed)
        torch.cuda.set_device(device)
        print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
    else:
        device = 'cuda'
        rank = 0
    # Setup a feature folder:
    if args.debug or rank == 0:
        os.makedirs(args.code_path, exist_ok=True)
        os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_codes'), exist_ok=True)
        os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_labels'), exist_ok=True)
        os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_canny_imagesnpy'), exist_ok=True)
        os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_canny_images'), exist_ok=True)
        os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_depth_imagesnpy'), exist_ok=True)
        os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_depth_images'), exist_ok=True)
    # Create and load the VQ model:
    vq_model = VQ_models[args.vq_model](
        codebook_size=args.codebook_size,
        codebook_embed_dim=args.codebook_embed_dim)
    vq_model.to(device)
    vq_model.eval()
    checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
    vq_model.load_state_dict(checkpoint["model"])
    del checkpoint
    # Setup data:
    if args.ten_crop:
        crop_size = int(args.image_size * args.crop_range)
        transform = transforms.Compose([
            transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
            transforms.TenCrop(args.image_size),  # this is a tuple of PIL Images
            transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),  # returns a 4D tensor
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
        ])
    else:
        crop_size = args.image_size
        transform = transforms.Compose([
            transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
        ])
    dataset = build_dataset(args, transform=transform)
    if not args.debug:
        sampler = DistributedSampler(
            dataset,
            num_replicas=dist.get_world_size(),
            rank=rank,
            shuffle=False,
            seed=args.global_seed
        )
    else:
        sampler = None
    loader = DataLoader(
        dataset,
        batch_size=1,  # important: each iteration saves exactly one sample's files
        shuffle=False,
        sampler=sampler,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False
    )
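    # Output files are indexed as `rank + step * world_size`: with shuffle=False the
    # DistributedSampler hands rank r dataset indices r, r+W, r+2W, ..., so the file
    # index matches the dataset index and ranks never collide (up to the sampler's
    # padding when the dataset size is not divisible by the world size).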
    apply_canny = CannyDetector()
    depth_model = MidasDetector(device=device)

    total = 0
    for x, y in tqdm(loader):
        x = x.to(device)
        if args.ten_crop:
            x_all = x.flatten(0, 1)
            num_aug = 10
        else:
            x_flip = torch.flip(x, dims=[-1])
            x_all = torch.cat([x, x_flip])
            num_aug = 2
        y = y.to(device)

        # Extract Canny edges and MiDaS depth for every augmented view:
        canny = []
        depths = []
        for i in range(x_all.shape[0]):
            img_uint8 = (255 * (x_all[i] * 0.5 + 0.5)).cpu().numpy().transpose(1, 2, 0).astype(np.uint8)
            canny.append(apply_canny(img_uint8, low_threshold=args.min_threshold,
                                     high_threshold=args.max_threshold)[None, None, ...])
            img = (255 * (x_all[i] * 0.5 + 0.5)).permute(1, 2, 0)
            depth = depth_model(img)
            depths.append(depth[None, None, ...])
        depths = np.concatenate(depths, axis=0)
        cannys = np.concatenate(canny, axis=0)

        train_steps = rank + total
        np.save(f'{args.code_path}/{args.dataset}{args.image_size}_canny_imagesnpy/{train_steps}.npy', cannys)
        np.save(f'{args.code_path}/{args.dataset}{args.image_size}_depth_imagesnpy/{train_steps}.npy', depths)

        with torch.no_grad():
            _, _, [_, _, indices] = vq_model.encode(x_all)
        codes = indices.reshape(x.shape[0], num_aug, -1)

        x = codes.detach().cpu().numpy()  # (1, num_aug, args.image_size//16 * args.image_size//16)
        y = y.detach().cpu().numpy()  # (1,)
        cv2.imwrite(f'{args.code_path}/{args.dataset}{args.image_size}_canny_images/{train_steps}.png', cannys[0, 0])
        cv2.imwrite(f'{args.code_path}/{args.dataset}{args.image_size}_depth_images/{train_steps}.png', depths[0, 0])
        np.save(f'{args.code_path}/{args.dataset}{args.image_size}_codes/{train_steps}.npy', x)
        np.save(f'{args.code_path}/{args.dataset}{args.image_size}_labels/{train_steps}.npy', y)

        if not args.debug:
            total += dist.get_world_size()
        else:
            total += 1

    if not args.debug:
        dist.destroy_process_group()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-path", type=str, required=True)
    parser.add_argument("--code-path", type=str, required=True)
    parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
    parser.add_argument("--vq-ckpt", type=str, required=True, help="ckpt path for vq model")
    parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
    parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
    parser.add_argument("--dataset", type=str, default='imagenet')
    parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
    parser.add_argument("--ten-crop", action='store_true', help="whether to use ten-crop augmentation instead of a horizontal flip")
    parser.add_argument("--crop-range", type=float, default=1.1, help="expanding range of center crop")
    parser.add_argument("--global-seed", type=int, default=0)
    parser.add_argument("--num-workers", type=int, default=24)
    parser.add_argument("--debug", action='store_true')
    parser.add_argument("--min-threshold", type=int, default=100, help="low threshold for the Canny edge detector")
    parser.add_argument("--max-threshold", type=int, default=200, help="high threshold for the Canny edge detector")
    args = parser.parse_args()
    main(args)
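# The cached arrays can later be read back like this (a sketch, assuming the default
# imagenet/256 settings and a 16x-downsampling VQ model; `code_path` is a placeholder):
#   codes = np.load(f'{code_path}/imagenet256_codes/0.npy')            # (1, num_aug, 256) token indices
#   label = np.load(f'{code_path}/imagenet256_labels/0.npy')           # (1,) class id
#   canny = np.load(f'{code_path}/imagenet256_canny_imagesnpy/0.npy')  # (num_aug, 1, 256, 256)
#   depth = np.load(f'{code_path}/imagenet256_depth_imagesnpy/0.npy')  # (num_aug, 1, 256, 256)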