# Modified from:
# fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/extract_features.py
import torch
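# TF32 matmuls/convolutions give a large speedup on Ampere+ GPUs at negligible
# precision cost for feature extraction.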
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.distributed as dist
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
import numpy as np
from PIL import Image
import argparse
import os
import json
from utils.distributed import init_distributed_mode
from dataset.augmentation import center_crop_arr
from tokenizer.tokenizer_image.vq_model import VQ_models

#################################################################################
#                             Data Helper Functions                             #
#################################################################################
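# CustomDataset indexes images from the .jsonl list files in lst_dir (sorted by
# name and sliced to [start, end] inclusive). Each line is expected to be a JSON
# object with at least an "image_path" field, e.g.:
#   {"image_path": "/path/to/image_0001.jpg"}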
class CustomDataset(Dataset):
    def __init__(self, lst_dir, start, end, transform):
        img_path_list = []
        for lst_name in sorted(os.listdir(lst_dir))[start: end + 1]:
            if not lst_name.endswith('.jsonl'):
                continue
            file_path = os.path.join(lst_dir, lst_name)
            # Output subdirectory name: the list's filename without its extension.
            code_dir = lst_name.split('.')[0]
            with open(file_path, 'r') as file:
                for line_idx, line in enumerate(file):
                    data = json.loads(line)
                    img_path = data['image_path']
                    img_path_list.append((img_path, code_dir, line_idx))
        self.img_path_list = img_path_list
        self.transform = transform

    def __len__(self):
        return len(self.img_path_list)

    def __getitem__(self, index):
        img_path, code_dir, code_name = self.img_path_list[index]
        img = Image.open(img_path).convert("RGB")
        if self.transform is not None:
            img = self.transform(img)
        return img, code_dir, code_name

#################################################################################
#                                Extraction Loop                                #
#################################################################################
def main(args):
    """
    Extracts discrete VQ codes for every image listed in the .jsonl lists and
    saves them to disk as .npy files.
    """
    assert torch.cuda.is_available(), "Code extraction currently requires at least one GPU."

    # Setup DDP:
    init_distributed_mode(args)
    rank = dist.get_rank()
    device = rank % torch.cuda.device_count()
    seed = args.global_seed * dist.get_world_size() + rank
    torch.manual_seed(seed)
    torch.cuda.set_device(device)
    print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")

    # Setup a feature folder:
    if rank == 0:
        os.makedirs(args.code_path, exist_ok=True)

    # Create and load the VQ model:
    vq_model = VQ_models[args.vq_model](
        codebook_size=args.codebook_size,
        codebook_embed_dim=args.codebook_embed_dim)
    vq_model.to(device)
    vq_model.eval()
    checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
    vq_model.load_state_dict(checkpoint["model"])
    del checkpoint

    # Setup data:
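    # center_crop_arr center-crops each image to args.image_size; Normalize with
    # mean/std 0.5 maps pixels from [0, 1] to [-1, 1], the input range the VQ
    # model expects.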
    transform = transforms.Compose([
        transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
    ])
print(f"Dataset is preparing...")
dataset = CustomDataset(args.data_path, args.data_start, args.data_end, transform=transform)
sampler = DistributedSampler(
dataset,
num_replicas=dist.get_world_size(),
rank=rank,
shuffle=False,
seed=args.global_seed
)
loader = DataLoader(
dataset,
batch_size=1, # important!
shuffle=False,
sampler=sampler,
num_workers=args.num_workers,
pin_memory=True,
drop_last=False
)
print(f"Dataset contains {len(dataset):,} images")
    # Encode each image to discrete codes and save it to disk:
    for img, code_dir, code_name in loader:
        img = img.to(device)
        with torch.no_grad():
            _, _, [_, _, indices] = vq_model.encode(img)
        codes = indices.reshape(img.shape[0], -1)
        x = codes.detach().cpu().numpy()  # (1, (args.image_size // 16) ** 2) for VQ-16's 16x downsampling
        os.makedirs(os.path.join(args.code_path, code_dir[0]), exist_ok=True)
        np.save(os.path.join(args.code_path, code_dir[0], '{}.npy'.format(code_name.item())), x)
        print(code_name.item())

    dist.destroy_process_group()
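
# On disk, codes are written to {code_path}/{list_name}/{line_idx}.npy: one file
# per image, named by the image's line index within its source .jsonl list.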

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-path", type=str, required=True)
    parser.add_argument("--code-path", type=str, required=True)
    parser.add_argument("--data-start", type=int, required=True)
    parser.add_argument("--data-end", type=int, required=True)
    parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
    parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
    parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
    parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
    parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=512)
    parser.add_argument("--global-seed", type=int, default=0)
    parser.add_argument("--num-workers", type=int, default=24)
    args = parser.parse_args()
    main(args)
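
# Example launch (hypothetical paths and script name; adjust to your setup):
# torchrun --nproc_per_node=8 extract_codes.py \
#     --data-path /path/to/jsonl_lists --code-path /path/to/output_codes \
#     --data-start 0 --data-end 9 --vq-ckpt /path/to/vq_model.pt --image-size 512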