import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import argparse

from tokenizer.tokenizer_image.vq_model import VQ_models
from autoregressive.models.gpt_hf import GPT_models_HF, TransformerHF

device = "cuda" if torch.cuda.is_available() else "cpu"

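# Convert a raw GPT checkpoint to the Hugging Face format: build the HF
# wrapper, load the weights, push them to the Hub, and reload to verify.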
def main(args):
    assert torch.cuda.is_available(), "This script requires at least one GPU; sample.py supports CPU-only usage."
    torch.set_grad_enabled(False)

    precision = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.precision]
    # One image is a latent_size x latent_size grid of VQ tokens, so the
    # model's block size (context length) is latent_size ** 2.
    latent_size = args.image_size // args.downsample_size
    gpt_model = GPT_models_HF[args.gpt_model](
        vocab_size=args.codebook_size,
        block_size=latent_size ** 2,
        num_classes=args.num_classes,
        cls_token_num=args.cls_token_num,
        model_type=args.gpt_type,
    ).to(device=device, dtype=precision)

    # The state dict may be nested under different keys depending on how the
    # checkpoint was saved.
    checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
    if args.from_fsdp:
        model_weight = checkpoint
    elif "model" in checkpoint:
        model_weight = checkpoint["model"]
    elif "module" in checkpoint:
        model_weight = checkpoint["module"]
    elif "state_dict" in checkpoint:
        model_weight = checkpoint["state_dict"]
    else:
        raise Exception("please check the model weight; you may need to add --from-fsdp to the command")

    # strict=False ignores keys that do not line up exactly between the raw
    # checkpoint and the HF module.
    gpt_model.load_state_dict(model_weight, strict=False)
    gpt_model.eval()
    del checkpoint

    # Upload the converted model to the Hugging Face Hub, then reload it from
    # the Hub to verify the round trip.
    repo_id = f"FoundationVision/{args.gpt_model}-{args.image_size}"
    gpt_model.push_to_hub(repo_id)

    model = TransformerHF.from_pretrained(repo_id)
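    # Sanity check, a minimal sketch: confirm the reloaded model exposes the
    # same parameter shapes. The shared key names are an assumption, not
    # something the original script checks.
    hub_state = model.state_dict()
    for name, param in gpt_model.state_dict().items():
        if name in hub_state:
            assert param.shape == hub_state[name].shape, f"shape mismatch: {name}"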

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpt-model", type=str, choices=list(GPT_models_HF.keys()), default="GPT-B")
    parser.add_argument("--gpt-ckpt", type=str, default=None)
    parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="c2i", help="class-conditional or text-conditional")
    parser.add_argument("--from-fsdp", action='store_true')
    parser.add_argument("--cls-token-num", type=int, default=1, help="max token number of condition input")
    parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
    parser.add_argument("--compile", action='store_true', default=True)
    parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
    parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
    parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
    parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
    parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=384)
    parser.add_argument("--image-size-eval", type=int, choices=[256, 384, 512], default=256)
    parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
    parser.add_argument("--num-classes", type=int, default=1000)
    args = parser.parse_args()
    main(args)
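
# Example invocation (a sketch; the script name and checkpoint path are
# hypothetical, substitute your own):
#   python <this_script>.py --gpt-model GPT-B --gpt-ckpt /path/to/ckpt.pt --image-size 384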