slz1 committed
Commit 33b03a3 · verified · 1 Parent(s): bf5834d

Add files using upload-large-folder tool

Files changed (50)
  1. autoregressive/sample/sample_c2i.py +151 -0
  2. autoregressive/sample/sample_t2i.py +225 -0
  3. autoregressive/sample/sample_t2i_MR.py +238 -0
  4. autoregressive/serve/README.md +58 -0
  5. autoregressive/serve/fake_json/GPT-3B.json +28 -0
  6. autoregressive/serve/fake_json/GPT-B.json +28 -0
  7. autoregressive/serve/fake_json/GPT-L.json +28 -0
  8. autoregressive/serve/fake_json/GPT-XL.json +28 -0
  9. autoregressive/serve/fake_json/GPT-XXL.json +28 -0
  10. autoregressive/serve/gpt_model.py +419 -0
  11. autoregressive/serve/gpu_executor.py +201 -0
  12. autoregressive/serve/llm_engine.py +671 -0
  13. autoregressive/serve/model_runner.py +1232 -0
  14. autoregressive/serve/sample_c2i.py +98 -0
  15. autoregressive/serve/sampler.py +868 -0
  16. autoregressive/serve/worker.py +349 -0
  17. autoregressive/test/metric.py +97 -0
  18. autoregressive/test/test_c2i.py +267 -0
  19. autoregressive/test/test_ssim.py +21 -0
  20. autoregressive/test/test_t2i.py +285 -0
  21. autoregressive/train/extract_codes_c2i.py +139 -0
  22. autoregressive/train/extract_codes_t2i.py +144 -0
  23. autoregressive/train/extract_file_ade.py +511 -0
  24. autoregressive/train/extract_file_cocostuff.py +228 -0
  25. autoregressive/train/extract_file_imagenet.py +168 -0
  26. autoregressive/train/extract_file_multigen.py +228 -0
  27. autoregressive/train/train_c2i.py +294 -0
  28. autoregressive/train/train_c2i_canny.py +306 -0
  29. autoregressive/train/train_c2i_depth.py +314 -0
  30. autoregressive/train/train_c2i_fsdp.py +390 -0
  31. tokenizer/tokenizer_image/__pycache__/vq_model.cpython-310.pyc +0 -0
  32. tokenizer/tokenizer_image/cache/vgg.pth +3 -0
  33. tokenizer/tokenizer_image/vq_model_hf.py +17 -0
  34. tokenizer/tokenizer_image/vq_train.py +323 -0
  35. tokenizer/vqgan/README.md +21 -0
  36. tokenizer/vqgan/configs/vqgan_imagenet_f16_1024.yaml +32 -0
  37. tokenizer/vqgan/configs/vqgan_imagenet_f16_16384.yaml +34 -0
  38. tokenizer/vqgan/configs/vqgan_openimage_f8_16384.yaml +20 -0
  39. tokenizer/vqgan/configs/vqgan_openimage_f8_256.yaml +20 -0
  40. tokenizer/vqgan/layer.py +372 -0
  41. tokenizer/vqgan/model.py +88 -0
  42. tokenizer/vqgan/quantize.py +229 -0
  43. tokenizer/vqgan/reconstruction_vqgan_ddp.py +215 -0
  44. tokenizer/vqgan/taming_vqgan_demo.py +68 -0
  45. tools/check_image_codes.py +55 -0
  46. tools/convert_pytorch_lightning_to_torch.py +25 -0
  47. tools/draw_figure.py +141 -0
  48. tools/imagenet_en_cn.py +1002 -0
  49. tools/push_gpt_to_hf.py +71 -0
  50. utils/data.py +22 -0
autoregressive/sample/sample_c2i.py ADDED
@@ -0,0 +1,151 @@
1
+ # Modified from:
2
+ # DiT: https://github.com/facebookresearch/DiT/blob/main/sample.py
3
+ import torch
4
+ torch.backends.cuda.matmul.allow_tf32 = True
5
+ torch.backends.cudnn.allow_tf32 = True
6
+ torch.set_float32_matmul_precision('high')
7
+ setattr(torch.nn.Linear, 'reset_parameters', lambda self: None)
8
+ setattr(torch.nn.LayerNorm, 'reset_parameters', lambda self: None)
9
+ from torchvision.utils import save_image
10
+ import os
11
+ import sys
12
+ current_directory = os.getcwd()
13
+ sys.path.append(current_directory)
14
+
15
+ from PIL import Image
16
+ import time
17
+ import argparse
18
+ from tokenizer.tokenizer_image.vq_model import VQ_models
19
+ from autoregressive.models.gpt import GPT_models
20
+ from autoregressive.models.generate import generate
21
+ from functools import partial
22
+ import torch.nn.functional as F
23
+ import numpy as np
24
+ import cv2
25
+
26
+
27
+ def main(args):
28
+ # Setup PyTorch:
29
+ torch.manual_seed(args.seed)
30
+ torch.backends.cudnn.deterministic = True
31
+ torch.backends.cudnn.benchmark = False
32
+ torch.set_grad_enabled(False)
33
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
34
+
35
+ # create and load model
36
+ vq_model = VQ_models[args.vq_model](
37
+ codebook_size=args.codebook_size,
38
+ codebook_embed_dim=args.codebook_embed_dim)
39
+ vq_model.to(device)
40
+ vq_model.eval()
41
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
42
+ vq_model.load_state_dict(checkpoint["model"])
43
+ del checkpoint
44
+ print(f"image tokenizer is loaded")
45
+
46
+ # create and load gpt model
47
+ precision = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.precision]
48
+ latent_size = args.image_size // args.downsample_size
49
+ gpt_model = GPT_models[args.gpt_model](
50
+ vocab_size=args.codebook_size,
51
+ block_size=latent_size ** 2,
52
+ num_classes=args.num_classes,
53
+ cls_token_num=args.cls_token_num,
54
+ model_type=args.gpt_type,
55
+ condition_token_num=args.condition_token_nums,
56
+ image_size=args.image_size
57
+ ).to(device=device, dtype=precision)
58
+
59
+ _, file_extension = os.path.splitext(args.gpt_ckpt)
60
+ if file_extension.lower() == '.safetensors':
61
+ from safetensors.torch import load_file
62
+ model_weight = load_file(args.gpt_ckpt)
63
+ gpt_model.load_state_dict(model_weight, strict=False)
64
+ gpt_model.eval()
65
+ else:
66
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
67
+ if "model" in checkpoint: # ddp
68
+ model_weight = checkpoint["model"]
69
+ elif "module" in checkpoint: # deepspeed
70
+ model_weight = checkpoint["module"]
71
+ elif "state_dict" in checkpoint:
72
+ model_weight = checkpoint["state_dict"]
73
+ else:
74
+ raise Exception("please check model weight")
75
+ gpt_model.load_state_dict(model_weight, strict=False)
76
+ gpt_model.eval()
77
+ del checkpoint
78
+ print(f"gpt model is loaded")
79
+
80
+ if args.compile:
81
+ print(f"compiling the model...")
82
+ gpt_model = torch.compile(
83
+ gpt_model,
84
+ mode="reduce-overhead",
85
+ fullgraph=True
86
+ ) # requires PyTorch 2.0 (optional)
87
+ else:
88
+ print(f"no need to compile model in demo")
89
+
90
+ condition_null = None
91
+ if args.condition_type == 'canny':
92
+ sample_list = [650, 2312, 15000, 48850] # canny
93
+ elif args.condition_type == 'depth':
94
+ sample_list = [101, 4351, 10601, 48901]
95
+
96
+ class_labels = [np.load(f"condition/example/c2i/{args.condition_type}/{i}.npy")[0] for i in sample_list]
97
+ condition_imgs = [np.array(Image.open((f"condition/example/c2i/{args.condition_type}/{i}.png")))[None,None,...] for i in sample_list]
98
+ condition_imgs = torch.from_numpy(np.concatenate(condition_imgs, axis=0)).to(device).to(torch.float32)/255
99
+ condition_imgs = 2*(condition_imgs-0.5)
100
+ print(condition_imgs.shape)
101
+ c_indices = torch.tensor(class_labels, device=device)
102
+ qzshape = [len(class_labels), args.codebook_embed_dim, latent_size, latent_size]
103
+ t1 = time.time()
104
+
105
+ index_sample = generate(
106
+ gpt_model, c_indices, latent_size ** 2, condition=condition_imgs.repeat(1,3,1,1).to(precision), condition_null=condition_null, condition_token_nums=args.condition_token_nums,
107
+ cfg_scale=args.cfg_scale, cfg_interval=args.cfg_interval,
108
+ temperature=args.temperature, top_k=args.top_k,
109
+ top_p=args.top_p, sample_logits=True,
110
+ )
111
+
112
+ sampling_time = time.time() - t1
113
+ print(f"gpt sampling takes about {sampling_time:.2f} seconds.")
114
+
115
+ t2 = time.time()
116
+ samples = vq_model.decode_code(index_sample, qzshape) # output value is between [-1, 1]
117
+ decoder_time = time.time() - t2
118
+ print(f"decoder takes about {decoder_time:.2f} seconds.")
119
+ # Save and display images:
120
+ condition_imgs = condition_imgs.repeat(1,3,1,1)
121
+ samples = torch.cat((condition_imgs[:4], samples[:4]),dim=0)
122
+ save_image(samples, f"sample/example/sample_{args.gpt_type}_{args.condition_type}.png", nrow=4, normalize=True, value_range=(-1, 1))
123
+
124
+
125
+
126
+ if __name__ == "__main__":
127
+ parser = argparse.ArgumentParser()
128
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-B")
129
+ parser.add_argument("--gpt-ckpt", type=str, default=None)
130
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="c2i", help="class-conditional or text-conditional")
131
+ parser.add_argument("--from-fsdp", action='store_true')
132
+ parser.add_argument("--cls-token-num", type=int, default=1, help="max token number of condition input")
133
+ parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
134
+ parser.add_argument("--compile", action='store_true', default=False)
135
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
136
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
137
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
138
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
139
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=256)
140
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
141
+ parser.add_argument("--num-classes", type=int, default=1000)
142
+ parser.add_argument("--cfg-scale", type=float, default=4.0)
143
+ parser.add_argument("--cfg-interval", type=float, default=-1)
144
+ parser.add_argument("--seed", type=int, default=0)
145
+ parser.add_argument("--top-k", type=int, default=2000,help="top-k value to sample with")
146
+ parser.add_argument("--temperature", type=float, default=1.0, help="temperature value to sample with")
147
+ parser.add_argument("--top-p", type=float, default=1.0, help="top-p value to sample with")
148
+ parser.add_argument("--condition-token-nums", type=int, default=0)
149
+ parser.add_argument("--condition-type", type=str, default='canny', choices=['canny', 'depth'])
150
+ args = parser.parse_args()
151
+ main(args)
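
> For readers skimming the diff, here is a minimal, self-contained sketch (not part of the commit) of the condition-image preprocessing that `sample_c2i.py` performs before calling `generate()`: the single-channel condition map is scaled to [0, 1], shifted to [-1, 1], and tiled to three channels. The file path below is hypothetical.

```python
import numpy as np
import torch
from PIL import Image

# hypothetical path; the script itself loads condition/example/c2i/<condition_type>/<idx>.png
img = np.array(Image.open("example_canny.png"))         # (H, W) uint8 condition map in [0, 255]
cond = torch.from_numpy(img[None, None, ...]).float()   # (1, 1, H, W)
cond = cond / 255                                        # scale to [0, 1]
cond = 2 * (cond - 0.5)                                  # shift to [-1, 1], the range the VQ decoder uses
cond = cond.repeat(1, 3, 1, 1)                           # tile the single channel to 3 channels
print(cond.shape, cond.min().item(), cond.max().item())
```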
autoregressive/sample/sample_t2i.py ADDED
@@ -0,0 +1,225 @@
1
+ import torch
2
+ torch.backends.cuda.matmul.allow_tf32 = True
3
+ torch.backends.cudnn.allow_tf32 = True
4
+ torch.set_float32_matmul_precision('high')
5
+ setattr(torch.nn.Linear, 'reset_parameters', lambda self: None) # disable default parameter init for faster speed
6
+ setattr(torch.nn.LayerNorm, 'reset_parameters', lambda self: None) # disable default parameter init for faster speed
7
+ from torchvision.utils import save_image
8
+
9
+ import os
10
+ import sys
11
+ current_directory = os.getcwd()
12
+ sys.path.append(current_directory)
13
+ import time
14
+ import argparse
15
+ from tokenizer.tokenizer_image.vq_model import VQ_models
16
+ from language.t5 import T5Embedder
17
+ from autoregressive.models.gpt import GPT_models
18
+ from autoregressive.models.gpt_t2i import GPT_models
19
+ from autoregressive.models.generate import generate
20
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
21
+ from dataset.t2i_control import build_t2i_control_code
22
+ from accelerate import Accelerator
23
+ from dataset.build import build_dataset
24
+ from pathlib import Path
25
+ from accelerate.utils import ProjectConfiguration, set_seed
26
+ import torch.nn.functional as F
27
+ from condition.canny import CannyDetector
28
+ from condition.hed import HEDdetector
29
+ import numpy as np
30
+ from PIL import Image
31
+ from condition.lineart import LineArt
32
+ import cv2
33
+ from transformers import DPTImageProcessor, DPTForDepthEstimation
34
+ def main(args):
35
+ # Setup PyTorch:
36
+ torch.manual_seed(args.seed)
37
+ torch.backends.cudnn.deterministic = True
38
+ torch.backends.cudnn.benchmark = False
39
+ torch.set_grad_enabled(False)
40
+ device = "cuda" if torch.cuda.is_available() else "cpu"
41
+
42
+ # create and load model
43
+ vq_model = VQ_models[args.vq_model](
44
+ codebook_size=args.codebook_size,
45
+ codebook_embed_dim=args.codebook_embed_dim)
46
+ vq_model.to(device)
47
+ vq_model.eval()
48
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
49
+ vq_model.load_state_dict(checkpoint["model"])
50
+ del checkpoint
51
+ print(f"image tokenizer is loaded")
52
+
53
+ # create and load gpt model
54
+ precision = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.precision]
55
+ latent_size = args.image_size // args.downsample_size
56
+ gpt_model = GPT_models[args.gpt_model](
57
+ block_size=latent_size ** 2,
58
+ cls_token_num=args.cls_token_num,
59
+ model_type=args.gpt_type,
60
+ condition_type=args.condition_type,
61
+ adapter_size=args.adapter_size,
62
+ ).to(device=device, dtype=precision)
63
+
64
+ _, file_extension = os.path.splitext(args.gpt_ckpt)
65
+ if file_extension.lower() == '.safetensors':
66
+ from safetensors.torch import load_file
67
+ model_weight = load_file(args.gpt_ckpt)
68
+ gpt_model.load_state_dict(model_weight, strict=False)
69
+ gpt_model.eval()
70
+ else:
71
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
72
+ if "model" in checkpoint: # ddp
73
+ model_weight = checkpoint["model"]
74
+ elif "module" in checkpoint: # deepspeed
75
+ model_weight = checkpoint["module"]
76
+ elif "state_dict" in checkpoint:
77
+ model_weight = checkpoint["state_dict"]
78
+ else:
79
+ raise Exception("please check model weight")
80
+ gpt_model.load_state_dict(model_weight, strict=False)
81
+ gpt_model.eval()
82
+ del checkpoint
83
+ print(f"gpt model is loaded")
84
+
85
+ if args.compile:
86
+ print(f"compiling the model...")
87
+ gpt_model = torch.compile(
88
+ gpt_model,
89
+ mode="reduce-overhead",
90
+ fullgraph=True
91
+ ) # requires PyTorch 2.0 (optional)
92
+ else:
93
+ print(f"no need to compile model in demo")
94
+
95
+ assert os.path.exists(args.t5_path)
96
+ t5_model = T5Embedder(
97
+ device=device,
98
+ local_cache=True,
99
+ cache_dir=args.t5_path,
100
+ dir_or_name=args.t5_model_type,
101
+ torch_dtype=precision,
102
+ model_max_length=args.t5_feature_max_len,
103
+ )
104
+
105
+
106
+ if args.condition_type == 'canny':
107
+ get_control = CannyDetector()
108
+ elif args.condition_type == 'hed':
109
+ get_control = HEDdetector().to(device).eval()
110
+ elif args.condition_type == 'lineart':
111
+ get_control = LineArt()
112
+ get_control.load_state_dict(torch.load('condition/ckpts/model.pth', map_location=torch.device('cpu')))
113
+ get_control.to(device)
114
+ elif args.condition_type == 'depth':
115
+ processor = DPTImageProcessor.from_pretrained("condition/ckpts/dpt_large")
116
+ model = DPTForDepthEstimation.from_pretrained("condition/ckpts/dpt_large").to(device)
117
+ with torch.no_grad():
118
+
119
+ condition_path = args.condition_path
120
+ if args.condition_type == 'seg':
121
+ condition_img = torch.from_numpy(np.array(Image.open(condition_path)))
122
+ condition_img = condition_img.permute(2,0,1).unsqueeze(0).repeat(2,1,1,1)
123
+ elif args.condition_type == 'canny':
124
+ condition_img = get_control(np.array(Image.open(condition_path)))
125
+ condition_img = torch.from_numpy(condition_img[None,None,...]).repeat(2,3,1,1)
126
+ elif args.condition_type == 'hed':
127
+ condition_img = get_control(torch.from_numpy(np.array(Image.open(condition_path))).permute(2,0,1).unsqueeze(0).to(device))
128
+ condition_img = condition_img.unsqueeze(1).repeat(2,3,1,1)
129
+ elif args.condition_type == 'lineart':
130
+ condition_img = get_control(torch.from_numpy(np.array(Image.open(condition_path))).permute(2,0,1).unsqueeze(0).to(device).float())
131
+ condition_img = 1 - condition_img
132
+ condition_img = condition_img.repeat(2,3,1,1) * 255
133
+ elif args.condition_type == 'depth':
134
+ images = Image.open(condition_path)
135
+ inputs = processor(images=images, return_tensors="pt", size=(512,512)).to(device)
136
+ outputs = model(**inputs)
137
+ condition_img = outputs.predicted_depth
138
+ condition_img = condition_img.unsqueeze(0).repeat(2,3,1,1)
139
+ condition_img = (condition_img * 255 / condition_img.max())
140
+ # normalize the loaded condition image,
141
+ condition_img = condition_img.to(device)
142
+ condition_img = 2*(condition_img/255 - 0.5) # map values into [-1, 1] to match the generator's expected input range
143
+ prompts = [args.prompt if args.prompt is not None else "a high-quality image"]
144
+ prompts = prompts * 2
145
+ # obtain the text-prompt embedding vectors caption_embs via the T5Embedder model
146
+ caption_embs, emb_masks = t5_model.get_text_embeddings(prompts)
147
+
148
+ if not args.no_left_padding:
149
+ print(f"processing left-padding...")
150
+ # a naive way to implement left-padding
151
+ new_emb_masks = torch.flip(emb_masks, dims=[-1])
152
+ new_caption_embs = []
153
+ for idx, (caption_emb, emb_mask) in enumerate(zip(caption_embs, emb_masks)):
154
+ valid_num = int(emb_mask.sum().item())
155
+ print(f' prompt {idx} token len: {valid_num}')
156
+ new_caption_emb = torch.cat([caption_emb[valid_num:],caption_emb[:valid_num]])
157
+ new_caption_embs.append(new_caption_emb)
158
+ new_caption_embs = torch.stack(new_caption_embs)
159
+ else:
160
+ new_caption_embs, new_emb_masks = caption_embs, emb_masks
161
+ c_indices = new_caption_embs * new_emb_masks[:,:, None]
162
+ c_emb_masks = new_emb_masks
163
+ qzshape = [len(c_indices), args.codebook_embed_dim, args.image_H//args.downsample_size, args.image_W//args.downsample_size]
164
+ t1 = time.time()
165
+ # generate() uses these inputs (text tokens c_indices and the image condition) to produce the corresponding image tokens
166
+ index_sample = generate(
167
+ gpt_model, c_indices, (args.image_H//args.downsample_size)*(args.image_W//args.downsample_size),#latent_size ** 2,
168
+ c_emb_masks, condition=condition_img.to(precision),
169
+ cfg_scale=args.cfg_scale,
170
+ temperature=args.temperature, top_k=args.top_k,
171
+ top_p=args.top_p, sample_logits=True,
172
+ control_strength=args.control_strength,
173
+ )
174
+ sampling_time = time.time() - t1
175
+ print(f"Full sampling takes about {sampling_time:.2f} seconds.")
176
+
177
+ t2 = time.time()
178
+ print(index_sample.shape)
179
+ # the generated image tokens (index_sample) are decoded into actual image samples by vq_model.decode_code()
180
+ samples = vq_model.decode_code(index_sample, qzshape) # output value is between [-1, 1]
181
+ decoder_time = time.time() - t2
182
+ print(f"decoder takes about {decoder_time:.2f} seconds.")
183
+
184
+ samples = torch.cat((condition_img[0:1], samples), dim=0)
185
+ save_image(samples, f"sample/example/sample_t2i_{args.condition_type}.png", nrow=4, normalize=True, value_range=(-1, 1))
186
+ print(f"image is saved to sample/example/sample_t2i_{args.condition_type}.png")
187
+ print(prompts)
188
+
189
+
190
+ if __name__ == "__main__":
191
+ parser = argparse.ArgumentParser()
192
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
193
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
194
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
195
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
196
+ parser.add_argument("--no-left-padding", action='store_true', default=False)
197
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-XL")
198
+ parser.add_argument("--gpt-ckpt", type=str, default=None)
199
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i", help="class->image or text->image")
200
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
201
+ parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
202
+ parser.add_argument("--compile", action='store_true', default=False)
203
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
204
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
205
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
206
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
207
+ parser.add_argument("--image-size", type=int, choices=[256, 320, 384, 400, 448, 512, 576, 640, 704, 768], default=768)
208
+ parser.add_argument("--image-H", type=int, default=512)
209
+ parser.add_argument("--image-W", type=int, default=512)
210
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
211
+ parser.add_argument("--cfg-scale", type=float, default=4)
212
+ parser.add_argument("--seed", type=int, default=0)
213
+ parser.add_argument("--top-k", type=int, default=2000, help="top-k value to sample with")
214
+ parser.add_argument("--temperature", type=float, default=1.0, help="temperature value to sample with")
215
+ parser.add_argument("--top-p", type=float, default=1.0, help="top-p value to sample with")
216
+
217
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
218
+ parser.add_argument("--condition-type", type=str, choices=['seg', 'canny', 'hed', 'lineart', 'depth', 'canny_base'], default="canny")
219
+ parser.add_argument("--prompt", type=str, default='a high-quality image')
220
+ parser.add_argument("--condition-path", type=str, default='condition/example/t2i/multigen/landscape.png')
221
+ parser.add_argument("--adapter-size", type=str, default='small')
222
+
223
+ parser.add_argument("--control-strength", type=float, default=1.0)
224
+ args = parser.parse_args()
225
+ main(args)
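
> As a side note to the left-padding step in `sample_t2i.py` above, here is a minimal sketch (not part of the commit; shapes and values are illustrative rather than taken from `T5Embedder`) of how each caption's valid tokens are moved to the end of the sequence while the padding moves to the front.

```python
import torch

# illustrative embeddings: batch of 1 caption, max length 6, feature dim 4
caption_embs = torch.arange(24, dtype=torch.float32).reshape(1, 6, 4)
emb_masks = torch.tensor([[1, 1, 1, 0, 0, 0]])            # 3 valid tokens, right-padded

new_emb_masks = torch.flip(emb_masks, dims=[-1])          # mask becomes left-padded: [0, 0, 0, 1, 1, 1]
new_caption_embs = []
for caption_emb, emb_mask in zip(caption_embs, emb_masks):
    valid_num = int(emb_mask.sum().item())
    # move the padding rows to the front and the valid rows to the back
    new_caption_embs.append(torch.cat([caption_emb[valid_num:], caption_emb[:valid_num]]))
new_caption_embs = torch.stack(new_caption_embs)

# the valid embeddings now occupy the last `valid_num` positions
assert torch.equal(new_caption_embs[0, -3:], caption_embs[0, :3])
```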
autoregressive/sample/sample_t2i_MR.py ADDED
@@ -0,0 +1,238 @@
1
+ import torch
2
+ torch.backends.cuda.matmul.allow_tf32 = True
3
+ torch.backends.cudnn.allow_tf32 = True
4
+ torch.set_float32_matmul_precision('high')
5
+ setattr(torch.nn.Linear, 'reset_parameters', lambda self: None) # disable default parameter init for faster speed
6
+ setattr(torch.nn.LayerNorm, 'reset_parameters', lambda self: None) # disable default parameter init for faster speed
7
+ from torchvision.utils import save_image
8
+
9
+ import os
10
+ import sys
11
+ current_directory = os.getcwd()
12
+ sys.path.append(current_directory)
13
+ import time
14
+ import argparse
15
+ from tokenizer.tokenizer_image.vq_model import VQ_models
16
+ from language.t5 import T5Embedder
17
+ from autoregressive.models.gpt import GPT_models
18
+ from autoregressive.models.gpt_t2i import GPT_models
19
+ from autoregressive.models.generate import generate
20
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
21
+ from dataset.t2i_control import build_t2i_control_code
22
+ from accelerate import Accelerator
23
+ from dataset.build import build_dataset
24
+ from pathlib import Path
25
+ from accelerate.utils import ProjectConfiguration, set_seed
26
+ import torch.nn.functional as F
27
+ from condition.canny import CannyDetector
28
+ from condition.hed import HEDdetector
29
+ import numpy as np
30
+ from PIL import Image
31
+ from condition.lineart import LineArt
32
+ import cv2
33
+ from transformers import DPTImageProcessor, DPTForDepthEstimation
34
+ from condition.midas.depth import MidasDetector
35
+
36
+
37
+ def resize_image_to_16_multiple(image_path, condition_type='seg'):
38
+ image = Image.open(image_path)
39
+ width, height = image.size
40
+
41
+ if condition_type == 'depth': # The depth model requires a side length that is a multiple of 32
42
+ new_width = (width + 31) // 32 * 32
43
+ new_height = (height + 31) // 32 * 32
44
+ else:
45
+ new_width = (width + 15) // 16 * 16
46
+ new_height = (height + 15) // 16 * 16
47
+
48
+ resized_image = image.resize((new_width, new_height))
49
+ return resized_image
50
+
51
+ def main(args):
52
+ # Setup PyTorch:
53
+ torch.manual_seed(args.seed)
54
+ torch.backends.cudnn.deterministic = True
55
+ torch.backends.cudnn.benchmark = False
56
+ torch.set_grad_enabled(False)
57
+ device = "cuda" if torch.cuda.is_available() else "cpu"
58
+
59
+ # create and load model
60
+ vq_model = VQ_models[args.vq_model](
61
+ codebook_size=args.codebook_size,
62
+ codebook_embed_dim=args.codebook_embed_dim)
63
+ vq_model.to(device)
64
+ vq_model.eval()
65
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
66
+ vq_model.load_state_dict(checkpoint["model"])
67
+ del checkpoint
68
+ print(f"image tokenizer is loaded")
69
+
70
+ # create and load gpt model
71
+ precision = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.precision]
72
+ latent_size = args.image_size // args.downsample_size
73
+ gpt_model = GPT_models[args.gpt_model](
74
+ block_size=latent_size ** 2,
75
+ cls_token_num=args.cls_token_num,
76
+ model_type=args.gpt_type,
77
+ condition_type=args.condition_type,
78
+ ).to(device=device, dtype=precision)
79
+
80
+ _, file_extension = os.path.splitext(args.gpt_ckpt)
81
+ if file_extension.lower() == '.safetensors':
82
+ from safetensors.torch import load_file
83
+ model_weight = load_file(args.gpt_ckpt)
84
+ gpt_model.load_state_dict(model_weight, strict=False)
85
+ gpt_model.eval()
86
+ else:
87
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
88
+ if "model" in checkpoint: # ddp
89
+ model_weight = checkpoint["model"]
90
+ elif "module" in checkpoint: # deepspeed
91
+ model_weight = checkpoint["module"]
92
+ elif "state_dict" in checkpoint:
93
+ model_weight = checkpoint["state_dict"]
94
+ else:
95
+ raise Exception("please check model weight")
96
+ gpt_model.load_state_dict(model_weight, strict=False)
97
+ gpt_model.eval()
98
+ del checkpoint
99
+ print(f"gpt model is loaded")
100
+
101
+ if args.compile:
102
+ print(f"compiling the model...")
103
+ gpt_model = torch.compile(
104
+ gpt_model,
105
+ mode="reduce-overhead",
106
+ fullgraph=True
107
+ ) # requires PyTorch 2.0 (optional)
108
+ else:
109
+ print(f"no need to compile model in demo")
110
+
111
+ assert os.path.exists(args.t5_path)
112
+ t5_model = T5Embedder(
113
+ device=device,
114
+ local_cache=True,
115
+ cache_dir=args.t5_path,
116
+ dir_or_name=args.t5_model_type,
117
+ torch_dtype=precision,
118
+ model_max_length=args.t5_feature_max_len,
119
+ )
120
+
121
+
122
+ if args.condition_type == 'canny':
123
+ get_control = CannyDetector()
124
+ elif args.condition_type == 'hed':
125
+ get_control = HEDdetector().to(device).eval()
126
+ elif args.condition_type == 'lineart':
127
+ get_control = LineArt()
128
+ get_control.load_state_dict(torch.load('condition/ckpts/model.pth', map_location=torch.device('cpu')))
129
+ get_control.to(device)
130
+ elif args.condition_type == 'depth':
131
+ processor = DPTImageProcessor.from_pretrained("condition/ckpts/dpt_large")
132
+ model_large = DPTForDepthEstimation.from_pretrained("condition/ckpts/dpt_large").to(device)
133
+ model = MidasDetector(device=device)
134
+ with torch.no_grad():
135
+
136
+ condition_img = resize_image_to_16_multiple(args.condition_path, args.condition_type)
137
+ W, H = condition_img.size
138
+ print(H,W)
139
+ if args.condition_type == 'seg':
140
+ condition_img = torch.from_numpy(np.array(condition_img))
141
+ condition_img = condition_img.permute(2,0,1).unsqueeze(0).repeat(2,1,1,1)
142
+ elif args.condition_type == 'canny':
143
+ condition_img = get_control(np.array(condition_img))
144
+ condition_img = torch.from_numpy(condition_img[None,None,...]).repeat(2,3,1,1)
145
+ elif args.condition_type == 'hed':
146
+ condition_img = get_control(torch.from_numpy(np.array(condition_img)).permute(2,0,1).unsqueeze(0).to(device))
147
+ condition_img = condition_img.unsqueeze(1).repeat(2,3,1,1)
148
+ elif args.condition_type == 'lineart':
149
+ condition_img = get_control(torch.from_numpy(np.array(condition_img)).permute(2,0,1).unsqueeze(0).to(device).float())
150
+ condition_img = condition_img.repeat(2,3,1,1) * 255
151
+ elif args.condition_type == 'depth':
152
+ images = condition_img
153
+ if H == W:
154
+ inputs = processor(images=images, return_tensors="pt", size=(H,W)).to(device)
155
+ outputs = model_large(**inputs)
156
+ condition_img = outputs.predicted_depth
157
+ condition_img = (condition_img * 255 / condition_img.max())
158
+ else:
159
+ condition_img = torch.from_numpy(model(torch.from_numpy(np.array(condition_img)).to(device))).unsqueeze(0)
160
+ condition_img = condition_img.unsqueeze(0).repeat(2,3,1,1)
161
+ condition_img = condition_img.to(device)
162
+ condition_img = 2*(condition_img/255 - 0.5)
163
+ prompts = [args.prompt if args.prompt is not None else "a high-quality image"]
164
+ prompts = prompts * 2
165
+ caption_embs, emb_masks = t5_model.get_text_embeddings(prompts)
166
+
167
+ if not args.no_left_padding:
168
+ print(f"processing left-padding...")
169
+ # a naive way to implement left-padding
170
+ new_emb_masks = torch.flip(emb_masks, dims=[-1])
171
+ new_caption_embs = []
172
+ for idx, (caption_emb, emb_mask) in enumerate(zip(caption_embs, emb_masks)):
173
+ valid_num = int(emb_mask.sum().item())
174
+ print(f' prompt {idx} token len: {valid_num}')
175
+ new_caption_emb = torch.cat([caption_emb[valid_num:],caption_emb[:valid_num]])
176
+ new_caption_embs.append(new_caption_emb)
177
+ new_caption_embs = torch.stack(new_caption_embs)
178
+ else:
179
+ new_caption_embs, new_emb_masks = caption_embs, emb_masks
180
+ c_indices = new_caption_embs * new_emb_masks[:,:, None]
181
+ c_emb_masks = new_emb_masks
182
+ qzshape = [len(c_indices), args.codebook_embed_dim, H//args.downsample_size, W//args.downsample_size]
183
+ t1 = time.time()
184
+ index_sample = generate(
185
+ gpt_model, c_indices, (H//args.downsample_size)*(W//args.downsample_size),#latent_size ** 2,
186
+ c_emb_masks, condition=condition_img.to(precision),
187
+ cfg_scale=args.cfg_scale,
188
+ temperature=args.temperature, top_k=args.top_k,
189
+ top_p=args.top_p, sample_logits=True,
190
+ )
191
+ sampling_time = time.time() - t1
192
+ print(f"Full sampling takes about {sampling_time:.2f} seconds.")
193
+
194
+ t2 = time.time()
195
+ print(index_sample.shape)
196
+ samples = vq_model.decode_code(index_sample, qzshape) # output value is between [-1, 1]
197
+ decoder_time = time.time() - t2
198
+ print(f"decoder takes about {decoder_time:.2f} seconds.")
199
+
200
+ samples = torch.cat((condition_img[0:1], samples), dim=0)
201
+ save_image(samples, f"sample/example/sample_t2i_MR_{args.condition_type}.png", nrow=4, normalize=True, value_range=(-1, 1))
202
+ print(f"image is saved to sample/example/sample_t2i_MR_{args.condition_type}.png")
203
+ print(prompts)
204
+
205
+
206
+ if __name__ == "__main__":
207
+ parser = argparse.ArgumentParser()
208
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
209
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
210
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
211
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
212
+ parser.add_argument("--no-left-padding", action='store_true', default=False)
213
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-XL")
214
+ parser.add_argument("--gpt-ckpt", type=str, default=None)
215
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i", help="class->image or text->image")
216
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
217
+ parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
218
+ parser.add_argument("--compile", action='store_true', default=False)
219
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
220
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
221
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
222
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
223
+ parser.add_argument("--image-size", type=int, choices=[256, 320, 384, 400, 448, 512, 576, 640, 704, 768], default=768)
224
+ parser.add_argument("--image-H", type=int, default=512)
225
+ parser.add_argument("--image-W", type=int, default=512)
226
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
227
+ parser.add_argument("--cfg-scale", type=float, default=4)
228
+ parser.add_argument("--seed", type=int, default=0)
229
+ parser.add_argument("--top-k", type=int, default=2000, help="top-k value to sample with")
230
+ parser.add_argument("--temperature", type=float, default=1.0, help="temperature value to sample with")
231
+ parser.add_argument("--top-p", type=float, default=1.0, help="top-p value to sample with")
232
+
233
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
234
+ parser.add_argument("--condition-type", type=str, choices=['seg', 'canny', 'hed', 'lineart', 'depth'], default="canny")
235
+ parser.add_argument("--prompt", type=str, default='a high-quality image')
236
+ parser.add_argument("--condition-path", type=str, default='condition/example/t2i/multigen/landscape.png')
237
+ args = parser.parse_args()
238
+ main(args)
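
> A small aside on `resize_image_to_16_multiple` in `sample_t2i_MR.py` above: rounding a side length up to the next multiple of k uses only integer arithmetic. A minimal check (values illustrative, not from the commit):

```python
def round_up(n: int, k: int) -> int:
    # smallest multiple of k that is >= n
    return (n + k - 1) // k * k

assert round_up(500, 16) == 512   # generic branch: multiples of 16
assert round_up(512, 16) == 512   # an exact multiple is left unchanged
assert round_up(500, 32) == 512   # depth branch: the depth model needs multiples of 32
```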
autoregressive/serve/README.md ADDED
@@ -0,0 +1,58 @@
1
+ ## Serving by vLLM
2
+
3
+ ### Install
4
+ ```
5
+ pip install vllm==0.4.1
6
+ ```
7
+
8
+ ### Comparison (A100)
9
+
10
+ Method | params | baseline(s) | vllm(s) | speed-up ratio
11
+ --- |:---:|:---:|:---:|:---:
12
+ [GPT-B](./fake_json/GPT-B.json) | 111M | 7.80 | 2.39 | 326 %
13
+ [GPT-L](./fake_json/GPT-L.json) | 343M | 13.72 | 3.48 | 380 %
14
+ [GPT-XL](./fake_json/GPT-XL.json) | 775M | 19.76 | 4.84 | 408 %
15
+ [GPT-XXL](./fake_json/GPT-XXL.json)| 1.4B | 26.38 | 6.36 | 414 %
16
+ [GPT-3B](./fake_json/GPT-3B.json) | 3.1B | 14.73 | 6.26 | 235 %
17
+
18
+ ```
19
+ ### GPT-B
20
+ # 7.80 seconds
21
+ python3 autoregressive/sample/sample_c2i.py --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt --gpt-ckpt ./pretrained_models/c2i_B_384.pt --image-size 384
22
+
23
+ # 2.39 seconds
24
+ python3 autoregressive/serve/sample_c2i.py --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt --gpt-ckpt ./pretrained_models/c2i_B_384.pt --image-size 384
25
+
26
+
27
+ ### GPT-L
28
+ # 13.72 seconds
29
+ python3 autoregressive/sample/sample_c2i.py --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt --gpt-ckpt ./pretrained_models/c2i_L_384.pt --gpt-model GPT-L --image-size 384
30
+
31
+ # 3.48 seconds
32
+ python3 autoregressive/serve/sample_c2i.py --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt --gpt-ckpt ./pretrained_models/c2i_L_384.pt --gpt-model GPT-L --image-size 384
33
+
34
+
35
+ ### GPT-XL
36
+ # 19.76 seconds
37
+ python3 autoregressive/sample/sample_c2i.py --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt --gpt-ckpt ./pretrained_models/c2i_XL_384.pt --gpt-model GPT-XL --image-size 384
38
+
39
+ # 4.84 seconds
40
+ python3 autoregressive/serve/sample_c2i.py --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt --gpt-ckpt ./pretrained_models/c2i_XL_384.pt --gpt-model GPT-XL --image-size 384
41
+
42
+
43
+ ### GPT-XXL
44
+ # 26.38 seconds
45
+ python3 autoregressive/sample/sample_c2i.py --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt --gpt-ckpt ./pretrained_models/c2i_XXL_384.pt --from-fsdp --gpt-model GPT-XXL --image-size 384
46
+
47
+ # 6.36 seconds
48
+ python3 autoregressive/serve/sample_c2i.py --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt --gpt-ckpt ./pretrained_models/c2i_XXL_384.pt --from-fsdp --gpt-model GPT-XXL --image-size 384
49
+
50
+
51
+ ### GPT-3B
52
+ # 14.73 seconds
53
+ python3 autoregressive/sample/sample_c2i.py --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt --gpt-ckpt ./pretrained_models/c2i_3B_384.pt --from-fsdp --gpt-model GPT-3B --image-size 384
54
+
55
+ # 6.26 seconds
56
+ python3 autoregressive/serve/sample_c2i.py --vq-ckpt ./pretrained_models/vq_ds16_c2i.pt --gpt-ckpt ./pretrained_models/c2i_3B_384.pt --from-fsdp --gpt-model GPT-3B --image-size 384
57
+
58
+ ```
autoregressive/serve/fake_json/GPT-3B.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "_name_or_path": "facebook/opt-125m",
3
+ "activation_dropout": 0.0,
4
+ "activation_function": "relu",
5
+ "architectures": [
6
+ "OPTForCausalLM"
7
+ ],
8
+ "attention_dropout": 0.0,
9
+ "bos_token_id": 2,
10
+ "do_layer_norm_before": true,
11
+ "dropout": 0.1,
12
+ "eos_token_id": 2,
13
+ "ffn_dim": 3072,
14
+ "hidden_size": 3584,
15
+ "init_std": 0.02,
16
+ "layerdrop": 0.0,
17
+ "max_position_embeddings": 2048,
18
+ "model_type": "opt",
19
+ "num_attention_heads": 32,
20
+ "num_hidden_layers": 24,
21
+ "pad_token_id": 1,
22
+ "prefix": "</s>",
23
+ "torch_dtype": "bfloat16",
24
+ "transformers_version": "4.21.0.dev0",
25
+ "use_cache": true,
26
+ "vocab_size": 16384,
27
+ "word_embed_proj_dim": 768
28
+ }
autoregressive/serve/fake_json/GPT-B.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "_name_or_path": "facebook/opt-125m",
3
+ "activation_dropout": 0.0,
4
+ "activation_function": "relu",
5
+ "architectures": [
6
+ "OPTForCausalLM"
7
+ ],
8
+ "attention_dropout": 0.0,
9
+ "bos_token_id": 2,
10
+ "do_layer_norm_before": true,
11
+ "dropout": 0.1,
12
+ "eos_token_id": 2,
13
+ "ffn_dim": 3072,
14
+ "hidden_size": 768,
15
+ "init_std": 0.02,
16
+ "layerdrop": 0.0,
17
+ "max_position_embeddings": 2048,
18
+ "model_type": "opt",
19
+ "num_attention_heads": 12,
20
+ "num_hidden_layers": 12,
21
+ "pad_token_id": 1,
22
+ "prefix": "</s>",
23
+ "torch_dtype": "bfloat16",
24
+ "transformers_version": "4.21.0.dev0",
25
+ "use_cache": true,
26
+ "vocab_size": 16384,
27
+ "word_embed_proj_dim": 768
28
+ }
autoregressive/serve/fake_json/GPT-L.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "_name_or_path": "facebook/opt-125m",
3
+ "activation_dropout": 0.0,
4
+ "activation_function": "relu",
5
+ "architectures": [
6
+ "OPTForCausalLM"
7
+ ],
8
+ "attention_dropout": 0.0,
9
+ "bos_token_id": 2,
10
+ "do_layer_norm_before": true,
11
+ "dropout": 0.1,
12
+ "eos_token_id": 2,
13
+ "ffn_dim": 3072,
14
+ "hidden_size": 1024,
15
+ "init_std": 0.02,
16
+ "layerdrop": 0.0,
17
+ "max_position_embeddings": 2048,
18
+ "model_type": "opt",
19
+ "num_attention_heads": 16,
20
+ "num_hidden_layers": 24,
21
+ "pad_token_id": 1,
22
+ "prefix": "</s>",
23
+ "torch_dtype": "bfloat16",
24
+ "transformers_version": "4.21.0.dev0",
25
+ "use_cache": true,
26
+ "vocab_size": 16384,
27
+ "word_embed_proj_dim": 768
28
+ }
autoregressive/serve/fake_json/GPT-XL.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "_name_or_path": "facebook/opt-125m",
3
+ "activation_dropout": 0.0,
4
+ "activation_function": "relu",
5
+ "architectures": [
6
+ "OPTForCausalLM"
7
+ ],
8
+ "attention_dropout": 0.0,
9
+ "bos_token_id": 2,
10
+ "do_layer_norm_before": true,
11
+ "dropout": 0.1,
12
+ "eos_token_id": 2,
13
+ "ffn_dim": 3072,
14
+ "hidden_size": 1280,
15
+ "init_std": 0.02,
16
+ "layerdrop": 0.0,
17
+ "max_position_embeddings": 2048,
18
+ "model_type": "opt",
19
+ "num_attention_heads": 20,
20
+ "num_hidden_layers": 36,
21
+ "pad_token_id": 1,
22
+ "prefix": "</s>",
23
+ "torch_dtype": "bfloat16",
24
+ "transformers_version": "4.21.0.dev0",
25
+ "use_cache": true,
26
+ "vocab_size": 16384,
27
+ "word_embed_proj_dim": 768
28
+ }
autoregressive/serve/fake_json/GPT-XXL.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "_name_or_path": "facebook/opt-125m",
3
+ "activation_dropout": 0.0,
4
+ "activation_function": "relu",
5
+ "architectures": [
6
+ "OPTForCausalLM"
7
+ ],
8
+ "attention_dropout": 0.0,
9
+ "bos_token_id": 2,
10
+ "do_layer_norm_before": true,
11
+ "dropout": 0.1,
12
+ "eos_token_id": 2,
13
+ "ffn_dim": 3072,
14
+ "hidden_size": 1536,
15
+ "init_std": 0.02,
16
+ "layerdrop": 0.0,
17
+ "max_position_embeddings": 2048,
18
+ "model_type": "opt",
19
+ "num_attention_heads": 24,
20
+ "num_hidden_layers": 48,
21
+ "pad_token_id": 1,
22
+ "prefix": "</s>",
23
+ "torch_dtype": "bfloat16",
24
+ "transformers_version": "4.21.0.dev0",
25
+ "use_cache": true,
26
+ "vocab_size": 16384,
27
+ "word_embed_proj_dim": 768
28
+ }
autoregressive/serve/gpt_model.py ADDED
@@ -0,0 +1,419 @@
1
+ from dataclasses import dataclass
2
+ from typing import Optional, List
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+
7
+ from vllm.model_executor.layers.layernorm import RMSNorm
8
+ from vllm.model_executor.layers.activation import SiluAndMul
9
+ from vllm.model_executor.sampling_metadata import SamplingMetadata
10
+ from vllm.sequence import SamplerOutput
11
+
12
+ from vllm.attention import AttentionMetadata
13
+ from vllm.attention import Attention as pagedAttention
14
+
15
+ from vllm.model_executor.layers.logits_processor import LogitsProcessor
16
+ from autoregressive.serve.sampler import Sampler
17
+
18
+ def find_multiple(n: int, k: int):
19
+ if n % k == 0:
20
+ return n
21
+ return n + k - (n % k)
22
+
23
+ @dataclass
24
+ class ModelArgs:
25
+ dim: int = 4096
26
+ n_layer: int = 32
27
+ n_head: int = 32
28
+ n_kv_head: Optional[int] = None
29
+ multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2
30
+ ffn_dim_multiplier: Optional[float] = None
31
+ rope_base: float = 10000
32
+ norm_eps: float = 1e-5
33
+ initializer_range: float = 0.02
34
+
35
+ num_classes: int = 1000
36
+ class_dropout_prob: float = 0.1
37
+ model_type: str = 'c2i'
38
+ cfg_scale: float = 4.0
39
+
40
+ vocab_size: int = 16384
41
+ cls_token_num: int = 1
42
+ block_size: int = 256
43
+ max_batch_size: int = 32
44
+ max_seq_len: int = 2048
45
+
46
+
47
+ #################################################################################
48
+ # Embedding Layers for Class Labels #
49
+ #################################################################################
50
+ class LabelEmbedder(nn.Module):
51
+ """
52
+ Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
53
+ """
54
+ def __init__(self, num_classes, hidden_size, dropout_prob):
55
+ super().__init__()
56
+ use_cfg_embedding = dropout_prob > 0
57
+ self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
58
+ self.num_classes = num_classes
59
+ self.dropout_prob = dropout_prob
60
+
61
+ # def token_drop(self, labels, force_drop_ids=None):
62
+ # """
63
+ # Drops labels to enable classifier-free guidance.
64
+ # """
65
+ # if force_drop_ids is None:
66
+ # drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
67
+ # else:
68
+ # drop_ids = force_drop_ids == 1
69
+ # labels = torch.where(drop_ids, self.num_classes, labels)
70
+ # return labels
71
+
72
+ # def forward(self, labels, train, force_drop_ids=None):
73
+ def forward(self, labels):
74
+ # use_dropout = self.dropout_prob > 0
75
+ # if (train and use_dropout) or (force_drop_ids is not None):
76
+ # labels = self.token_drop(labels, force_drop_ids)
77
+ embeddings = self.embedding_table(labels)
78
+ return embeddings
79
+
80
+
81
+ #################################################################################
82
+ # GPT Model #
83
+ #################################################################################
84
+ # class RMSNorm(torch.nn.Module):
85
+ # def __init__(self, dim: int, eps: float = 1e-5):
86
+ # super().__init__()
87
+ # self.eps = eps
88
+ # self.weight = nn.Parameter(torch.ones(dim))
89
+
90
+ # def _norm(self, x):
91
+ # return x * torch.rsqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)
92
+
93
+ # def forward(self, x):
94
+ # output = self._norm(x.float()).type_as(x)
95
+ # return output * self.weight
96
+
97
+
98
+ class FeedForward(nn.Module):
99
+ def __init__(self, config: ModelArgs):
100
+ super().__init__()
101
+ hidden_dim = 4 * config.dim
102
+ hidden_dim = int(2 * hidden_dim / 3)
103
+ # custom dim factor multiplier
104
+ if config.ffn_dim_multiplier is not None:
105
+ hidden_dim = int(config.ffn_dim_multiplier * hidden_dim)
106
+ hidden_dim = find_multiple(hidden_dim, config.multiple_of)
107
+
108
+ # self.w1 = nn.Linear(config.dim, hidden_dim, bias=False)
109
+ # self.w3 = nn.Linear(config.dim, hidden_dim, bias=False)
110
+ self.w_merged = nn.Linear(config.dim, hidden_dim * 2, bias=False)
111
+ self.act_fn = SiluAndMul()
112
+
113
+ self.w2 = nn.Linear(hidden_dim, config.dim, bias=False)
114
+ # self.ffn_dropout = nn.Dropout(config.ffn_dropout_p)
115
+
116
+ # def forward(self, x):
117
+ # return self.ffn_dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))
118
+
119
+ def forward(self, x):
120
+ x = self.w_merged(x)
121
+ x = self.act_fn(x)
122
+ x = self.w2(x)
123
+ # return self.ffn_dropout(x)
124
+ return x
125
+
126
+
127
+ class Attention(nn.Module):
128
+ def __init__(self, config: ModelArgs):
129
+ super().__init__()
130
+ assert config.dim % config.n_head == 0
131
+ self.dim = config.dim
132
+ self.head_dim = config.dim // config.n_head
133
+ self.n_head = config.n_head
134
+ self.n_kv_head = config.n_kv_head if config.n_kv_head is not None else config.n_head
135
+ total_kv_dim = (self.n_head + 2 * self.n_kv_head) * self.head_dim
136
+
137
+ # key, query, value projections for all heads, but in a batch
138
+ self.wqkv = nn.Linear(config.dim, total_kv_dim, bias=False)
139
+ self.wo = nn.Linear(config.dim, config.dim, bias=False)
140
+
141
+ # pagedAttention
142
+ if config.dim // config.n_head == 100:
143
+ self.attn = None # for this case, we need to overwrite the attn in AttentionMonkeyPatch
144
+ else:
145
+ self.attn = pagedAttention(self.n_head, self.head_dim, self.head_dim**-0.5, num_kv_heads=self.n_kv_head)
146
+
147
+ # 2d rotary pos embedding
148
+ grid_size = int(config.block_size ** 0.5)
149
+ assert grid_size * grid_size == config.block_size
150
+ freqs_cis = precompute_freqs_cis_2d(grid_size, config.dim // config.n_head, config.rope_base, config.cls_token_num)
151
+ self.register_buffer('freqs_cis', freqs_cis)
152
+
153
+
154
+ def forward(
155
+ self,
156
+ x: torch.Tensor,
157
+ positions: torch.Tensor,
158
+ kv_cache: torch.Tensor,
159
+ attn_metadata: AttentionMetadata,
160
+ ):
161
+ kv_size = self.n_kv_head * self.head_dim
162
+ xq, xk, xv = self.wqkv(x).split([self.dim, kv_size, kv_size], dim=-1)
163
+
164
+ xq = xq.view(*xq.shape[:-1], 1, self.n_head, self.head_dim)
165
+ xk = xk.view(*xk.shape[:-1], 1, self.n_kv_head, self.head_dim)
166
+ freqs_cis = self.freqs_cis[positions].unsqueeze(1)
167
+ xq = apply_rotary_emb_bs(xq, freqs_cis)
168
+ xk = apply_rotary_emb_bs(xk, freqs_cis)
169
+ xq = xq.flatten(1)
170
+ xk = xk.flatten(1)
171
+
172
+ output = self.attn(xq, xk, xv, kv_cache, attn_metadata)
173
+ output = self.wo(output)
174
+
175
+ return output
176
+
177
+
178
+ class AttentionMonkeyPatch(Attention):
179
+ """
180
+ Note:
181
+ In vllm, PagedAttention supports head sizes [64, 80, 96, 112, 128, 256].
182
+ However, LlamaGen-3B model has head size 100 (for some historical reasons).
183
+ Here we hack Attnetion to enable vllm support head size 100.
184
+ """
185
+ def __init__(self, config: ModelArgs):
186
+ super().__init__(config)
187
+ # overwrite PagedAttention
188
+ # hard-coded 112 for LlamaGen-3B model
189
+ self.attn = pagedAttention(self.n_head, 112, 100**-0.5, num_kv_heads=self.n_kv_head)
190
+
191
+ def forward(
192
+ self,
193
+ x: torch.Tensor,
194
+ positions: torch.Tensor,
195
+ kv_cache: torch.Tensor,
196
+ attn_metadata: AttentionMetadata,
197
+ ):
198
+ kv_size = self.n_kv_head * self.head_dim
199
+ xq, xk, xv = self.wqkv(x).split([self.dim, kv_size, kv_size], dim=-1)
200
+
201
+ xq = xq.view(*xq.shape[:-1], 1, self.n_head, self.head_dim)
202
+ xk = xk.view(*xk.shape[:-1], 1, self.n_kv_head, self.head_dim)
203
+ freqs_cis = self.freqs_cis[positions].unsqueeze(1)
204
+ xq = apply_rotary_emb_bs(xq, freqs_cis)
205
+ xk = apply_rotary_emb_bs(xk, freqs_cis)
206
+ xq = xq.flatten(1)
207
+ xk = xk.flatten(1)
208
+ ############ padding to 112 to make vllm happy ############
209
+ zero_pad = torch.zeros(xq.shape[0], self.n_head, 112 - 100, device=xq.device, dtype=xq.dtype)
210
+ xq = xq.reshape(xq.shape[0], self.n_head, self.head_dim)
211
+ xk = xk.reshape(xk.shape[0], self.n_kv_head, self.head_dim)
212
+ xv = xv.reshape(xv.shape[0], self.n_kv_head, self.head_dim)
213
+ xq = torch.concat([xq, zero_pad], dim=-1).flatten(1)
214
+ xk = torch.concat([xk, zero_pad], dim=-1).flatten(1)
215
+ xv = torch.concat([xv, zero_pad], dim=-1).flatten(1)
216
+
217
+ output = self.attn(xq, xk, xv, kv_cache, attn_metadata)
218
+ ############ de-padding to 100 ############
219
+ output = output.reshape(output.shape[0], self.n_head, 112)
220
+ output = output[..., :100].flatten(1)
221
+
222
+ output = self.wo(output)
223
+
224
+ return output
225
+
226
+
227
+ class TransformerBlock(nn.Module):
228
+ def __init__(self, config: ModelArgs):
229
+ super().__init__()
230
+ if config.dim // config.n_head == 100:
231
+ self.attention = AttentionMonkeyPatch(config)
232
+ else:
233
+ self.attention = Attention(config)
234
+ self.feed_forward = FeedForward(config)
235
+ self.attention_norm = RMSNorm(config.dim, eps=config.norm_eps)
236
+ self.ffn_norm = RMSNorm(config.dim, eps=config.norm_eps)
237
+
238
+ def forward(self, x: torch.Tensor, positions: torch.Tensor, kv_cache: torch.Tensor, attn_metadata: AttentionMetadata):
239
+ h = x + self.attention(self.attention_norm(x), positions, kv_cache, attn_metadata)
240
+ out = h + self.feed_forward(self.ffn_norm(h))
241
+ return out
242
+
243
+
244
+ class Transformer(nn.Module):
245
+ def __init__(self, config: ModelArgs):
246
+ super().__init__()
247
+ self.config = config
248
+ self.vocab_size = config.vocab_size
249
+ self.n_layer = config.n_layer
250
+ self.block_size = config.block_size
251
+ self.num_classes = config.num_classes
252
+ self.model_type = config.model_type
253
+ self.cls_token_num = config.cls_token_num
254
+ self.cfg_scale = config.cfg_scale
255
+ if self.model_type == 'c2i':
256
+ self.cls_embedding = LabelEmbedder(config.num_classes, config.dim, config.class_dropout_prob)
257
+ else:
258
+ raise Exception("vllm only supports c2i now, please check model type")
259
+ self.tok_embeddings = nn.Embedding(config.vocab_size, config.dim)
260
+
261
+ self.layers = torch.nn.ModuleList()
262
+ for layer_id in range(config.n_layer):
263
+ self.layers.append(TransformerBlock(config))
264
+
265
+ # output layer
266
+ self.norm = RMSNorm(config.dim, eps=config.norm_eps)
267
+ self.output = nn.Linear(config.dim, config.vocab_size, bias=False)
268
+
269
+ self.logits_processor = LogitsProcessor(config.vocab_size)
270
+
271
+ self.sampler = Sampler(config.cfg_scale)
272
+
273
+ def forward(
274
+ self,
275
+ input_ids: torch.Tensor=None,
276
+ positions: torch.Tensor=None,
277
+ kv_caches: List[torch.Tensor]=None,
278
+ attn_metadata: AttentionMetadata=None,
279
+ ):
280
+ # if positions.max() == 0: # prefill in inference
281
+ # token_embeddings = self.cls_embedding(input_ids)
282
+ # else: # decode_n_tokens(kv cache) in inference
283
+ # token_embeddings = self.tok_embeddings(input_ids)
284
+ cond_ids = torch.clamp(input_ids, max=self.num_classes)
285
+ token_embeddings = self.cls_embedding(cond_ids) * (positions.max() == 0) + \
286
+ self.tok_embeddings(input_ids) * (positions.max() != 0)
287
+
288
+ hh = token_embeddings
289
+ # transformer blocks
290
+ for layer_id, layer in enumerate(self.layers):
291
+ hh = layer(hh, positions, kv_caches[layer_id], attn_metadata)
292
+
293
+ # output layers
294
+ hh = self.norm(hh)
295
+ return hh
296
+
297
+ def compute_logits(self, hidden_states: torch.Tensor,
298
+ sampling_metadata: SamplingMetadata) -> torch.Tensor:
299
+ logits = self.logits_processor(self.output.weight, hidden_states, sampling_metadata)
300
+ return logits
301
+
302
+ def sample(
303
+ self,
304
+ logits: torch.Tensor,
305
+ sampling_metadata: SamplingMetadata,
306
+ ) -> Optional[SamplerOutput]:
307
+ next_tokens = self.sampler(logits, sampling_metadata)
308
+ return next_tokens
309
+
310
+
311
+ def custom_load_state_dict(self, model_weights):
312
+ model_weights = model_weights.copy()
313
+ for layer_id in range(len(self.layers)):
314
+ branch1 = f'layers.{layer_id}.feed_forward.w1.weight'
315
+ branch3 = f'layers.{layer_id}.feed_forward.w3.weight'
316
+ branch_merged = f'layers.{layer_id}.feed_forward.w_merged.weight'
317
+ model_weights[branch_merged] = torch.cat(
318
+ [model_weights[branch1], model_weights[branch3]], dim=0
319
+ )
320
+ model_weights.pop(branch1)
321
+ model_weights.pop(branch3)
322
+
323
+ if 'freqs_cis' in model_weights:
324
+ model_weights.pop('freqs_cis')
325
+
326
+ self.load_state_dict(model_weights, strict=False)
327
+
328
+
329
+
330
+ #################################################################################
331
+ # Rotary Positional Embedding Functions #
332
+ #################################################################################
333
+ # https://github.com/pytorch-labs/gpt-fast/blob/main/model.py
334
+ def precompute_freqs_cis(seq_len: int, n_elem: int, base: int = 10000, cls_token_num=120):
335
+ freqs = 1.0 / (base ** (torch.arange(0, n_elem, 2)[: (n_elem // 2)].float() / n_elem))
336
+ t = torch.arange(seq_len, device=freqs.device)
337
+ freqs = torch.outer(t, freqs) # (seq_len, head_dim // 2)
338
+ freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
339
+ cache = torch.stack([freqs_cis.real, freqs_cis.imag], dim=-1) # (cls_token_num+seq_len, head_dim // 2, 2)
340
+ cond_cache = torch.cat([torch.zeros(cls_token_num, n_elem // 2, 2), cache]) # (cls_token_num+seq_len, head_dim // 2, 2)
341
+ return cond_cache
342
+
343
+
344
+ def precompute_freqs_cis_2d(grid_size: int, n_elem: int, base: int = 10000, cls_token_num=120):
345
+ # split the dimension into half, one for x and one for y
346
+ half_dim = n_elem // 2
347
+ freqs = 1.0 / (base ** (torch.arange(0, half_dim, 2)[: (half_dim // 2)].float() / half_dim))
348
+ t = torch.arange(grid_size, device=freqs.device)
349
+ freqs = torch.outer(t, freqs) # (grid_size, head_dim // 2)
350
+ freqs_grid = torch.concat([
351
+ freqs[:, None, :].expand(-1, grid_size, -1),
352
+ freqs[None, :, :].expand(grid_size, -1, -1),
353
+ ], dim=-1) # (grid_size, grid_size, head_dim // 2)
354
+ cache_grid = torch.stack([torch.cos(freqs_grid), torch.sin(freqs_grid)], dim=-1) # (grid_size, grid_size, head_dim // 2, 2)
355
+ cache = cache_grid.flatten(0, 1)
356
+ cond_cache = torch.cat([torch.zeros(cls_token_num, n_elem // 2, 2), cache]) # (cls_token_num+grid_size**2, head_dim // 2, 2)
357
+ return cond_cache
358
+
359
+
360
+ def apply_rotary_emb(x: torch.Tensor, freqs_cis: torch.Tensor):
361
+ # x: (bs, seq_len, n_head, head_dim)
362
+ # freqs_cis (seq_len, head_dim // 2, 2)
363
+ xshaped = x.float().reshape(*x.shape[:-1], -1, 2) # (bs, seq_len, n_head, head_dim//2, 2)
364
+ freqs_cis = freqs_cis.view(1, xshaped.size(1), 1, xshaped.size(3), 2) # (1, seq_len, 1, head_dim//2, 2)
365
+ x_out2 = torch.stack([
366
+ xshaped[..., 0] * freqs_cis[..., 0] - xshaped[..., 1] * freqs_cis[..., 1],
367
+ xshaped[..., 1] * freqs_cis[..., 0] + xshaped[..., 0] * freqs_cis[..., 1],
368
+ ], dim=-1)
369
+ x_out2 = x_out2.flatten(3)
370
+ return x_out2.type_as(x)
371
+
372
+
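A quick shape check for the rotary helpers above. The sizes are arbitrary; the point is that the cache produced by precompute_freqs_cis lines up with what apply_rotary_emb expects, assuming both are importable from this module:
import torch
from autoregressive.serve.gpt_model import precompute_freqs_cis, apply_rotary_emb

bs, seq_len, n_head, head_dim = 2, 16, 4, 64
freqs_cis = precompute_freqs_cis(seq_len, head_dim, cls_token_num=0)
print(freqs_cis.shape)   # (seq_len, head_dim // 2, 2) when cls_token_num == 0

x = torch.randn(bs, seq_len, n_head, head_dim)
x_rot = apply_rotary_emb(x, freqs_cis)
print(x_rot.shape)       # unchanged: (bs, seq_len, n_head, head_dim)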
373
+ def apply_rotary_emb_bs(x: torch.Tensor, freqs_cis: torch.Tensor):
374
+ # x: (bs, seq_len, n_head, head_dim)
375
+ # freqs_cis (seq_len, head_dim // 2, 2)
376
+ xshaped = x.float().reshape(*x.shape[:-1], -1, 2) # (bs, seq_len, n_head, head_dim//2, 2)
377
+ freqs_cis = freqs_cis.view(xshaped.size(0), xshaped.size(1), 1, xshaped.size(3), 2) # (bs, seq_len, 1, head_dim//2, 2)
378
+ x_out2 = torch.stack([
379
+ xshaped[..., 0] * freqs_cis[..., 0] - xshaped[..., 1] * freqs_cis[..., 1],
380
+ xshaped[..., 1] * freqs_cis[..., 0] + xshaped[..., 0] * freqs_cis[..., 1],
381
+ ], dim=-1)
382
+ x_out2 = x_out2.flatten(3)
383
+ return x_out2.type_as(x)
384
+
385
+
386
+ #################################################################################
387
+ # GPT Configs #
388
+ #################################################################################
389
+ ### text-conditional
390
+ def GPT_7B(**kwargs):
391
+ return Transformer(ModelArgs(n_layer=32, n_head=32, dim=4096, **kwargs)) # 6.6B
392
+
393
+ def GPT_3B(**kwargs):
394
+ return Transformer(ModelArgs(n_layer=24, n_head=32, dim=3200, **kwargs)) # 3.1B
395
+
396
+ def GPT_1B(**kwargs):
397
+ return Transformer(ModelArgs(n_layer=22, n_head=32, dim=2048, **kwargs)) # 1.2B
398
+
399
+ ### class-conditional
400
+ def GPT_XXXL(**kwargs):
401
+ return Transformer(ModelArgs(n_layer=48, n_head=40, dim=2560, **kwargs)) # 3.9B
402
+
403
+ def GPT_XXL(**kwargs):
404
+ return Transformer(ModelArgs(n_layer=48, n_head=24, dim=1536, **kwargs)) # 1.4B
405
+
406
+ def GPT_XL(**kwargs):
407
+ return Transformer(ModelArgs(n_layer=36, n_head=20, dim=1280, **kwargs)) # 775M
408
+
409
+ def GPT_L(**kwargs):
410
+ return Transformer(ModelArgs(n_layer=24, n_head=16, dim=1024, **kwargs)) # 343M
411
+
412
+ def GPT_B(**kwargs):
413
+ return Transformer(ModelArgs(n_layer=12, n_head=12, dim=768, **kwargs)) # 111M
414
+
415
+
416
+ GPT_models = {
417
+ 'GPT-B': GPT_B, 'GPT-L': GPT_L, 'GPT-XL': GPT_XL, 'GPT-XXL': GPT_XXL, 'GPT-XXXL': GPT_XXXL,
418
+ 'GPT-1B': GPT_1B, 'GPT-3B': GPT_3B, 'GPT-7B': GPT_7B,
419
+ }
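The registry above is how the serving path picks an architecture by name; model_runner.py (added below) instantiates it with the VQ codebook size as the vocabulary and the flattened latent grid as the block size. A hedged sketch of that pattern with illustrative values (real runs take them from command-line arguments, and constructing the model requires the vLLM-side dependencies this module imports):
from autoregressive.serve.gpt_model import GPT_models

image_size, downsample_size = 256, 16
latent_size = image_size // downsample_size      # 16x16 token grid
gpt = GPT_models['GPT-B'](
    vocab_size=16384,              # VQ codebook size (assumed)
    block_size=latent_size ** 2,   # 256 image tokens
    num_classes=1000,
    cls_token_num=1,
    model_type='c2i',
)
gpt.eval()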
autoregressive/serve/gpu_executor.py ADDED
@@ -0,0 +1,201 @@
1
+ from typing import Dict, List, Optional, Set, Tuple
2
+ import argparse
3
+
4
+ from vllm.config import (CacheConfig, DeviceConfig, LoadConfig, LoRAConfig,
5
+ ModelConfig, ParallelConfig, SchedulerConfig,
6
+ SpeculativeConfig, VisionLanguageConfig)
7
+ from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase
8
+ from vllm.logger import init_logger
9
+ from vllm.lora.request import LoRARequest
10
+ from vllm.sequence import SamplerOutput, SequenceGroupMetadata
11
+ from vllm.utils import (get_distributed_init_method, get_ip, get_open_port,
12
+ make_async)
13
+
14
+ logger = init_logger(__name__)
15
+
16
+
17
+ class GPUExecutor(ExecutorBase):
18
+ def __init__(
19
+ self,
20
+ args: argparse.Namespace,
21
+ model_config: ModelConfig,
22
+ cache_config: CacheConfig,
23
+ parallel_config: ParallelConfig,
24
+ scheduler_config: SchedulerConfig,
25
+ device_config: DeviceConfig,
26
+ load_config: LoadConfig,
27
+ lora_config: Optional[LoRAConfig],
28
+ vision_language_config: Optional[VisionLanguageConfig],
29
+ speculative_config: Optional[SpeculativeConfig],
30
+ ) -> None:
31
+ self.args = args
32
+ self.model_config = model_config
33
+ self.cache_config = cache_config
34
+ self.lora_config = lora_config
35
+ self.load_config = load_config
36
+ self.parallel_config = parallel_config
37
+ self.scheduler_config = scheduler_config
38
+ self.device_config = device_config
39
+ self.vision_language_config = vision_language_config
40
+ self.speculative_config = speculative_config
41
+
42
+ self._init_executor()
43
+
44
+ def _init_executor(self) -> None:
45
+ """Initialize the worker and load the model.
46
+
47
+ If speculative decoding is enabled, we instead create the speculative
48
+ worker.
49
+ """
50
+ if self.speculative_config is None:
51
+ self._init_non_spec_worker()
52
+ else:
53
+ self._init_spec_worker()
54
+
55
+ def _init_non_spec_worker(self):
56
+ # Lazy import the Worker to avoid importing torch.cuda/xformers
57
+ # before CUDA_VISIBLE_DEVICES is set in the Worker
58
+ # from vllm.worker.worker import Worker
59
+ from autoregressive.serve.worker import Worker
60
+
61
+ assert self.parallel_config.world_size == 1, (
62
+ "GPUExecutor only supports single GPU.")
63
+
64
+ distributed_init_method = get_distributed_init_method(
65
+ get_ip(), get_open_port())
66
+ self.driver_worker = Worker(
67
+ model_config=self.model_config,
68
+ parallel_config=self.parallel_config,
69
+ scheduler_config=self.scheduler_config,
70
+ device_config=self.device_config,
71
+ cache_config=self.cache_config,
72
+ load_config=self.load_config,
73
+ local_rank=0,
74
+ rank=0,
75
+ distributed_init_method=distributed_init_method,
76
+ lora_config=self.lora_config,
77
+ vision_language_config=self.vision_language_config,
78
+ is_driver_worker=True,
79
+ )
80
+ self.driver_worker.init_device()
81
+ self.driver_worker.load_model(self.args)
82
+
83
+ def _init_spec_worker(self):
84
+ """Initialize a SpecDecodeWorker, using a draft model for proposals.
85
+ """
86
+ assert self.speculative_config is not None
87
+
88
+ from vllm.spec_decode.multi_step_worker import MultiStepWorker
89
+ from vllm.spec_decode.spec_decode_worker import SpecDecodeWorker
90
+ from vllm.worker.worker import Worker
91
+
92
+ distributed_init_method = get_distributed_init_method(
93
+ get_ip(), get_open_port())
94
+
95
+ target_worker = Worker(
96
+ model_config=self.model_config,
97
+ parallel_config=self.parallel_config,
98
+ scheduler_config=self.scheduler_config,
99
+ device_config=self.device_config,
100
+ cache_config=self.cache_config,
101
+ load_config=self.load_config,
102
+ local_rank=0,
103
+ rank=0,
104
+ distributed_init_method=distributed_init_method,
105
+ lora_config=self.lora_config,
106
+ vision_language_config=self.vision_language_config,
107
+ is_driver_worker=True,
108
+ )
109
+
110
+ draft_worker = MultiStepWorker(
111
+ model_config=self.speculative_config.draft_model_config,
112
+ parallel_config=self.speculative_config.draft_parallel_config,
113
+ scheduler_config=self.scheduler_config,
114
+ device_config=self.device_config,
115
+ cache_config=self.cache_config,
116
+ load_config=self.load_config,
117
+ local_rank=0,
118
+ rank=0,
119
+ distributed_init_method=distributed_init_method,
120
+ lora_config=self.lora_config,
121
+ vision_language_config=self.vision_language_config,
122
+ is_driver_worker=True,
123
+ )
124
+
125
+ spec_decode_worker = SpecDecodeWorker.from_workers(
126
+ proposer_worker=draft_worker, scorer_worker=target_worker)
127
+
128
+ assert self.parallel_config.world_size == 1, (
129
+ "GPUExecutor only supports single GPU.")
130
+
131
+ self.driver_worker = spec_decode_worker
132
+
133
+ # Load model handled in spec decode worker.
134
+ self.driver_worker.init_device()
135
+
136
+ def determine_num_available_blocks(self) -> Tuple[int, int]:
137
+ """Determine the number of available KV blocks by invoking the
138
+ underlying worker.
139
+ """
140
+ return self.driver_worker.determine_num_available_blocks()
141
+
142
+ def initialize_cache(self, num_gpu_blocks: int, num_cpu_blocks: int) -> None:
143
+ """Initialize the KV cache by invoking the underlying worker.
144
+ """
145
+ # NOTE: This is logged in the executor because there can be >1 worker
146
+ # with other executors. We could log in the engine level, but work
147
+ # remains to abstract away the device for non-GPU configurations.
148
+ logger.info(f"# GPU blocks: {num_gpu_blocks}, "
149
+ f"# CPU blocks: {num_cpu_blocks}")
150
+
151
+ self.driver_worker.initialize_cache(num_gpu_blocks, num_cpu_blocks)
152
+
153
+ def execute_model(
154
+ self,
155
+ seq_group_metadata_list: List[SequenceGroupMetadata],
156
+ blocks_to_swap_in: Dict[int, int],
157
+ blocks_to_swap_out: Dict[int, int],
158
+ blocks_to_copy: Dict[int, List[int]],
159
+ num_lookahead_slots: int,
160
+ ) -> List[SamplerOutput]:
161
+ output = self.driver_worker.execute_model(
162
+ seq_group_metadata_list=seq_group_metadata_list,
163
+ blocks_to_swap_in=blocks_to_swap_in,
164
+ blocks_to_swap_out=blocks_to_swap_out,
165
+ blocks_to_copy=blocks_to_copy,
166
+ num_lookahead_slots=num_lookahead_slots,
167
+ )
168
+ return output
169
+
170
+ def add_lora(self, lora_request: LoRARequest) -> bool:
171
+ assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
172
+ return self.driver_worker.add_lora(lora_request)
173
+
174
+ def remove_lora(self, lora_id: int) -> bool:
175
+ assert lora_id > 0, "lora_id must be greater than 0."
176
+ return self.driver_worker.remove_lora(lora_id)
177
+
178
+ def list_loras(self) -> Set[int]:
179
+ return self.driver_worker.list_loras()
180
+
181
+ def check_health(self) -> None:
182
+ # GPUExecutor will always be healthy as long as
183
+ # it's running.
184
+ return
185
+
186
+
187
+ class GPUExecutorAsync(GPUExecutor, ExecutorAsyncBase):
188
+
189
+ async def execute_model_async(
190
+ self,
191
+ seq_group_metadata_list: List[SequenceGroupMetadata],
192
+ blocks_to_swap_in: Dict[int, int],
193
+ blocks_to_swap_out: Dict[int, int],
194
+ blocks_to_copy: Dict[int, List[int]],
195
+ ) -> SamplerOutput:
196
+ output = await make_async(self.driver_worker.execute_model)(
197
+ seq_group_metadata_list=seq_group_metadata_list,
198
+ blocks_to_swap_in=blocks_to_swap_in,
199
+ blocks_to_swap_out=blocks_to_swap_out,
200
+ blocks_to_copy=blocks_to_copy)
201
+ return output
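GPUExecutorAsync above adds no new logic; it offloads the blocking execute_model call through vLLM's make_async helper so the event loop is not blocked. A small, self-contained sketch of that wrapping pattern in plain asyncio (this mirrors the general idea, not vLLM's exact implementation):
import asyncio
import functools
import time
from typing import Callable

def make_async_like(func: Callable) -> Callable:
    # Wrap a blocking callable so it can be awaited from an event loop.
    async def _wrapper(*args, **kwargs):
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, functools.partial(func, *args, **kwargs))
    return _wrapper

def blocking_step(batch_size: int) -> str:
    time.sleep(0.1)                 # stand-in for a GPU forward pass
    return f"ran batch of {batch_size}"

async def main():
    print(await make_async_like(blocking_step)(batch_size=8))

asyncio.run(main())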
autoregressive/serve/llm_engine.py ADDED
@@ -0,0 +1,671 @@
1
+ # Modified from:
2
+ # vLLM: https://github.com/vllm-project/vllm/blob/main/vllm/engine/llm_engine.py
3
+ import time
4
+ from typing import Iterable, List, Optional, Type, Union
5
+ import argparse
6
+
7
+ from transformers import GenerationConfig, PreTrainedTokenizer
8
+
9
+ import vllm
10
+ from vllm.config import (CacheConfig, DecodingConfig, DeviceConfig, LoadConfig,
11
+ LoRAConfig, ModelConfig, ParallelConfig,
12
+ SchedulerConfig, SpeculativeConfig,
13
+ VisionLanguageConfig)
14
+ from vllm.core.scheduler import Scheduler, SchedulerOutputs
15
+ from vllm.engine.arg_utils import EngineArgs
16
+ from vllm.engine.metrics import StatLogger, Stats
17
+ from vllm.engine.output_processor.interfaces import (
18
+ SequenceGroupOutputProcessor)
19
+ from vllm.engine.output_processor.stop_checker import StopChecker
20
+ from vllm.engine.output_processor.util import create_output_by_sequence_group
21
+ from vllm.engine.ray_utils import initialize_ray_cluster
22
+ from vllm.executor.executor_base import ExecutorBase
23
+ from vllm.logger import init_logger
24
+ from vllm.lora.request import LoRARequest
25
+ from vllm.outputs import RequestOutput
26
+ from vllm.sampling_params import SamplingParams
27
+ from vllm.sequence import (MultiModalData, SamplerOutput, Sequence,
28
+ SequenceGroup)
29
+ from vllm.transformers_utils.detokenizer import Detokenizer
30
+ from vllm.transformers_utils.tokenizer_group import (BaseTokenizerGroup,
31
+ get_tokenizer_group)
32
+ from vllm.usage.usage_lib import (UsageContext, is_usage_stats_enabled,
33
+ usage_message)
34
+ from vllm.utils import Counter
35
+
36
+ logger = init_logger(__name__)
37
+ _LOCAL_LOGGING_INTERVAL_SEC = 5
38
+
39
+
40
+ def _load_generation_config_dict(model_config: ModelConfig):
41
+ try:
42
+ return GenerationConfig.from_pretrained(
43
+ model_config.model,
44
+ revision=model_config.revision,
45
+ ).to_diff_dict()
46
+ except OSError:
47
+ # Not found.
48
+ return {}
49
+
50
+
51
+ class LLMEngine:
52
+ """An LLM engine that receives requests and generates texts.
53
+
54
+ This is the main class for the vLLM engine. It receives requests
55
+ from clients and generates texts from the LLM. It includes a tokenizer, a
56
+ language model (possibly distributed across multiple GPUs), and GPU memory
57
+ space allocated for intermediate states (aka KV cache). This class utilizes
58
+ iteration-level scheduling and efficient memory management to maximize the
59
+ serving throughput.
60
+
61
+ The `LLM` class wraps this class for offline batched inference and the
62
+ `AsyncLLMEngine` class wraps this class for online serving.
63
+
64
+ NOTE: The config arguments are derived from the `EngineArgs` class. For the
65
+ comprehensive list of arguments, see `EngineArgs`.
66
+
67
+ Args:
68
+ model_config: The configuration related to the LLM model.
69
+ cache_config: The configuration related to the KV cache memory
70
+ management.
71
+ parallel_config: The configuration related to distributed execution.
72
+ scheduler_config: The configuration related to the request scheduler.
73
+ device_config: The configuration related to the device.
74
+ lora_config (Optional): The configuration related to serving multi-LoRA.
75
+ vision_language_config (Optional): The configuration related to vision
76
+ language models.
77
+ speculative_config (Optional): The configuration related to speculative
78
+ decoding.
79
+ executor_class: The model executor class for managing distributed
80
+ execution.
81
+ log_stats: Whether to log statistics.
82
+ usage_context: Specified entry point, used for usage info collection
83
+ """
84
+
85
+ def __init__(
86
+ self,
87
+ args: argparse.Namespace,
88
+ model_config: ModelConfig,
89
+ cache_config: CacheConfig,
90
+ parallel_config: ParallelConfig,
91
+ scheduler_config: SchedulerConfig,
92
+ device_config: DeviceConfig,
93
+ load_config: LoadConfig,
94
+ lora_config: Optional[LoRAConfig],
95
+ vision_language_config: Optional[VisionLanguageConfig],
96
+ speculative_config: Optional[SpeculativeConfig],
97
+ decoding_config: Optional[DecodingConfig],
98
+ executor_class: Type[ExecutorBase],
99
+ log_stats: bool,
100
+ usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
101
+ ) -> None:
102
+ logger.info(
103
+ f"Initializing an LLM engine (v{vllm.__version__}) with config: "
104
+ f"model={model_config.model!r}, "
105
+ f"speculative_config={speculative_config!r}, "
106
+ f"tokenizer={model_config.tokenizer!r}, "
107
+ f"skip_tokenizer_init={model_config.skip_tokenizer_init}, "
108
+ f"tokenizer_mode={model_config.tokenizer_mode}, "
109
+ f"revision={model_config.revision}, "
110
+ f"tokenizer_revision={model_config.tokenizer_revision}, "
111
+ f"trust_remote_code={model_config.trust_remote_code}, "
112
+ f"dtype={model_config.dtype}, "
113
+ f"max_seq_len={model_config.max_model_len}, "
114
+ f"download_dir={load_config.download_dir!r}, "
115
+ f"load_format={load_config.load_format}, "
116
+ f"tensor_parallel_size={parallel_config.tensor_parallel_size}, "
117
+ f"disable_custom_all_reduce="
118
+ f"{parallel_config.disable_custom_all_reduce}, "
119
+ f"quantization={model_config.quantization}, "
120
+ f"enforce_eager={model_config.enforce_eager}, "
121
+ f"kv_cache_dtype={cache_config.cache_dtype}, "
122
+ f"quantization_param_path={model_config.quantization_param_path}, "
123
+ f"device_config={device_config.device}, "
124
+ f"decoding_config={decoding_config!r}, "
125
+ f"seed={model_config.seed})")
126
+ # TODO(woosuk): Print more configs in debug mode.
127
+
128
+ self.model_config = model_config
129
+ self.cache_config = cache_config
130
+ self.lora_config = lora_config
131
+ self.vision_language_config = vision_language_config
132
+ self.parallel_config = parallel_config
133
+ self.scheduler_config = scheduler_config
134
+ self.device_config = device_config
135
+ self.speculative_config = speculative_config
136
+ self.load_config = load_config
137
+ self.decoding_config = decoding_config or DecodingConfig()
138
+ self.log_stats = log_stats
139
+
140
+ if not self.model_config.skip_tokenizer_init:
141
+ self.tokenizer: BaseTokenizerGroup
142
+ self._init_tokenizer()
143
+ self.detokenizer = Detokenizer(self.tokenizer)
144
+ else:
145
+ self.detokenizer = None
146
+ self.tokenizer = None
147
+
148
+ self.seq_counter = Counter()
149
+ self.generation_config_fields = _load_generation_config_dict(
150
+ model_config)
151
+
152
+ self.model_executor = executor_class(
153
+ args=args,
154
+ model_config=model_config,
155
+ cache_config=cache_config,
156
+ parallel_config=parallel_config,
157
+ scheduler_config=scheduler_config,
158
+ device_config=device_config,
159
+ lora_config=lora_config,
160
+ vision_language_config=vision_language_config,
161
+ speculative_config=speculative_config,
162
+ load_config=load_config,
163
+ )
164
+
165
+ self._initialize_kv_caches()
166
+
167
+ # If usage stat is enabled, collect relevant info.
168
+ if is_usage_stats_enabled():
169
+ from vllm.model_executor.model_loader import (
170
+ get_architecture_class_name)
171
+ usage_message.report_usage(
172
+ get_architecture_class_name(model_config),
173
+ usage_context,
174
+ extra_kvs={
175
+ # Common configuration
176
+ "dtype":
177
+ str(model_config.dtype),
178
+ "tensor_parallel_size":
179
+ parallel_config.tensor_parallel_size,
180
+ "block_size":
181
+ cache_config.block_size,
182
+ "gpu_memory_utilization":
183
+ cache_config.gpu_memory_utilization,
184
+
185
+ # Quantization
186
+ "quantization":
187
+ model_config.quantization,
188
+ "kv_cache_dtype":
189
+ cache_config.cache_dtype,
190
+
191
+ # Feature flags
192
+ "enable_lora":
193
+ bool(lora_config),
194
+ "enable_prefix_caching":
195
+ cache_config.enable_prefix_caching,
196
+ "enforce_eager":
197
+ model_config.enforce_eager,
198
+ "disable_custom_all_reduce":
199
+ parallel_config.disable_custom_all_reduce,
200
+ })
201
+
202
+ if self.tokenizer:
203
+ # Ping the tokenizer to ensure liveness if it runs in a
204
+ # different process.
205
+ self.tokenizer.ping()
206
+
207
+ # Create the scheduler.
208
+ # NOTE: the cache_config here has been updated with the numbers of
209
+ # GPU and CPU blocks, which are profiled in the distributed executor.
210
+ self.scheduler = Scheduler(scheduler_config, cache_config, lora_config)
211
+
212
+ # Metric Logging.
213
+ if self.log_stats:
214
+ self.stat_logger = StatLogger(
215
+ local_interval=_LOCAL_LOGGING_INTERVAL_SEC,
216
+ labels=dict(model_name=model_config.model))
217
+ self.stat_logger.info("cache_config", self.cache_config)
218
+
219
+ # Create sequence output processor, e.g. for beam search or
220
+ # speculative decoding.
221
+ self.output_processor = (
222
+ SequenceGroupOutputProcessor.create_output_processor(
223
+ self.scheduler_config,
224
+ self.detokenizer,
225
+ self.scheduler,
226
+ self.seq_counter,
227
+ self.get_tokenizer_for_seq,
228
+ stop_checker=StopChecker(
229
+ self.scheduler_config.max_model_len,
230
+ self.get_tokenizer_for_seq,
231
+ ),
232
+ ))
233
+
234
+ def _initialize_kv_caches(self) -> None:
235
+ """Initialize the KV cache in the worker(s).
236
+
237
+ The workers will determine the number of blocks in both the GPU cache
238
+ and the swap CPU cache.
239
+ """
240
+ num_gpu_blocks, num_cpu_blocks = (
241
+ self.model_executor.determine_num_available_blocks())
242
+
243
+ if self.cache_config.num_gpu_blocks_override is not None:
244
+ num_gpu_blocks_override = self.cache_config.num_gpu_blocks_override
245
+ logger.info(f"Overriding {num_gpu_blocks=} with "
246
+ f"{num_gpu_blocks_override=}")
247
+ num_gpu_blocks = num_gpu_blocks_override
248
+
249
+ self.cache_config.num_gpu_blocks = num_gpu_blocks
250
+ self.cache_config.num_cpu_blocks = num_cpu_blocks
251
+
252
+ self.model_executor.initialize_cache(num_gpu_blocks, num_cpu_blocks)
253
+
254
+ @classmethod
255
+ def from_engine_args(
256
+ cls,
257
+ engine_args: EngineArgs,
258
+ usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
259
+ args: Optional[argparse.Namespace] = None,
260
+ ) -> "LLMEngine":
261
+ """Creates an LLM engine from the engine arguments."""
262
+ # Create the engine configs.
263
+ engine_config = engine_args.create_engine_config()
264
+
265
+ # Initialize the cluster and specify the executor class.
266
+ if engine_config.device_config.device_type == "neuron":
267
+ from vllm.executor.neuron_executor import NeuronExecutor
268
+ executor_class = NeuronExecutor
269
+ elif engine_config.device_config.device_type == "cpu":
270
+ from vllm.executor.cpu_executor import CPUExecutor
271
+ executor_class = CPUExecutor
272
+ elif engine_config.parallel_config.worker_use_ray:
273
+ initialize_ray_cluster(engine_config.parallel_config)
274
+ from vllm.executor.ray_gpu_executor import RayGPUExecutor
275
+ executor_class = RayGPUExecutor
276
+ else:
277
+ assert engine_config.parallel_config.world_size == 1, (
278
+ "Ray is required if parallel_config.world_size > 1.")
279
+ # from vllm.executor.gpu_executor import GPUExecutor
280
+ from autoregressive.serve.gpu_executor import GPUExecutor
281
+ executor_class = GPUExecutor
282
+
283
+ # Create the LLM engine.
284
+ engine = cls(
285
+ **engine_config.to_dict(),
286
+ executor_class=executor_class,
287
+ log_stats=not engine_args.disable_log_stats,
288
+ usage_context=usage_context,
289
+ args=args,
290
+ )
291
+ return engine
292
+
293
+ def __reduce__(self):
294
+ # This is to ensure that the LLMEngine is not referenced in
295
+ # the closure used to initialize Ray worker actors
296
+ raise RuntimeError("LLMEngine should not be pickled!")
297
+
298
+ def get_tokenizer(self) -> "PreTrainedTokenizer":
299
+ return self.tokenizer.get_lora_tokenizer(None)
300
+
301
+ def get_tokenizer_for_seq(self,
302
+ sequence: Sequence) -> "PreTrainedTokenizer":
303
+ return self.tokenizer.get_lora_tokenizer(sequence.lora_request)
304
+
305
+ def _init_tokenizer(self, **tokenizer_init_kwargs):
306
+ init_kwargs = dict(
307
+ tokenizer_id=self.model_config.tokenizer,
308
+ enable_lora=bool(self.lora_config),
309
+ max_num_seqs=self.scheduler_config.max_num_seqs,
310
+ max_input_length=None,
311
+ tokenizer_mode=self.model_config.tokenizer_mode,
312
+ trust_remote_code=self.model_config.trust_remote_code,
313
+ revision=self.model_config.tokenizer_revision)
314
+ init_kwargs.update(tokenizer_init_kwargs)
315
+ self.tokenizer = get_tokenizer_group(
316
+ self.parallel_config.tokenizer_pool_config, **init_kwargs)
317
+
318
+ def _verify_args(self) -> None:
319
+ self.model_config.verify_with_parallel_config(self.parallel_config)
320
+ self.cache_config.verify_with_parallel_config(self.parallel_config)
321
+ if self.lora_config:
322
+ self.lora_config.verify_with_model_config(self.model_config)
323
+ self.lora_config.verify_with_scheduler_config(
324
+ self.scheduler_config)
325
+
326
+ def encode_request(
327
+ self,
328
+ request_id: str, # pylint: disable=unused-argument
329
+ prompt: Optional[str],
330
+ prompt_token_ids: Optional[List[int]] = None,
331
+ lora_request: Optional[LoRARequest] = None,
332
+ ):
333
+ if prompt_token_ids is None:
334
+ assert prompt is not None
335
+ prompt_token_ids = self.tokenizer.encode(request_id=request_id,
336
+ prompt=prompt,
337
+ lora_request=lora_request)
338
+ return prompt_token_ids
339
+
340
+ def add_request(
341
+ self,
342
+ request_id: str,
343
+ prompt: Optional[str],
344
+ sampling_params: SamplingParams,
345
+ prompt_token_ids: Optional[List[int]] = None,
346
+ arrival_time: Optional[float] = None,
347
+ lora_request: Optional[LoRARequest] = None,
348
+ multi_modal_data: Optional[MultiModalData] = None,
349
+ ) -> None:
350
+ """Add a request to the engine's request pool.
351
+
352
+ The request is added to the request pool and will be processed by the
353
+ scheduler as `engine.step()` is called. The exact scheduling policy is
354
+ determined by the scheduler.
355
+
356
+ Args:
357
+ request_id: The unique ID of the request.
358
+ prompt: The prompt string. Can be None if prompt_token_ids is
359
+ provided.
360
+ sampling_params: The sampling parameters for text generation.
361
+ prompt_token_ids: The token IDs of the prompt. If None, we
362
+ use the tokenizer to convert the prompts to token IDs.
363
+ arrival_time: The arrival time of the request. If None, we use
364
+ the current monotonic time.
365
+ multi_modal_data: Multi modal data per request.
366
+
367
+ Details:
368
+ - Set arrival_time to the current time if it is None.
369
+ - Set prompt_token_ids to the encoded prompt if it is None.
370
+ - Create `best_of` number of :class:`~vllm.Sequence` objects.
371
+ - Create a :class:`~vllm.SequenceGroup` object
372
+ from the list of :class:`~vllm.Sequence`.
373
+ - Add the :class:`~vllm.SequenceGroup` object to the scheduler.
374
+
375
+ Example:
376
+ >>> # initialize engine
377
+ >>> engine = LLMEngine.from_engine_args(engine_args)
378
+ >>> # set request arguments
379
+ >>> example_prompt = "Who is the president of the United States?"
380
+ >>> sampling_params = SamplingParams(temperature=0.0)
381
+ >>> request_id = 0
382
+ >>>
383
+ >>> # add the request to the engine
384
+ >>> engine.add_request(
385
+ >>> str(request_id),
386
+ >>> example_prompt,
387
+ >>> SamplingParams(temperature=0.0))
388
+ >>> # continue the request processing
389
+ >>> ...
390
+ """
391
+ if lora_request is not None and not self.lora_config:
392
+ raise ValueError(f"Got lora_request {lora_request} but LoRA is "
393
+ "not enabled!")
394
+ max_logprobs = self.get_model_config().max_logprobs
395
+ if (sampling_params.logprobs
396
+ and sampling_params.logprobs > max_logprobs) or (
397
+ sampling_params.prompt_logprobs
398
+ and sampling_params.prompt_logprobs > max_logprobs):
399
+ raise ValueError(f"Cannot request more than "
400
+ f"{max_logprobs} logprobs.")
401
+ if arrival_time is None:
402
+ arrival_time = time.time()
403
+ prompt_token_ids = self.encode_request(
404
+ request_id=request_id,
405
+ prompt=prompt,
406
+ prompt_token_ids=prompt_token_ids,
407
+ lora_request=lora_request)
408
+
409
+ # Create the sequences.
410
+ block_size = self.cache_config.block_size
411
+ seq_id = next(self.seq_counter)
412
+ eos_token_id = None
413
+ if self.tokenizer:
414
+ eos_token_id = self.tokenizer.get_lora_tokenizer(
415
+ lora_request).eos_token_id
416
+ else:
417
+ logger.warning("Use None for EOS token id because tokenizer is "
418
+ "not initialized")
419
+ seq = Sequence(seq_id, prompt, prompt_token_ids, block_size,
420
+ eos_token_id, lora_request)
421
+
422
+ # Defensive copy of SamplingParams, which are used by the sampler,
423
+ # this doesn't deep-copy LogitsProcessor objects
424
+ sampling_params = sampling_params.clone()
425
+ # Add the eos token id into the sampling_params to support min_tokens
426
+ # processing
427
+ if seq.eos_token_id is not None:
428
+ sampling_params.all_stop_token_ids.add(seq.eos_token_id)
429
+ sampling_params.update_from_generation_config(
430
+ self.generation_config_fields)
431
+
432
+ # Create the sequence group.
433
+ seq_group = SequenceGroup(request_id, [seq], sampling_params,
434
+ arrival_time, lora_request, multi_modal_data)
435
+
436
+ # Add the sequence group to the scheduler.
437
+ self.scheduler.add_seq_group(seq_group)
438
+
439
+ def abort_request(self, request_id: Union[str, Iterable[str]]) -> None:
440
+ """Aborts a request(s) with the given ID.
441
+
442
+ Args:
443
+ request_id: The ID(s) of the request to abort.
444
+
445
+ Details:
446
+ - Refer to the
447
+ :meth:`~vllm.core.scheduler.Scheduler.abort_seq_group`
448
+ from class :class:`~vllm.core.scheduler.Scheduler`.
449
+
450
+ Example:
451
+ >>> # initialize engine and add a request with request_id
452
+ >>> request_id = str(0)
453
+ >>> # abort the request
454
+ >>> engine.abort_request(request_id)
455
+ """
456
+ self.scheduler.abort_seq_group(request_id)
457
+
458
+ def get_model_config(self) -> ModelConfig:
459
+ """Gets the model configuration."""
460
+ return self.model_config
461
+
462
+ def get_num_unfinished_requests(self) -> int:
463
+ """Gets the number of unfinished requests."""
464
+ return self.scheduler.get_num_unfinished_seq_groups()
465
+
466
+ def has_unfinished_requests(self) -> bool:
467
+ """Returns True if there are unfinished requests."""
468
+ return self.scheduler.has_unfinished_seqs()
469
+
470
+ def _process_model_outputs(
471
+ self, output: List[SamplerOutput],
472
+ scheduled_seq_groups: List[SequenceGroup],
473
+ ignored_seq_groups: List[SequenceGroup]) -> List[RequestOutput]:
474
+ """Apply the model output to the sequences in the scheduled seq groups.
475
+
476
+ Returns RequestOutputs that can be returned to the client.
477
+ """
478
+ now = time.time()
479
+
480
+ # Organize outputs by [sequence group][step] instead of
481
+ # [step][sequence group].
482
+ output_by_sequence_group = create_output_by_sequence_group(
483
+ sampler_outputs=output, num_seq_groups=len(scheduled_seq_groups))
484
+
485
+ # Update the scheduled sequence groups with the model outputs.
486
+ for scheduled_seq_group, outputs in zip(scheduled_seq_groups,
487
+ output_by_sequence_group):
488
+ seq_group = scheduled_seq_group.seq_group
489
+ seq_group.update_num_computed_tokens(
490
+ scheduled_seq_group.token_chunk_size)
491
+ # If uncomputed tokens > 0, it means prefill is chunked.
492
+ # We don't need to process outputs in that case.
493
+ if seq_group.get_num_uncomputed_tokens() == 0:
494
+ self.output_processor.process_outputs(seq_group, outputs)
495
+
496
+ # Free the finished sequence groups.
497
+ self.scheduler.free_finished_seq_groups()
498
+
499
+ # Create the outputs.
500
+ request_outputs: List[RequestOutput] = []
501
+ for scheduled_seq_group in scheduled_seq_groups:
502
+ seq_group = scheduled_seq_group.seq_group
503
+ seq_group.maybe_set_first_token_time(now)
504
+ request_output = RequestOutput.from_seq_group(seq_group)
505
+ request_outputs.append(request_output)
506
+ for seq_group in ignored_seq_groups:
507
+ request_output = RequestOutput.from_seq_group(seq_group)
508
+ request_outputs.append(request_output)
509
+ return request_outputs
510
+
511
+ def step(self) -> List[RequestOutput]:
512
+ """Performs one decoding iteration and returns newly generated results.
513
+
514
+ .. figure:: https://i.imgur.com/sv2HssD.png
515
+ :alt: Overview of the step function
516
+ :align: center
517
+
518
+ Overview of the step function.
519
+
520
+ Details:
521
+ - Step 1: Schedules the sequences to be executed in the next
522
+ iteration and the token blocks to be swapped in/out/copy.
523
+
524
+ - Depending on the scheduling policy,
525
+ sequences may be `preempted/reordered`.
526
+ - A Sequence Group (SG) refers to a group of sequences
527
+ that are generated from the same prompt.
528
+
529
+ - Step 2: Calls the distributed executor to execute the model.
530
+ - Step 3: Processes the model output. This mainly includes:
531
+
532
+ - Decodes the relevant outputs.
533
+ - Updates the scheduled sequence groups with model outputs
534
+ based on its `sampling parameters` (`use_beam_search` or not).
535
+ - Frees the finished sequence groups.
536
+
537
+ - Finally, it creates and returns the newly generated results.
538
+
539
+ Example:
540
+ >>> # Please see the example/ folder for more detailed examples.
541
+ >>>
542
+ >>> # initialize engine and request arguments
543
+ >>> engine = LLMEngine.from_engine_args(engine_args)
544
+ >>> example_inputs = [(0, "What is LLM?",
545
+ >>> SamplingParams(temperature=0.0))]
546
+ >>>
547
+ >>> # Start the engine with an event loop
548
+ >>> while True:
549
+ >>> if example_inputs:
550
+ >>> req_id, prompt, sampling_params = example_inputs.pop(0)
551
+ >>> engine.add_request(str(req_id), prompt, sampling_params)
552
+ >>>
553
+ >>> # continue the request processing
554
+ >>> request_outputs = engine.step()
555
+ >>> for request_output in request_outputs:
556
+ >>> if request_output.finished:
557
+ >>> # return or show the request output
558
+ >>>
559
+ >>> if not (engine.has_unfinished_requests() or example_inputs):
560
+ >>> break
561
+ """
562
+ seq_group_metadata_list, scheduler_outputs = self.scheduler.schedule()
563
+ if not scheduler_outputs.is_empty():
564
+ output = self.model_executor.execute_model(
565
+ seq_group_metadata_list=seq_group_metadata_list,
566
+ blocks_to_swap_in=scheduler_outputs.blocks_to_swap_in,
567
+ blocks_to_swap_out=scheduler_outputs.blocks_to_swap_out,
568
+ blocks_to_copy=scheduler_outputs.blocks_to_copy,
569
+ num_lookahead_slots=scheduler_outputs.num_lookahead_slots)
570
+ else:
571
+ output = []
572
+
573
+ request_outputs = self._process_model_outputs(
574
+ output, scheduler_outputs.scheduled_seq_groups,
575
+ scheduler_outputs.ignored_seq_groups)
576
+
577
+ # Log stats.
578
+ if self.log_stats:
579
+ self.stat_logger.log(self._get_stats(scheduler_outputs))
580
+
581
+ return request_outputs
582
+
583
+ def do_log_stats(self) -> None:
584
+ """Forced log when no requests active."""
585
+ if self.log_stats:
586
+ self.stat_logger.log(self._get_stats(scheduler_outputs=None))
587
+
588
+ def _get_stats(self,
589
+ scheduler_outputs: Optional[SchedulerOutputs]) -> Stats:
590
+ """Get Stats to be Logged to Prometheus."""
591
+ now = time.time()
592
+
593
+ # KV Cache Usage in %.
594
+ num_total_gpu = self.cache_config.num_gpu_blocks
595
+ num_free_gpu = self.scheduler.block_manager.get_num_free_gpu_blocks()
596
+ gpu_cache_usage = 1.0 - (num_free_gpu / num_total_gpu)
597
+
598
+ num_total_cpu = self.cache_config.num_cpu_blocks
599
+ cpu_cache_usage = 0.
600
+ if num_total_cpu > 0:
601
+ num_free_cpu = self.scheduler.block_manager.get_num_free_cpu_blocks(
602
+ )
603
+ cpu_cache_usage = 1.0 - (num_free_cpu / num_total_cpu)
604
+
605
+ # Scheduler State
606
+ num_running = len(self.scheduler.running)
607
+ num_swapped = len(self.scheduler.swapped)
608
+ num_waiting = len(self.scheduler.waiting)
609
+
610
+ # Iteration stats if we have scheduler output.
611
+ num_prompt_tokens = 0
612
+ num_generation_tokens = 0
613
+ time_to_first_tokens = []
614
+ time_per_output_tokens = []
615
+ time_e2e_requests = []
616
+ if scheduler_outputs is not None:
617
+ prompt_run = scheduler_outputs.num_prefill_groups > 0
618
+
619
+ # Number of Tokens.
620
+ if prompt_run:
621
+ num_prompt_tokens = sum(
622
+ len(scheduled_seq_group.seq_group.prompt_token_ids)
623
+ for scheduled_seq_group in
624
+ scheduler_outputs.scheduled_seq_groups)
625
+ num_generation_tokens = sum(
626
+ scheduled_seq_group.seq_group.num_seqs()
627
+ for scheduled_seq_group in
628
+ scheduler_outputs.scheduled_seq_groups)
629
+ else:
630
+ num_generation_tokens = scheduler_outputs.num_batched_tokens
631
+
632
+ # Latency Timings.
633
+ time_last_iters = []
634
+ for scheduled_seq_group in scheduler_outputs.scheduled_seq_groups:
635
+ seq_group = scheduled_seq_group.seq_group
636
+ # Time since last token.
637
+ # (n.b. updates seq_group.metrics.last_token_time)
638
+ time_last_iters.append(seq_group.get_last_latency(now))
639
+ # Time since arrival for all finished requests.
640
+ if seq_group.is_finished():
641
+ time_e2e_requests.append(now -
642
+ seq_group.metrics.arrival_time)
643
+
644
+ time_to_first_tokens = time_last_iters if prompt_run else []
645
+ time_per_output_tokens = [] if prompt_run else time_last_iters
646
+
647
+ return Stats(
648
+ now=now,
649
+ num_running=num_running,
650
+ num_swapped=num_swapped,
651
+ num_waiting=num_waiting,
652
+ gpu_cache_usage=gpu_cache_usage,
653
+ cpu_cache_usage=cpu_cache_usage,
654
+ num_prompt_tokens=num_prompt_tokens,
655
+ num_generation_tokens=num_generation_tokens,
656
+ time_to_first_tokens=time_to_first_tokens,
657
+ time_per_output_tokens=time_per_output_tokens,
658
+ time_e2e_requests=time_e2e_requests,
659
+ )
660
+
661
+ def add_lora(self, lora_request: LoRARequest) -> bool:
662
+ return self.model_executor.add_lora(lora_request)
663
+
664
+ def remove_lora(self, lora_id: int) -> bool:
665
+ return self.model_executor.remove_lora(lora_id)
666
+
667
+ def list_loras(self) -> List[int]:
668
+ return self.model_executor.list_loras()
669
+
670
+ def check_health(self) -> None:
671
+ self.model_executor.check_health()
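Putting the docstrings above together, the intended usage is a submit-then-step loop. A hedged sketch of such a driver; it assumes an engine already built via LLMEngine.from_engine_args and uses vLLM's SamplingParams:
from vllm import SamplingParams

def run_requests(engine, prompts):
    # Submit every prompt, then step the engine until all requests finish.
    for i, prompt in enumerate(prompts):
        engine.add_request(str(i), prompt, SamplingParams(temperature=1.0, top_p=1.0))

    finished = []
    while engine.has_unfinished_requests():
        for request_output in engine.step():
            if request_output.finished:
                finished.append(request_output)
    return finished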
autoregressive/serve/model_runner.py ADDED
@@ -0,0 +1,1232 @@
1
+ import contextlib
2
+ import time
3
+ from enum import IntEnum
4
+ from typing import Dict, List, NamedTuple, Optional, Set, Tuple
5
+
6
+ import numpy as np
7
+ import torch
8
+ import torch.nn as nn
9
+
10
+ from vllm.attention import (AttentionMetadata, AttentionMetadataPerStage,
11
+ get_attn_backend)
12
+ from vllm.config import (DeviceConfig, LoadConfig, LoRAConfig, ModelConfig,
13
+ ParallelConfig, SchedulerConfig, VisionLanguageConfig)
14
+ from vllm.distributed import broadcast_tensor_dict, with_pynccl_for_all_reduce
15
+ from vllm.distributed.device_communicators import (custom_all_reduce,
16
+ pynccl_utils)
17
+ from vllm.logger import init_logger
18
+ from vllm.lora.layers import LoRAMapping
19
+ from vllm.lora.request import LoRARequest
20
+ from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager
21
+ from vllm.model_executor import SamplingMetadata
22
+ from vllm.model_executor.model_loader import get_model
23
+ from vllm.sampling_params import SamplingParams, SamplingType
24
+ from vllm.sequence import (MultiModalData, SamplerOutput, SequenceData,
25
+ SequenceGroupMetadata)
26
+ from vllm.utils import (CudaMemoryProfiler, async_tensor_h2d, is_hip,
27
+ is_pin_memory_available, make_tensor_with_pad,
28
+ maybe_expand_dim)
29
+ from autoregressive.serve.gpt_model import GPT_models
30
+
31
+ logger = init_logger(__name__)
32
+
33
+ _PAD_SLOT_ID = -1
34
+ LORA_WARMUP_RANK = 8
35
+ _BATCH_SIZE_ALIGNMENT = 8
36
+ # Capture graphs for token size 1, 2, 4, 8, 16, 24, 32, 40, ..., 256.
37
+ # NOTE: _get_graph_batch_size needs to be updated if this list is changed.
38
+ _BATCH_SIZES_TO_CAPTURE = [1, 2, 4] + [
39
+ _BATCH_SIZE_ALIGNMENT * i for i in range(1, 33)
40
+ ]
41
+
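The capture list above is consumed by a rounding helper (_get_graph_batch_size, called later in _prepare_decode) that pads a decode batch up to the nearest captured size. Its body is not part of this diff; the sketch below is a hypothetical equivalent that follows the 1/2/4-then-multiples-of-8 pattern implied by _BATCH_SIZES_TO_CAPTURE:
_ALIGNMENT = 8  # mirrors _BATCH_SIZE_ALIGNMENT above

def round_up_to_captured_batch(batch_size: int) -> int:
    # Hypothetical helper: smallest captured batch size >= batch_size.
    if batch_size <= 2:
        return batch_size
    if batch_size <= 4:
        return 4
    return (batch_size + _ALIGNMENT - 1) // _ALIGNMENT * _ALIGNMENT

assert [round_up_to_captured_batch(b) for b in (1, 3, 5, 9)] == [1, 4, 8, 16]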
42
+
43
+ class PreparePromptMetadata(NamedTuple):
44
+ input_tokens: List[int]
45
+ input_positions: List[int]
46
+ attn_metadata: Optional[AttentionMetadataPerStage]
47
+ prompt_lens: List[int]
48
+ subquery_lens: List[int]
49
+ lora_index_mapping: List[int]
50
+ lora_prompt_mapping: List[int]
51
+ lora_requests: Set[LoRARequest]
52
+ multi_modal_input: Optional[torch.Tensor]
53
+ slot_mapping: List[int]
54
+
55
+ @classmethod
56
+ def empty(cls):
57
+ return PreparePromptMetadata(
58
+ input_tokens=[],
59
+ input_positions=[],
60
+ attn_metadata=None,
61
+ prompt_lens=[],
62
+ subquery_lens=[],
63
+ lora_index_mapping=[],
64
+ lora_prompt_mapping=[],
65
+ lora_requests=set(),
66
+ multi_modal_input=None,
67
+ slot_mapping=[],
68
+ )
69
+
70
+
71
+ class PrepareDecodeMetadata(NamedTuple):
72
+ input_tokens: List[int]
73
+ input_positions: List[int]
74
+ attn_metadata: Optional[AttentionMetadata]
75
+ lora_index_mapping: List[int]
76
+ lora_prompt_mapping: List[int]
77
+ lora_requests: Set[LoRARequest]
78
+ slot_mapping: List[int]
79
+
80
+ @classmethod
81
+ def empty(cls):
82
+ return PrepareDecodeMetadata(
83
+ input_tokens=[],
84
+ input_positions=[],
85
+ attn_metadata=None,
86
+ lora_index_mapping=[],
87
+ lora_prompt_mapping=[],
88
+ lora_requests=set(),
89
+ slot_mapping=[],
90
+ )
91
+
92
+
93
+ # How batches are constructed.
94
+ class BatchType(IntEnum):
95
+ # Every batch is prefill.
96
+ PREFILL = 0
97
+ # Every batch is decode.
98
+ DECODE = 1
99
+ # Batch is a mixture of prefill and decode.
100
+ MIXED = 2
101
+
102
+
103
+ class ModelRunner:
104
+
105
+ def __init__(
106
+ self,
107
+ model_config: ModelConfig,
108
+ parallel_config: ParallelConfig,
109
+ scheduler_config: SchedulerConfig,
110
+ device_config: DeviceConfig,
111
+ load_config: LoadConfig,
112
+ lora_config: Optional[LoRAConfig],
113
+ kv_cache_dtype: Optional[str] = "auto",
114
+ is_driver_worker: bool = False,
115
+ vision_language_config: Optional[VisionLanguageConfig] = None,
116
+ ):
117
+ self.model_config = model_config
118
+ self.parallel_config = parallel_config
119
+ self.scheduler_config = scheduler_config
120
+ self.lora_config = lora_config
121
+ self.load_config = load_config
122
+ self.is_driver_worker = is_driver_worker
123
+
124
+ # model_config can be None in tests/samplers/test_sampler.py.
125
+ # FIXME(woosuk): This is a hack to make the tests work. Refactor this.
126
+ self.sliding_window = (model_config.get_sliding_window()
127
+ if model_config is not None else None)
128
+ self.device_config = (device_config
129
+ if device_config is not None else DeviceConfig())
130
+ self.device = self.device_config.device
131
+
132
+ # Set after load_model.
133
+ self.lora_manager: LRUCacheWorkerLoRAManager = None
134
+
135
+ self.graph_runners: Dict[int, CUDAGraphRunner] = {}
136
+ self.graph_memory_pool: Optional[Tuple[
137
+ int, int]] = None # Set during graph capture.
138
+
139
+ self.max_context_len_to_capture = (
140
+ self.model_config.max_context_len_to_capture
141
+ if self.model_config is not None else 0)
142
+
143
+ self.pin_memory = is_pin_memory_available()
144
+ self.kv_cache_dtype = kv_cache_dtype
145
+ self.vision_language_config = vision_language_config
146
+
147
+ self.attn_backend = get_attn_backend(
148
+ self.model_config.dtype if model_config is not None else None)
149
+
150
+ # Lazy initialization
151
+ self.model: torch.nn.Module # Set after load_model
152
+ self.block_size: int # Set after initial profiling.
153
+ # When using CUDA graph, the input block tables must be padded to
154
+ # max_context_len_to_capture. However, creating the block table in
155
+ # Python can be expensive. To optimize this, we cache the block table
156
+ # in numpy and only copy the actual input content at every iteration.
157
+ # The shape of the cached block table will be
158
+ # (max batch size to capture, max context len to capture / block size).
159
+ self.graph_block_tables: torch.Tensor # Set after initial profiling.
160
+
161
+ def load_model(self, args) -> None:
162
+ with CudaMemoryProfiler() as m:
163
+ # self.model = get_model(
164
+ # model_config=self.model_config,
165
+ # device_config=self.device_config,
166
+ # load_config=self.load_config,
167
+ # lora_config=self.lora_config,
168
+ # vision_language_config=self.vision_language_config,
169
+ # parallel_config=self.parallel_config,
170
+ # scheduler_config=self.scheduler_config,
171
+ # )
172
+ precision = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.precision]
173
+ latent_size = args.image_size // args.downsample_size
174
+ gpt_model = GPT_models[args.gpt_model](
175
+ vocab_size=args.codebook_size,
176
+ block_size=latent_size ** 2,
177
+ num_classes=args.num_classes,
178
+ cls_token_num=args.cls_token_num,
179
+ model_type=args.gpt_type,
180
+ cfg_scale=args.cfg_scale,
181
+ ).to(device='cuda', dtype=precision) # TODO: make device configurable
182
+
183
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
184
+ if args.from_fsdp: # fsdp
185
+ model_weight = checkpoint
186
+ elif "model" in checkpoint: # ddp
187
+ model_weight = checkpoint["model"]
188
+ elif "state_dict" in checkpoint:
189
+ model_weight = checkpoint["state_dict"]
190
+ else:
191
+ raise Exception("please check model weight, maybe add --from-fsdp to run command")
192
+ gpt_model.custom_load_state_dict(model_weight)
193
+ gpt_model.eval()
194
+ del checkpoint
195
+ self.model = gpt_model
196
+
197
+ self.model_memory_usage = m.consumed_memory
198
+ logger.info(f"Loading model weights took "
199
+ f"{self.model_memory_usage / float(2**30):.4f} GB")
200
+
201
+ if self.lora_config:
202
+ assert hasattr(self.model, "supported_lora_modules"
203
+ ) and self.model.supported_lora_modules, (
204
+ "Model does not support LoRA")
205
+ assert hasattr(
206
+ self.model,
207
+ "embedding_modules"), "Model does not have embedding_modules"
208
+ assert hasattr(self.model, "embedding_padding_modules"
209
+ ), "Model does not have embedding_padding_modules"
210
+ self.lora_manager = LRUCacheWorkerLoRAManager(
211
+ self.scheduler_config.max_num_seqs,
212
+ self.scheduler_config.max_num_batched_tokens, self.vocab_size,
213
+ self.lora_config, self.device, self.model.embedding_modules,
214
+ self.model.embedding_padding_modules)
215
+ self.model = self.lora_manager.create_lora_manager(self.model)
216
+
217
+ if self.kv_cache_dtype == "fp8" and is_hip():
218
+ # Currently scaled KV cache is only enabled on ROCm
219
+ if self.model_config.quantization_param_path is not None:
220
+ if callable(getattr(self.model, "load_kv_cache_scales", None)):
221
+ self.model.load_kv_cache_scales(
222
+ self.model_config.quantization_param_path)
223
+ else:
224
+ raise RuntimeError("Using FP8 KV cache and scaling "
225
+ "factors provided but model "
226
+ f"{self.model.__class__} does not "
227
+ "support loading scaling factors.")
228
+ else:
229
+ logger.warn("Using FP8 KV cache but no scaling factors "
230
+ "provided. Defaulting to scaling factors of 1.0. "
231
+ "This may lead to less accurate results!")
232
+ elif self.model_config.quantization_param_path is not None:
233
+ logger.warn("KV cache scaling factors provided, "
234
+ "but the KV cache data type is not FP8. "
235
+ "KV cache scaling factors will not be used.")
236
+
237
+ def set_block_size(self, block_size: int) -> None:
238
+ self.block_size = block_size
239
+
240
+ self.graph_block_tables = np.zeros(
241
+ (max(_BATCH_SIZES_TO_CAPTURE), self.get_max_block_per_batch()),
242
+ dtype=np.int32)
243
+
244
+ def get_max_block_per_batch(self) -> int:
245
+ block_size = self.block_size
246
+ return (self.max_context_len_to_capture + block_size - 1) // block_size
247
+
248
+ def _prepare_prompt(
249
+ self,
250
+ seq_group_metadata_list: List[SequenceGroupMetadata],
251
+ ) -> PreparePromptMetadata:
252
+ input_tokens: List[int] = []
253
+ input_positions: List[int] = []
254
+ slot_mapping: List[int] = []
255
+ lora_index_mapping: List[int] = []
256
+ lora_prompt_mapping: List[int] = []
257
+ lora_requests: Set[LoRARequest] = set()
258
+
259
+ prompt_lens: List[int] = []
260
+ context_lens: List[int] = []
261
+ subquery_lens: List[int] = []
262
+ prefix_block_tables: List[List[int]] = []
263
+ multi_modal_input_list: List[torch.Tensor] = []
264
+
265
+ if len(seq_group_metadata_list) == 0:
266
+ return PreparePromptMetadata.empty()
267
+
268
+ for seq_group_metadata in seq_group_metadata_list:
269
+ assert seq_group_metadata.is_prompt
270
+ seq_ids = list(seq_group_metadata.seq_data.keys())
271
+ assert len(seq_ids) == 1
272
+ seq_id = seq_ids[0]
273
+
274
+ computed_block_nums = seq_group_metadata.computed_block_nums
275
+ if (self.scheduler_config is not None
276
+ and self.scheduler_config.chunked_prefill_enabled
277
+ and not (computed_block_nums is None
278
+ or computed_block_nums == [])):
279
+ raise RuntimeError(
280
+ "chunked prefill cannot be used with prefix caching "
281
+ "now.")
282
+
283
+ token_chunk_size = seq_group_metadata.token_chunk_size
284
+ seq_data = seq_group_metadata.seq_data[seq_id]
285
+ computed_len = seq_data.get_num_computed_tokens()
286
+ # We should use get_len here because in case of preemption
287
+ # it contains output tokens.
288
+ prefill_end = min(seq_data.get_len(),
289
+ computed_len + token_chunk_size)
290
+ prompt_tokens = seq_data.get_token_ids()[computed_len:prefill_end]
291
+ prompt_len = prefill_end
292
+ prompt_lens.append(prompt_len)
293
+
294
+ # NOTE: This only works for oooooooxxx style attention.
295
+ if computed_block_nums is not None and len(
296
+ computed_block_nums) > 0 and self.sliding_window is None:
297
+ # Prefix is not supported with sliding_window
298
+ computed_len = len(computed_block_nums) * self.block_size
299
+ prompt_tokens = prompt_tokens[computed_len:]
300
+ prefix_block_tables.append(computed_block_nums)
301
+ elif self.scheduler_config.chunked_prefill_enabled:
302
+ if seq_group_metadata.block_tables is not None:
303
+ # Prefill has chunked before.
304
+ block_table = seq_group_metadata.block_tables[seq_id]
305
+ prefix_block_tables.append(block_table)
306
+ else:
307
+ # The first prefill.
308
+ prefix_block_tables.append([])
309
+ else:
310
+ prefix_block_tables.append([])
311
+ # Right now, prefill start is always 0. However, this
312
+ # assumption can be changed once chunked prefill is introduced.
313
+ assert computed_len == 0
314
+
315
+ # actual prompt lens
316
+ context_lens.append(computed_len)
317
+ subquery_lens.append(prompt_len - computed_len)
318
+
319
+ input_tokens.extend(prompt_tokens)
320
+ # NOTE(woosuk): Here we assume that the first token in the prompt
321
+ # is always the first token in the sequence.
322
+ input_positions.extend(list(range(computed_len, prefill_end)))
323
+ lora_id = seq_group_metadata.lora_int_id
324
+
325
+ if lora_id > 0:
326
+ lora_requests.add(seq_group_metadata.lora_request)
327
+
328
+ lora_index_mapping += [lora_id] * (prompt_len - computed_len)
329
+ lora_prompt_mapping.extend(
330
+ [lora_id] *
331
+ (prompt_len - computed_len
332
+ if seq_group_metadata.sampling_params.prompt_logprobs else 1))
333
+
334
+ if seq_group_metadata.multi_modal_data:
335
+ multi_modal_input_list.append(
336
+ seq_group_metadata.multi_modal_data.data)
337
+
338
+ if seq_group_metadata.block_tables is None:
339
+ # During memory profiling, the block tables are not initialized
340
+ # yet. In this case, we just use a dummy slot mapping.
341
+ slot_mapping.extend([_PAD_SLOT_ID] * prompt_len)
342
+ continue
343
+
344
+ # Compute the slot mapping.
345
+ block_table = seq_group_metadata.block_tables[seq_id]
346
+ # Mask the [0, start_idx) tokens of the prompt with _PAD_SLOT_ID,
347
+ # where start_idx is max(0, prompt_len - sliding_window).
348
+ # For example, if the prompt len is 10, sliding window is 8, and
349
+ # block size is 4, the first two tokens are masked and the slot
350
+ # mapping will be [-1, -1, 2, 3, 4, 5, 6, 7, 0, 1].
351
+ start_idx = 0
352
+ if self.sliding_window is not None:
353
+ assert computed_len == 0, (
354
+ "Prefix caching is currently not supported with "
355
+ "sliding window attention")
356
+ start_idx = max(0, prompt_len - self.sliding_window)
357
+
358
+ for i in range(computed_len, prefill_end):
359
+ if i < start_idx:
360
+ slot_mapping.append(_PAD_SLOT_ID)
361
+ continue
362
+
363
+ block_number = block_table[i // self.block_size]
364
+ block_offset = i % self.block_size
365
+ slot = block_number * self.block_size + block_offset
366
+ slot_mapping.append(slot)
367
+
368
+ max_subquery_len = max(subquery_lens)
369
+ max_prompt_len = max(prompt_lens)
370
+ assert max_subquery_len > 0
371
+
372
+ context_lens_tensor = torch.tensor(context_lens,
373
+ dtype=torch.int,
374
+ device=self.device)
375
+
376
+ if multi_modal_input_list:
377
+ assert self.vision_language_config, (
378
+ "Multi-modal inputs are only supported by "
379
+ "vision language models.")
380
+ multi_modal_input = torch.cat(multi_modal_input_list,
381
+ dim=0).to(self.device)
382
+ else:
383
+ multi_modal_input = None
384
+
385
+ # Prepare prefix block tables
386
+ max_prompt_block_table_len = max(len(t) for t in prefix_block_tables)
387
+ block_tables = make_tensor_with_pad(
388
+ prefix_block_tables,
389
+ max_len=max_prompt_block_table_len,
390
+ pad=0,
391
+ dtype=torch.int,
392
+ device=self.device,
393
+ )
394
+
395
+ # Query length can be shorter than key (i.e., prompt) when prefill
396
+ # is chunked or prefix cached.
397
+ subquery_lens_tensor = torch.tensor(subquery_lens,
398
+ dtype=torch.long,
399
+ device=self.device)
400
+ subquery_start_loc = torch.zeros(subquery_lens_tensor.shape[0] + 1,
401
+ dtype=torch.int32,
402
+ device=self.device)
403
+
404
+ prompt_lens_tensor = torch.tensor(prompt_lens,
405
+ dtype=torch.long,
406
+ device=self.device)
407
+ seq_start_loc = torch.zeros(prompt_lens_tensor.shape[0] + 1,
408
+ dtype=torch.int32,
409
+ device=self.device)
410
+
411
+ torch.cumsum(subquery_lens_tensor,
412
+ dim=0,
413
+ dtype=subquery_start_loc.dtype,
414
+ out=subquery_start_loc[1:])
415
+
416
+ torch.cumsum(prompt_lens_tensor,
417
+ dim=0,
418
+ dtype=seq_start_loc.dtype,
419
+ out=seq_start_loc[1:])
420
+
421
+ attn_metadata = self.attn_backend.make_metadata(
422
+ is_prompt=True,
423
+ prompt_lens=prompt_lens,
424
+ prompt_lens_tensor=prompt_lens_tensor,
425
+ max_subquery_len=max_subquery_len,
426
+ max_context_len=None,
427
+ max_prompt_len=max_prompt_len,
428
+ subquery_start_loc=subquery_start_loc,
429
+ seq_start_loc=seq_start_loc,
430
+ context_lens=context_lens_tensor,
431
+ block_tables=block_tables,
432
+ use_cuda_graph=False,
433
+ )
434
+
435
+ return PreparePromptMetadata(
436
+ input_tokens=input_tokens,
437
+ input_positions=input_positions,
438
+ attn_metadata=attn_metadata,
439
+ prompt_lens=prompt_lens,
440
+ subquery_lens=subquery_lens,
441
+ lora_index_mapping=lora_index_mapping,
442
+ lora_prompt_mapping=lora_prompt_mapping,
443
+ lora_requests=lora_requests,
444
+ multi_modal_input=multi_modal_input,
445
+ slot_mapping=slot_mapping,
446
+ )
447
+
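The slot-mapping comment inside _prepare_prompt compresses a lot into one example. Below is a minimal standalone sketch of that rule (not vLLM's actual API; the function name compute_slot_mapping and the toy block table [0, 1, 0] are made up for illustration): each prompt position i maps to physical slot block_table[i // block_size] * block_size + i % block_size, and positions that fall outside the sliding window get the pad slot.

_PAD_SLOT_ID = -1

def compute_slot_mapping(block_table, prompt_len, block_size, sliding_window=None):
    # Positions before start_idx fall outside the sliding window and are padded.
    start_idx = max(0, prompt_len - sliding_window) if sliding_window is not None else 0
    slots = []
    for i in range(prompt_len):
        if i < start_idx:
            slots.append(_PAD_SLOT_ID)
            continue
        block_number = block_table[i // block_size]
        block_offset = i % block_size
        slots.append(block_number * block_size + block_offset)
    return slots

# Reproduces the worked example in the comment above: prompt_len=10, sliding_window=8,
# block_size=4 with a hypothetical block table [0, 1, 0] yields [-1, -1, 2, 3, 4, 5, 6, 7, 0, 1].
print(compute_slot_mapping([0, 1, 0], prompt_len=10, block_size=4, sliding_window=8))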
448
+ def _prepare_decode(
449
+ self,
450
+ seq_group_metadata_list: List[SequenceGroupMetadata],
451
+ ) -> PrepareDecodeMetadata:
452
+ input_tokens: List[int] = []
453
+ input_positions: List[int] = []
454
+ slot_mapping: List[int] = []
455
+ context_lens: List[int] = []
456
+ block_tables: List[List[int]] = []
457
+ lora_index_mapping: List[int] = []
458
+ lora_prompt_mapping: List[int] = []
459
+ lora_requests: Set[LoRARequest] = set()
460
+
461
+ if len(seq_group_metadata_list) == 0:
462
+ return PrepareDecodeMetadata.empty()
463
+
464
+ for seq_group_metadata in seq_group_metadata_list:
465
+ assert not seq_group_metadata.is_prompt
466
+ assert seq_group_metadata.token_chunk_size == 1
467
+
468
+ seq_ids = list(seq_group_metadata.seq_data.keys())
469
+ lora_id = seq_group_metadata.lora_int_id
470
+
471
+ if lora_id > 0:
472
+ lora_requests.add(seq_group_metadata.lora_request)
473
+
474
+ for seq_id in seq_ids:
475
+ seq_data = seq_group_metadata.seq_data[seq_id]
476
+ generation_token = seq_data.get_last_token_id()
477
+ input_tokens.append(generation_token)
478
+
479
+ seq_len = seq_data.get_len()
480
+ position = seq_len - 1
481
+ input_positions.append(position)
482
+
483
+ context_len = seq_len if self.sliding_window is None else min(
484
+ seq_len, self.sliding_window)
485
+ context_lens.append(context_len)
486
+
487
+ block_table = seq_group_metadata.block_tables[seq_id]
488
+ block_number = block_table[position // self.block_size]
489
+ block_offset = position % self.block_size
490
+ slot = block_number * self.block_size + block_offset
491
+ slot_mapping.append(slot)
492
+ lora_index_mapping.append(lora_id)
493
+ lora_prompt_mapping.append(lora_id)
494
+
495
+ if self.sliding_window is not None:
496
+ sliding_window_blocks = (self.sliding_window //
497
+ self.block_size)
498
+ block_table = block_table[-sliding_window_blocks:]
499
+ block_tables.append(block_table)
500
+
501
+ # vLLM uses cuda graph only for decoding requests.
502
+ # See `capture_model` API for more details.
503
+ # For decoding requests, batch_size == input_tokens.
504
+ batch_size = len(input_tokens)
505
+ max_context_len = max(context_lens)
506
+ use_captured_graph = (
507
+ not self.model_config.enforce_eager
508
+ and batch_size <= _BATCH_SIZES_TO_CAPTURE[-1]
509
+ and max_context_len <= self.max_context_len_to_capture)
510
+ if use_captured_graph:
511
+ graph_batch_size = _get_graph_batch_size(batch_size)
512
+ assert graph_batch_size >= batch_size
513
+ for _ in range(graph_batch_size - batch_size):
514
+ input_tokens.append(0)
515
+ input_positions.append(0)
516
+ slot_mapping.append(_PAD_SLOT_ID)
517
+ context_lens.append(1)
518
+ block_tables.append([])
519
+ lora_index_mapping.append(0)
520
+ batch_size = graph_batch_size
521
+
522
+ context_lens_tensor = torch.tensor(context_lens,
523
+ dtype=torch.int,
524
+ device=self.device)
525
+
526
+ if use_captured_graph:
527
+ # When using cuda-graph all these tensors should be
528
+ # padded.
529
+ assert context_lens_tensor.shape[0] == len(input_tokens)
530
+ assert context_lens_tensor.shape[0] == len(input_positions)
531
+ assert context_lens_tensor.shape[0] == len(slot_mapping)
532
+
533
+ # The shape of graph_block_tables is
534
+ # [max batch size, max context len // block size].
535
+ input_block_tables = self.graph_block_tables[:batch_size]
536
+ for i, block_table in enumerate(block_tables):
537
+ if block_table:
538
+ input_block_tables[i, :len(block_table)] = block_table
539
+ block_tables = torch.tensor(input_block_tables, device=self.device)
540
+ else:
541
+ max_block_table_len = max(
542
+ len(block_table) for block_table in block_tables)
543
+ block_tables = make_tensor_with_pad(
544
+ block_tables,
545
+ max_len=max_block_table_len,
546
+ pad=0,
547
+ dtype=torch.int,
548
+ device=self.device,
549
+ )
550
+
551
+ attn_metadata = self.attn_backend.make_metadata(
552
+ is_prompt=False,
553
+ prompt_lens=None,
554
+ prompt_lens_tensor=None,
555
+ max_subquery_len=None,
556
+ max_context_len=max_context_len,
557
+ max_prompt_len=None,
558
+ subquery_start_loc=None,
559
+ seq_start_loc=None,
560
+ context_lens=context_lens_tensor,
561
+ block_tables=block_tables,
562
+ use_cuda_graph=use_captured_graph,
563
+ )
564
+ return PrepareDecodeMetadata(
565
+ input_tokens=input_tokens,
566
+ input_positions=input_positions,
567
+ attn_metadata=attn_metadata,
568
+ lora_index_mapping=lora_index_mapping,
569
+ lora_prompt_mapping=lora_prompt_mapping,
570
+ lora_requests=lora_requests,
571
+ slot_mapping=slot_mapping,
572
+ )
573
+
574
+ def _prepare_sample(
575
+ self,
576
+ seq_group_metadata_list: List[SequenceGroupMetadata],
577
+ prompt_lens: List[int],
578
+ subquery_lens: Optional[List[int]],
579
+ ) -> SamplingMetadata:
580
+ seq_groups: List[Tuple[List[int], SamplingParams]] = []
581
+ selected_token_indices: List[int] = []
582
+ generators: List[torch.Generator] = []
583
+ selected_token_start_idx = 0
584
+ categorized_sample_indices: Dict[SamplingType,
585
+ List[Tuple[int, int]]] = {
586
+ t: []
587
+ for t in SamplingType
588
+ }
589
+ categorized_sample_indices_start_idx = 0
590
+ categorized_sampled_token_indices_start_idx = 0
591
+
592
+ for i, seq_group_metadata in enumerate(seq_group_metadata_list):
593
+ seq_ids = list(seq_group_metadata.seq_data.keys())
594
+ sampling_params = seq_group_metadata.sampling_params
595
+ seq_groups.append((seq_ids, sampling_params))
596
+
597
+ if seq_group_metadata.is_prompt:
598
+ assert len(seq_ids) == 1
599
+ assert subquery_lens is not None
600
+ subquery_len = subquery_lens[i]
601
+ if sampling_params.prompt_logprobs is not None:
602
+ # NOTE: prompt token positions do not need to be sampled, so skip them
603
+ categorized_sample_indices_start_idx += subquery_len - 1
604
+
605
+ categorized_sample_indices[
606
+ sampling_params.sampling_type].append(
607
+ (categorized_sample_indices_start_idx,
608
+ categorized_sampled_token_indices_start_idx))
609
+ categorized_sample_indices_start_idx += 1
610
+ categorized_sampled_token_indices_start_idx += 1
611
+
612
+ if sampling_params.prompt_logprobs is not None:
613
+ selected_token_indices.extend(
614
+ range(selected_token_start_idx,
615
+ selected_token_start_idx + subquery_len - 1))
616
+ selected_token_indices.append(selected_token_start_idx +
617
+ subquery_len - 1)
618
+ selected_token_start_idx += subquery_len
619
+
620
+ if sampling_params.seed is not None:
621
+ seq_group_metadata.state.generator = torch.Generator(
622
+ device=self.device).manual_seed(sampling_params.seed)
623
+ else:
624
+ num_seqs = len(seq_ids)
625
+ selected_token_indices.extend(
626
+ range(selected_token_start_idx,
627
+ selected_token_start_idx + num_seqs))
628
+ selected_token_start_idx += num_seqs
629
+
630
+ categorized_sample_indices[
631
+ sampling_params.sampling_type].extend(
632
+ list(
633
+ zip(
634
+ range(
635
+ categorized_sample_indices_start_idx,
636
+ categorized_sample_indices_start_idx +
637
+ num_seqs),
638
+ range(
639
+ categorized_sampled_token_indices_start_idx,
640
+ categorized_sampled_token_indices_start_idx
641
+ + num_seqs))))
642
+ categorized_sample_indices_start_idx += num_seqs
643
+ categorized_sampled_token_indices_start_idx += num_seqs
644
+
645
+ if sampling_params.seed is not None:
646
+ generators.append(seq_group_metadata.state.generator)
647
+
648
+ selected_token_indices = async_tensor_h2d(selected_token_indices,
649
+ dtype=torch.long,
650
+ target_device=self.device,
651
+ pin_memory=self.pin_memory)
652
+
653
+ categorized_sample_indices = {
654
+ t: maybe_expand_dim(
655
+ async_tensor_h2d(seq_ids,
656
+ dtype=torch.int,
657
+ target_device=self.device,
658
+ pin_memory=self.pin_memory), 2, 2)
659
+ for t, seq_ids in categorized_sample_indices.items()
660
+ }
661
+
662
+ seq_data: Dict[int, SequenceData] = {}
663
+ for seq_group_metadata in seq_group_metadata_list:
664
+ seq_data.update(seq_group_metadata.seq_data)
665
+
666
+ sampling_metadata = SamplingMetadata(
667
+ seq_groups=seq_groups,
668
+ seq_data=seq_data,
669
+ prompt_lens=prompt_lens,
670
+ selected_token_indices=selected_token_indices,
671
+ categorized_sample_indices=categorized_sample_indices,
672
+ generators=generators,
673
+ )
674
+ return sampling_metadata
675
+
676
+ def prepare_input_tensors(
677
+ self,
678
+ seq_group_metadata_list: List[SequenceGroupMetadata],
679
+ ) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata, SamplingMetadata,
680
+ Set[LoRARequest], LoRAMapping, torch.Tensor]:
681
+ if self.is_driver_worker:
682
+ prefill_reqs = []
683
+ decode_reqs = []
684
+ for seq_group_meta in seq_group_metadata_list:
685
+ if seq_group_meta.is_prompt:
686
+ prefill_reqs.append(seq_group_meta)
687
+ else:
688
+ decode_reqs.append(seq_group_meta)
689
+
690
+ # Prepare input tensors.
691
+ (
692
+ input_tokens,
693
+ input_positions,
694
+ prefill_attn_metadata,
695
+ prompt_lens,
696
+ subquery_lens,
697
+ lora_index_mapping,
698
+ lora_prompt_mapping,
699
+ lora_requests,
700
+ multi_modal_input,
701
+ slot_mapping,
702
+ ) = self._prepare_prompt(prefill_reqs)
703
+ (
704
+ decode_input_tokens,
705
+ decode_input_positions,
706
+ decode_attn_metadata,
707
+ decode_lora_index_mapping,
708
+ decode_lora_prompt_mapping,
709
+ decode_lora_requests,
710
+ decode_slot_mapping,
711
+ ) = self._prepare_decode(decode_reqs)
712
+ sampling_metadata = self._prepare_sample(seq_group_metadata_list,
713
+ prompt_lens,
714
+ subquery_lens)
715
+
716
+ if not self.scheduler_config.chunked_prefill_enabled:
717
+ assert (len(prefill_reqs) and len(decode_reqs)) == 0
718
+
719
+ num_prefills = len(prompt_lens)
720
+ num_prefill_tokens = len(input_tokens)
721
+ num_decode_tokens = len(decode_input_tokens)
722
+
723
+ # Coalesce tensors. Note that attn_metadata is currently not
724
+ # coalesced for simplicity.
725
+ input_tokens.extend(decode_input_tokens)
726
+ input_positions.extend(decode_input_positions)
727
+ slot_mapping.extend(decode_slot_mapping)
728
+ lora_index_mapping.extend(decode_lora_index_mapping)
729
+ lora_prompt_mapping.extend(decode_lora_prompt_mapping)
730
+ lora_requests.update(decode_lora_requests)
731
+
732
+ input_tokens = torch.tensor(input_tokens,
733
+ dtype=torch.long,
734
+ device=self.device)
735
+ input_positions = torch.tensor(input_positions,
736
+ dtype=torch.long,
737
+ device=self.device)
738
+ slot_mapping = torch.tensor(slot_mapping,
739
+ dtype=torch.long,
740
+ device=self.device)
741
+
742
+ if self.lora_config:
743
+ lora_mapping = LoRAMapping(
744
+ lora_index_mapping,
745
+ lora_prompt_mapping,
746
+ )
747
+ else:
748
+ lora_mapping = None
749
+
750
+ # Broadcast the metadata.
751
+ # If batch contains both prefill and decode, it sends 2 broadcasts.
752
+ # If it only contains 1 type, it triggers a single broadcast.
753
+ if (prefill_attn_metadata is not None
754
+ and decode_attn_metadata is not None):
755
+ batch_type = BatchType.MIXED
756
+ elif prefill_attn_metadata is not None:
757
+ batch_type = BatchType.PREFILL
758
+ else:
759
+ batch_type = BatchType.DECODE
760
+
761
+ metadata_dict = {
762
+ "input_tokens": input_tokens,
763
+ "input_positions": input_positions,
764
+ "selected_token_indices":
765
+ sampling_metadata.selected_token_indices,
766
+ "lora_requests": lora_requests,
767
+ "lora_mapping": lora_mapping,
768
+ "multi_modal_input": multi_modal_input,
769
+ "num_prefill_tokens": num_prefill_tokens,
770
+ "num_decode_tokens": num_decode_tokens,
771
+ "slot_mapping": slot_mapping,
772
+ "num_prefills": num_prefills,
773
+ "batch_type": batch_type,
774
+ }
775
+ if prefill_attn_metadata is not None:
776
+ metadata_dict.update(prefill_attn_metadata.asdict_zerocopy())
777
+ else:
778
+ assert decode_attn_metadata is not None
779
+ metadata_dict.update(decode_attn_metadata.asdict_zerocopy())
780
+ broadcast_tensor_dict(metadata_dict, src=0)
781
+
782
+ # Broadcast decode attn metadata for mixed batch type.
783
+ # The additional broadcast costs 300us overhead on 4 A10 GPUs.
784
+ # We can potentially reduce the overhead by coalescing tensors.
785
+ if batch_type == BatchType.MIXED:
786
+ assert decode_attn_metadata is not None
787
+ metadata_dict = decode_attn_metadata.asdict_zerocopy()
788
+ broadcast_tensor_dict(metadata_dict, src=0)
789
+ else:
790
+ metadata_dict = broadcast_tensor_dict(src=0)
791
+ input_tokens = metadata_dict.pop("input_tokens")
792
+ input_positions = metadata_dict.pop("input_positions")
793
+ slot_mapping = metadata_dict.pop("slot_mapping")
794
+ num_prefills = metadata_dict.pop("num_prefills")
795
+ selected_token_indices = metadata_dict.pop(
796
+ "selected_token_indices")
797
+ lora_mapping = metadata_dict.pop("lora_mapping")
798
+ lora_requests = metadata_dict.pop("lora_requests")
799
+ multi_modal_input = metadata_dict.pop("multi_modal_input")
800
+ num_prefill_tokens = metadata_dict.pop("num_prefill_tokens")
801
+ num_decode_tokens = metadata_dict.pop("num_decode_tokens")
802
+ batch_type = metadata_dict.pop("batch_type")
803
+
804
+ # Create an attention metadata.
805
+ prefill_attn_metadata = None
806
+ decode_attn_metadata = None
807
+ if batch_type == BatchType.PREFILL or batch_type == BatchType.MIXED:
808
+ prefill_attn_metadata = self.attn_backend.make_metadata(
809
+ **metadata_dict)
810
+ else:
811
+ decode_attn_metadata = self.attn_backend.make_metadata(
812
+ **metadata_dict)
813
+ sampling_metadata = SamplingMetadata(
814
+ seq_groups=None,
815
+ seq_data=None,
816
+ prompt_lens=None,
817
+ selected_token_indices=selected_token_indices,
818
+ categorized_sample_indices=None,
819
+ generators=None,
820
+ perform_sampling=False,
821
+ )
822
+
823
+ # if it is a mixed batch, decode attn_metadata is broadcasted
824
+ # separately.
825
+ if batch_type == BatchType.MIXED:
826
+ metadata_dict = broadcast_tensor_dict(src=0)
827
+ decode_attn_metadata = self.attn_backend.make_metadata(
828
+ **metadata_dict)
829
+
830
+ attn_metadata = AttentionMetadata(
831
+ num_prefills=num_prefills,
832
+ slot_mapping=slot_mapping,
833
+ num_prefill_tokens=num_prefill_tokens,
834
+ num_decode_tokens=num_decode_tokens,
835
+ prefill_metadata=prefill_attn_metadata,
836
+ decode_metadata=decode_attn_metadata,
837
+ kv_cache_dtype=self.kv_cache_dtype,
838
+ )
839
+
840
+ return (input_tokens, input_positions, attn_metadata,
841
+ sampling_metadata, lora_requests, lora_mapping,
842
+ multi_modal_input)
843
+
844
+ @torch.inference_mode()
845
+ def execute_model(
846
+ self,
847
+ seq_group_metadata_list: List[SequenceGroupMetadata],
848
+ kv_caches: List[torch.Tensor],
849
+ ) -> Optional[SamplerOutput]:
850
+ (input_tokens, input_positions, attn_metadata, sampling_metadata,
851
+ lora_requests, lora_mapping, multi_modal_input
852
+ ) = self.prepare_input_tensors(seq_group_metadata_list)
853
+ if self.lora_config:
854
+ self.set_active_loras(lora_requests, lora_mapping)
855
+
856
+ # Currently cuda graph is only supported by the decode phase.
857
+ prefill_meta = attn_metadata.prefill_metadata
858
+ decode_meta = attn_metadata.decode_metadata
859
+ if prefill_meta is None and decode_meta.use_cuda_graph:
860
+ graph_batch_size = input_tokens.shape[0]
861
+ model_executable = self.graph_runners[graph_batch_size]
862
+ else:
863
+ model_executable = self.model
864
+ execute_model_kwargs = {
865
+ "input_ids": input_tokens,
866
+ "positions": input_positions,
867
+ "kv_caches": kv_caches,
868
+ "attn_metadata": attn_metadata,
869
+ }
870
+ if self.vision_language_config:
871
+ execute_model_kwargs.update({"image_input": multi_modal_input})
872
+ hidden_states = model_executable(**execute_model_kwargs)
873
+
874
+ # Compute the logits.
875
+ logits = self.model.compute_logits(hidden_states, sampling_metadata)
876
+
877
+ # Only perform sampling in the driver worker.
878
+ if not sampling_metadata.perform_sampling:
879
+ return None
880
+
881
+ # Sample the next token.
882
+ output = self.model.sample(
883
+ logits=logits,
884
+ sampling_metadata=sampling_metadata,
885
+ )
886
+ return output
887
+
888
+ @torch.inference_mode()
889
+ def profile_run(self) -> None:
890
+ # Enable top-k sampling to reflect the accurate memory usage.
891
+ sampling_params = SamplingParams(top_p=0.99, top_k=self.vocab_size - 1)
892
+ max_num_batched_tokens = self.scheduler_config.max_num_batched_tokens
893
+ max_num_seqs = self.scheduler_config.max_num_seqs
894
+
895
+ # This represents the maximum number of different requests
896
+ # that will have unique loras, and therefore the max amount of memory
897
+ # consumption. Create dummy lora request copies from the lora request
898
+ # passed in, which contains a lora from the lora warmup path.
899
+ dummy_lora_requests = []
900
+ dummy_lora_requests_per_seq = []
901
+ if self.lora_config:
902
+ for idx in range(self.lora_config.max_loras):
903
+ lora_id = idx + 1
904
+ dummy_lora_request = LoRARequest(
905
+ lora_name=f"warmup_{lora_id}",
906
+ lora_int_id=lora_id,
907
+ lora_local_path="/not/a/real/path",
908
+ )
909
+ self.lora_manager.add_dummy_lora(dummy_lora_request,
910
+ rank=LORA_WARMUP_RANK)
911
+ dummy_lora_requests.append(dummy_lora_request)
912
+ dummy_lora_requests_per_seq = [
913
+ dummy_lora_requests[idx % len(dummy_lora_requests)]
914
+ for idx in range(max_num_seqs)
915
+ ]
916
+
917
+ # Profile memory usage with max_num_sequences sequences and the total
918
+ # number of tokens equal to max_num_batched_tokens.
919
+ seqs: List[SequenceGroupMetadata] = []
920
+ # Additional GPU memory may be needed for vision encoding, which needs
921
+ # to be accounted for when calculating the GPU blocks for
922
+ # the vLLM block manager.
923
+ # To exercise the worst scenario for GPU memory consumption,
924
+ # the number of seqs (batch_size) is chosen to maximize the number
925
+ # of images processed.
926
+ if self.vision_language_config:
927
+ max_num_seqs = min(
928
+ max_num_seqs,
929
+ int(max_num_batched_tokens /
930
+ self.vision_language_config.image_feature_size))
931
+ for group_id in range(max_num_seqs):
932
+ seq_len = (max_num_batched_tokens // max_num_seqs +
933
+ (group_id < max_num_batched_tokens % max_num_seqs))
934
+ seq_data, fake_multi_modal_input = _prepare_fake_inputs(
935
+ seq_len, self.vision_language_config)
936
+ seq = SequenceGroupMetadata(
937
+ request_id=str(group_id),
938
+ is_prompt=True,
939
+ seq_data={group_id: seq_data},
940
+ sampling_params=sampling_params,
941
+ block_tables=None,
942
+ lora_request=dummy_lora_requests_per_seq[group_id]
943
+ if dummy_lora_requests_per_seq else None,
944
+ multi_modal_data=fake_multi_modal_input,
945
+ )
946
+ seqs.append(seq)
947
+
948
+ # Run the model with the dummy inputs.
949
+ num_layers = self.model_config.get_num_layers(self.parallel_config)
950
+ kv_caches = [None] * num_layers
951
+ self.execute_model(seqs, kv_caches)
952
+ torch.cuda.synchronize()
953
+ return
954
+
955
+ def remove_all_loras(self) -> bool:
956
+ if not self.lora_manager:
957
+ raise RuntimeError("LoRA is not enabled.")
958
+ return self.lora_manager.remove_all_loras()
959
+
960
+ def set_active_loras(self, lora_requests: Set[LoRARequest],
961
+ lora_mapping: LoRAMapping) -> None:
962
+ if not self.lora_manager:
963
+ raise RuntimeError("LoRA is not enabled.")
964
+ self.lora_manager.set_active_loras(lora_requests, lora_mapping)
965
+
966
+ def add_lora(self, lora_request: LoRARequest) -> bool:
967
+ if not self.lora_manager:
968
+ raise RuntimeError("LoRA is not enabled.")
969
+ return self.lora_manager.add_lora(lora_request)
970
+
971
+ def remove_lora(self, lora_id: int) -> bool:
972
+ if not self.lora_manager:
973
+ raise RuntimeError("LoRA is not enabled.")
974
+ return self.lora_manager.remove_lora(lora_id)
975
+
976
+ def list_loras(self) -> Set[int]:
977
+ if not self.lora_manager:
978
+ raise RuntimeError("LoRA is not enabled.")
979
+ return self.lora_manager.list_loras()
980
+
981
+ @torch.inference_mode()
982
+ def capture_model(self, kv_caches: List[torch.Tensor]) -> None:
983
+ """Cuda graph capture a model.
984
+
985
+ Note that CUDA graph's performance gain is negligible if number
986
+ of batched tokens are larger than 200. And since CUDA graph
987
+ requires fixed sized tensors, supporting large/variable batch
988
+ size requires high GPU memory overhead. Thus, vLLM only captures
989
+ decoding requests. Mixed batch (chunked prefill + decoding) or
990
+ prefill requests are not captured.
991
+
992
+ Since it is used for decoding-only, it assumes there's only 1 token
993
+ per sequence in the batch.
994
+ """
995
+ # NOTE(woosuk): This is a hack to ensure that the NCCL backend is never
996
+ # deleted before the CUDA graphs.
997
+ self.pynccl_backend = pynccl_utils.get_nccl_backend()
998
+
999
+ assert not self.model_config.enforce_eager
1000
+ logger.info("Capturing the model for CUDA graphs. This may lead to "
1001
+ "unexpected consequences if the model is not static. To "
1002
+ "run the model in eager mode, set 'enforce_eager=True' or "
1003
+ "use '--enforce-eager' in the CLI.")
1004
+ logger.info("CUDA graphs can take additional 1~3 GiB memory per GPU. "
1005
+ "If you are running out of memory, consider decreasing "
1006
+ "`gpu_memory_utilization` or enforcing eager mode. "
1007
+ "You can also reduce the `max_num_seqs` as needed "
1008
+ "to decrease memory usage.")
1009
+ start_time = time.perf_counter()
1010
+
1011
+ # Prepare dummy inputs. These will be reused for all batch sizes.
1012
+ max_batch_size = max(_BATCH_SIZES_TO_CAPTURE)
1013
+ input_tokens = torch.zeros(max_batch_size, dtype=torch.long).cuda()
1014
+ input_positions = torch.zeros(max_batch_size, dtype=torch.long).cuda()
1015
+ slot_mapping = torch.empty(max_batch_size, dtype=torch.long).cuda()
1016
+ slot_mapping.fill_(_PAD_SLOT_ID)
1017
+ context_lens = torch.ones(max_batch_size, dtype=torch.int32).cuda()
1018
+ block_tables = torch.from_numpy(self.graph_block_tables).cuda()
1019
+
1020
+ graph_batch_size = _get_graph_batch_size(
1021
+ self.scheduler_config.max_num_seqs)
1022
+ batch_size_capture_list = [
1023
+ bs for bs in _BATCH_SIZES_TO_CAPTURE if bs <= graph_batch_size
1024
+ ]
1025
+
1026
+ # NOTE(woosuk): There are 3 backends for all-reduce: custom all-reduce
1027
+ # kernel, pynccl, and PyTorch NCCL. When using CUDA graph, we use
1028
+ # either custom all-reduce kernel or pynccl. When not using CUDA
1029
+ # graph, we use either custom all-reduce kernel or PyTorch NCCL.
1030
+ # We always prioritize using custom all-reduce kernel but fall back
1031
+ # to PyTorch or pynccl if it is disabled or not supported.
1032
+ with custom_all_reduce.capture():
1033
+ # NOTE: Capturing the largest batch size first may help reduce the
1034
+ # memory usage of CUDA graph.
1035
+ for batch_size in reversed(batch_size_capture_list):
1036
+ # Create dummy attn_metadata.
1037
+ decode_metadata = self.attn_backend.make_metadata(
1038
+ is_prompt=False,
1039
+ prompt_lens=None,
1040
+ prompt_lens_tensor=None,
1041
+ max_subquery_len=None,
1042
+ max_context_len=self.max_context_len_to_capture,
1043
+ max_prompt_len=None,
1044
+ subquery_start_loc=None,
1045
+ seq_start_loc=None,
1046
+ context_lens=context_lens[:batch_size],
1047
+ block_tables=block_tables[:batch_size],
1048
+ use_cuda_graph=True,
1049
+ )
1050
+ attn_metadata = AttentionMetadata(
1051
+ num_prefills=0,
1052
+ num_prefill_tokens=0,
1053
+ num_decode_tokens=batch_size,
1054
+ slot_mapping=slot_mapping[:batch_size],
1055
+ prefill_metadata=None,
1056
+ decode_metadata=decode_metadata,
1057
+ kv_cache_dtype=self.kv_cache_dtype,
1058
+ )
1059
+
1060
+ if self.lora_config:
1061
+ lora_mapping = LoRAMapping(
1062
+ [0] * batch_size,
1063
+ [0] * batch_size,
1064
+ )
1065
+ self.set_active_loras(set(), lora_mapping)
1066
+
1067
+ graph_runner = CUDAGraphRunner(self.model)
1068
+ graph_runner.capture(
1069
+ input_tokens[:batch_size],
1070
+ input_positions[:batch_size],
1071
+ kv_caches,
1072
+ attn_metadata,
1073
+ memory_pool=self.graph_memory_pool,
1074
+ )
1075
+ self.graph_memory_pool = graph_runner.graph.pool()
1076
+ self.graph_runners[batch_size] = graph_runner
1077
+
1078
+ end_time = time.perf_counter()
1079
+ elapsed_time = end_time - start_time
1080
+ # This usually takes < 10 seconds.
1081
+ logger.info(f"Graph capturing finished in {elapsed_time:.0f} secs.")
1082
+
1083
+ def __del__(self) -> None:
1084
+ # Delete the CUDA graphs before deleting the pynccl communicator.
1085
+ # NOTE(woosuk): This is necessary because otherwise deadlocks can
1086
+ # happen.
1087
+ # FIXME(woosuk): This is a bit hacky. Find a more robust solution.
1088
+ # TODO(youkaichao): when we get enough user feedback that pynccl is
1089
+ # more stable than cupy, we can remove this, e.g. in v0.4.1.
1090
+ self.graph_runners.clear()
1091
+ self.pynccl_backend = None
1092
+
1093
+ @property
1094
+ def vocab_size(self) -> int:
1095
+ return self.model_config.get_vocab_size()
1096
+
1097
+
1098
+ class CUDAGraphRunner:
1099
+
1100
+ def __init__(self, model: nn.Module):
1101
+ self.model = model
1102
+ self.input_buffers: Dict[str, torch.Tensor] = {}
1103
+ self.output_buffers: Dict[str, torch.Tensor] = {}
1104
+
1105
+ self._graph: Optional[torch.cuda.CUDAGraph] = None
1106
+
1107
+ @property
1108
+ def graph(self):
1109
+ assert self._graph is not None
1110
+ return self._graph
1111
+
1112
+ def capture(
1113
+ self,
1114
+ input_ids: torch.Tensor,
1115
+ positions: torch.Tensor,
1116
+ kv_caches: List[torch.Tensor],
1117
+ attn_metadata: AttentionMetadata,
1118
+ memory_pool,
1119
+ **kwargs,
1120
+ ) -> None:
1121
+ assert self._graph is None
1122
+ # Run the model once without capturing the graph.
1123
+ # This is to make sure that the captured graph does not include the
1124
+ # kernel launches for initial benchmarking (e.g., Triton autotune).
1125
+ with _maybe_pynccl():
1126
+ self.model(
1127
+ input_ids,
1128
+ positions,
1129
+ kv_caches,
1130
+ attn_metadata,
1131
+ **kwargs,
1132
+ )
1133
+ torch.cuda.synchronize()
1134
+
1135
+ # Capture the graph.
1136
+ # NOTE(woosuk): Python 3.8 does not support multi-line with statements.
1137
+ # https://stackoverflow.com/questions/31039022/python-multi-line-with-statement
1138
+ self._graph = torch.cuda.CUDAGraph()
1139
+ with torch.cuda.graph(self._graph, pool=memory_pool): # noqa: SIM117
1140
+ with _maybe_pynccl():
1141
+ hidden_states = self.model(
1142
+ input_ids,
1143
+ positions,
1144
+ kv_caches,
1145
+ attn_metadata,
1146
+ **kwargs,
1147
+ )
1148
+ torch.cuda.synchronize()
1149
+
1150
+ # Save the input and output buffers.
1151
+ self.input_buffers = {
1152
+ "input_ids": input_ids,
1153
+ "positions": positions,
1154
+ "kv_caches": kv_caches,
1155
+ "slot_mapping": attn_metadata.slot_mapping,
1156
+ "context_lens": attn_metadata.decode_metadata.context_lens,
1157
+ "block_tables": attn_metadata.decode_metadata.block_tables,
1158
+ }
1159
+ self.output_buffers = {"hidden_states": hidden_states}
1160
+ return
1161
+
1162
+ def forward(
1163
+ self,
1164
+ input_ids: torch.Tensor,
1165
+ positions: torch.Tensor,
1166
+ kv_caches: List[torch.Tensor],
1167
+ attn_metadata: AttentionMetadata,
1168
+ **kwargs,
1169
+ ) -> torch.Tensor:
1170
+ # KV caches are fixed tensors, so we don't need to copy them.
1171
+ del kv_caches
1172
+
1173
+ # Copy the input tensors to the input buffers.
1174
+ self.input_buffers["input_ids"].copy_(input_ids, non_blocking=True)
1175
+ self.input_buffers["positions"].copy_(positions, non_blocking=True)
1176
+ self.input_buffers["slot_mapping"].copy_(attn_metadata.slot_mapping,
1177
+ non_blocking=True)
1178
+ self.input_buffers["context_lens"].copy_(
1179
+ attn_metadata.decode_metadata.context_lens, non_blocking=True)
1180
+ self.input_buffers["block_tables"].copy_(
1181
+ attn_metadata.decode_metadata.block_tables, non_blocking=True)
1182
+ # Run the graph.
1183
+ self.graph.replay()
1184
+
1185
+ # Return the output tensor.
1186
+ return self.output_buffers["hidden_states"]
1187
+
1188
+ def __call__(self, *args, **kwargs):
1189
+ return self.forward(*args, **kwargs)
1190
+
1191
+
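CUDAGraphRunner above follows PyTorch's standard capture/replay pattern: warm up once outside the graph, record the kernels against fixed-size buffers, then replay after copying fresh data into those same buffers. A generic, self-contained illustration of that pattern (a toy nn.Linear, not the LlamaGen model):

import torch

if torch.cuda.is_available():
    model = torch.nn.Linear(16, 16).cuda()
    static_in = torch.zeros(8, 16, device="cuda")
    model(static_in)                   # warm-up run outside the graph
    torch.cuda.synchronize()

    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        static_out = model(static_in)  # kernels recorded; shapes are now frozen

    static_in.copy_(torch.randn(8, 16, device="cuda"))  # update inputs in place ...
    graph.replay()                                       # ... and replay the recorded kernels
    print(static_out.sum().item())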
1192
+ @contextlib.contextmanager
1193
+ def _maybe_pynccl():
1194
+ if pynccl_utils.is_initialized(
1195
+ ) and not custom_all_reduce.is_initialized():
1196
+ with with_pynccl_for_all_reduce():
1197
+ yield
1198
+ else:
1199
+ yield
1200
+
1201
+
1202
+ def _get_graph_batch_size(batch_size: int) -> int:
1203
+ """Returns the padded batch size given actual batch size.
1204
+
1205
+ Batch sizes are 1, 2, 4, _BATCH_SIZE_ALIGNMENT,
1206
+ 2*_BATCH_SIZE_ALIGNMENT, 3*_BATCH_SIZE_ALIGNMENT...
1207
+ """
1208
+ if batch_size <= 2:
1209
+ return batch_size
1210
+ elif batch_size <= 4:
1211
+ return 4
1212
+ else:
1213
+ return ((batch_size + _BATCH_SIZE_ALIGNMENT - 1) //
1214
+ _BATCH_SIZE_ALIGNMENT * _BATCH_SIZE_ALIGNMENT)
1215
+
1216
+
1217
+ def _prepare_fake_inputs(
1218
+ seq_len: int, vision_language_config: Optional[VisionLanguageConfig]):
1219
+ """Prepare fake inputs for profile run."""
1220
+ if vision_language_config:
1221
+ prompt_tokens = [
1222
+ vision_language_config.image_token_id
1223
+ ] * vision_language_config.image_feature_size + [0] * (
1224
+ seq_len - vision_language_config.image_feature_size)
1225
+ fake_image_input = MultiModalData(
1226
+ type=MultiModalData.Type.IMAGE,
1227
+ data=torch.zeros(vision_language_config.image_input_shape,
1228
+ dtype=torch.float16))
1229
+ else:
1230
+ prompt_tokens = [0] * seq_len
1231
+ fake_image_input = None
1232
+ return SequenceData(prompt_tokens), fake_image_input
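To see how _get_graph_batch_size quantizes decode batches onto the captured CUDA graph sizes, here is a quick usage sketch. The alignment value of 8 is an assumption matching vLLM's default _BATCH_SIZE_ALIGNMENT (the constant is defined near the top of model_runner.py, outside this excerpt).

_BATCH_SIZE_ALIGNMENT = 8  # assumed value; see the constant near the top of model_runner.py

def pad_batch_size(batch_size: int) -> int:
    # Mirrors _get_graph_batch_size: 1, 2, 4, then multiples of the alignment.
    if batch_size <= 2:
        return batch_size
    if batch_size <= 4:
        return 4
    return (batch_size + _BATCH_SIZE_ALIGNMENT - 1) // _BATCH_SIZE_ALIGNMENT * _BATCH_SIZE_ALIGNMENT

print([pad_batch_size(b) for b in (1, 3, 5, 9, 17)])  # [1, 4, 8, 16, 24]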
autoregressive/serve/sample_c2i.py ADDED
@@ -0,0 +1,98 @@
1
+ import time
2
+ import argparse
3
+ import torch
4
+ from torchvision.utils import save_image
5
+ import sys
6
+ sys.path.append('/data/zongmingli/LlamaGen')
7
+ from tokenizer.tokenizer_image.vq_model import VQ_models
8
+ from autoregressive.serve.gpt_model import GPT_models
9
+ from autoregressive.serve.llm import LLM
10
+ from vllm import SamplingParams
11
+
12
+
13
+ def main(args):
14
+ # Setup PyTorch:
15
+ torch.manual_seed(args.seed)
16
+ torch.backends.cudnn.deterministic = True
17
+ torch.backends.cudnn.benchmark = False
18
+ torch.set_grad_enabled(False)
19
+ device = "cuda" if torch.cuda.is_available() else "cpu"
20
+
21
+ # create and load model
22
+ vq_model = VQ_models[args.vq_model](
23
+ codebook_size=args.codebook_size,
24
+ codebook_embed_dim=args.codebook_embed_dim)
25
+ vq_model.to(device)
26
+ vq_model.eval()
27
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
28
+ vq_model.load_state_dict(checkpoint["model"])
29
+ del checkpoint
30
+ print(f"image tokenizer is loaded")
31
+
32
+ # Labels to condition the model with (feel free to change):
33
+ class_labels = [207, 360, 387, 974, 88, 979, 417, 279]
34
+ latent_size = args.image_size // args.downsample_size
35
+ qzshape = [len(class_labels), args.codebook_embed_dim, latent_size, latent_size]
36
+ prompt_token_ids = [[cind] for cind in class_labels]
37
+ if args.cfg_scale > 1.0:
38
+ prompt_token_ids.extend([[args.num_classes] for _ in range(len(prompt_token_ids))])
39
+ # Create an LLM.
40
+ llm = LLM(
41
+ args=args,
42
+ model='autoregressive/serve/fake_json/{}.json'.format(args.gpt_model),
43
+ gpu_memory_utilization=0.9,
44
+ skip_tokenizer_init=True)
45
+ print(f"gpt model is loaded")
46
+
47
+ # Create a sampling params object.
48
+ sampling_params = SamplingParams(
49
+ temperature=args.temperature, top_p=args.top_p, top_k=args.top_k,
50
+ max_tokens=latent_size ** 2)
51
+
52
+ # Generate image token sequences from the prompts. The output is a list of
53
+ # RequestOutput objects that contain the prompt, the generated token ids, and other information.
54
+ t1 = time.time()
55
+ outputs = llm.generate(
56
+ prompt_token_ids=prompt_token_ids,
57
+ sampling_params=sampling_params,
58
+ use_tqdm=False)
59
+ sampling_time = time.time() - t1
60
+ print(f"gpt sampling takes about {sampling_time:.2f} seconds.")
61
+
62
+ # decode to image
63
+ index_sample = torch.tensor([output.outputs[0].token_ids for output in outputs], device=device)
64
+ if args.cfg_scale > 1.0:
65
+ index_sample = index_sample[:len(class_labels)]
66
+ t2 = time.time()
67
+ samples = vq_model.decode_code(index_sample, qzshape) # output value is between [-1, 1]
68
+ decoder_time = time.time() - t2
69
+ print(f"decoder takes about {decoder_time:.2f} seconds.")
70
+
71
+ # Save and display images:
72
+ save_image(samples, "sample_{}_vllm.png".format(args.gpt_type), nrow=4, normalize=True, value_range=(-1, 1))
73
+ print(f"image is saved to sample_{args.gpt_type}_vllm.png")
74
+
75
+
76
+ if __name__ == '__main__':
77
+ parser = argparse.ArgumentParser()
78
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-B")
79
+ parser.add_argument("--gpt-ckpt", type=str, required=True, help="ckpt path for gpt model")
80
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="c2i", help="class-conditional or text-conditional")
81
+ parser.add_argument("--from-fsdp", action='store_true')
82
+ parser.add_argument("--cls-token-num", type=int, default=1, help="max token number of condition input")
83
+ parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
84
+ parser.add_argument("--compile", action='store_true', default=False)
85
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
86
+ parser.add_argument("--vq-ckpt", type=str, required=True, help="ckpt path for vq model")
87
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
88
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
89
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=384)
90
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
91
+ parser.add_argument("--num-classes", type=int, default=1000)
92
+ parser.add_argument("--cfg-scale", type=float, default=4.0)
93
+ parser.add_argument("--seed", type=int, default=0)
94
+ parser.add_argument("--top-k", type=int, default=2000,help="top-k value to sample with")
95
+ parser.add_argument("--temperature", type=float, default=1.0, help="temperature value to sample with")
96
+ parser.add_argument("--top-p", type=float, default=1.0, help="top-p value to sample with")
97
+ args = parser.parse_args()
98
+ main(args)
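sample_c2i.py wires classifier-free guidance by duplicating the prompt list with the num_classes index as the unconditional "null" class, and the served Sampler (in sampler.py below) merges the two halves of the batch at every decoding step. A minimal sketch of that merge rule, using made-up tensor sizes:

import torch

def cfg_combine(logits: torch.Tensor, cfg_scale: float) -> torch.Tensor:
    # First half of the rows are conditional, second half unconditional,
    # matching the prompt duplication in main() above.
    cond, uncond = torch.split(logits, logits.shape[0] // 2, dim=0)
    guided = uncond + (cond - uncond) * cfg_scale
    # Both halves keep decoding from the same guided distribution; the
    # unconditional half is simply dropped before vq_model.decode_code.
    return torch.cat([guided, guided], dim=0)

fake_logits = torch.randn(16, 16384)                  # 8 class prompts + 8 null prompts
print(cfg_combine(fake_logits, cfg_scale=4.0).shape)  # torch.Size([16, 16384])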
autoregressive/serve/sampler.py ADDED
@@ -0,0 +1,868 @@
1
+ """A layer that samples the next tokens from the model's outputs."""
2
+ import itertools
3
+ from typing import Dict, List, Optional, Tuple
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from vllm.model_executor.layers.ops.sample import sample as sample_triton
9
+ from vllm.model_executor.sampling_metadata import (SamplingMetadata,
10
+ SamplingTensors)
11
+ from vllm.sampling_params import SamplingParams, SamplingType
12
+ from vllm.sequence import (Logprob, PromptLogprobs, SampleLogprobs,
13
+ SamplerOutput, SequenceData, SequenceGroupOutput,
14
+ SequenceOutput)
15
+
16
+
17
+ class Sampler(nn.Module):
18
+ """Samples the next tokens from the model's outputs.
19
+
20
+ This layer does the following:
21
+ 1. Discard the hidden states that are not used for sampling (i.e., all
22
+ tokens except the final one in each prompt).
23
+ 2. Compute the logits for the next tokens.
24
+ 3. Apply presence, frequency and repetition penalties.
25
+ 4. Apply temperature scaling.
26
+ 5. Apply top-p and top-k truncation.
27
+ 6. Sample the next tokens.
28
+ Here, each sequence group within the batch can have different sampling
29
+ parameters (e.g., sampling method, temperature, top-p, top-k, etc.).
30
+
31
+ The structure of the logits tensor is coupled with the seq_groups in
32
+ sampling_metadata. Typically, each sequence in each seq_group has one row in
33
+ logits for the next token to be sampled; however, for a seq_group with a
34
+ prompt request with the prompt_logprobs sampling parameter, there are rows
35
+ in logits for each token in the input prompt.
36
+ """
37
+
38
+ def __init__(self, cfg_scale=1.0):
39
+ super().__init__()
40
+ self.cfg_scale = cfg_scale
41
+ # Whether or not the SamplerOutput should have on-device tensors
42
+ # containing the sampled token ids and probabilities. This is used by
43
+ # speculative decoding.
44
+ self.include_gpu_probs_tensor = False
45
+
46
+ def forward(
47
+ self,
48
+ logits: torch.Tensor,
49
+ sampling_metadata: SamplingMetadata,
50
+ ) -> Optional[SamplerOutput]:
51
+ assert logits is not None
52
+ _, vocab_size = logits.shape
53
+
54
+ if self.cfg_scale > 1.0:
55
+ logits_combined = logits
56
+ cond_logits, uncond_logits = torch.split(logits_combined, len(logits_combined) // 2, dim=0)
57
+ logits = uncond_logits + (cond_logits - uncond_logits) * self.cfg_scale
58
+ logits = torch.cat([logits, logits], dim=0)
59
+
60
+ # Apply min_tokens penalty which sets stop tokens to -inf if min_tokens
61
+ # have not been generated yet
62
+ logits = _apply_min_tokens_penalty(logits, sampling_metadata)
63
+
64
+ # Prepare sampling tensors with pinned memory to avoid blocking.
65
+ (sampling_tensors, do_penalties, do_top_p_top_k,
66
+ do_min_p) = SamplingTensors.from_sampling_metadata(
67
+ sampling_metadata, vocab_size, logits.device, logits.dtype)
68
+
69
+ # Apply presence and frequency penalties.
70
+ if do_penalties:
71
+ logits = _apply_penalties(logits, sampling_tensors.prompt_tokens,
72
+ sampling_tensors.output_tokens,
73
+ sampling_tensors.presence_penalties,
74
+ sampling_tensors.frequency_penalties,
75
+ sampling_tensors.repetition_penalties)
76
+
77
+ # Apply temperature scaling.
78
+ # Use in-place division to avoid creating a new tensor.
79
+ logits.div_(sampling_tensors.temperatures.unsqueeze_(dim=1))
80
+
81
+ if do_top_p_top_k:
82
+ logits = _apply_top_k_top_p(logits, sampling_tensors.top_ps,
83
+ sampling_tensors.top_ks)
84
+
85
+ if do_min_p:
86
+ logits = _apply_min_p(logits, sampling_tensors.min_ps)
87
+
88
+ # We use float32 for probabilities and log probabilities.
89
+ # Compute the probabilities.
90
+ probs = torch.softmax(logits, dim=-1, dtype=torch.float)
91
+ # Compute the log probabilities.
92
+ # Use log_softmax to ensure numerical stability.
93
+ logprobs = torch.log_softmax(logits, dim=-1, dtype=torch.float)
94
+
95
+ # Sample the next tokens.
96
+ sample_results, maybe_sampled_tokens_tensor = _sample(
97
+ probs,
98
+ logprobs,
99
+ sampling_metadata,
100
+ sampling_tensors,
101
+ include_gpu_probs_tensor=self.include_gpu_probs_tensor,
102
+ modify_greedy_probs=self._should_modify_greedy_probs_inplace,
103
+ )
104
+
105
+
106
+ if self.cfg_scale > 1.0:
107
+ cond_result = sample_results[:len(sample_results) // 2]
108
+ sample_results = cond_result + cond_result
109
+
110
+
111
+ if self.include_gpu_probs_tensor:
112
+ assert maybe_sampled_tokens_tensor is not None
113
+ sampled_tokens_tensor = maybe_sampled_tokens_tensor
114
+ on_device_tensors = (probs, sampled_tokens_tensor)
115
+ else:
116
+ on_device_tensors = None
117
+
118
+ # Get the logprobs query results.
119
+ prompt_logprobs, sample_logprobs = _get_logprobs(
120
+ logprobs, sampling_metadata, sample_results)
121
+ return _build_sampler_output(sample_results,
122
+ sampling_metadata,
123
+ prompt_logprobs,
124
+ sample_logprobs,
125
+ on_device_tensors=on_device_tensors)
126
+
127
+ @property
128
+ def _should_modify_greedy_probs_inplace(self) -> bool:
129
+ """Whether or not the sampler should modify the probability distribution
130
+ of greedily-sampled tokens such that multinomial sampling would sample
131
+ the greedily-sampled token.
132
+
133
+ In other words, if True then we set the probability of the greedily-
134
+ sampled token to 1.
135
+
136
+ This is used by speculative decoding, which requires that the sampling
137
+ method be encoded into the probability distribution.
138
+ """
139
+ # Modify greedy probs if include_gpu_probs_tensor is set.
140
+ return self.include_gpu_probs_tensor
141
+
142
+
143
+ def _get_bin_counts_and_mask(
144
+ tokens: torch.Tensor,
145
+ vocab_size: int,
146
+ num_seqs: int,
147
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
148
+ # Compute the bin counts for the tokens.
149
+ # vocab_size + 1 for padding.
150
+ bin_counts = torch.zeros((num_seqs, vocab_size + 1),
151
+ dtype=torch.long,
152
+ device=tokens.device)
153
+ bin_counts.scatter_add_(1, tokens, torch.ones_like(tokens))
154
+ bin_counts = bin_counts[:, :vocab_size]
155
+ mask = bin_counts > 0
156
+
157
+ return bin_counts, mask
158
+
159
+
160
+ def _apply_min_tokens_penalty(
161
+ logits: torch.Tensor,
162
+ sampling_metadata: SamplingMetadata,
163
+ ) -> torch.Tensor:
164
+ # list of indices in logits that will be set to -inf
165
+ logits_to_penalize = []
166
+ start_idx = 0
167
+ for i, seq_group in enumerate(sampling_metadata.seq_groups):
168
+ seq_ids, sampling_params = seq_group
169
+
170
+ # handle prompt_logprobs by skipping rows in logits added for the prompt
171
+ # tokens (prompt logprobs are not penalized)
172
+ if (i < sampling_metadata.num_prompts
173
+ and sampling_params.prompt_logprobs is not None):
174
+ assert len(seq_ids) == 1
175
+ start_idx += sampling_metadata.prompt_lens[i] - 1
176
+
177
+ min_tokens = sampling_params.min_tokens
178
+ if min_tokens > 0:
179
+ seqs_to_penalize = []
180
+ for i, seq_id in enumerate(seq_ids):
181
+ seq_data = sampling_metadata.seq_data[seq_id]
182
+ if len(seq_data.output_token_ids) < min_tokens:
183
+ seqs_to_penalize.append(i)
184
+
185
+ if seqs_to_penalize:
186
+ # convert to the index into logits
187
+ seqs_to_penalize = [start_idx + i for i in seqs_to_penalize]
188
+ # use set() to remove any duplicates
189
+ token_ids_to_penalize = set(sampling_params.stop_token_ids +
190
+ [sampling_params.eos_token_id])
191
+ # itertools.product pairs each seq index with every token id
192
+ logits_to_penalize.extend(
193
+ itertools.product(seqs_to_penalize, token_ids_to_penalize))
194
+
195
+ start_idx += len(seq_ids)
196
+
197
+ if logits_to_penalize:
198
+ # use zip and * to group indices along each dimension
199
+ # eg. [ (1,2), (1,3), (5,6) ] -> ( (1,1,5), (2,3,6) )
200
+ logits[tuple(zip(*logits_to_penalize))] = -float("inf")
201
+
202
+ # verifies that no rows in logits were missed unexpectedly
203
+ assert start_idx == logits.shape[0]
204
+ return logits
205
+
206
+
207
+ def _apply_penalties(logits: torch.Tensor, prompt_tokens_tensor: torch.Tensor,
208
+ output_tokens_tensor: torch.Tensor,
209
+ presence_penalties: torch.Tensor,
210
+ frequency_penalties: torch.Tensor,
211
+ repetition_penalties: torch.Tensor) -> torch.Tensor:
212
+ num_seqs, vocab_size = logits.shape
213
+ _, prompt_mask = _get_bin_counts_and_mask(prompt_tokens_tensor, vocab_size,
214
+ num_seqs)
215
+ output_bin_counts, output_mask = _get_bin_counts_and_mask(
216
+ output_tokens_tensor, vocab_size, num_seqs)
217
+
218
+ repetition_penalties = repetition_penalties[:, None].repeat(1, vocab_size)
219
+ repetition_penalties[~(prompt_mask | output_mask)] = 1.0
220
+ logits = torch.where(logits > 0, logits / repetition_penalties,
221
+ logits * repetition_penalties)
222
+
223
+ # We follow the definition in OpenAI API.
224
+ # Refer to https://platform.openai.com/docs/api-reference/parameter-details
225
+ logits -= frequency_penalties.unsqueeze_(dim=1) * output_bin_counts
226
+ logits -= presence_penalties.unsqueeze_(dim=1) * output_mask
227
+ return logits
228
+
229
+
230
+ def _apply_top_k_top_p(
231
+ logits: torch.Tensor,
232
+ p: torch.Tensor,
233
+ k: torch.Tensor,
234
+ ) -> torch.Tensor:
235
+ logits_sort, logits_idx = logits.sort(dim=-1, descending=False)
236
+
237
+ # Apply top-k.
238
+ top_k_mask = logits_sort.size(1) - k.to(torch.long)
239
+ # Get all the top_k values.
240
+ top_k_mask = logits_sort.gather(1, top_k_mask.unsqueeze(dim=1))
241
+ top_k_mask = logits_sort < top_k_mask
242
+ logits_sort.masked_fill_(top_k_mask, -float("inf"))
243
+
244
+ # Apply top-p.
245
+ probs_sort = logits_sort.softmax(dim=-1)
246
+ probs_sum = probs_sort.cumsum(dim=-1)
247
+ top_p_mask = probs_sum <= 1 - p.unsqueeze(dim=1)
248
+ # at least one
249
+ top_p_mask[:, -1] = False
250
+ logits_sort.masked_fill_(top_p_mask, -float("inf"))
251
+
252
+ # Re-sort the probabilities.
253
+ src = torch.arange(logits_idx.shape[-1],
254
+ device=logits_idx.device).expand_as(logits_idx)
255
+ logits_idx_inv = torch.empty_like(logits_idx).scatter_(dim=-1,
256
+ index=logits_idx,
257
+ src=src)
258
+ logits = torch.gather(logits_sort, dim=-1, index=logits_idx_inv)
259
+ return logits
260
+
261
+
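A toy numeric check (standalone, not vLLM code) of what _apply_top_k_top_p above does: sort ascending, cut everything below the k-th largest logit, then cut the cumulative probability tail below 1 - p while always keeping at least one token. The re-scatter back to the original order is omitted here for brevity.

import torch

logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
k, p = torch.tensor([2]), torch.tensor([0.9])

logits_sort, logits_idx = logits.sort(dim=-1, descending=False)
kth_largest = logits_sort.gather(1, (logits_sort.size(1) - k).unsqueeze(1))
logits_sort.masked_fill_(logits_sort < kth_largest, -float("inf"))   # top-k cut
probs_sum = logits_sort.softmax(dim=-1).cumsum(dim=-1)
top_p_mask = probs_sum <= 1 - p.unsqueeze(1)
top_p_mask[:, -1] = False                                            # keep at least one token
logits_sort.masked_fill_(top_p_mask, -float("inf"))
print(logits_sort)  # only the two largest logits (1.0 and 2.0) remain finite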
262
+ def _apply_min_p(
263
+ logits: torch.Tensor,
264
+ min_p: torch.Tensor,
265
+ ) -> torch.Tensor:
266
+ """
267
+ Adapted from
268
+ https://github.com/oobabooga/text-generation-webui/blob/3146124ec01f02c8fb1650a6517cf1b60b537aaf/modules/sampler_hijack.py#L16C17-L16C17
269
+ """
270
+ probs = torch.softmax(logits, dim=-1)
271
+ top_probs, _ = probs.max(dim=-1, keepdim=True)
272
+ scaled_min_p = min_p.unsqueeze_(dim=1) * top_probs
273
+ tokens_to_remove = probs < scaled_min_p
274
+ logits = logits.masked_fill_(tokens_to_remove, -float("inf"))
275
+
276
+ return logits
277
+
278
+
279
+ def _greedy_sample(
280
+ selected_seq_groups: List[Tuple[List[int], SamplingParams]],
281
+ samples: torch.Tensor,
282
+ ) -> List[Tuple[List[int], List[int]]]:
283
+ samples = samples.tolist()
284
+ sample_idx = 0
285
+ results = []
286
+ for seq_group in selected_seq_groups:
287
+ seq_ids, _ = seq_group
288
+ num_parent_seqs = len(seq_ids)
289
+ assert num_parent_seqs == 1, (
290
+ "Greedy sampling should have only one seq.")
291
+ parent_ids = list(range(num_parent_seqs))
292
+ next_token_ids = [samples[sample_idx]]
293
+ results.append((next_token_ids, parent_ids))
294
+ sample_idx += num_parent_seqs
295
+ return results
296
+
297
+
298
+ def _random_sample(
299
+ selected_seq_groups: List[Tuple[List[int], SamplingParams]],
300
+ is_prompts: List[bool],
301
+ random_samples: torch.Tensor,
302
+ ) -> List[Tuple[List[int], List[int]]]:
303
+ # Find the maximum best_of value of the prompt phase requests.
304
+ random_samples = random_samples.cpu()
305
+ sample_idx = 0
306
+ results = []
307
+ for seq_group, is_prompt in zip(selected_seq_groups, is_prompts):
308
+ seq_ids, sampling_params = seq_group
309
+ num_parent_seqs = len(seq_ids)
310
+ if is_prompt:
311
+ # Prompt phase.
312
+ parent_ids = [0] * sampling_params.best_of
313
+ next_token_ids = random_samples[
314
+ sample_idx, :sampling_params.best_of].tolist()
315
+ else:
316
+ # Generation phase.
317
+ parent_ids = list(range(num_parent_seqs))
318
+ next_token_ids = random_samples[sample_idx:sample_idx +
319
+ num_parent_seqs, 0].tolist()
320
+ results.append((next_token_ids, parent_ids))
321
+ sample_idx += num_parent_seqs
322
+ return results
323
+
324
+
325
+ def _beam_search_sample(
326
+ selected_seq_groups: List[Tuple[List[int], SamplingParams]],
327
+ is_prompts: List[bool],
328
+ seq_data: Dict[int, SequenceData],
329
+ logprobs: torch.Tensor,
330
+ ) -> List[Tuple[List[int], List[int]]]:
331
+ # We sample 2 * beam_width candidates to make sure that with high
332
+ # probability we can get `beam_width` candidates in addition to
333
+ # the finished sequences for the next iteration. See
334
+ # https://github.com/tensorflow/tensor2tensor/blob/bafdc1b67730430d38d6ab802cbd51f9d053ba2e/tensor2tensor/utils/beam_search.py#L557-L563
335
+ # for details. See also HF reference:
336
+ # https://github.com/huggingface/transformers/blob/a4dd53d88e4852f023332d284ff07a01afcd5681/src/transformers/generation/utils.py#L3063-L3065
337
+ #
338
+ # NOTE: Beam search is not vectorized, so its speed can be slower than
339
+ # other sampling methods.
340
+ sample_idx = 0
341
+ results = []
342
+ for seq_group, is_prompt in zip(selected_seq_groups, is_prompts):
343
+ seq_ids, sampling_params = seq_group
344
+ num_parent_seqs = len(seq_ids)
345
+ beam_width = sampling_params.best_of
346
+ seq_group_logprobs = logprobs[sample_idx:sample_idx + num_parent_seqs]
347
+ if is_prompt:
348
+ # Prompt phase.
349
+ assert num_parent_seqs == 1, (
350
+ "Prompt input should have only one seq.")
351
+ parent_ids = [0] * (2 * beam_width)
352
+ _, next_token_ids = torch.topk(seq_group_logprobs[0],
353
+ 2 * beam_width)
354
+ next_token_ids = next_token_ids.tolist()
355
+ else:
356
+ # Generation phase.
357
+ cumulative_logprobs = [
358
+ seq_data[seq_id].cumulative_logprob for seq_id in seq_ids
359
+ ]
360
+ cumulative_logprobs = torch.tensor(
361
+ cumulative_logprobs,
362
+ dtype=torch.float,
363
+ device=seq_group_logprobs.device)
364
+ seq_group_logprobs = (seq_group_logprobs +
365
+ cumulative_logprobs.unsqueeze(dim=1))
366
+ _, topk_ids = torch.topk(seq_group_logprobs.flatten(),
367
+ 2 * beam_width)
368
+ topk_ids = topk_ids.tolist()
369
+ vocab_size = seq_group_logprobs.size(-1)
370
+ parent_ids = [i // vocab_size for i in topk_ids]
371
+ next_token_ids = [i % vocab_size for i in topk_ids]
372
+ results.append((next_token_ids, parent_ids))
373
+ sample_idx += num_parent_seqs
374
+ assert sample_idx == logprobs.size(0)
375
+ return results
376
+
377
+
378
+ # torch.multinomial forces a GPU<->CPU sync.
379
+ # Therefore, we use an optimized implementation instead.
380
+ # Note that we always sample with replacement.
381
+ # probs will be modified in place, but this is fine, as we pass
382
+ # in a copy already.
383
+ def _multinomial(
384
+ probs: torch.Tensor,
385
+ num_samples: int,
386
+ seq_groups: Optional[List[Tuple[List[int], SamplingParams]]] = None,
387
+ generators: Optional[List[torch.Generator]] = None,
388
+ ) -> torch.Tensor:
389
+ if num_samples > 1:
390
+ # This is equivalent to torch.repeat_interleave (which also
391
+ # forces a GPU<->CPU sync).
392
+ # This allows us to do sampling with replacement by creating
393
+ # num_samples copies of each row in the tensor, and then
394
+ # batch sampling the resulting tensor.
395
+ probs = probs[:, None, :].expand(probs.shape[0], num_samples,
396
+ probs.shape[1]).contiguous().view(
397
+ -1, probs.shape[1])
398
+ q = torch.empty_like(probs)
399
+ if seq_groups is None:
400
+ q.exponential_()
401
+ else:
402
+ sample_idx = 0
403
+ for (seq_ids, _), generator in zip(seq_groups, generators):
404
+ next_sample_idx = sample_idx + len(seq_ids) * num_samples
405
+ q[sample_idx:next_sample_idx].exponential_(generator=generator)
406
+ sample_idx = next_sample_idx
407
+ return probs.div_(q).argmax(dim=1).view(-1, num_samples)
408
+
409
+
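The q.exponential_() trick in _multinomial above is the exponential-race equivalent of Gumbel-max sampling: argmax(p / q) with q ~ Exp(1) draws index i with probability p_i, avoiding the GPU<->CPU sync of torch.multinomial. A quick empirical check (standalone, not vLLM code):

import torch

torch.manual_seed(0)
p = torch.tensor([0.1, 0.2, 0.7])
n = 200_000
q = torch.empty(n, 3).exponential_()             # i.i.d. Exp(1) noise per candidate
samples = (p / q).argmax(dim=1)                  # same rule as probs.div_(q).argmax(...)
print(torch.bincount(samples, minlength=3) / n)  # approximately tensor([0.10, 0.20, 0.70])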
410
+ def _sample_with_torch(
411
+ probs: torch.Tensor,
412
+ logprobs: torch.Tensor,
413
+ sampling_metadata: SamplingMetadata,
414
+ include_gpu_probs_tensor: bool,
415
+ modify_greedy_probs: bool,
416
+ ) -> Tuple[List[Tuple[List[int], List[int]]], Optional[torch.Tensor]]:
417
+ categorized_seq_group_ids = {t: [] for t in SamplingType}
418
+ categorized_sample_indices = sampling_metadata.categorized_sample_indices
419
+ for i, seq_group in enumerate(sampling_metadata.seq_groups):
420
+ _, sampling_params = seq_group
421
+ sampling_type = sampling_params.sampling_type
422
+ categorized_seq_group_ids[sampling_type].append(i)
423
+
424
+ sample_results_dict: Dict[int, Tuple[List[int], List[int]]] = {}
425
+ sample_metadata = {}
426
+ multinomial_samples = {}
427
+
428
+ # Create output tensor for sampled token ids.
429
+ if include_gpu_probs_tensor:
430
+ sampled_token_ids_tensor = torch.empty(logprobs.shape[0],
431
+ 1,
432
+ dtype=torch.long,
433
+ device=logprobs.device)
434
+ else:
435
+ sampled_token_ids_tensor = None
436
+
437
+ # Counterintuitively, having two loops here is actually faster.
438
+ # The first loop can run without waiting on GPU<->CPU sync.
439
+ for sampling_type in SamplingType:
440
+ sample_indices = categorized_sample_indices[sampling_type][:, 0]
441
+ num_tokens = len(sample_indices)
442
+ if num_tokens == 0:
443
+ continue
444
+ seq_group_ids = categorized_seq_group_ids[sampling_type]
445
+ seq_groups = [sampling_metadata.seq_groups[i] for i in seq_group_ids]
446
+ is_prompts = [i < sampling_metadata.num_prompts for i in seq_group_ids]
447
+ sample_metadata[sampling_type] = (seq_group_ids, seq_groups,
448
+ is_prompts, sample_indices)
449
+ long_sample_indices = sample_indices.long()
450
+
451
+ if sampling_type == SamplingType.GREEDY:
452
+ greedy_samples = torch.argmax(logprobs[long_sample_indices],
453
+ dim=-1)
454
+
455
+ if include_gpu_probs_tensor:
456
+ # Store sampled tokens in output tensor.
457
+ sampled_token_ids_tensor[
458
+ long_sample_indices] = greedy_samples.unsqueeze(-1)
459
+
460
+ if modify_greedy_probs:
461
+ # If required, modify the probabilities such that sampling from
462
+ # the modified distribution would always sample the argmax
463
+ # token id.
464
+ _modify_greedy_probs_inplace(logprobs, probs,
465
+ long_sample_indices,
466
+ greedy_samples)
467
+
468
+ elif sampling_type in (SamplingType.RANDOM, SamplingType.RANDOM_SEED):
469
+ max_best_of_in_batch = 1
470
+ for seq_group, is_prompt in zip(seq_groups, is_prompts):
471
+ if is_prompt:
472
+ _, sampling_params = seq_group
473
+ max_best_of_in_batch = max(max_best_of_in_batch,
474
+ sampling_params.best_of)
475
+ seeded_args = {} if sampling_type == SamplingType.RANDOM else {
476
+ "seq_groups": seq_groups,
477
+ "generators": sampling_metadata.generators,
478
+ }
479
+
480
+ multinomial_samples[sampling_type] = _multinomial(
481
+ probs[long_sample_indices], max_best_of_in_batch,
482
+ **seeded_args)
483
+
484
+ if include_gpu_probs_tensor:
485
+ # Store sampled tokens in output tensor.
486
+ sampled_token_ids_tensor[
487
+ long_sample_indices] = multinomial_samples[sampling_type]
488
+
489
+ elif sampling_type == SamplingType.BEAM:
490
+ beam_search_logprobs = logprobs[sample_indices]
491
+ else:
492
+ raise ValueError(f"Unsupported sampling type: {sampling_type}")
493
+
494
+ # GPU<->CPU sync happens in the loop below.
495
+ # This also converts the sample output to Python objects.
496
+
497
+ for sampling_type in SamplingType:
498
+ if sampling_type not in sample_metadata:
499
+ continue
500
+ seq_group_ids, seq_groups, is_prompts, sample_indices = sample_metadata[
501
+ sampling_type]
502
+ if sampling_type == SamplingType.GREEDY:
503
+ sample_results = _greedy_sample(seq_groups, greedy_samples)
504
+ elif sampling_type in (SamplingType.RANDOM, SamplingType.RANDOM_SEED):
505
+ sample_results = _random_sample(seq_groups, is_prompts,
506
+ multinomial_samples[sampling_type])
507
+ elif sampling_type == SamplingType.BEAM:
508
+ sample_results = _beam_search_sample(seq_groups, is_prompts,
509
+ sampling_metadata.seq_data,
510
+ beam_search_logprobs)
511
+ sample_results_dict.update(zip(seq_group_ids, sample_results))
512
+
513
+ sample_results = [
514
+ sample_results_dict[i]
515
+ for i in range(len(sampling_metadata.seq_groups))
516
+ ]
517
+ return sample_results, sampled_token_ids_tensor
518
+
519
+
520
+ def _sample_with_triton_kernel(
521
+ probs: torch.Tensor,
522
+ logprobs: torch.Tensor,
523
+ sampling_metadata: SamplingMetadata,
524
+ sampling_tensors: SamplingTensors,
525
+ ) -> List[Tuple[List[int], List[int]]]:
526
+ categorized_seq_group_ids = {t: [] for t in SamplingType}
527
+ categorized_sample_indices = sampling_metadata.categorized_sample_indices
528
+ for i, seq_group in enumerate(sampling_metadata.seq_groups):
529
+ _, sampling_params = seq_group
530
+ sampling_type = sampling_params.sampling_type
531
+ categorized_seq_group_ids[sampling_type].append(i)
532
+
533
+ sample_results_dict: Dict[int, Tuple[List[int], List[int]]] = {}
534
+ sample_metadata = {}
535
+ max_best_of_in_batch = 1
536
+
537
+ # Counterintuitively, having two loops here is actually faster.
538
+ # The first loop can run without waiting on GPU<->CPU sync.
539
+ for sampling_type in SamplingType:
540
+ sample_indices = categorized_sample_indices[sampling_type][:, 0]
541
+ sampled_token_indices = categorized_sample_indices[sampling_type][:, 1]
542
+ num_tokens = len(sample_indices)
543
+ if num_tokens == 0:
544
+ continue
545
+ seq_group_ids = categorized_seq_group_ids[sampling_type]
546
+ seq_groups = [sampling_metadata.seq_groups[i] for i in seq_group_ids]
547
+ is_prompts = [i < sampling_metadata.num_prompts for i in seq_group_ids]
548
+ sample_metadata[sampling_type] = (seq_group_ids, seq_groups,
549
+ is_prompts, sample_indices,
550
+ sampled_token_indices)
551
+ if sampling_type in (SamplingType.GREEDY, SamplingType.RANDOM,
552
+ SamplingType.RANDOM_SEED):
553
+ for seq_group, is_prompt in zip(seq_groups, is_prompts):
554
+ if is_prompt:
555
+ _, sampling_params = seq_group
556
+ max_best_of_in_batch = max(max_best_of_in_batch,
557
+ sampling_params.best_of)
558
+ elif sampling_type == SamplingType.BEAM:
559
+ beam_search_logprobs = logprobs[sample_indices]
560
+ else:
561
+ raise ValueError(f"Unsupported sampling type: {sampling_type}")
562
+
563
+ sampled_tokens, _, _ = sample_triton(
564
+ probs=probs,
565
+ seeds=sampling_tensors.sampling_seeds,
566
+ max_best_of=max_best_of_in_batch,
567
+ sample_indices=sampling_tensors.sample_indices,
568
+ logprobs=logprobs,
569
+ # don't save logprobs because we have logic for that below
570
+ # TODO: use this instead of the CPU-based logic below
571
+ save_logprobs=False,
572
+ )
573
+
574
+ # GPU<->CPU sync happens in the loop below.
575
+
576
+ for sampling_type in SamplingType:
577
+ if sampling_type not in sample_metadata:
578
+ continue
579
+ (seq_group_ids, seq_groups, is_prompts, sample_indices,
580
+ sampled_token_indices) = sample_metadata[sampling_type]
581
+ if sampling_type == SamplingType.GREEDY:
582
+ sample_results = _greedy_sample(
583
+ seq_groups, sampled_tokens[sampled_token_indices][:, 0])
584
+ elif sampling_type in (SamplingType.RANDOM, SamplingType.RANDOM_SEED):
585
+ sample_results = _random_sample(
586
+ seq_groups, is_prompts, sampled_tokens[sampled_token_indices])
587
+ elif sampling_type == SamplingType.BEAM:
588
+ sample_results = _beam_search_sample(seq_groups, is_prompts,
589
+ sampling_metadata.seq_data,
590
+ beam_search_logprobs)
591
+ sample_results_dict.update(zip(seq_group_ids, sample_results))
592
+
593
+ sample_results = [
594
+ sample_results_dict[i]
595
+ for i in range(len(sampling_metadata.seq_groups))
596
+ ]
597
+ return sample_results
598
+
599
+
600
+ def _sample(
601
+ probs: torch.Tensor, logprobs: torch.Tensor,
602
+ sampling_metadata: SamplingMetadata, sampling_tensors: SamplingTensors,
603
+ include_gpu_probs_tensor: bool, modify_greedy_probs: bool
604
+ ) -> Tuple[List[Tuple[List[int], List[int]]], Optional[torch.Tensor]]:
605
+ return _sample_with_torch(
606
+ probs,
607
+ logprobs,
608
+ sampling_metadata,
609
+ include_gpu_probs_tensor=include_gpu_probs_tensor,
610
+ modify_greedy_probs=modify_greedy_probs,
611
+ )
612
+
613
+ # TODO: Enable once Triton kernel & associated code is faster.
614
+ # return _sample_with_triton_kernel(probs, logprobs, sampling_metadata,
615
+ # sampling_tensors)
616
+
617
+
618
+ def _get_ranks(x: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
619
+ """
620
+ This function calculates the ranks of the chosen tokens in a logprob tensor.
621
+
622
+ Args:
623
+ x (torch.Tensor): 2D logprob tensor of shape (N, M)
624
+ where N is the no. of tokens and M is the vocab dim.
625
+ indices (torch.Tensor): List of chosen token indices.
626
+
627
+ Returns:
628
+ torch.Tensor: 1D tensor of shape (N,) where N is the no. of tokens.
629
+ Each element in the returned tensor represents the rank
630
+ of the chosen token in the input logprob tensor.
631
+ """
632
+ vals = x[torch.arange(0, len(x), device=x.device, dtype=indices.dtype),
633
+ indices]
634
+ return (x > vals[:, None]).long().sum(1).add_(1)
635
+
636
+
637
+ def _get_logprobs(
638
+ logprobs: torch.Tensor,
639
+ sampling_metadata: SamplingMetadata,
640
+ sample_results: List[Tuple[List[int], List[int]]],
641
+ ) -> Tuple[List[Optional[List[Optional[Dict[int, float]]]]], List[List[Dict[
642
+ int, float]]]]:
643
+ # Prepare query indices
644
+ batched_logprobs_query_seq_indices: List[int] = []
645
+ batched_logprobs_query_token_indices: List[int] = []
646
+ # at least get one logprob for each token
647
+ largest_num_logprobs = 1
648
+ sample_idx = 0
649
+ for i, (seq_group, sample_result) in enumerate(
650
+ zip(sampling_metadata.seq_groups, sample_results)):
651
+ seq_ids, sampling_params = seq_group
652
+ next_token_ids, parent_ids = sample_result
653
+ num_parent_seqs = len(seq_ids)
654
+ if (i < sampling_metadata.num_prompts
655
+ and sampling_params.prompt_logprobs is not None):
656
+ largest_num_logprobs = max(largest_num_logprobs,
657
+ sampling_params.prompt_logprobs)
658
+ prompt_len = sampling_metadata.prompt_lens[i]
659
+ prompt_tokens = sampling_metadata.seq_data[
660
+ seq_ids[0]].prompt_token_ids
661
+ batched_logprobs_query_seq_indices.extend(
662
+ sample_idx + j for j in range(prompt_len - 1))
663
+ batched_logprobs_query_token_indices.extend(
664
+ token_id for token_id in prompt_tokens[1:])
665
+ sample_idx += prompt_len - 1
666
+ batched_logprobs_query_seq_indices.extend(
667
+ [sample_idx + parent_id for parent_id in parent_ids])
668
+ batched_logprobs_query_token_indices.extend(next_token_ids)
669
+ if sampling_params.logprobs is not None:
670
+ largest_num_logprobs = max(largest_num_logprobs,
671
+ sampling_params.logprobs)
672
+ sample_idx += num_parent_seqs
673
+ assert sample_idx == logprobs.size(0)
674
+
675
+ batched_logprobs_query_seq_indices_gpu = torch.tensor(
676
+ batched_logprobs_query_seq_indices, device=logprobs.device)
677
+ batched_logprobs_query_token_indices_gpu = torch.tensor(
678
+ batched_logprobs_query_token_indices, device=logprobs.device)
679
+
680
+ # Batched query for logprobs of selected token
681
+ batched_logprobs_query_result = logprobs[[
682
+ batched_logprobs_query_seq_indices_gpu,
683
+ batched_logprobs_query_token_indices_gpu
684
+ ]]
685
+
686
+ batched_ranks_query_result = _get_ranks(
687
+ logprobs[batched_logprobs_query_seq_indices_gpu],
688
+ batched_logprobs_query_token_indices_gpu)
689
+
690
+ # Batched query for logprobs of topk tokens
691
+ if largest_num_logprobs > 0:
692
+ top_logprobs, top_token_ids = torch.topk(logprobs,
693
+ largest_num_logprobs,
694
+ dim=-1)
695
+ top_logprobs = top_logprobs.cpu()
696
+ top_token_ids = top_token_ids.cpu()
697
+ else:
698
+ top_logprobs, top_token_ids = None, None
699
+
700
+ batched_logprobs_query_result = batched_logprobs_query_result.cpu()
701
+ batched_ranks_query_result = batched_ranks_query_result.cpu()
702
+
703
+ # Gather results
704
+ result_prompt_logprobs: List[Optional[PromptLogprobs]] = []
705
+ result_sample_logprobs: List[SampleLogprobs] = []
706
+ sample_idx = 0
707
+ query_result_idx = 0
708
+ for i, (seq_group, sample_result) in enumerate(
709
+ zip(sampling_metadata.seq_groups, sample_results)):
710
+ seq_ids, sampling_params = seq_group
711
+ next_token_ids, parent_ids = sample_result
712
+
713
+ # Prompt logprobs
714
+ if (i < sampling_metadata.num_prompts
715
+ and sampling_params.prompt_logprobs is not None):
716
+ num_logprobs = sampling_params.prompt_logprobs
717
+ prompt_tokens = sampling_metadata.seq_data[
718
+ seq_ids[0]].prompt_token_ids
719
+ group_prompt_logprobs: PromptLogprobs = [None]
720
+ for token_id in prompt_tokens[1:]:
721
+ prompt_logprobs_dict = {
722
+ token_id:
723
+ (batched_logprobs_query_result[query_result_idx].item(),
724
+ batched_ranks_query_result[query_result_idx].item())
725
+ }
726
+ if num_logprobs > 0:
727
+ prompt_logprobs_dict.update(
728
+ zip(
729
+ top_token_ids[sample_idx, :num_logprobs].tolist(),
730
+ zip(
731
+ top_logprobs[
732
+ sample_idx, :num_logprobs].tolist(),
733
+ range(1, num_logprobs + 1))))
734
+ group_prompt_logprobs.append({
735
+ token_id: Logprob(*logprob_rank)
736
+ for token_id, logprob_rank in prompt_logprobs_dict.items()
737
+ })
738
+ sample_idx += 1
739
+ query_result_idx += 1
740
+ result_prompt_logprobs.append(group_prompt_logprobs)
741
+ else:
742
+ result_prompt_logprobs.append(None)
743
+
744
+ # Sample logprobs
745
+ num_logprobs = sampling_params.logprobs
746
+ if num_logprobs is None:
747
+ num_logprobs = 0
748
+ group_sample_logprobs: SampleLogprobs = []
749
+ for next_token_id, parent_id in zip(next_token_ids, parent_ids):
750
+ sample_logprobs_dict = {
751
+ next_token_id:
752
+ (batched_logprobs_query_result[query_result_idx].item(),
753
+ batched_ranks_query_result[query_result_idx].item())
754
+ }
755
+ query_result_idx += 1
756
+ if num_logprobs >= 0:
757
+ sample_logprobs_dict.update(
758
+ zip(
759
+ top_token_ids[sample_idx +
760
+ parent_id, :num_logprobs].tolist(),
761
+ zip(
762
+ top_logprobs[sample_idx +
763
+ parent_id, :num_logprobs].tolist(),
764
+ range(1, num_logprobs + 1))))
765
+ group_sample_logprobs.append({
766
+ token_id: Logprob(*logprob_rank)
767
+ for token_id, logprob_rank in sample_logprobs_dict.items()
768
+ })
769
+ result_sample_logprobs.append(group_sample_logprobs)
770
+ sample_idx += len(seq_ids)
771
+
772
+ return result_prompt_logprobs, result_sample_logprobs
773
+
774
+
775
+ def _modify_greedy_probs_inplace(logprobs: torch.Tensor, probs: torch.Tensor,
776
+ sample_indices: torch.Tensor,
777
+ greedy_samples: torch.Tensor) -> None:
778
+ """Modify the probability distributions of the greedily-sampled tokens such
779
+ that each sampled token has a "probability" of 1.0. This is required by
780
+ speculative decoding, which depends on the sampling method being encoded
781
+ within the probability distribution for correctness.
782
+
783
+ # Why do we only need to do this for greedy sampling?
784
+
785
+ vLLM's sampler performs the following steps for greedy or multinomial
786
+ (random) sampling:
787
+ 1. Get logits from model.
788
+ 2. Modify logits according to per-sequence sampling parameters.
789
+ - Multiply by temperature, top-k and top-p masking, penalize tokens
790
+ according to their frequency, etc.
791
+ 3. Sample a token.
792
+ - Random sampling simply samples from the modified probability
793
+ distribution.
794
+ - Greedy sampling performs `argmax` to obtain the token with the
795
+ highest likelihood.
796
+
797
+ Ignoring greedy sampling for a moment, we find that the computed probability
798
+ distribution has the following property: we can sample from it independently
799
+ and find that the token sampled by the Sampler has a frequency corresponding
800
+ to how often we see it in our sampling. In other words, for tokens sampled
801
+ with vLLM's random SamplingType, the computed probability distribution
802
+ encodes the sampling methodology completely.
803
+
804
+ Greedy sampling does not normally have this property. vLLM modifies logits
805
+ according to sampling params, then performs `argmax`, then returns the
806
+ sampled token and the computed probability distribution. If we sample from
807
+ the distribution, we'll find the likelihood of the greedily-sampled token
808
+ is not always 1.0.
809
+
810
+ Since lossless speculative decoding requires that the sampling methodology
811
+ be encoded within the probability distribution, we are motivated to modify
812
+ the probability distribution such that the sampled token has probability 1
813
+ when speculative decoding is used.
814
+
815
+ NOTE: Alternatively, we could use an extremely low temperature to achieve
816
+ greedy sampling using multinomial computation and unite the codepaths. This
817
+ has implications on the overall design of the sampler, e.g. how to record
818
+ accurate logprobs for the user, so this improvement is deferred to later.
819
+ """
820
+ logprobs[sample_indices, :] = -float('inf')
821
+ logprobs[sample_indices, greedy_samples] = 0.0
822
+ probs[sample_indices, :] = 0
823
+ probs[sample_indices, greedy_samples] = 1.0
824
+
825
+
826
+ def _build_sampler_output(
827
+ sample_results: List[Tuple[List[int], List[int]]],
828
+ sampling_metadata: SamplingMetadata,
829
+ prompt_logprobs: List[Optional[PromptLogprobs]],
830
+ sample_logprobs: List[SampleLogprobs],
831
+ on_device_tensors: Optional[Tuple[torch.Tensor, torch.Tensor]],
832
+ ) -> SamplerOutput:
833
+ """Construct Python objects with the output of sampling.
834
+
835
+ Args:
836
+ on_device_tensors: Tuple containing on-device tensors with the
837
+ probabilities used in sampling and the sampled token ids. This
838
+ allows post-processing without copies to CPU/serialization, e.g. in
839
+ speculative decoding rejection sampling.
840
+ """
841
+
842
+ sampler_output = []
843
+ for (seq_group, sample_result, group_prompt_logprobs,
844
+ group_sample_logprobs) in zip(sampling_metadata.seq_groups,
845
+ sample_results, prompt_logprobs,
846
+ sample_logprobs):
847
+ seq_ids, _ = seq_group
848
+ next_token_ids, parent_ids = sample_result
849
+ seq_outputs = []
850
+ for parent_id, next_token_id, logprobs in zip(parent_ids,
851
+ next_token_ids,
852
+ group_sample_logprobs):
853
+ seq_outputs.append(
854
+ SequenceOutput(seq_ids[parent_id], next_token_id, logprobs))
855
+ sampler_output.append(
856
+ SequenceGroupOutput(seq_outputs, group_prompt_logprobs))
857
+
858
+ # If not specified, store None values in SamplerOutput.
859
+ if on_device_tensors is not None:
860
+ sampled_token_probs, sampled_token_ids = on_device_tensors
861
+ else:
862
+ sampled_token_probs, sampled_token_ids = (None, None)
863
+
864
+ return SamplerOutput(
865
+ outputs=sampler_output,
866
+ sampled_token_probs=sampled_token_probs,
867
+ sampled_token_ids=sampled_token_ids,
868
+ )
autoregressive/serve/worker.py ADDED
@@ -0,0 +1,349 @@
1
+ """A GPU worker class."""
2
+ import gc
3
+ import os
4
+ from typing import Any, Dict, List, Optional, Set, Tuple
5
+
6
+ import torch
7
+ import torch.distributed
8
+
9
+ from vllm.config import (CacheConfig, DeviceConfig, LoadConfig, LoRAConfig,
10
+ ModelConfig, ParallelConfig, SchedulerConfig,
11
+ VisionLanguageConfig)
12
+ from vllm.distributed import (broadcast_tensor_dict,
13
+ ensure_model_parallel_initialized,
14
+ init_distributed_environment)
15
+ from vllm.distributed.device_communicators import pynccl_utils
16
+ from vllm.distributed.device_communicators.custom_all_reduce import (
17
+ init_custom_ar)
18
+ from vllm.lora.request import LoRARequest
19
+ from vllm.model_executor import set_random_seed
20
+ from vllm.sequence import SamplerOutput, SequenceGroupMetadata
21
+ from vllm.worker.cache_engine import CacheEngine
22
+ # from vllm.worker.model_runner import ModelRunner
23
+ from vllm.worker.worker_base import WorkerBase
24
+ from autoregressive.serve.model_runner import ModelRunner
25
+
26
+
27
+ class Worker(WorkerBase):
28
+ """A worker class that executes (a partition of) the model on a GPU.
29
+
30
+ Each worker is associated with a single GPU. The worker is responsible for
31
+ maintaining the KV cache and executing the model on the GPU. In case of
32
+ distributed inference, each worker is assigned a partition of the model.
33
+ """
34
+
35
+ def __init__(
36
+ self,
37
+ model_config: ModelConfig,
38
+ parallel_config: ParallelConfig,
39
+ scheduler_config: SchedulerConfig,
40
+ device_config: DeviceConfig,
41
+ cache_config: CacheConfig,
42
+ load_config: LoadConfig,
43
+ local_rank: int,
44
+ rank: int,
45
+ distributed_init_method: str,
46
+ lora_config: Optional[LoRAConfig] = None,
47
+ vision_language_config: Optional[VisionLanguageConfig] = None,
48
+ is_driver_worker: bool = False,
49
+ ) -> None:
50
+ self.model_config = model_config
51
+ self.parallel_config = parallel_config
52
+ self.scheduler_config = scheduler_config
53
+ self.device_config = device_config
54
+ self.cache_config = cache_config
55
+ self.local_rank = local_rank
56
+ self.rank = rank
57
+ self.distributed_init_method = distributed_init_method
58
+ self.lora_config = lora_config
59
+ self.load_config = load_config
60
+ self.is_driver_worker = is_driver_worker
61
+ if self.is_driver_worker:
62
+ assert self.rank == 0, "The driver worker must have rank 0."
63
+
64
+ if self.model_config.trust_remote_code:
65
+ # note: lazy import to avoid importing torch before initializing
66
+ from vllm.utils import init_cached_hf_modules
67
+ init_cached_hf_modules()
68
+ self.vision_language_config = vision_language_config
69
+ if self.vision_language_config:
70
+ assert not self.lora_config, (
71
+ "To be tested: vision language model with LoRA settings.")
72
+
73
+ self.model_runner = ModelRunner(
74
+ model_config,
75
+ parallel_config,
76
+ scheduler_config,
77
+ device_config,
78
+ load_config=load_config,
79
+ lora_config=self.lora_config,
80
+ kv_cache_dtype=self.cache_config.cache_dtype,
81
+ is_driver_worker=is_driver_worker,
82
+ vision_language_config=vision_language_config,
83
+ )
84
+ # Uninitialized cache engine. Will be initialized by
85
+ # initialize_cache.
86
+ self.cache_engine: CacheEngine
87
+ self.gpu_cache: List[torch.Tensor]
88
+
89
+ def init_device(self) -> None:
90
+ if self.device_config.device.type == "cuda":
91
+ # torch.distributed.all_reduce does not free the input tensor until
92
+ # the synchronization point. This causes the memory usage to grow
93
+ # as the number of all_reduce calls increases. This env var disables
94
+ # this behavior.
95
+ # Related issue:
96
+ # https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
97
+ os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
98
+
99
+ # This env var set by Ray causes exceptions with graph building.
100
+ os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)
101
+ self.device = torch.device(f"cuda:{self.local_rank}")
102
+ torch.cuda.set_device(self.device)
103
+
104
+ _check_if_gpu_supports_dtype(self.model_config.dtype)
105
+ torch.cuda.empty_cache()
106
+ self.init_gpu_memory = torch.cuda.mem_get_info()[0]
107
+ else:
108
+ raise RuntimeError(
109
+ f"Not support device type: {self.device_config.device}")
110
+ # Initialize the distributed environment.
111
+ init_worker_distributed_environment(self.parallel_config, self.rank,
112
+ self.distributed_init_method,
113
+ self.local_rank)
114
+ # Set random seed.
115
+ set_random_seed(self.model_config.seed)
116
+
117
+ def load_model(self, args):
118
+ self.model_runner.load_model(args)
119
+
120
+ @torch.inference_mode()
121
+ def determine_num_available_blocks(self) -> Tuple[int, int]:
122
+ """Profiles the peak memory usage of the model to determine how many
123
+ KV blocks may be allocated without OOMs.
124
+
125
+ The engine will first conduct a profiling of the existing memory usage.
126
+ Then, it calculates the maximum possible number of GPU and CPU blocks
127
+ that can be allocated with the remaining free memory.
128
+
129
+ .. tip::
130
+ You may limit the usage of GPU memory
131
+ by adjusting the `gpu_memory_utilization` parameter.
132
+ """
133
+ # Profile the memory usage of the model and get the maximum number of
134
+ # cache blocks that can be allocated with the remaining free memory.
135
+ torch.cuda.empty_cache()
136
+
137
+ # Execute a forward pass with dummy inputs to profile the memory usage
138
+ # of the model.
139
+ self.model_runner.profile_run()
140
+
141
+ # Calculate the number of blocks that can be allocated with the
142
+ # profiled peak memory.
143
+ torch.cuda.synchronize()
144
+ free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info()
145
+ # NOTE(woosuk): Here we assume that the other processes using the same
146
+ # GPU did not change their memory usage during the profiling.
147
+ peak_memory = self.init_gpu_memory - free_gpu_memory
148
+ assert peak_memory > 0, (
149
+ "Error in memory profiling. This happens when the GPU memory was "
150
+ "not properly cleaned up before initializing the vLLM instance.")
151
+
152
+ cache_block_size = self.get_cache_block_size_bytes()
153
+ num_gpu_blocks = int(
154
+ (total_gpu_memory * self.cache_config.gpu_memory_utilization -
155
+ peak_memory) // cache_block_size)
156
+ num_cpu_blocks = int(self.cache_config.swap_space_bytes //
157
+ cache_block_size)
158
+ num_gpu_blocks = max(num_gpu_blocks, 0)
159
+ num_cpu_blocks = max(num_cpu_blocks, 0)
160
+ if self.model_runner.lora_manager:
161
+ self.model_runner.remove_all_loras()
162
+ gc.collect()
163
+ torch.cuda.empty_cache()
164
+ return num_gpu_blocks, num_cpu_blocks
165
+
166
+ def initialize_cache(self, num_gpu_blocks: int,
167
+ num_cpu_blocks: int) -> None:
168
+ """Allocate GPU and CPU KV cache with the specified number of blocks.
169
+
170
+ This also warms up the model, which may record CUDA graphs.
171
+ """
172
+ raise_if_cache_size_invalid(num_gpu_blocks,
173
+ self.cache_config.block_size,
174
+ self.model_config.max_model_len)
175
+
176
+ self.cache_config.num_gpu_blocks = num_gpu_blocks
177
+ self.cache_config.num_cpu_blocks = num_cpu_blocks
178
+
179
+ self._init_cache_engine()
180
+ self._warm_up_model()
181
+
182
+ def _init_cache_engine(self):
183
+ assert self.cache_config.num_gpu_blocks is not None
184
+ self.cache_engine = CacheEngine(self.cache_config, self.model_config,
185
+ self.parallel_config)
186
+ self.gpu_cache = self.cache_engine.gpu_cache
187
+ self.model_runner.set_block_size(self.cache_engine.block_size)
188
+
189
+ def _warm_up_model(self) -> None:
190
+ if not self.model_config.enforce_eager:
191
+ self.model_runner.capture_model(self.gpu_cache)
192
+ # Reset the seed to ensure that the random state is not affected by
193
+ # the model initialization and profiling.
194
+ set_random_seed(self.model_config.seed)
195
+
196
+ def cache_swap(
197
+ self,
198
+ blocks_to_swap_in: Dict[int, int],
199
+ blocks_to_swap_out: Dict[int, int],
200
+ blocks_to_copy: Dict[int, List[int]],
201
+ ) -> None:
202
+ # Issue cache operations.
203
+ # TODO(woosuk): Profile swapping overhead and optimize if needed.
204
+ if blocks_to_swap_in:
205
+ self.cache_engine.swap_in(blocks_to_swap_in)
206
+ if blocks_to_swap_out:
207
+ self.cache_engine.swap_out(blocks_to_swap_out)
208
+ if blocks_to_copy:
209
+ self.cache_engine.copy(blocks_to_copy)
210
+
211
+ @torch.inference_mode()
212
+ def execute_model(
213
+ self,
214
+ seq_group_metadata_list: Optional[List[SequenceGroupMetadata]] = None,
215
+ blocks_to_swap_in: Optional[Dict[int, int]] = None,
216
+ blocks_to_swap_out: Optional[Dict[int, int]] = None,
217
+ blocks_to_copy: Optional[Dict[int, List[int]]] = None,
218
+ num_lookahead_slots: int = 0,
219
+ ) -> List[SamplerOutput]:
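+ # The driver worker (rank 0) packs the swap/copy metadata and broadcasts it; the other ranks receive it via broadcast_tensor_dict below.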
220
+
221
+ if self.is_driver_worker:
222
+ assert seq_group_metadata_list is not None
223
+ num_seq_groups = len(seq_group_metadata_list)
224
+ assert blocks_to_swap_in is not None
225
+ assert blocks_to_swap_out is not None
226
+ assert blocks_to_copy is not None
227
+ data: Dict[str, Any] = {
228
+ "num_seq_groups": num_seq_groups,
229
+ "blocks_to_swap_in": blocks_to_swap_in,
230
+ "blocks_to_swap_out": blocks_to_swap_out,
231
+ "blocks_to_copy": blocks_to_copy,
232
+ }
233
+ broadcast_tensor_dict(data, src=0)
234
+ else:
235
+ data = broadcast_tensor_dict(src=0)
236
+ num_seq_groups = data["num_seq_groups"]
237
+ blocks_to_swap_in = data["blocks_to_swap_in"]
238
+ blocks_to_swap_out = data["blocks_to_swap_out"]
239
+ blocks_to_copy = data["blocks_to_copy"]
240
+
241
+ assert blocks_to_swap_in is not None
242
+ assert blocks_to_swap_out is not None
243
+ assert blocks_to_copy is not None
244
+ self.cache_swap(blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy)
245
+
246
+ # If there is no input, we don't need to execute the model.
247
+ if num_seq_groups == 0:
248
+ return []
249
+
250
+ output = self.model_runner.execute_model(seq_group_metadata_list,
251
+ self.gpu_cache)
252
+
253
+ # Worker only supports single-step execution. Wrap the output in a list
254
+ # to conform to interface.
255
+ return [output]
256
+
257
+ def add_lora(self, lora_request: LoRARequest) -> bool:
258
+ return self.model_runner.add_lora(lora_request)
259
+
260
+ def remove_lora(self, lora_id: int) -> bool:
261
+ return self.model_runner.remove_lora(lora_id)
262
+
263
+ def list_loras(self) -> Set[int]:
264
+ return self.model_runner.list_loras()
265
+
266
+ @property
267
+ def max_model_len(self) -> int:
268
+ return self.model_config.max_model_len
269
+
270
+ @property
271
+ def vocab_size(self) -> int:
272
+ return self.model_runner.vocab_size
273
+
274
+ def get_cache_block_size_bytes(self) -> int:
275
+ """Get the size of the KV cache block size in bytes.
276
+ """
277
+ return CacheEngine.get_cache_block_size(self.cache_config,
278
+ self.model_config,
279
+ self.parallel_config)
280
+
281
+
282
+ def init_worker_distributed_environment(
283
+ parallel_config: ParallelConfig,
284
+ rank: int,
285
+ distributed_init_method: Optional[str] = None,
286
+ local_rank: int = -1,
287
+ ) -> None:
288
+ """Initialize the distributed environment."""
289
+ init_distributed_environment(parallel_config.world_size, rank,
290
+ distributed_init_method, local_rank)
291
+
292
+ if pynccl_utils.is_initialized():
293
+ pynccl_world_size = pynccl_utils.get_world_size()
294
+ if pynccl_world_size != parallel_config.world_size:
295
+ raise RuntimeError(
296
+ "pynccl is already initialized but the pynccl world "
297
+ "size does not match parallel_config.world_size "
298
+ f"({pynccl_world_size} vs. {parallel_config.world_size}).")
299
+ elif parallel_config.world_size > 1:
300
+ # NOTE(woosuk): We don't initialize pynccl process group when world size
301
+ # is 1.
302
+ pynccl_utils.init_process_group(
303
+ world_size=parallel_config.world_size,
304
+ local_rank=local_rank,
305
+ rank=rank,
306
+ init_method=distributed_init_method,
307
+ )
308
+
309
+ ensure_model_parallel_initialized(parallel_config.tensor_parallel_size,
310
+ parallel_config.pipeline_parallel_size)
311
+
312
+ # Initialize a custom fast all-reduce implementation.
313
+ if not parallel_config.disable_custom_all_reduce:
314
+ init_custom_ar()
315
+
316
+ # A small all_reduce for warmup.
317
+ torch.distributed.all_reduce(torch.zeros(1).cuda())
318
+ if pynccl_utils.is_initialized():
319
+ pynccl_utils.all_reduce(torch.zeros(1).cuda())
320
+
321
+
322
+ def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):
323
+ # Check if the GPU supports the dtype.
324
+ if torch_dtype == torch.bfloat16:
325
+ compute_capability = torch.cuda.get_device_capability()
326
+ if compute_capability[0] < 8:
327
+ gpu_name = torch.cuda.get_device_name()
328
+ raise ValueError(
329
+ "Bfloat16 is only supported on GPUs with compute capability "
330
+ f"of at least 8.0. Your {gpu_name} GPU has compute capability "
331
+ f"{compute_capability[0]}.{compute_capability[1]}. "
332
+ "You can use float16 instead by explicitly setting the"
333
+ "`dtype` flag in CLI, for example: --dtype=half.")
334
+
335
+
336
+ def raise_if_cache_size_invalid(num_gpu_blocks, block_size,
337
+ max_model_len) -> None:
338
+ if num_gpu_blocks <= 0:
339
+ raise ValueError("No available memory for the cache blocks. "
340
+ "Try increasing `gpu_memory_utilization` when "
341
+ "initializing the engine.")
342
+ max_seq_len = block_size * num_gpu_blocks
343
+ if max_model_len > max_seq_len:
344
+ raise ValueError(
345
+ f"The model's max seq len ({max_model_len}) "
346
+ "is larger than the maximum number of tokens that can be "
347
+ f"stored in KV cache ({max_seq_len}). Try increasing "
348
+ "`gpu_memory_utilization` or decreasing `max_model_len` when "
349
+ "initializing the engine.")
autoregressive/test/metric.py ADDED
@@ -0,0 +1,97 @@
+ import numpy as np
+ from skimage.metrics import structural_similarity as ssim
+ from sklearn.metrics import f1_score
+ from torchmetrics.image import MultiScaleStructuralSimilarityIndexMeasure
+ from torchmetrics.classification import BinaryF1Score
+
+ class SSIM:
+     def __init__(self, data_range=1.0):
+         # ssim = MultiScaleStructuralSimilarityIndexMeasure(data_range=1.0)  # unused; update() uses the skimage implementation imported above
+         self.data_range = data_range
+         self.total_ssim = 0.0
+         self.count = 0
+
+     def update(self, img1, img2):
+         # Inputs are expected as uint8 arrays in [0, 255]; rescale to [0, 1] before computing SSIM.
+         ssim_value = ssim((img1 / 255).clip(0, 1), (img2 / 255).clip(0, 1), data_range=self.data_range)
+         self.total_ssim += ssim_value
+         self.count += 1
+
+     def calculate(self):
+         if self.count == 0:
+             raise ValueError("No images have been added.")
+         return self.total_ssim / self.count
+
+
+ class F1score:
+     def __init__(self, threshold=128):
+         self.threshold = threshold
+         self.total_f1 = 0
+         self.count = 0
+
+     def update(self, img1, img2):
+         assert img1.size == img2.size, "The images must be the same size."
+         # Binarize both maps at the threshold and compare them pixel-wise.
+         binary_image1 = (img1 > self.threshold).astype(int)
+         binary_image2 = (img2 > self.threshold).astype(int)
+
+         y_true = binary_image1.flatten()
+         y_pred = binary_image2.flatten()
+
+         f1 = f1_score(y_true, y_pred)
+         self.total_f1 += f1
+         self.count += 1
+
+     def calculate(self):
+         average_f1 = self.total_f1 / self.count
+         return average_f1
+
+
+ class RMSE:
+     def __init__(self):
+         self.total_rmse = 0
+         self.count = 0
+
+     def update(self, img1, img2):
+         assert img1.size == img2.size, "The images must be the same size."
+         diff = img1 - img2
+         diff_squared = np.square(diff)
+         mse = np.mean(diff_squared)
+         rmse = np.sqrt(mse)
+         self.total_rmse += rmse
+         self.count += 1
+
+     def calculate(self):
+         average_rmse = self.total_rmse / self.count
+         return average_rmse
+
+
+ if __name__ == "__main__":
+     # Quick sanity check on random uint8 images.
+     img1_1 = np.random.randn(256, 256)
+     img1_1 = img1_1 - img1_1.min()
+     img1_1 = 255 * img1_1 / img1_1.max()
+     img1_1 = img1_1.astype(np.uint8)
+     img1_2 = np.random.randn(256, 256)
+     img1_2 = img1_2 - img1_2.min()
+     img1_2 = 255 * img1_2 / img1_2.max()
+     img1_2 = img1_2.astype(np.uint8)
+     img2_1 = np.random.randn(256, 256)
+     img2_1 = img2_1 - img2_1.min()
+     img2_1 = 255 * img2_1 / img2_1.max()
+     img2_1 = img2_1.astype(np.uint8)
+     img2_2 = np.random.randn(256, 256)
+     img2_2 = img2_2 - img2_2.min()
+     img2_2 = 255 * img2_2 / img2_2.max()
+     img2_2 = img2_2.astype(np.uint8)
+     img_pairs = [(img1_1, img2_1), (img1_2, img2_2)]
+
+     calculator = SSIM()
+     for img1, img2 in img_pairs:
+         calculator.update(img1, img2)
+
+     avg_ssim = calculator.calculate()
+     print(f'Average SSIM: {avg_ssim}')
autoregressive/test/test_c2i.py ADDED
@@ -0,0 +1,267 @@
1
+ # Modified from:
2
+ # DiT: https://github.com/facebookresearch/DiT/blob/main/sample.py
3
+ import torch
4
+ torch.backends.cuda.matmul.allow_tf32 = True
5
+ torch.backends.cudnn.allow_tf32 = True
6
+ torch.set_float32_matmul_precision('high')
7
+ setattr(torch.nn.Linear, 'reset_parameters', lambda self: None)
8
+ setattr(torch.nn.LayerNorm, 'reset_parameters', lambda self: None)
9
+ from torchvision.utils import save_image
10
+ import os
11
+ import sys
12
+ current_directory = os.getcwd()
13
+ sys.path.append(current_directory)
14
+ import time
15
+ import argparse
16
+ from tokenizer.tokenizer_image.vq_model import VQ_models
17
+ from autoregressive.models.gpt import GPT_models
18
+ from autoregressive.models.generate import generate
19
+ from condition.hed import HEDdetector, nms
20
+ from condition.canny import CannyDetector
21
+ from condition.midas.depth import MidasDetector
22
+ from autoregressive.test.metric import SSIM, F1score, RMSE
23
+ import torch.distributed as dist
24
+ from dataset.augmentation import center_crop_arr
25
+ from dataset.build import build_dataset
26
+ from torch.utils.data import DataLoader
27
+ from torch.utils.data.distributed import DistributedSampler
28
+ from torchvision import transforms
29
+ from PIL import Image
30
+ import os
31
+ import numpy as np
32
+ import cv2
33
+ from tqdm import tqdm
34
+ from functools import partial
35
+ from skimage.transform import resize
36
+ from torch.nn.functional import interpolate
37
+ def create_npz_from_sample_folder(sample_dir, num=50_000):
38
+ """
39
+ Builds a single .npz file from a folder of .png samples.
40
+ """
41
+ samples = []
42
+ for i in tqdm(range(num), desc="Building .npz file from samples"):
43
+ sample_pil = Image.open(f"{sample_dir}/{i:06d}.png")
44
+ sample_np = np.asarray(sample_pil).astype(np.uint8)
45
+ samples.append(sample_np)
46
+ samples = np.stack(samples)
47
+ assert samples.shape == (num, samples.shape[1], samples.shape[2], 3)
48
+ npz_path = f"{sample_dir}.npz"
49
+ np.savez(npz_path, arr_0=samples)
50
+ print(f"Saved .npz file to {npz_path} [shape={samples.shape}].")
51
+ return npz_path
52
+
53
+ def main(args):
54
+ assert torch.cuda.is_available(), "Sampling with DDP requires at least one GPU; use sample.py for CPU-only usage."
55
+ torch.set_grad_enabled(False)
56
+ # Setup DDP:
57
+ dist.init_process_group("nccl")
58
+ rank = dist.get_rank()
59
+ device = rank % torch.cuda.device_count()
60
+ seed = args.global_seed * dist.get_world_size() + rank
61
+ torch.manual_seed(seed)
62
+ torch.cuda.set_device(device)
63
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
64
+
65
+ # create and load model
66
+ vq_model = VQ_models[args.vq_model](
67
+ codebook_size=args.codebook_size,
68
+ codebook_embed_dim=args.codebook_embed_dim)
69
+ vq_model.to(device)
70
+ vq_model.eval()
71
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
72
+ vq_model.load_state_dict(checkpoint["model"])
73
+ del checkpoint
74
+ print(f"image tokenizer is loaded")
75
+
76
+ # create and load gpt model
77
+ precision = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.precision]
78
+ latent_size = args.image_size // args.downsample_size
79
+ gpt_model = GPT_models[args.gpt_model](
80
+ vocab_size=args.codebook_size,
81
+ block_size=latent_size ** 2,
82
+ num_classes=args.num_classes,
83
+ cls_token_num=args.cls_token_num,
84
+ model_type=args.gpt_type,
85
+ condition_token_num=args.condition_token_nums,
86
+ # image_size=args.image_size
87
+ ).to(device=device, dtype=precision)
88
+
89
+
90
+ _, file_extension = os.path.splitext(args.gpt_ckpt)
91
+ if file_extension.lower() == '.safetensors':
92
+ from safetensors.torch import load_file
93
+ model_weight = load_file(args.gpt_ckpt)
94
+ gpt_model.load_state_dict(model_weight, strict=False)
95
+ gpt_model.eval()
96
+ else:
97
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
98
+ if "model" in checkpoint: # ddp
99
+ model_weight = checkpoint["model"]
100
+ elif "module" in checkpoint: # deepspeed
101
+ model_weight = checkpoint["module"]
102
+ elif "state_dict" in checkpoint:
103
+ model_weight = checkpoint["state_dict"]
104
+ else:
105
+ raise Exception("please check model weight")
106
+ gpt_model.load_state_dict(model_weight, strict=False)
107
+ gpt_model.eval()
108
+ del checkpoint
109
+ print(f"gpt model is loaded")
110
+
111
+ if args.condition_type == 'hed':
112
+ get_condition = HEDdetector(device=device)
113
+ get_metric = SSIM()
114
+ elif args.condition_type == 'canny':
115
+ get_condition = CannyDetector()
116
+ get_metric = F1score()
117
+ elif args.condition_type == 'depth':
118
+ get_condition = MidasDetector(device=device)
119
+ get_metric = RMSE()
120
+ # Setup data:
121
+ transform = transforms.Compose([
122
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
123
+ transforms.ToTensor(),
124
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
125
+ ])
126
+
127
+ if args.dataset == 'imagenet':
128
+ dataset = build_dataset(args, transform=transform)
129
+ elif args.dataset == 'coco':
130
+ dataset = build_dataset(args, transform=transform)
131
+ elif args.dataset == 'imagenet_code':
132
+ dataset = build_dataset(args)
133
+ else:
134
+ raise Exception("please check dataset")
135
+
136
+ sampler = DistributedSampler(
137
+ dataset,
138
+ num_replicas=dist.get_world_size(),
139
+ rank=rank,
140
+ shuffle=False,
141
+ seed=args.global_seed
142
+ )
143
+ loader = DataLoader(
144
+ dataset,
145
+ batch_size=args.per_proc_batch_size,
146
+ shuffle=False,
147
+ sampler=sampler,
148
+ num_workers=args.num_workers,
149
+ pin_memory=True,
150
+ drop_last=False
151
+ )
152
+
153
+
154
+ if args.compile:
155
+ print(f"compiling the model...")
156
+ gpt_model = torch.compile(
157
+ gpt_model,
158
+ mode="reduce-overhead",
159
+ fullgraph=True
160
+ ) # requires PyTorch 2.0 (optional)
161
+ else:
162
+ pass
163
+ # print(f"no need to compile model in demo")
164
+
165
+ # Create folder to save samples:
166
+ model_string_name = args.gpt_model.replace("/", "-")
167
+ if args.from_fsdp:
168
+ ckpt_string_name = args.gpt_ckpt.split('/')[-2]
169
+ else:
170
+ ckpt_string_name = os.path.basename(args.gpt_ckpt).replace(".pth", "").replace(".pt", "")
171
+
172
+ date = os.path.split(os.path.dirname(os.path.dirname(os.path.dirname(args.gpt_ckpt))))[-1]
173
+
174
+ sample_folder_dir = f"{args.sample_dir}/imagenet/{args.condition_type}"
175
+ if rank == 0:
176
+ if args.save_image:
177
+ os.makedirs(sample_folder_dir, exist_ok=True)
178
+ print(f"Saving .png samples at {sample_folder_dir}")
179
+ n = args.per_proc_batch_size
180
+ global_batch_size = n * dist.get_world_size()
181
+ total = 0
182
+
183
+ condition_null = None
184
+ num = 0
185
+ for batch in tqdm(loader):
186
+ num += 1
187
+ # if num > 40:
188
+ # break
189
+ class_labels = batch["labels"].to(device).squeeze(1)
190
+ condition_image = batch["condition_img"].to(device)
191
+ condition_imgs = batch["condition_imgs"].to(device)
192
+
193
+ batch_size = class_labels.shape[0]
194
+
195
+ c_indices = class_labels
196
+ qzshape = [len(class_labels), args.codebook_embed_dim, latent_size, latent_size]
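+ # qzshape is the latent grid handed to the VQ decoder: (batch, codebook_embed_dim, latent_size, latent_size).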
197
+
198
+ index_sample = generate(
199
+ gpt_model, c_indices, latent_size ** 2, condition=condition_imgs.repeat(1,3,1,1).to(precision), condition_null=condition_null, condition_token_nums=args.condition_token_nums,
200
+ cfg_scale=args.cfg_scale, cfg_interval=args.cfg_interval,
201
+ temperature=args.temperature, top_k=args.top_k,
202
+ top_p=args.top_p, sample_logits=True,
203
+ )
204
+
205
+ samples = vq_model.decode_code(index_sample, qzshape) # output value is between [-1, 1]
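+ # Map decoded samples from [-1, 1] to [0, 255] and resize both samples and condition maps to 256x256 so the metric is computed at a fixed resolution.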
206
+ samples = 255*(samples*0.5 + 0.5)
207
+ if samples.shape[2] != 256:
208
+ samples = interpolate(samples, size=(256, 256), mode='bilinear', align_corners=False)
209
+
210
+ condition_imgs = 255*(condition_imgs*0.5 + 0.5)
211
+ if condition_imgs.shape[2] != 256:
212
+ condition_imgs = interpolate(condition_imgs, size=(256, 256), mode='bilinear', align_corners=False)
213
+ for i in range(len(samples)):
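+ # Re-extract the condition (canny/depth/hed) from each generated image and compare it with the input condition to measure how faithfully the control was followed.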
214
+
215
+ sample = samples[i].to(torch.uint8).permute(1,2,0)
216
+ sample_condition = get_condition(sample)
217
+ if torch.is_tensor(sample_condition):
218
+ sample_condition = sample_condition.cpu().numpy()
219
+ condition_img = condition_imgs[i,0].cpu().detach().numpy()
220
+
221
+ get_metric.update(condition_img, sample_condition)
222
+
223
+ index = i * dist.get_world_size() + rank + total
224
+ if args.save_image:
225
+ save_image(2*(samples[i]/255 - 0.5), f"{sample_folder_dir}/{index:06d}.png", nrow=1, normalize=True, value_range=(-1, 1))
226
+
227
+ total += global_batch_size
228
+
229
+ metric = get_metric.calculate()
230
+ print(f'count: {get_metric.count}')
231
+ print(f'{args.condition_type}: {metric}')
232
+
233
+
234
+ if __name__ == "__main__":
235
+ parser = argparse.ArgumentParser()
236
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-B")
237
+ parser.add_argument("--gpt-ckpt", type=str, default=None)
238
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="c2i", help="class-conditional or text-conditional")
239
+ parser.add_argument("--from-fsdp", action='store_true')
240
+ parser.add_argument("--cls-token-num", type=int, default=1, help="max token number of condition input")
241
+ parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
242
+ parser.add_argument("--compile", action='store_true', default=False)
243
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
244
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
245
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
246
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
247
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=256)
248
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
249
+ parser.add_argument("--num-classes", type=int, default=1000)
250
+ parser.add_argument("--cfg-scale", type=float, default=4.0)
251
+ parser.add_argument("--cfg-interval", type=float, default=-1)
252
+ parser.add_argument("--seed", type=int, default=0)
253
+ parser.add_argument("--top-k", type=int, default=2000,help="top-k value to sample with")
254
+ parser.add_argument("--temperature", type=float, default=1.0, help="temperature value to sample with")
255
+ parser.add_argument("--top-p", type=float, default=1.0, help="top-p value to sample with")
256
+ parser.add_argument("--condition-token-nums", type=int, default=0)
257
+ parser.add_argument("--condition-type", type=str, default='canny', choices=['canny', 'depth'])
258
+ parser.add_argument("--per-proc-batch-size", type=int, default=32)
259
+ parser.add_argument("--get-condition-img", type=bool, default=False)
260
+ parser.add_argument("--global-seed", type=int, default=0)
261
+ parser.add_argument("--dataset", type=str, choices=['imagenet', 'coco', 'imagenet_code'], default='imagenet_code')
262
+ parser.add_argument("--code-path", type=str, required=True)
263
+ parser.add_argument("--num-workers", type=int, default=16)
264
+ parser.add_argument("--sample-dir", type=str, default="samples")
265
+ parser.add_argument("--save-image", type=bool, default=False)
266
+ args = parser.parse_args()
267
+ main(args)
autoregressive/test/test_ssim.py ADDED
@@ -0,0 +1,21 @@
+
+ import torch
+ from torchmetrics.image import MultiScaleStructuralSimilarityIndexMeasure
+ import torchvision.transforms as transforms
+ from PIL import Image
+
+ img1 = Image.open('autoregressive/test/label.png').convert('L')
+ img2 = Image.open('autoregressive/test/pred.png').convert('L')
+
+ to_tensor = transforms.ToTensor()
+ img1_tensor = to_tensor(img1).unsqueeze(0)  # (C, H, W) -> (1, C, H, W)
+ img2_tensor = to_tensor(img2).unsqueeze(0)
+
+ img1_tensor = img1_tensor.float()
+ img2_tensor = img2_tensor.float()
+
+ ms_ssim = MultiScaleStructuralSimilarityIndexMeasure(data_range=1.0)
+
+ ms_ssim_score = ms_ssim(img1_tensor, img2_tensor)
+
+ print("MS-SSIM:", ms_ssim_score.item())
autoregressive/test/test_t2i.py ADDED
@@ -0,0 +1,285 @@
1
+ # Modified from:
2
+ # DiT: https://github.com/facebookresearch/DiT/blob/main/sample.py
3
+ import warnings
4
+ warnings.filterwarnings('ignore')
5
+ import torch
6
+ torch.backends.cuda.matmul.allow_tf32 = True
7
+ torch.backends.cudnn.allow_tf32 = True
8
+ torch.set_float32_matmul_precision('high')
9
+ setattr(torch.nn.Linear, 'reset_parameters', lambda self: None)
10
+ setattr(torch.nn.LayerNorm, 'reset_parameters', lambda self: None)
11
+ from torchvision.utils import save_image
12
+ import os
13
+ import sys
14
+ current_directory = os.getcwd()
15
+ sys.path.append(current_directory)
16
+ import time
17
+ import argparse
18
+ from tokenizer.tokenizer_image.vq_model import VQ_models
19
+ from autoregressive.models.gpt_t2i import GPT_models
20
+ from autoregressive.models.generate import generate
21
+ from condition.hed import HEDdetector, nms
22
+ from condition.canny import CannyDetector
23
+ from autoregressive.test.metric import SSIM, F1score, RMSE
24
+ from condition.midas.depth import MidasDetector
25
+ import torch.distributed as dist
26
+ from dataset.augmentation import center_crop_arr
27
+ from dataset.build import build_dataset
28
+ from torch.utils.data import DataLoader
29
+ from torch.utils.data.distributed import DistributedSampler
30
+ from torchvision import transforms
31
+ from PIL import Image
32
+ import os
33
+ import numpy as np
34
+ import cv2
35
+ from tqdm import tqdm
36
+ from functools import partial
37
+ from dataset.t2i_control import build_t2i_control_code
38
+ from language.t5 import T5Embedder
39
+ from torchmetrics.image import MultiScaleStructuralSimilarityIndexMeasure
40
+ from condition.lineart import LineArt
41
+ import torch.nn.functional as F
42
+
43
+ def main(args):
44
+ # # Setup PyTorch:
45
+ assert torch.cuda.is_available(), "Sampling with DDP requires at least one GPU; use sample.py for CPU-only usage."
46
+ torch.set_grad_enabled(False)
47
+ # Setup DDP:
48
+ dist.init_process_group("nccl")
49
+ rank = dist.get_rank()
50
+ device = rank % torch.cuda.device_count()
51
+ seed = args.global_seed * dist.get_world_size() + rank
52
+ torch.manual_seed(seed)
53
+ torch.cuda.set_device(device)
54
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
55
+
56
+ # create and load model
57
+ vq_model = VQ_models[args.vq_model](
58
+ codebook_size=args.codebook_size,
59
+ codebook_embed_dim=args.codebook_embed_dim)
60
+ vq_model.to(device)
61
+ vq_model.eval()
62
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
63
+ vq_model.load_state_dict(checkpoint["model"])
64
+ del checkpoint
65
+ print(f"image tokenizer is loaded")
66
+
67
+ # create and load gpt model
68
+ precision = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.precision]
69
+ latent_size = args.image_size // args.downsample_size
70
+ gpt_model = GPT_models[args.gpt_model](
71
+ vocab_size=args.codebook_size,
72
+ block_size=latent_size ** 2,
73
+ num_classes=args.num_classes,
74
+ cls_token_num=args.cls_token_num,
75
+ model_type=args.gpt_type,
76
+ adapter_size=args.adapter_size,
77
+ condition_type=args.condition_type,
78
+ ).to(device=device, dtype=precision)
79
+
80
+ _, file_extension = os.path.splitext(args.gpt_ckpt)
81
+ if file_extension.lower() == '.safetensors':
82
+ from safetensors.torch import load_file
83
+ model_weight = load_file(args.gpt_ckpt)
84
+ gpt_model.load_state_dict(model_weight, strict=False)
85
+ gpt_model.eval()
86
+ else:
87
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
88
+ if "model" in checkpoint: # ddp
89
+ model_weight = checkpoint["model"]
90
+ elif "module" in checkpoint: # deepspeed
91
+ model_weight = checkpoint["module"]
92
+ elif "state_dict" in checkpoint:
93
+ model_weight = checkpoint["state_dict"]
94
+ else:
95
+ raise Exception("please check model weight")
96
+ gpt_model.load_state_dict(model_weight, strict=False)
97
+ gpt_model.eval()
98
+ del checkpoint
99
+ print(f"gpt model is loaded")
100
+
101
+
102
+ t5_model = T5Embedder(
103
+ device=device,
104
+ local_cache=True,
105
+ cache_dir=args.t5_path,
106
+ dir_or_name=args.t5_model_type,
107
+ torch_dtype=precision,
108
+ model_max_length=args.t5_feature_max_len,
109
+ )
110
+
111
+ # Setup data:
112
+ transform = transforms.Compose([
113
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
114
+ transforms.ToTensor(),
115
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
116
+ ])
117
+
118
+ dataset = build_t2i_control_code(args)
119
+
120
+ sampler = DistributedSampler(
121
+ dataset,
122
+ num_replicas=dist.get_world_size(),
123
+ rank=rank,
124
+ shuffle=False,
125
+ seed=args.global_seed
126
+ )
127
+ loader = DataLoader(
128
+ dataset,
129
+ batch_size=args.per_proc_batch_size,
130
+ shuffle=False,
131
+ sampler=sampler,
132
+ num_workers=args.num_workers,
133
+ pin_memory=True,
134
+ drop_last=False,
135
+ collate_fn=dataset.collate_fn,
136
+ )
137
+
138
+
139
+ if args.compile:
140
+ print(f"compiling the model...")
141
+ gpt_model = torch.compile(
142
+ gpt_model,
143
+ mode="reduce-overhead",
144
+ fullgraph=True
145
+ ) # requires PyTorch 2.0 (optional)
146
+ else:
147
+ pass
148
+ # print(f"no need to compile model in demo")
149
+
150
+ # Create folder to save samples:
151
+ model_string_name = args.gpt_model.replace("/", "-")
152
+ if args.from_fsdp:
153
+ ckpt_string_name = args.gpt_ckpt.split('/')[-2]
154
+ else:
155
+ ckpt_string_name = os.path.basename(args.gpt_ckpt).replace(".pth", "").replace(".pt", "")
156
+ # import pdb;pdb.set_trace()
157
+ date = os.path.split(os.path.dirname(os.path.dirname(os.path.dirname(args.gpt_ckpt))))[-1]
158
+ folder_name = f"{model_string_name}-{date}-{ckpt_string_name}-size-{args.image_size}-{args.vq_model}-" \
159
+ f"topk-{args.top_k}-topp-{args.top_p}-temperature-{args.temperature}-" \
160
+ f"cfg-{args.cfg_scale}-seed-{args.global_seed}"
161
+ sample_folder_dir = f"{args.sample_dir}"
162
+ if rank == 0:
163
+ if args.save_image:
164
+ os.makedirs(sample_folder_dir, exist_ok=True)
165
+ os.makedirs(f"{args.sample_dir}/visualization", exist_ok=True)
166
+ os.makedirs(f"{args.sample_dir}/annotations", exist_ok=True)
167
+ print(f"Saving .png samples at {sample_folder_dir}")
168
+ n = args.per_proc_batch_size
169
+ global_batch_size = n * dist.get_world_size()
170
+ total = 0
171
+
172
+ if args.condition_type == 'hed':
173
+ get_condition = HEDdetector().to(device).eval()
174
+ elif args.condition_type == 'canny':
175
+ get_condition = CannyDetector()
176
+ elif args.condition_type == 'lineart':
177
+ get_condition = LineArt()
178
+ get_condition.load_state_dict(torch.load('condition/ckpts/model.pth', map_location=torch.device('cpu')))
179
+ get_condition.to(device)
180
+
181
+ condition_null = None
182
+ num = 0
183
+ print(len(loader))
184
+ for batch in tqdm(loader):
185
+ num += 1
186
+ # if num>2:
187
+ # break
188
+ prompts = batch['prompt']
189
+ condition_imgs = batch['control'].to(device)
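+ # For hed/lineart, the condition map is produced on the fly from the control image, replicated to 3 channels, and normalized to [-1, 1] before being fed to the model.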
190
+
191
+ if args.condition_type in ['hed', 'lineart']:
192
+ with torch.no_grad():
193
+ condition_imgs = get_condition(condition_imgs.float())
194
+ if args.condition_type == 'hed':
195
+ condition_imgs = condition_imgs.unsqueeze(1)/255
196
+ # if args.condition_type == 'lineart':
197
+ # condition_imgs = 1 - condition_imgs
198
+ condition_imgs = condition_imgs.repeat(1,3,1,1)
199
+ condition_imgs = 2*(condition_imgs - 0.5)
200
+ # condition_origin = condition_imgs.clone()
201
+
202
+ if args.condition_type == 'seg':
203
+ labels = batch['label']
204
+
205
+
206
+ caption_embs, emb_masks = t5_model.get_text_embeddings(prompts)
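+ # Rearrange the T5 embeddings so each caption's valid tokens sit at the end and padding at the front (masks are flipped to match); presumably this mirrors the layout used during training.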
207
+
208
+ new_emb_masks = torch.flip(emb_masks, dims=[-1])
209
+ new_caption_embs = []
210
+ for idx, (caption_emb, emb_mask) in enumerate(zip(caption_embs, emb_masks)):
211
+ valid_num = int(emb_mask.sum().item())
212
+ new_caption_emb = torch.cat([caption_emb[valid_num:],caption_emb[:valid_num]])
213
+ new_caption_embs.append(new_caption_emb)
214
+ new_caption_embs = torch.stack(new_caption_embs)
215
+ c_indices = new_caption_embs * new_emb_masks[:,:, None]
216
+ c_emb_masks = new_emb_masks
217
+
218
+ qzshape = [len(c_indices), args.codebook_embed_dim, args.image_H//args.downsample_size, args.image_W//args.downsample_size]
219
+
220
+ index_sample = generate(
221
+ gpt_model, c_indices, (args.image_H//args.downsample_size)*(args.image_W//args.downsample_size), c_emb_masks, condition=condition_imgs.to(precision),
222
+ cfg_scale=args.cfg_scale, cfg_interval=args.cfg_interval,
223
+ temperature=args.temperature, top_k=args.top_k,
224
+ top_p=args.top_p, sample_logits=True,
225
+ )
226
+
227
+ samples = vq_model.decode_code(index_sample, qzshape) # output value is between [-1, 1]
228
+
229
+ for i in range(len(samples)):
230
+ # # Save samples to disk as individual .png files
231
+ index = i * dist.get_world_size() + rank + total
232
+ if args.save_image:
233
+ save_image(samples[i], f"{args.sample_dir}/visualization/{index:06d}.png", nrow=1, normalize=True, value_range=(-1, 1))
234
+ save_image(condition_imgs[i,0], f"{args.sample_dir}/annotations/{index:06d}.png", nrow=1, normalize=True, value_range=(-1, 1))
235
+ if args.condition_type == 'seg':
236
+ Image.fromarray(labels[i].numpy().astype('uint8'), mode='L').save(f"{args.sample_dir}/annotations/{index:06d}.png")
237
+ total += global_batch_size
238
+
239
+
240
+
241
+ if __name__ == "__main__":
242
+ parser = argparse.ArgumentParser()
243
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-B")
244
+ parser.add_argument("--gpt-ckpt", type=str, default=None)
245
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i", help="class-conditional or text-conditional")
246
+ parser.add_argument("--from-fsdp", action='store_true')
247
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
248
+ parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
249
+ parser.add_argument("--compile", action='store_true', default=False)
250
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
251
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
252
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
253
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
254
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512, 768], default=512)
255
+ parser.add_argument("--image-H", type=int, choices=[256, 320, 384, 400, 448, 512, 576, 640, 704, 768, 832, 960, 1024], default=512)
256
+ parser.add_argument("--image-W", type=int, choices=[256, 320, 384, 400, 448, 512, 576, 640, 704, 768, 832, 960, 1024], default=512)
257
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
258
+ parser.add_argument("--num-classes", type=int, default=1000)
259
+ parser.add_argument("--cfg-scale", type=float, default=4)
260
+ parser.add_argument("--cfg-interval", type=float, default=-1)
261
+ parser.add_argument("--seed", type=int, default=0)
262
+ parser.add_argument("--top-k", type=int, default=2000,help="top-k value to sample with")
263
+ parser.add_argument("--temperature", type=float, default=1.0, help="temperature value to sample with")
264
+ parser.add_argument("--top-p", type=float, default=1.0, help="top-p value to sample with")
265
+ parser.add_argument("--condition", type=str, default='hed', choices=['canny', 'hed'])
266
+ parser.add_argument("--per-proc-batch-size", type=int, default=25)
267
+ parser.add_argument("--global-seed", type=int, default=0)
268
+ parser.add_argument("--dataset", type=str, choices=['imagenet', 'coco', 'imagenet_code'], default='imagenet_code')
269
+ parser.add_argument("--num-workers", type=int, default=16)
270
+ parser.add_argument("--sample-dir", type=str, default="samples")
271
+ parser.add_argument("--num-fid-samples", type=int, default=2000)
272
+ parser.add_argument("--save-image", type=bool, default=True)
273
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
274
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
275
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
276
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
277
+ parser.add_argument("--code-path", type=str, default="code")
278
+ parser.add_argument("--code-path2", type=str, default=None)
279
+ parser.add_argument("--get-image", type=bool, default=False)
280
+ parser.add_argument("--get-prompt", type=bool, default=True)
281
+ parser.add_argument("--get-label", type=bool, default=False)
282
+ parser.add_argument("--condition-type", type=str, choices=['seg', 'canny', 'hed', 'lineart', 'depth'], default="canny")
283
+ parser.add_argument("--adapter-size", type=str, default="small")
284
+ args = parser.parse_args()
285
+ main(args)
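For reference, a minimal standalone sketch (toy tensor sizes, not part of the commit) of the right-alignment applied to the T5 caption embeddings above: each embedding is rotated so its padded positions move to the front, matching the flipped mask, before being masked and passed to generate().

import torch

caption_embs = torch.randn(2, 6, 4)               # (batch, max_len, dim), toy sizes
emb_masks = torch.tensor([[1, 1, 1, 0, 0, 0],
                          [1, 1, 1, 1, 0, 0]])
new_emb_masks = torch.flip(emb_masks, dims=[-1])  # padding now leads, valid tokens trail
new_caption_embs = []
for caption_emb, emb_mask in zip(caption_embs, emb_masks):
    valid_num = int(emb_mask.sum().item())
    # rotate so the valid tokens sit at the end, adjacent to the image tokens
    new_caption_embs.append(torch.cat([caption_emb[valid_num:], caption_emb[:valid_num]]))
new_caption_embs = torch.stack(new_caption_embs)
c_indices = new_caption_embs * new_emb_masks[:, :, None]
print(c_indices.shape)                            # torch.Size([2, 6, 4])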
autoregressive/train/extract_codes_c2i.py ADDED
@@ -0,0 +1,139 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/extract_features.py
3
+ # import os
4
+ # os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
5
+ import torch
6
+ torch.backends.cuda.matmul.allow_tf32 = True
7
+ torch.backends.cudnn.allow_tf32 = True
8
+ import torch.distributed as dist
9
+ from torch.utils.data import DataLoader
10
+ from torch.utils.data.distributed import DistributedSampler
11
+ from torchvision import transforms
12
+ import numpy as np
13
+ import argparse
14
+ import os
15
+
16
+ from utils.distributed import init_distributed_mode
17
+ from dataset.augmentation import center_crop_arr
18
+ from dataset.build import build_dataset
19
+ from tokenizer.tokenizer_image.vq_model import VQ_models
20
+
21
+
22
+ #################################################################################
23
+ # Training Loop #
24
+ #################################################################################
25
+ def main(args):
26
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
27
+ # Setup DDP:
28
+ if not args.debug:
29
+ init_distributed_mode(args)
30
+ rank = dist.get_rank()
31
+ device = rank % torch.cuda.device_count()
32
+ seed = args.global_seed * dist.get_world_size() + rank
33
+ torch.manual_seed(seed)
34
+ torch.cuda.set_device(device)
35
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
36
+ else:
37
+ device = 'cuda'
38
+ rank = 0
39
+
40
+ # Setup a feature folder:
41
+ if args.debug or rank == 0:
42
+ os.makedirs(args.code_path, exist_ok=True)
43
+ os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_codes'), exist_ok=True)
44
+ os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_labels'), exist_ok=True)
45
+
46
+ # create and load model
47
+ vq_model = VQ_models[args.vq_model](
48
+ codebook_size=args.codebook_size,
49
+ codebook_embed_dim=args.codebook_embed_dim)
50
+ vq_model.to(device)
51
+ vq_model.eval()
52
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
53
+ vq_model.load_state_dict(checkpoint["model"])
54
+ del checkpoint
55
+
56
+ # Setup data:
57
+ if args.ten_crop:
58
+ crop_size = int(args.image_size * args.crop_range)
59
+ transform = transforms.Compose([
60
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
61
+ transforms.TenCrop(args.image_size), # this is a tuple of PIL Images
62
+ transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), # returns a 4D tensor
63
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
64
+ ])
65
+ else:
66
+ crop_size = args.image_size
67
+ transform = transforms.Compose([
68
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
69
+ transforms.ToTensor(),
70
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
71
+ ])
72
+ dataset = build_dataset(args, transform=transform)
73
+ if not args.debug:
74
+ sampler = DistributedSampler(
75
+ dataset,
76
+ num_replicas=dist.get_world_size(),
77
+ rank=rank,
78
+ shuffle=False,
79
+ seed=args.global_seed
80
+ )
81
+ else:
82
+ sampler = None
83
+ loader = DataLoader(
84
+ dataset,
85
+ batch_size=1, # important!
86
+ shuffle=False,
87
+ sampler=sampler,
88
+ num_workers=args.num_workers,
89
+ pin_memory=True,
90
+ drop_last=False
91
+ )
92
+ from tqdm import tqdm
93
+ total = 0
94
+ for x, y in tqdm(loader):
95
+ x = x.to(device)
96
+ if args.ten_crop:
97
+ x_all = x.flatten(0, 1)
98
+ num_aug = 10
99
+ else:
100
+ x_flip = torch.flip(x, dims=[-1])
101
+ x_all = torch.cat([x, x_flip])
102
+ num_aug = 2
103
+ y = y.to(device)
104
+ with torch.no_grad():
105
+ _, _, [_, _, indices] = vq_model.encode(x_all)
106
+ codes = indices.reshape(x.shape[0], num_aug, -1)
107
+
108
+ x = codes.detach().cpu().numpy() # (1, num_aug, args.image_size//16 * args.image_size//16)
109
+ train_steps = rank + total
110
+ np.save(f'{args.code_path}/{args.dataset}{args.image_size}_codes/{train_steps}.npy', x)
111
+
112
+ y = y.detach().cpu().numpy() # (1,)
113
+ np.save(f'{args.code_path}/{args.dataset}{args.image_size}_labels/{train_steps}.npy', y)
114
+ if not args.debug:
115
+ total += dist.get_world_size()
116
+ else:
117
+ total += 1
118
+ #print(total)
119
+
120
+ dist.destroy_process_group()
121
+
122
+
123
+ if __name__ == "__main__":
124
+ parser = argparse.ArgumentParser()
125
+ parser.add_argument("--data-path", type=str, required=True)
126
+ parser.add_argument("--code-path", type=str, required=True)
127
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
128
+ parser.add_argument("--vq-ckpt", type=str, required=True, help="ckpt path for vq model")
129
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
130
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
131
+ parser.add_argument("--dataset", type=str, default='imagenet')
132
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
133
+ parser.add_argument("--ten-crop", action='store_true', help="whether using random crop")
134
+ parser.add_argument("--crop-range", type=float, default=1.1, help="expanding range of center crop")
135
+ parser.add_argument("--global-seed", type=int, default=0)
136
+ parser.add_argument("--num-workers", type=int, default=24)
137
+ parser.add_argument("--debug", action='store_true')
138
+ args = parser.parse_args()
139
+ main(args)
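A quick sketch of reading the arrays this script writes (paths and shapes are illustrative, assuming the defaults --dataset imagenet --image-size 256 and the 16x-downsampling tokenizer):

import numpy as np

codes = np.load('code/imagenet256_codes/0.npy')    # (1, num_aug, 256) codebook indices per sample
label = np.load('code/imagenet256_labels/0.npy')   # (1,) class id
# num_aug is 2 by default (codes[:, 0] = original crop, codes[:, 1] = horizontal flip),
# or 10 when --ten-crop is set.
print(codes.shape, int(label[0]))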
autoregressive/train/extract_codes_t2i.py ADDED
@@ -0,0 +1,144 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/extract_features.py
3
+ import torch
4
+ torch.backends.cuda.matmul.allow_tf32 = True
5
+ torch.backends.cudnn.allow_tf32 = True
6
+ import torch.distributed as dist
7
+ from torch.utils.data import Dataset, DataLoader
8
+ from torch.utils.data.distributed import DistributedSampler
9
+ from torchvision import transforms
10
+ import numpy as np
11
+ from PIL import Image
12
+ import glob
13
+ import argparse
14
+ import os
15
+ import json
16
+
17
+ from utils.distributed import init_distributed_mode
18
+ from dataset.augmentation import center_crop_arr
19
+ from tokenizer.tokenizer_image.vq_model import VQ_models
20
+
21
+
22
+ #################################################################################
23
+ # Training Helper Functions #
24
+ #################################################################################
25
+ class CustomDataset(Dataset):
26
+ def __init__(self, lst_dir, start, end, transform):
27
+ img_path_list = []
28
+ for lst_name in sorted(os.listdir(lst_dir))[start: end+1]:
29
+ if not lst_name.endswith('.jsonl'):
30
+ continue
31
+ file_path = os.path.join(lst_dir, lst_name)
32
+ with open(file_path, 'r') as file:
33
+ for line_idx, line in enumerate(file):
34
+ data = json.loads(line)
35
+ img_path = data['image_path']
36
+ code_dir = file_path.split('/')[-1].split('.')[0]
37
+ img_path_list.append((img_path, code_dir, line_idx))
38
+ self.img_path_list = img_path_list
39
+ self.transform = transform
40
+
41
+ def __len__(self):
42
+ return len(self.img_path_list)
43
+
44
+ def __getitem__(self, index):
45
+ img_path, code_dir, code_name = self.img_path_list[index]
46
+ img = Image.open(img_path).convert("RGB")
47
+ if self.transform is not None:
48
+ img = self.transform(img)
49
+ return img, code_dir, code_name
50
+
51
+
52
+
53
+ #################################################################################
54
+ # Training Loop #
55
+ #################################################################################
56
+ def main(args):
57
+ """
58
+ Trains a new DiT model.
59
+ """
60
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
61
+
62
+ # Setup DDP:
63
+ # dist.init_process_group("nccl")
64
+ init_distributed_mode(args)
65
+ rank = dist.get_rank()
66
+ device = rank % torch.cuda.device_count()
67
+ seed = args.global_seed * dist.get_world_size() + rank
68
+ torch.manual_seed(seed)
69
+ torch.cuda.set_device(device)
70
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
71
+
72
+ # Setup a feature folder:
73
+ if rank == 0:
74
+ os.makedirs(args.code_path, exist_ok=True)
75
+
76
+
77
+ # create and load model
78
+ vq_model = VQ_models[args.vq_model](
79
+ codebook_size=args.codebook_size,
80
+ codebook_embed_dim=args.codebook_embed_dim)
81
+ vq_model.to(device)
82
+ vq_model.eval()
83
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
84
+ vq_model.load_state_dict(checkpoint["model"])
85
+ del checkpoint
86
+
87
+
88
+ # Setup data:
89
+ transform = transforms.Compose([
90
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
91
+ transforms.ToTensor(),
92
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
93
+ ])
94
+ print(f"Dataset is preparing...")
95
+ dataset = CustomDataset(args.data_path, args.data_start, args.data_end, transform=transform)
96
+ sampler = DistributedSampler(
97
+ dataset,
98
+ num_replicas=dist.get_world_size(),
99
+ rank=rank,
100
+ shuffle=False,
101
+ seed=args.global_seed
102
+ )
103
+ loader = DataLoader(
104
+ dataset,
105
+ batch_size=1, # important!
106
+ shuffle=False,
107
+ sampler=sampler,
108
+ num_workers=args.num_workers,
109
+ pin_memory=True,
110
+ drop_last=False
111
+ )
112
+ print(f"Dataset contains {len(dataset):,} images")
113
+
114
+ # total = 0
115
+ for img, code_dir, code_name in loader:
116
+ img = img.to(device)
117
+ with torch.no_grad():
118
+ _, _, [_, _, indices] = vq_model.encode(img)
119
+ codes = indices.reshape(img.shape[0], -1)
120
+ x = codes.detach().cpu().numpy() # (1, args.image_size//16 * args.image_size//16)
121
+ os.makedirs(os.path.join(args.code_path, code_dir[0]), exist_ok=True)
122
+ np.save(os.path.join(args.code_path, code_dir[0], '{}.npy'.format(code_name.item())), x)
123
+
124
+ # total += dist.get_world_size()
125
+ print(code_name.item())
126
+
127
+ dist.destroy_process_group()
128
+
129
+
130
+ if __name__ == "__main__":
131
+ parser = argparse.ArgumentParser()
132
+ parser.add_argument("--data-path", type=str, required=True)
133
+ parser.add_argument("--code-path", type=str, required=True)
134
+ parser.add_argument("--data-start", type=int, required=True)
135
+ parser.add_argument("--data-end", type=int, required=True)
136
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
137
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
138
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
139
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
140
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=512)
141
+ parser.add_argument("--global-seed", type=int, default=0)
142
+ parser.add_argument("--num-workers", type=int, default=24)
143
+ args = parser.parse_args()
144
+ main(args)
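A sketch of the .jsonl input CustomDataset expects: each line is a JSON object with an 'image_path' field (other fields are ignored here). File names and paths below are made-up examples:

import json, os

os.makedirs('lists', exist_ok=True)
records = [
    {"image_path": "/data/images/000001.jpg"},
    {"image_path": "/data/images/000002.jpg"},
]
with open('lists/part-000.jsonl', 'w') as f:
    for r in records:
        f.write(json.dumps(r) + '\n')
# Running extract_codes_t2i.py with --data-path lists then writes one
# <code-path>/part-000/<line_idx>.npy array of token indices per image.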
autoregressive/train/extract_file_ade.py ADDED
@@ -0,0 +1,511 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/extract_features.py
3
+ import os
4
+ os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
5
+ import torch
6
+ torch.backends.cuda.matmul.allow_tf32 = True
7
+ torch.backends.cudnn.allow_tf32 = True
8
+ import torch.distributed as dist
9
+ from torch.utils.data import DataLoader
10
+ from torch.utils.data.distributed import DistributedSampler
11
+ from torchvision import transforms
12
+ import numpy as np
13
+ import argparse
14
+ import os
15
+ import sys
16
+ current_directory = os.getcwd()
17
+ sys.path.append(current_directory)
18
+ from utils.distributed import init_distributed_mode
19
+ from dataset.augmentation import center_crop_arr
20
+ from dataset.build import build_dataset
21
+ from tokenizer.tokenizer_image.vq_model import VQ_models
22
+ from condition.hed import HEDdetector, ControlNetHED_Apache2
23
+ import cv2
24
+ from torch.nn.parallel import DataParallel
25
+ from einops import rearrange
26
+ from datasets import load_dataset
27
+ from torchvision import transforms
28
+ from PIL import Image
29
+ from language.t5 import T5Embedder
30
+ #################################################################################
31
+ # Training Loop #
32
+ #################################################################################
33
+ resolution = (512, 512)
34
+ image_transforms = transforms.Compose(
35
+ [
36
+ transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True),
37
+ transforms.ToTensor(),
38
+ transforms.Normalize([0.5], [0.5]),
39
+ ]
40
+ )
41
+ conditioning_image_transforms = transforms.Compose(
42
+ [
43
+ transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True),
44
+ transforms.ToTensor(),
45
+ ]
46
+ )
47
+ label_image_transforms = transforms.Compose(
48
+ [
49
+ transforms.Resize(resolution, interpolation=transforms.InterpolationMode.NEAREST, antialias=True),
50
+ ]
51
+ )
52
+
53
+ def collate_fn(examples):
54
+
55
+ pil_images = [example['image'].convert("RGB") for example in examples]
56
+ images = [image_transforms(image) for image in pil_images]
57
+ images = torch.stack(images)
58
+
59
+ conditioning_images = [example['control_seg'].convert("RGB") for example in examples]
60
+ conditioning_images = [conditioning_image_transforms(image) for image in conditioning_images]
61
+ conditioning_images = torch.stack(conditioning_images)
62
+
63
+ captions = [example['prompt'] for example in examples]
64
+
65
+ dtype = torch.long
66
+ # labels = [torch.from_numpy(np.array(example['panoptic_seg_map'])).unsqueeze(0) for example in examples] # seg_map panoptic_seg_map
67
+ # labels = [label_image_transforms(label) for label in labels]
68
+ # labels = torch.stack(labels)
69
+ labels = [example['seg_map'] for example in examples]
70
+
71
+
72
+ return {
73
+ "images": images, # -1~1
74
+ "conditioning_images": conditioning_images, # 0~1
75
+ "captions": captions,
76
+ "labels": labels
77
+ }
78
+
79
+ def main(args):
80
+
81
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
82
+ # Setup DDP:
83
+ if not args.debug:
84
+ init_distributed_mode(args)
85
+ rank = dist.get_rank()
86
+ device = rank % torch.cuda.device_count()
87
+ seed = args.global_seed * dist.get_world_size() + rank
88
+ torch.manual_seed(seed)
89
+ torch.cuda.set_device(device)
90
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
91
+ else:
92
+ device = 'cuda'
93
+ rank = 0
94
+
95
+ # Setup a feature folder:
96
+ if args.debug or rank == 0:
97
+ os.makedirs(args.code_path, exist_ok=True)
98
+ os.makedirs(os.path.join(args.code_path, f'code'), exist_ok=True)
99
+ os.makedirs(os.path.join(args.code_path, f'image'), exist_ok=True)
100
+ os.makedirs(os.path.join(args.code_path, f'control'), exist_ok=True)
101
+ os.makedirs(os.path.join(args.code_path, f'caption_emb'), exist_ok=True)
+ os.makedirs(os.path.join(args.code_path, f'prompt'), exist_ok=True) # needed by the prompt .txt files written in the loop below
102
+ if args.split == 'validation':
103
+ os.makedirs(os.path.join(args.code_path, f'label'), exist_ok=True)
104
+ # create and load model
105
+ vq_model = VQ_models[args.vq_model](
106
+ codebook_size=args.codebook_size,
107
+ codebook_embed_dim=args.codebook_embed_dim)
108
+ vq_model.to(device)
109
+ vq_model.eval()
110
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
111
+ vq_model.load_state_dict(checkpoint["model"])
112
+ del checkpoint
113
+
114
+ t5_model = T5Embedder(
115
+ device=device,
116
+ local_cache=True,
117
+ cache_dir=args.t5_path,
118
+ dir_or_name=args.t5_model_type,
119
+ model_max_length=args.t5_feature_max_len,
120
+ )
121
+
122
+ # Setup data:
123
+ if args.ten_crop:
124
+ crop_size = int(args.image_size * args.crop_range)
125
+ transform = transforms.Compose([
126
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
127
+ transforms.TenCrop(args.image_size), # this is a tuple of PIL Images
128
+ transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), # returns a 4D tensor
129
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
130
+ ])
131
+ else:
132
+ crop_size = args.image_size
133
+ transform = transforms.Compose([
134
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
135
+ transforms.ToTensor(),
136
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
137
+ ])
138
+ # dataset = build_dataset(args, transform=transform)
139
+ # dataset = load_dataset(
140
+ # args.data_path,
141
+ # cache_dir=None,
142
+ # )
143
+ dataset = load_dataset(args.data_path, split=args.split, # "train" or "validation"
144
+ cache_dir=None,)
145
+ print(dataset)
146
+
147
+ if not args.debug:
148
+ sampler = DistributedSampler(
149
+ # dataset[args.split],
150
+ dataset,
151
+ num_replicas=dist.get_world_size(),
152
+ rank=rank,
153
+ shuffle=False,
154
+ seed=args.global_seed
155
+ )
156
+ else:
157
+ sampler = None
158
+ loader = DataLoader(
159
+ # dataset[args.split],
160
+ dataset,
161
+ batch_size=1, # important!
162
+ shuffle=False,
163
+ sampler=sampler,
164
+ num_workers=args.num_workers,
165
+ collate_fn=collate_fn,
166
+ pin_memory=True,
167
+ drop_last=False
168
+ )
169
+
170
+ from tqdm import tqdm
171
+ total = 0
172
+ code_len = 1024
173
+ t5_feature_max_len = 120
174
+ t5_feature_dim = 2048
175
+ max_seq_length = t5_feature_max_len + code_len
176
+ for batch in tqdm(loader):
177
+
178
+ captions = batch['captions']
179
+
180
+ train_steps = rank + total
181
+ img_save_path = f'{args.code_path}/image/{train_steps}.png'
182
+ cond_save_path = f'{args.code_path}/control/{train_steps}.png'
183
+ label_save_path = f'{args.code_path}/label/{train_steps}.png'
184
+ Image.fromarray((255*(batch['images'][0].numpy().transpose(1,2,0)*0.5+0.5)).astype('uint8'), mode='RGB').save(img_save_path)
185
+ Image.fromarray((255*batch['conditioning_images'][0].numpy().transpose(1,2,0)).astype('uint8'), mode='RGB').save(cond_save_path)
186
+
187
+ label = Image.fromarray(np.array(batch['labels'][0]).astype('uint8'))
188
+ label.resize((512,512), Image.Resampling.NEAREST).save(label_save_path)
189
+ with torch.no_grad():
190
+ _, _, [_, _, indices] = vq_model.encode(batch['images'].to(device))
191
+
192
+ caption_emb, emb_mask = t5_model.get_text_embeddings(captions)
193
+ valid_num = int(emb_mask.sum().item())
194
+ caption_emb = caption_emb[:, :valid_num]
195
+
196
+ codes = indices.reshape(1, 1, -1)
197
+ x = codes.detach().cpu().numpy() # (1, num_aug, args.image_size//16 * args.image_size//16)
198
+ np.save(f'{args.code_path}/code/{train_steps}.npy', x)
199
+
200
+ caption_emb = caption_emb.to(torch.float32).detach().cpu().numpy()
201
+ caption_dict = {}
202
+ caption_dict['prompt'] = captions
203
+ caption_dict['caption_emb'] = caption_emb
204
+ np.savez(f'{args.code_path}/caption_emb/{train_steps}.npz', **caption_dict)
205
+
206
+ # New: also save the prompt as a .txt file for easier external evaluation
207
+ with open(f'{args.code_path}/prompt/{train_steps}.txt', 'w', encoding='utf-8') as f:
208
+ f.write(captions[0]) # assumes batch_size = 1
209
+
210
+ if not args.debug:
211
+ total += dist.get_world_size()
212
+ else:
213
+ total += 1
214
+
215
+ dist.destroy_process_group()
216
+
217
+
218
+ if __name__ == "__main__":
219
+ parser = argparse.ArgumentParser()
220
+ parser.add_argument("--code-path", type=str, required=True)
221
+ parser.add_argument("--data-path", type=str, required=True)
222
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
223
+ parser.add_argument("--vq-ckpt", type=str, required=True, help="ckpt path for vq model")
224
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
225
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
226
+ parser.add_argument("--dataset", type=str, default='imagenet')
227
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
228
+ parser.add_argument("--ten-crop", action='store_true', help="whether using random crop")
229
+ parser.add_argument("--crop-range", type=float, default=1.1, help="expanding range of center crop")
230
+ parser.add_argument("--global-seed", type=int, default=0)
231
+ parser.add_argument("--num-workers", type=int, default=24)
232
+ parser.add_argument("--debug", action='store_true')
233
+ parser.add_argument("--min-threshold", type=int, default=200)
234
+ parser.add_argument("--max-threshold", type=int, default=400)
235
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
236
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
237
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
238
+ parser.add_argument("--split", type=str, default='train',help="train or validation")
239
+ args = parser.parse_args()
240
+ main(args)
241
+
242
+
243
+
244
+ # # Modified from:
245
+ # # fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/extract_features.py
246
+ # import os
247
+ # # os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
248
+ # import torch
249
+ # torch.backends.cuda.matmul.allow_tf32 = True
250
+ # torch.backends.cudnn.allow_tf32 = True
251
+ # import torch.distributed as dist
252
+ # from torch.utils.data import DataLoader
253
+ # from torch.utils.data.distributed import DistributedSampler
254
+ # from torchvision import transforms
255
+ # import numpy as np
256
+ # import argparse
257
+ # import os
258
+ # import sys
259
+ # current_directory = os.getcwd()
260
+ # sys.path.append(current_directory)
261
+ # from utils.distributed import init_distributed_mode
262
+ # from dataset.augmentation import center_crop_arr
263
+ # from dataset.build import build_dataset
264
+ # from tokenizer.tokenizer_image.vq_model import VQ_models
265
+ # # from condition.hed import HEDdetector, ControlNetHED_Apache2
266
+ # import cv2
267
+ # from torch.nn.parallel import DataParallel
268
+ # from einops import rearrange
269
+ # from datasets import load_dataset
270
+ # from torchvision import transforms
271
+ # from PIL import Image
272
+ # from language.t5 import T5Embedder
273
+ # #################################################################################
274
+ # # Training Loop #
275
+ # #################################################################################
276
+ # resolution = (512, 512)
277
+ # image_transforms = transforms.Compose(
278
+ # [
279
+ # transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True),
280
+ # transforms.ToTensor(),
281
+ # transforms.Normalize([0.5], [0.5]),
282
+ # ]
283
+ # )
284
+ # conditioning_image_transforms = transforms.Compose(
285
+ # [
286
+ # transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True),
287
+ # transforms.ToTensor(),
288
+ # ]
289
+ # )
290
+ # label_image_transforms = transforms.Compose(
291
+ # [
292
+ # transforms.Resize(resolution, interpolation=transforms.InterpolationMode.NEAREST, antialias=True),
293
+ # ]
294
+ # )
295
+ # def collate_fn(examples):
296
+ # print("Examples:", examples) # 打印原始数据结构
297
+ # print("Available keys:", examples[0].keys()) # 打印数据集中包含的键
298
+ # exit() # stop here to inspect the output
299
+
300
+ # def collate_fn(examples):
301
+
302
+ # pil_images = [example['image'].convert("RGB") for example in examples]
303
+ # images = [image_transforms(image) for image in pil_images]
304
+ # images = torch.stack(images)
305
+
306
+ # conditioning_images = [example['control_seg'].convert("RGB") for example in examples]
307
+ # conditioning_images = [example["control_seg"][0].convert("RGB") for example in examples]
308
+ # conditioning_images = [conditioning_image_transforms(image) for image in conditioning_images]
309
+ # conditioning_images = torch.stack(conditioning_images)
310
+
311
+ # # captions = [example['prompt'] for example in examples]
312
+
313
+ # # dtype = torch.long
314
+ # # # labels = [torch.from_numpy(np.array(example['panoptic_seg_map'])).unsqueeze(0) for example in examples] # seg_map panoptic_seg_map
315
+ # # # labels = [label_image_transforms(label) for label in labels]
316
+ # # # labels = torch.stack(labels)
317
+ # # labels = [example['seg_map'] for example in examples]
318
+ # # **revised captions logic**
319
+ # captions = [" ".join(example["scene"]) if "scene" in example else "unknown" for example in examples]
320
+
321
+ # labels = [example["control_seg"][0] for example in examples]
322
+
323
+ # return {
324
+ # "images": images, # -1~1
325
+ # "conditioning_images": conditioning_images, # 0~1
326
+ # "captions": captions,
327
+ # "labels": labels
328
+ # }
329
+
330
+ # def main(args):
331
+
332
+ # assert torch.cuda.is_available(), "Training currently requires at least one GPU."
333
+ # # Setup DDP:
334
+ # # if not args.debug:
335
+ # # init_distributed_mode(args)
336
+ # # rank = dist.get_rank()
337
+ # # device = rank % torch.cuda.device_count()
338
+ # # seed = args.global_seed * dist.get_world_size() + rank
339
+ # # torch.manual_seed(seed)
340
+ # # torch.cuda.set_device(device)
341
+ # # print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
342
+ # # else:
343
+ # # device = 'cuda'
344
+ # # rank = 0
345
+ # device = torch.device("cuda")
346
+ # # Setup a feature folder:
347
+ # # if args.debug or rank == 0:
348
+ # # os.makedirs(args.code_path, exist_ok=True)
349
+ # # os.makedirs(os.path.join(args.code_path, f'code'), exist_ok=True)
350
+ # # os.makedirs(os.path.join(args.code_path, f'image'), exist_ok=True)
351
+ # # os.makedirs(os.path.join(args.code_path, f'control'), exist_ok=True)
352
+ # # os.makedirs(os.path.join(args.code_path, f'caption_emb'), exist_ok=True)
353
+ # # if args.split == 'validation':
354
+ # # os.makedirs(os.path.join(args.code_path, f'label'), exist_ok=True)
355
+ # os.makedirs(args.code_path, exist_ok=True)
356
+ # os.makedirs(os.path.join(args.code_path, f'code'), exist_ok=True)
357
+ # os.makedirs(os.path.join(args.code_path, f'image'), exist_ok=True)
358
+ # os.makedirs(os.path.join(args.code_path, f'control'), exist_ok=True)
359
+ # os.makedirs(os.path.join(args.code_path, f'caption_emb'), exist_ok=True)
360
+ # if args.split == 'validation':
361
+ # os.makedirs(os.path.join(args.code_path, f'label'), exist_ok=True)
362
+
363
+ # # create and load the VQ model
364
+ # vq_model = VQ_models[args.vq_model](
365
+ # codebook_size=args.codebook_size,
366
+ # codebook_embed_dim=args.codebook_embed_dim)
367
+ # vq_model.to(device)
368
+ # vq_model.eval()
369
+ # # load the model weights
370
+ # checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
371
+ # vq_model.load_state_dict(checkpoint["model"])
372
+ # del checkpoint
373
+ # # load the T5 text embedding model
374
+ # t5_model = T5Embedder(
375
+ # device=device,
376
+ # local_cache=True,
377
+ # cache_dir=args.t5_path,
378
+ # dir_or_name=args.t5_model_type,
379
+ # model_max_length=args.t5_feature_max_len,
380
+ # )
381
+ # # comment out the section below
382
+ # # Setup data:
383
+ # if args.ten_crop:
384
+ # crop_size = int(args.image_size * args.crop_range)
385
+ # transform = transforms.Compose([
386
+ # transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
387
+ # transforms.TenCrop(args.image_size), # this is a tuple of PIL Images
388
+ # transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), # returns a 4D tensor
389
+ # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
390
+ # ])
391
+ # else:
392
+ # crop_size = args.image_size
393
+ # transform = transforms.Compose([
394
+ # transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
395
+ # transforms.ToTensor(),
396
+ # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
397
+ # ])
398
+ # # dataset = build_dataset(args, transform=transform)
399
+ # dataset = load_dataset(
400
+ # args.data_path,
401
+ # cache_dir=None,
402
+ # ) # load the dataset
403
+ # if not args.debug:
404
+ # sampler = DistributedSampler(
405
+ # dataset[args.split],
406
+ # num_replicas=dist.get_world_size(),
407
+ # rank=rank,
408
+ # shuffle=False,
409
+ # seed=args.global_seed
410
+ # )
411
+ # else:
412
+ # sampler = None
413
+ # loader = DataLoader(
414
+ # dataset[args.split],
415
+ # batch_size=1, # important!
416
+ # shuffle=False,
417
+ # sampler=sampler,
418
+ # num_workers=args.num_workers,
419
+ # collate_fn=collate_fn,
420
+ # pin_memory=True,
421
+ # drop_last=False
422
+ # )
423
+ # # the section above was commented out and the data preprocessing below was added instead
424
+ # # # set up data preprocessing
425
+ # # crop_size = int(args.image_size * args.crop_range) if args.ten_crop else args.image_size
426
+ # # transform = transforms.Compose([
427
+ # # transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
428
+ # # transforms.ToTensor(),
429
+ # # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
430
+ # # ])
431
+
432
+ # # # load the dataset
433
+ # # dataset = load_dataset(args.data_path, cache_dir=None)
434
+
435
+ # # # **revised DataLoader**
436
+ # # loader = DataLoader(
437
+ # # dataset[args.split],
438
+ # # batch_size=1, # important!
439
+ # # shuffle=True, # **use shuffle for single-GPU training**
440
+ # # num_workers=args.num_workers,
441
+ # # collate_fn=collate_fn,
442
+ # # pin_memory=True,
443
+ # # drop_last=False
444
+ # # )
445
+
446
+ # from tqdm import tqdm
447
+ # total = 0
448
+ # code_len = 1024
449
+ # t5_feature_max_len = 120
450
+ # t5_feature_dim = 2048
451
+ # max_seq_length = t5_feature_max_len + code_len
452
+ # for batch in tqdm(loader):
453
+
454
+ # captions = batch['captions']
455
+
456
+ # # train_steps = rank + total
457
+ # img_save_path = f'{args.code_path}/image/{total}.png'
458
+ # cond_save_path = f'{args.code_path}/control/{total}.png'
459
+ # label_save_path = f'{args.code_path}/label/{total}.png'
460
+ # Image.fromarray((255*(batch['images'][0].numpy().transpose(1,2,0)*0.5+0.5)).astype('uint8'), mode='RGB').save(img_save_path)
461
+ # Image.fromarray((255*batch['conditioning_images'][0].numpy().transpose(1,2,0)).astype('uint8'), mode='RGB').save(cond_save_path)
462
+
463
+ # label = Image.fromarray(np.array(batch['labels'][0]).astype('uint8'))
464
+ # label.resize((512,512), Image.Resampling.NEAREST).save(label_save_path)
465
+ # with torch.no_grad():
466
+ # _, _, [_, _, indices] = vq_model.encode(batch['images'].to(device))
467
+
468
+ # caption_emb, emb_mask = t5_model.get_text_embeddings(captions)
469
+ # valid_num = int(emb_mask.sum().item())
470
+ # caption_emb = caption_emb[:, :valid_num]
471
+
472
+ # codes = indices.reshape(1, 1, -1)
473
+ # x = codes.detach().cpu().numpy() # (1, num_aug, args.image_size//16 * args.image_size//16)
474
+ # np.save(f'{args.code_path}/code/{total}.npy', x) # save the discrete tokens the image is encoded into
475
+
476
+ # caption_emb = caption_emb.to(torch.float32).detach().cpu().numpy()
477
+ # caption_dict = {}
478
+ # caption_dict['prompt'] = captions
479
+ # caption_dict['caption_emb'] = caption_emb
480
+ # np.savez(f'{args.code_path}/caption_emb/{total}.npz', **caption_dict) # save the T5 embeddings of the text
481
+ # # if not args.debug:
482
+ # # total += dist.get_world_size()
483
+ # # else:
484
+ # # total += 1
485
+ # total += 1
486
+ # dist.destroy_process_group()
487
+
488
+
489
+ # if __name__ == "__main__":
490
+ # parser = argparse.ArgumentParser()
491
+ # parser.add_argument("--code-path", type=str, required=True)
492
+ # parser.add_argument("--data-path", type=str, required=True)
493
+ # parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
494
+ # parser.add_argument("--vq-ckpt", type=str, required=True, help="ckpt path for vq model")
495
+ # parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
496
+ # parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
497
+ # parser.add_argument("--dataset", type=str, default='imagenet')
498
+ # parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
499
+ # parser.add_argument("--ten-crop", action='store_true', help="whether using random crop")
500
+ # parser.add_argument("--crop-range", type=float, default=1.1, help="expanding range of center crop")
501
+ # parser.add_argument("--global-seed", type=int, default=0)
502
+ # parser.add_argument("--num-workers", type=int, default=24)
503
+ # parser.add_argument("--debug", action='store_true')
504
+ # parser.add_argument("--min-threshold", type=int, default=200)
505
+ # parser.add_argument("--max-threshold", type=int, default=400)
506
+ # parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
507
+ # parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
508
+ # parser.add_argument("--t5-feature-max-len", type=int, default=120)
509
+ # parser.add_argument("--split", type=str, default='train')
510
+ # args = parser.parse_args()
511
+ # main(args)
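A sketch (inferred from the save calls in the active script above; the code_path and index are placeholders) of loading one extracted ADE20K sample back:

import numpy as np
from PIL import Image

i = 0                                              # per-rank sample index
codes = np.load(f'code/code/{i}.npy')              # (1, 1, 1024) token ids for a 512x512 image
cap = np.load(f'code/caption_emb/{i}.npz')
prompt, caption_emb = cap['prompt'], cap['caption_emb']   # caption_emb: (1, valid_len, 2048)
image = Image.open(f'code/image/{i}.png')          # resized RGB image
control = Image.open(f'code/control/{i}.png')      # segmentation color map used as control
print(prompt, codes.shape, caption_emb.shape)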
autoregressive/train/extract_file_cocostuff.py ADDED
@@ -0,0 +1,228 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/extract_features.py
3
+ import os
4
+ # os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
5
+ import torch
6
+ torch.backends.cuda.matmul.allow_tf32 = True
7
+ torch.backends.cudnn.allow_tf32 = True
8
+ import torch.distributed as dist
9
+ from torch.utils.data import DataLoader
10
+ from torch.utils.data.distributed import DistributedSampler
11
+ from torchvision import transforms
12
+ import numpy as np
13
+ import argparse
14
+ import os
15
+ import sys
16
+ current_directory = os.getcwd()
17
+ sys.path.append(current_directory)
18
+ from utils.distributed import init_distributed_mode
19
+ from dataset.augmentation import center_crop_arr
20
+ from dataset.build import build_dataset
21
+ from tokenizer.tokenizer_image.vq_model import VQ_models
22
+ from condition.hed import HEDdetector, ControlNetHED_Apache2
23
+ import cv2
24
+ from torch.nn.parallel import DataParallel
25
+ from einops import rearrange
26
+ from datasets import load_dataset
27
+ from torchvision import transforms
28
+ from PIL import Image
29
+ from language.t5 import T5Embedder
30
+ #################################################################################
31
+ # Training Loop #
32
+ #################################################################################
33
+ resolution = (512, 512)
34
+ image_transforms = transforms.Compose(
35
+ [
36
+ transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True),
37
+ transforms.ToTensor(),
38
+ transforms.Normalize([0.5], [0.5]),
39
+ ]
40
+ )
41
+ conditioning_image_transforms = transforms.Compose(
42
+ [
43
+ transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True),
44
+ transforms.ToTensor(),
45
+ ]
46
+ )
47
+ label_image_transforms = transforms.Compose(
48
+ [
49
+ transforms.Resize(resolution, interpolation=transforms.InterpolationMode.NEAREST, antialias=True),
50
+ ]
51
+ )
52
+
53
+ def collate_fn(examples):
54
+
55
+ pil_images = [example['image'].convert("RGB") for example in examples]
56
+ images = [image_transforms(image) for image in pil_images]
57
+ images = torch.stack(images)
58
+
59
+ conditioning_images = [example['control_seg'].convert("RGB") for example in examples]
60
+ conditioning_images = [conditioning_image_transforms(image) for image in conditioning_images]
61
+ conditioning_images = torch.stack(conditioning_images)
62
+
63
+ captions = [example['prompt'] for example in examples]
64
+
65
+ dtype = torch.long
66
+ # labels = [torch.from_numpy(np.array(example['panoptic_seg_map'])).unsqueeze(0) for example in examples] # seg_map panoptic_seg_map
67
+ # labels = [label_image_transforms(label) for label in labels]
68
+ # labels = torch.stack(labels)
69
+ labels = [example['panoptic_seg_map'] for example in examples]
70
+
71
+
72
+ return {
73
+ "images": images, # -1~1
74
+ "conditioning_images": conditioning_images, # 0~1
75
+ "captions": captions,
76
+ "labels": labels
77
+ }
78
+
79
+ def main(args):
80
+
81
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
82
+ # Setup DDP:
83
+ if not args.debug:
84
+ init_distributed_mode(args)
85
+ rank = dist.get_rank()
86
+ device = rank % torch.cuda.device_count()
87
+ seed = args.global_seed * dist.get_world_size() + rank
88
+ torch.manual_seed(seed)
89
+ torch.cuda.set_device(device)
90
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
91
+ else:
92
+ device = 'cuda'
93
+ rank = 0
94
+
95
+ # Setup a feature folder:
96
+ if args.debug or rank == 0:
97
+ os.makedirs(args.code_path, exist_ok=True)
98
+ os.makedirs(os.path.join(args.code_path, f'code'), exist_ok=True)
99
+ os.makedirs(os.path.join(args.code_path, f'image'), exist_ok=True)
100
+ os.makedirs(os.path.join(args.code_path, f'control'), exist_ok=True)
101
+ os.makedirs(os.path.join(args.code_path, f'caption_emb'), exist_ok=True)
102
+ if args.split == 'validation':
103
+ os.makedirs(os.path.join(args.code_path, f'label'), exist_ok=True)
104
+ # create and load model
105
+ vq_model = VQ_models[args.vq_model](
106
+ codebook_size=args.codebook_size,
107
+ codebook_embed_dim=args.codebook_embed_dim)
108
+ vq_model.to(device)
109
+ vq_model.eval()
110
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
111
+ vq_model.load_state_dict(checkpoint["model"])
112
+ del checkpoint
113
+
114
+ t5_model = T5Embedder(
115
+ device=device,
116
+ local_cache=True,
117
+ cache_dir=args.t5_path,
118
+ dir_or_name=args.t5_model_type,
119
+ model_max_length=args.t5_feature_max_len,
120
+ )
121
+
122
+ # Setup data:
123
+ if args.ten_crop:
124
+ crop_size = int(args.image_size * args.crop_range)
125
+ transform = transforms.Compose([
126
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
127
+ transforms.TenCrop(args.image_size), # this is a tuple of PIL Images
128
+ transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), # returns a 4D tensor
129
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
130
+ ])
131
+ else:
132
+ crop_size = args.image_size
133
+ transform = transforms.Compose([
134
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
135
+ transforms.ToTensor(),
136
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
137
+ ])
138
+ dataset = load_dataset(
139
+ args.data_path,
140
+ cache_dir=None,
141
+ )
142
+ if not args.debug:
143
+ sampler = DistributedSampler(
144
+ dataset[args.split],
145
+ num_replicas=dist.get_world_size(),
146
+ rank=rank,
147
+ shuffle=False,
148
+ seed=args.global_seed
149
+ )
150
+ else:
151
+ sampler = None
152
+ loader = DataLoader(
153
+ dataset[args.split],
154
+ batch_size=1, # important!
155
+ shuffle=False,
156
+ sampler=sampler,
157
+ num_workers=args.num_workers,
158
+ collate_fn=collate_fn,
159
+ pin_memory=True,
160
+ drop_last=False
161
+ )
162
+
163
+ from tqdm import tqdm
164
+ total = 0
165
+ code_len = 1024
166
+ t5_feature_max_len = 120
167
+ t5_feature_dim = 2048
168
+ max_seq_length = t5_feature_max_len + code_len
169
+ for batch in tqdm(loader):
170
+
171
+ captions = batch['captions']
172
+
173
+ train_steps = rank + total
174
+ img_save_path = f'{args.code_path}/image/{train_steps}.png'
175
+ cond_save_path = f'{args.code_path}/control/{train_steps}.png'
176
+ label_save_path = f'{args.code_path}/label/{train_steps}.png'
177
+ Image.fromarray((255*(batch['images'][0].numpy().transpose(1,2,0)*0.5+0.5)).astype('uint8'), mode='RGB').save(img_save_path)
178
+ Image.fromarray((255*batch['conditioning_images'][0].numpy().transpose(1,2,0)).astype('uint8'), mode='RGB').save(cond_save_path)
179
+
180
+ label = Image.fromarray(np.array(batch['labels'][0]).astype('uint8'))
181
+ label.resize((512,512), Image.Resampling.NEAREST).save(label_save_path)
182
+ with torch.no_grad():
183
+ _, _, [_, _, indices] = vq_model.encode(batch['images'].to(device))
184
+
185
+ caption_emb, emb_mask = t5_model.get_text_embeddings(captions)
186
+ valid_num = int(emb_mask.sum().item())
187
+ caption_emb = caption_emb[:, :valid_num]
188
+
189
+ codes = indices.reshape(1, 1, -1)
190
+ x = codes.detach().cpu().numpy() # (1, num_aug, args.image_size//16 * args.image_size//16)
191
+ np.save(f'{args.code_path}/code/{train_steps}.npy', x)
192
+
193
+ caption_emb = caption_emb.to(torch.float32).detach().cpu().numpy()
194
+ caption_dict = {}
195
+ caption_dict['prompt'] = captions
196
+ caption_dict['caption_emb'] = caption_emb
197
+ np.savez(f'{args.code_path}/caption_emb/{train_steps}.npz', **caption_dict)
198
+ if not args.debug:
199
+ total += dist.get_world_size()
200
+ else:
201
+ total += 1
202
+
203
+ dist.destroy_process_group()
204
+
205
+
206
+ if __name__ == "__main__":
207
+ parser = argparse.ArgumentParser()
208
+ parser.add_argument("--code-path", type=str, required=True)
209
+ parser.add_argument("--data-path", type=str, required=True)
210
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
211
+ parser.add_argument("--vq-ckpt", type=str, required=True, help="ckpt path for vq model")
212
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
213
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
214
+ parser.add_argument("--dataset", type=str, default='imagenet')
215
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
216
+ parser.add_argument("--ten-crop", action='store_true', help="whether using random crop")
217
+ parser.add_argument("--crop-range", type=float, default=1.1, help="expanding range of center crop")
218
+ parser.add_argument("--global-seed", type=int, default=0)
219
+ parser.add_argument("--num-workers", type=int, default=24)
220
+ parser.add_argument("--debug", action='store_true')
221
+ parser.add_argument("--min-threshold", type=int, default=200)
222
+ parser.add_argument("--max-threshold", type=int, default=400)
223
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
224
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
225
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
226
+ parser.add_argument("--split", type=str, default='train')
227
+ args = parser.parse_args()
228
+ main(args)
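A small standalone sketch of the emb_mask truncation used above (toy values; it assumes, as in the script, that the embedder returns a padded (B, max_len, dim) tensor plus a 0/1 mask):

import torch

caption_emb = torch.randn(1, 120, 2048)               # padded T5 features
emb_mask = torch.zeros(1, 120)
emb_mask[:, :17] = 1                                  # 17 real tokens, the rest is padding
valid_num = int(emb_mask.sum().item())
caption_emb = caption_emb[:, :valid_num]              # keep only the real tokens before saving
print(caption_emb.shape)                              # torch.Size([1, 17, 2048])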
autoregressive/train/extract_file_imagenet.py ADDED
@@ -0,0 +1,168 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/extract_features.py
3
+ import os
4
+ # os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
5
+ import torch
6
+ torch.backends.cuda.matmul.allow_tf32 = True
7
+ torch.backends.cudnn.allow_tf32 = True
8
+ import torch.distributed as dist
9
+ from torch.utils.data import DataLoader
10
+ from torch.utils.data.distributed import DistributedSampler
11
+ from torchvision import transforms
12
+ import numpy as np
13
+ import argparse
14
+ import os
15
+ import sys
16
+ current_directory = os.getcwd()
17
+ sys.path.append(current_directory)
18
+ from utils.distributed import init_distributed_mode
19
+ from dataset.augmentation import center_crop_arr
20
+ from dataset.build import build_dataset
21
+ from tokenizer.tokenizer_image.vq_model import VQ_models
22
+ from condition.canny import CannyDetector
23
+ import cv2
24
+ from condition.midas.depth import MidasDetector
25
+
26
+ #################################################################################
27
+ # Training Loop #
28
+ #################################################################################
29
+ def main(args):
30
+
31
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
32
+ # Setup DDP:
33
+ if not args.debug:
34
+ init_distributed_mode(args)
35
+ rank = dist.get_rank()
36
+ device = rank % torch.cuda.device_count()
37
+ seed = args.global_seed * dist.get_world_size() + rank
38
+ torch.manual_seed(seed)
39
+ torch.cuda.set_device(device)
40
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
41
+ else:
42
+ device = 'cuda'
43
+ rank = 0
44
+
45
+ # Setup a feature folder:
46
+ if args.debug or rank == 0:
47
+ os.makedirs(args.code_path, exist_ok=True)
48
+ os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_codes'), exist_ok=True)
49
+ os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_labels'), exist_ok=True)
50
+ os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_canny_imagesnpy'), exist_ok=True)
51
+ os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_canny_images'), exist_ok=True)
52
+ os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_depth_imagesnpy'), exist_ok=True)
53
+ os.makedirs(os.path.join(args.code_path, f'{args.dataset}{args.image_size}_depth_images'), exist_ok=True)
54
+ # create and load model
55
+ vq_model = VQ_models[args.vq_model](
56
+ codebook_size=args.codebook_size,
57
+ codebook_embed_dim=args.codebook_embed_dim)
58
+ vq_model.to(device)
59
+ vq_model.eval()
60
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
61
+ vq_model.load_state_dict(checkpoint["model"])
62
+ del checkpoint
63
+
64
+ # Setup data:
65
+ if args.ten_crop:
66
+ crop_size = int(args.image_size * args.crop_range)
67
+ transform = transforms.Compose([
68
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
69
+ transforms.TenCrop(args.image_size), # this is a tuple of PIL Images
70
+ transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), # returns a 4D tensor
71
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
72
+ ])
73
+ else:
74
+ crop_size = args.image_size
75
+ transform = transforms.Compose([
76
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
77
+ transforms.ToTensor(),
78
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
79
+ ])
80
+ dataset = build_dataset(args, transform=transform)
81
+ if not args.debug:
82
+ sampler = DistributedSampler(
83
+ dataset,
84
+ num_replicas=dist.get_world_size(),
85
+ rank=rank,
86
+ shuffle=False,
87
+ seed=args.global_seed
88
+ )
89
+ else:
90
+ sampler = None
91
+ loader = DataLoader(
92
+ dataset,
93
+ batch_size=1, # important!
94
+ shuffle=False,
95
+ sampler=sampler,
96
+ num_workers=args.num_workers,
97
+ pin_memory=True,
98
+ drop_last=False
99
+ )
100
+ apply_canny = CannyDetector()
101
+ depth_model = MidasDetector(device=device)
102
+
103
+ from tqdm import tqdm
104
+ total = 0
105
+ for x, y in tqdm(loader):
106
+ x = x.to(device)
107
+ batch_size_per_gpu = x.shape[0]
108
+ if args.ten_crop:
109
+ x_all = x.flatten(0, 1)
110
+ num_aug = 10
111
+ else:
112
+ x_flip = torch.flip(x, dims=[-1])
113
+ x_all = torch.cat([x, x_flip])
114
+ num_aug = 2
115
+ y = y.to(device)
116
+
117
+ canny = []
118
+ depths = []
119
+ for i in range(x_all.shape[0]):
120
+ canny.append(apply_canny((255*(x_all[i]*0.5 + 0.5)).cpu().numpy().transpose(1,2,0).astype(np.uint8),low_threshold=args.min_threshold, high_threshold=args.max_threshold)[None,None,...])
121
+ img = (255*(x_all[i]*0.5 + 0.5)).permute(1,2,0)
122
+ depth = depth_model(img)
123
+ depths.append(depth[None,None,...])
124
+ depths = np.concatenate(depths, axis=0)
125
+ cannys = np.concatenate(canny, axis=0)
126
+ train_steps = rank + total
127
+ np.save(f'{args.code_path}/{args.dataset}{args.image_size}_canny_imagesnpy/{train_steps}.npy', cannys)
128
+ np.save(f'{args.code_path}/{args.dataset}{args.image_size}_depth_imagesnpy/{train_steps}.npy', depths)
129
+ with torch.no_grad():
130
+ _, _, [_, _, indices] = vq_model.encode(x_all)
131
+
132
+ codes = indices.reshape(x.shape[0], num_aug, -1)
133
+ x = codes.detach().cpu().numpy() # (1, num_aug, args.image_size//16 * args.image_size//16)
134
+ y = y.detach().cpu().numpy() # (1,)
135
+
136
+ train_steps = rank + total
137
+
138
+ cv2.imwrite(f'{args.code_path}/{args.dataset}{args.image_size}_canny_images/{train_steps}.png', cannys[0,0])
139
+ cv2.imwrite(f'{args.code_path}/{args.dataset}{args.image_size}_depth_images/{train_steps}.png', depths[0,0])
140
+ np.save(f'{args.code_path}/{args.dataset}{args.image_size}_codes/{train_steps}.npy', x)
141
+ np.save(f'{args.code_path}/{args.dataset}{args.image_size}_labels/{train_steps}.npy', y)
142
+ if not args.debug:
143
+ total += dist.get_world_size()
144
+ else:
145
+ total += 1
146
+
147
+ dist.destroy_process_group()
148
+
149
+
150
+ if __name__ == "__main__":
151
+ parser = argparse.ArgumentParser()
152
+ parser.add_argument("--data-path", type=str, required=True)
153
+ parser.add_argument("--code-path", type=str, required=True)
154
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
155
+ parser.add_argument("--vq-ckpt", type=str, required=True, help="ckpt path for vq model")
156
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
157
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
158
+ parser.add_argument("--dataset", type=str, default='imagenet')
159
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
160
+ parser.add_argument("--ten-crop", action='store_true', help="whether using random crop")
161
+ parser.add_argument("--crop-range", type=float, default=1.1, help="expanding range of center crop")
162
+ parser.add_argument("--global-seed", type=int, default=0)
163
+ parser.add_argument("--num-workers", type=int, default=24)
164
+ parser.add_argument("--debug", action='store_true')
165
+ parser.add_argument("--min-threshold", type=int, default=100)
166
+ parser.add_argument("--max-threshold", type=int, default=200)
167
+ args = parser.parse_args()
168
+ main(args)
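A sketch of how the per-sample arrays written above line up (shapes inferred from the loop; file names assume --dataset imagenet --image-size 256 and --code-path code):

import numpy as np

i = 0
codes = np.load(f'code/imagenet256_codes/{i}.npy')            # (1, 2, 256): [:, 0] original, [:, 1] horizontal flip
canny = np.load(f'code/imagenet256_canny_imagesnpy/{i}.npy')  # (2, 1, 256, 256) uint8 edge maps, same order
depth = np.load(f'code/imagenet256_depth_imagesnpy/{i}.npy')  # (2, 1, 256, 256) depth maps
label = np.load(f'code/imagenet256_labels/{i}.npy')           # (1,) class id
print(codes.shape, canny.shape, depth.shape, int(label[0]))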
autoregressive/train/extract_file_multigen.py ADDED
@@ -0,0 +1,228 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/extract_features.py
3
+
4
+ import warnings
5
+ warnings.filterwarnings("ignore")
6
+ import os
7
+ # os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
8
+ import torch
9
+ torch.backends.cuda.matmul.allow_tf32 = True
10
+ torch.backends.cudnn.allow_tf32 = True
11
+ import torch.distributed as dist
12
+ from torch.utils.data import DataLoader
13
+ from torch.utils.data.distributed import DistributedSampler
14
+ from torchvision import transforms
15
+ import numpy as np
16
+ import argparse
17
+ import os
18
+ import sys
19
+ current_directory = os.getcwd()
20
+ sys.path.append(current_directory)
21
+ from utils.distributed import init_distributed_mode
22
+ from dataset.augmentation import center_crop_arr
23
+ from dataset.build import build_dataset
24
+ from tokenizer.tokenizer_image.vq_model import VQ_models
25
+ from condition.hed import HEDdetector, ControlNetHED_Apache2
26
+ import cv2
27
+ from torch.nn.parallel import DataParallel
28
+ from einops import rearrange
29
+ from datasets import load_dataset
30
+ from torchvision import transforms
31
+ from PIL import Image
32
+ from language.t5 import T5Embedder
33
+ from condition.canny import CannyDetector
34
+ from condition.hed import HEDdetector
35
+ from condition.lineart import LineArt
36
+ #################################################################################
37
+ # Training Loop #
38
+ #################################################################################
39
+ resolution = (512, 512)
40
+ image_transforms = transforms.Compose(
41
+ [
42
+ transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True),
43
+ transforms.ToTensor(),
44
+ transforms.Normalize([0.5], [0.5]),
45
+ ]
46
+ )
47
+ conditioning_image_transforms = transforms.Compose(
48
+ [
49
+ transforms.Resize(resolution, interpolation=transforms.InterpolationMode.BICUBIC, antialias=True),
50
+ transforms.ToTensor(),
51
+ ]
52
+ )
53
+ label_image_transforms = transforms.Compose(
54
+ [
55
+ transforms.Resize(resolution, interpolation=transforms.InterpolationMode.NEAREST, antialias=True),
56
+ ]
57
+ )
58
+
59
+ def collate_fn(examples):
60
+
61
+ pil_images = [example['image'].convert("RGB") for example in examples]
62
+ images = [image_transforms(image) for image in pil_images]
63
+ images = torch.stack(images)
64
+
65
+ conditioning_images = [example['control_depth'].convert("RGB") for example in examples]
66
+ conditioning_images = [conditioning_image_transforms(image) for image in conditioning_images]
67
+ conditioning_images = torch.stack(conditioning_images)
68
+ captions = [example['text'] for example in examples]
69
+
70
+
71
+
72
+ return {
73
+ "images": images, # -1~1
74
+ "conditioning_images": conditioning_images, # 0~1
75
+ "captions": captions,
76
+ # "labels": labels
77
+ }
78
+
79
+ def main(args):
80
+
81
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
82
+ # Setup DDP:
83
+ if not args.debug:
84
+ init_distributed_mode(args)
85
+ rank = dist.get_rank()
86
+ device = rank % torch.cuda.device_count()
87
+ seed = args.global_seed * dist.get_world_size() + rank
88
+ torch.manual_seed(seed)
89
+ torch.cuda.set_device(device)
90
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
91
+ else:
92
+ device = 'cuda'
93
+ rank = 0
94
+
95
+ # Setup a feature folder:
96
+ if args.debug or rank == 0:
97
+ os.makedirs(args.code_path, exist_ok=True)
98
+ os.makedirs(os.path.join(args.code_path, f'code'), exist_ok=True)
99
+ os.makedirs(os.path.join(args.code_path, f'image'), exist_ok=True)
100
+ os.makedirs(os.path.join(args.code_path, f'control_depth'), exist_ok=True)
101
+ os.makedirs(os.path.join(args.code_path, f'caption_emb'), exist_ok=True)
102
+
103
+ # create and load model
104
+ vq_model = VQ_models[args.vq_model](
105
+ codebook_size=args.codebook_size,
106
+ codebook_embed_dim=args.codebook_embed_dim)
107
+ vq_model.to(device)
108
+ vq_model.eval()
109
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
110
+ vq_model.load_state_dict(checkpoint["model"])
111
+ del checkpoint
112
+
113
+ t5_model = T5Embedder(
114
+ device=device,
115
+ local_cache=True,
116
+ cache_dir=args.t5_path,
117
+ dir_or_name=args.t5_model_type,
118
+ model_max_length=args.t5_feature_max_len,
119
+ )
120
+
121
+ # Setup data:
122
+ if args.ten_crop:
123
+ crop_size = int(args.image_size * args.crop_range)
124
+ transform = transforms.Compose([
125
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
126
+ transforms.TenCrop(args.image_size), # this is a tuple of PIL Images
127
+ transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])), # returns a 4D tensor
128
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
129
+ ])
130
+ else:
131
+ crop_size = args.image_size
132
+ transform = transforms.Compose([
133
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, crop_size)),
134
+ transforms.ToTensor(),
135
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
136
+ ])
137
+ dataset = load_dataset(
138
+ args.data_path,
139
+ cache_dir=None,
140
+ )
141
+ try:
142
+ if not args.debug:
143
+ sampler = DistributedSampler(
144
+ dataset[args.split],
145
+ num_replicas=dist.get_world_size(),
146
+ rank=rank,
147
+ shuffle=False,
148
+ seed=args.global_seed
149
+ )
150
+ else:
151
+ sampler = None
152
+ loader = DataLoader(
153
+ dataset[args.split],
154
+ batch_size=1, # important!
155
+ shuffle=False,
156
+ sampler=sampler,
157
+ num_workers=args.num_workers,
158
+ collate_fn=collate_fn,
159
+ pin_memory=True,
160
+ drop_last=False
161
+ )
162
+ except Exception as e:
163
+ raise  # do not silently swallow dataloader errors; `loader` is required below
164
+
165
+ from tqdm import tqdm
166
+ total = 0
167
+ code_len = 1024
168
+ t5_feature_max_len = 120
169
+ t5_feature_dim = 2048
170
+ max_seq_length = t5_feature_max_len + code_len
171
+ for batch in tqdm(loader):
172
+
173
+ captions = batch['captions']
174
+
175
+ train_steps = rank + total
176
+ img_save_path = f'{args.code_path}/image/{train_steps}.png'
177
+ depth_save_path = f'{args.code_path}/control_depth/{train_steps}.png'
178
+
179
+ Image.fromarray((255*(batch['images'][0].numpy().transpose(1,2,0)*0.5+0.5)).astype('uint8'), mode='RGB').save(img_save_path)
180
+ Image.fromarray((255*batch['conditioning_images'][0].numpy().transpose(1,2,0)).astype('uint8'), mode='RGB').save(depth_save_path)
181
+
182
+ with torch.no_grad():
183
+ _, _, [_, _, indices] = vq_model.encode(batch['images'].to(device))
184
+
185
+ caption_emb, emb_mask = t5_model.get_text_embeddings(captions)
186
+ valid_num = int(emb_mask.sum().item())
187
+ caption_emb = caption_emb[:, :valid_num]
188
+
189
+ codes = indices.reshape(1, 1, -1)
190
+ x = codes.detach().cpu().numpy() # (1, num_aug, args.image_size//16 * args.image_size//16)
191
+ np.save(f'{args.code_path}/code/{train_steps}.npy', x)
192
+
193
+ caption_emb = caption_emb.to(torch.float32).detach().cpu().numpy()
194
+ caption_dict = {}
195
+ caption_dict['prompt'] = captions
196
+ caption_dict['caption_emb'] = caption_emb
197
+ np.savez(f'{args.code_path}/caption_emb/{train_steps}.npz', **caption_dict)
198
+ if not args.debug:
199
+ total += dist.get_world_size()
200
+ else:
201
+ total += 1
202
+
203
+ dist.destroy_process_group()
204
+
205
+
206
+ if __name__ == "__main__":
207
+ parser = argparse.ArgumentParser()
208
+ parser.add_argument("--data-path", type=str, required=True)
209
+ parser.add_argument("--code-path", type=str, required=True)
210
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
211
+ parser.add_argument("--vq-ckpt", type=str, required=True, help="ckpt path for vq model")
212
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
213
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
214
+ parser.add_argument("--dataset", type=str, default='imagenet')
215
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
216
+ parser.add_argument("--ten-crop", action='store_true', help="whether to use ten-crop augmentation")
217
+ parser.add_argument("--crop-range", type=float, default=1.1, help="expanding range of center crop")
218
+ parser.add_argument("--global-seed", type=int, default=0)
219
+ parser.add_argument("--num-workers", type=int, default=24)
220
+ parser.add_argument("--debug", action='store_true')
221
+ parser.add_argument("--min-threshold", type=int, default=200)
222
+ parser.add_argument("--max-threshold", type=int, default=400)
223
+ parser.add_argument("--t5-path", type=str, default='checkpoints/t5-ckpt')
224
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
225
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
226
+ parser.add_argument("--split", type=str, default='train')
227
+ args = parser.parse_args()
228
+ main(args)
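For reference, a minimal sketch of loading one sample back from the layout this script writes (`code/{i}.npy` for the VQ indices, `caption_emb/{i}.npz` with `prompt` and `caption_emb` keys). The `load_sample` helper, the `extracted_codes` path, and the index 0 are illustrative assumptions, not part of the repository:

import numpy as np

def load_sample(code_path: str, index: int) -> dict:
    # VQ token indices; shape (1, 1, 1024) for 512x512 images with a 16x downsample.
    codes = np.load(f"{code_path}/code/{index}.npy")
    # T5 caption embedding; caption_emb has shape (1, valid_len, 2048).
    caption = np.load(f"{code_path}/caption_emb/{index}.npz")
    return {
        "codes": codes,
        "prompt": caption["prompt"],
        "caption_emb": caption["caption_emb"],
    }

if __name__ == "__main__":
    sample = load_sample("extracted_codes", 0)
    print(sample["codes"].shape, sample["caption_emb"].shape)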
autoregressive/train/train_c2i.py ADDED
@@ -0,0 +1,294 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/train.py
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT/blob/master/model.py
4
+ import torch
5
+ torch.backends.cuda.matmul.allow_tf32 = True
6
+ torch.backends.cudnn.allow_tf32 = True
7
+ import torch.distributed as dist
8
+ from torch.nn.parallel import DistributedDataParallel as DDP
9
+ from torch.utils.data import DataLoader
10
+ from torch.utils.data.distributed import DistributedSampler
11
+ from glob import glob
12
+ from copy import deepcopy
13
+ import os
14
+ import time
15
+ import inspect
16
+ import argparse
17
+
18
+ from utils.logger import create_logger
19
+ from utils.distributed import init_distributed_mode
20
+ from utils.ema import update_ema, requires_grad
21
+ from dataset.build import build_dataset
22
+ from autoregressive.models.gpt import GPT_models
23
+
24
+
25
+ #################################################################################
26
+ # Training Helper Functions #
27
+ #################################################################################
28
+ def creat_optimizer(model, weight_decay, learning_rate, betas, logger):
29
+ # start with all of the candidate parameters
30
+ param_dict = {pn: p for pn, p in model.named_parameters()}
31
+ # filter out those that do not require grad
32
+ param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}
33
+ # create optim groups. Any parameter that is 2-D will be weight decayed; all others will not.
34
+ # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.
35
+ decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
36
+ nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
37
+ optim_groups = [
38
+ {'params': decay_params, 'weight_decay': weight_decay},
39
+ {'params': nodecay_params, 'weight_decay': 0.0}
40
+ ]
41
+ num_decay_params = sum(p.numel() for p in decay_params)
42
+ num_nodecay_params = sum(p.numel() for p in nodecay_params)
43
+ logger.info(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
44
+ logger.info(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
45
+ # Create AdamW optimizer and use the fused version if it is available
46
+ fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
47
+ extra_args = dict(fused=True) if fused_available else dict()
48
+ optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
49
+ logger.info(f"using fused AdamW: {fused_available}")
50
+ return optimizer
51
+
52
+
53
+
54
+ #################################################################################
55
+ # Training Loop #
56
+ #################################################################################
57
+ def main(args):
58
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
59
+
60
+ # Setup DDP:
61
+ init_distributed_mode(args)
62
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
63
+ rank = dist.get_rank()
64
+ device = rank % torch.cuda.device_count()
65
+ seed = args.global_seed * dist.get_world_size() + rank
66
+ torch.manual_seed(seed)
67
+ torch.cuda.set_device(device)
68
+
69
+ # Setup an experiment folder:
70
+ if rank == 0:
71
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
72
+ experiment_index = len(glob(f"{args.results_dir}/*"))
73
+ model_string_name = args.gpt_model.replace("/", "-") # e.g., GPT-XL/2 --> GPT-XL-2 (for naming folders)
74
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}" # Create an experiment folder
75
+ checkpoint_dir = f"{experiment_dir}/checkpoints" # Stores saved model checkpoints
76
+ os.makedirs(checkpoint_dir, exist_ok=True)
77
+ logger = create_logger(experiment_dir)
78
+ logger.info(f"Experiment directory created at {experiment_dir}")
79
+
80
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
81
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
82
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
83
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
84
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
85
+
86
+ else:
87
+ logger = create_logger(None)
88
+
89
+ # training args
90
+ logger.info(f"{args}")
91
+
92
+ # training env
93
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
94
+
95
+
96
+ # Setup model
97
+ if args.drop_path_rate > 0.0:
98
+ dropout_p = 0.0
99
+ else:
100
+ dropout_p = args.dropout_p
101
+ latent_size = args.image_size // args.downsample_size
102
+ model = GPT_models[args.gpt_model](
103
+ vocab_size=args.vocab_size,
104
+ block_size=latent_size ** 2,
105
+ num_classes=args.num_classes,
106
+ cls_token_num=args.cls_token_num,
107
+ model_type=args.gpt_type,
108
+ resid_dropout_p=dropout_p,
109
+ ffn_dropout_p=dropout_p,
110
+ drop_path_rate=args.drop_path_rate,
111
+ token_dropout_p=args.token_dropout_p,
112
+ ).to(device)
113
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
114
+
115
+ if args.ema:
116
+ ema = deepcopy(model).to(device) # Create an EMA of the model for use after training
117
+ requires_grad(ema, False)
118
+ logger.info(f"EMA Parameters: {sum(p.numel() for p in ema.parameters()):,}")
119
+
120
+ # Setup optimizer
121
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
122
+
123
+ # Setup data:
124
+ dataset = build_dataset(args)
125
+ sampler = DistributedSampler(
126
+ dataset,
127
+ num_replicas=dist.get_world_size(),
128
+ rank=rank,
129
+ shuffle=True,
130
+ seed=args.global_seed
131
+ )
132
+ loader = DataLoader(
133
+ dataset,
134
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
135
+ shuffle=False,
136
+ sampler=sampler,
137
+ num_workers=args.num_workers,
138
+ pin_memory=True,
139
+ drop_last=True
140
+ )
141
+ flip_info = 'with' if dataset.flip else 'without'
142
+ aug_info = 10 if 'ten_crop' in dataset.feature_dir else 1
143
+ aug_info = 2 * aug_info if dataset.aug_feature_dir is not None else aug_info
144
+ logger.info(f"Dataset contains {len(dataset):,} images ({args.code_path}) "
145
+ f"{flip_info} flip augmentation and {aug_info} crop augmentation")
146
+
147
+ # Prepare models for training:
148
+ if args.gpt_ckpt:
149
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
150
+ model.load_state_dict(checkpoint["model"])
151
+ if args.ema:
152
+ ema.load_state_dict(checkpoint["ema"] if "ema" in checkpoint else checkpoint["model"])
153
+ optimizer.load_state_dict(checkpoint["optimizer"])
154
+ train_steps = checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
155
+ start_epoch = int(train_steps / int(len(dataset) / args.global_batch_size))
156
+ train_steps = int(start_epoch * int(len(dataset) / args.global_batch_size))
157
+ del checkpoint
158
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
159
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
160
+ else:
161
+ train_steps = 0
162
+ start_epoch = 0
163
+ if args.ema:
164
+ update_ema(ema, model, decay=0) # Ensure EMA is initialized with synced weights
165
+
166
+ if not args.no_compile:
167
+ logger.info("compiling the model... (may take several minutes)")
168
+ model = torch.compile(model) # requires PyTorch 2.0
169
+
170
+ model = DDP(model.to(device), device_ids=[args.gpu])
171
+ model.train() # important! This enables embedding dropout for classifier-free guidance
172
+ if args.ema:
173
+ ema.eval() # EMA model should always be in eval mode
174
+
175
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
176
+ # initialize a GradScaler. If enabled=False scaler is a no-op
177
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
178
+ # Variables for monitoring/logging purposes:
179
+ log_steps = 0
180
+ running_loss = 0
181
+ start_time = time.time()
182
+
183
+ logger.info(f"Training for {args.epochs} epochs...")
184
+ for epoch in range(start_epoch, args.epochs):
185
+ sampler.set_epoch(epoch)
186
+ logger.info(f"Beginning epoch {epoch}...")
187
+ for x, y in loader:
188
+ x = x.to(device, non_blocking=True)
189
+ y = y.to(device, non_blocking=True)
190
+ z_indices = x.reshape(x.shape[0], -1)
191
+ c_indices = y.reshape(-1)
192
+ assert z_indices.shape[0] == c_indices.shape[0]
193
+ with torch.cuda.amp.autocast(dtype=ptdtype):
194
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices)
195
+ # backward pass, with gradient scaling if training in fp16
196
+ scaler.scale(loss).backward()
197
+ if args.max_grad_norm != 0.0:
198
+ scaler.unscale_(optimizer)
199
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
200
+ # step the optimizer and scaler if training in fp16
201
+ scaler.step(optimizer)
202
+ scaler.update()
203
+ # flush the gradients as soon as we can, no need for this memory anymore
204
+ optimizer.zero_grad(set_to_none=True)
205
+ if args.ema:
206
+ update_ema(ema, model.module._orig_mod if not args.no_compile else model.module)
207
+
208
+ # Log loss values:
209
+ running_loss += loss.item()
210
+ log_steps += 1
211
+ train_steps += 1
212
+ if train_steps % args.log_every == 0:
213
+ # Measure training speed:
214
+ torch.cuda.synchronize()
215
+ end_time = time.time()
216
+ steps_per_sec = log_steps / (end_time - start_time)
217
+ # Reduce loss history over all processes:
218
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
219
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
220
+ avg_loss = avg_loss.item() / dist.get_world_size()
221
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
222
+ # Reset monitoring variables:
223
+ running_loss = 0
224
+ log_steps = 0
225
+ start_time = time.time()
226
+
227
+ # Save checkpoint:
228
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
229
+ if rank == 0:
230
+ if not args.no_compile:
231
+ model_weight = model.module._orig_mod.state_dict()
232
+ else:
233
+ model_weight = model.module.state_dict()
234
+ checkpoint = {
235
+ "model": model_weight,
236
+ "optimizer": optimizer.state_dict(),
237
+ "steps": train_steps,
238
+ "args": args
239
+ }
240
+ if args.ema:
241
+ checkpoint["ema"] = ema.state_dict()
242
+ if not args.no_local_save:
243
+ checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
244
+ torch.save(checkpoint, checkpoint_path)
245
+ logger.info(f"Saved checkpoint to {checkpoint_path}")
246
+
247
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
248
+ torch.save(checkpoint, cloud_checkpoint_path)
249
+ logger.info(f"Saved checkpoint in cloud to {cloud_checkpoint_path}")
250
+ dist.barrier()
251
+
252
+ model.eval() # important! This disables randomized embedding dropout
253
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
254
+
255
+ logger.info("Done!")
256
+ dist.destroy_process_group()
257
+
258
+
259
+
260
+ if __name__ == "__main__":
261
+ parser = argparse.ArgumentParser()
262
+ parser.add_argument("--code-path", type=str, required=True)
263
+ parser.add_argument("--cloud-save-path", type=str, required=True, help='please specify a cloud disk path, if not, local path')
264
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
265
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-B")
266
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
267
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="c2i", help="class-conditional or text-conditional")
268
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
269
+ parser.add_argument("--ema", action='store_true', help="whether using ema training")
270
+ parser.add_argument("--cls-token-num", type=int, default=1, help="max token number of condition input")
271
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
272
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
273
+ parser.add_argument("--drop-path-rate", type=float, default=0.0, help="using stochastic depth decay")
274
+ parser.add_argument("--no-compile", action='store_true')
275
+ parser.add_argument("--results-dir", type=str, default="results")
276
+ parser.add_argument("--dataset", type=str, default='imagenet_code')
277
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
278
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
279
+ parser.add_argument("--num-classes", type=int, default=1000)
280
+ parser.add_argument("--epochs", type=int, default=300)
281
+ parser.add_argument("--lr", type=float, default=1e-4)
282
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use")
283
+ parser.add_argument("--beta1", type=float, default=0.9, help="beta1 parameter for the Adam optimizer")
284
+ parser.add_argument("--beta2", type=float, default=0.95, help="beta2 parameter for the Adam optimizer")
285
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
286
+ parser.add_argument("--global-batch-size", type=int, default=256)
287
+ parser.add_argument("--global-seed", type=int, default=0)
288
+ parser.add_argument("--num-workers", type=int, default=24)
289
+ parser.add_argument("--log-every", type=int, default=100)
290
+ parser.add_argument("--ckpt-every", type=int, default=5000)
291
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
292
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
293
+ args = parser.parse_args()
294
+ main(args)
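A standalone sketch of the weight-decay grouping rule inside `creat_optimizer` above: parameters with two or more dimensions (matmul weights, embeddings) are decayed, 1-D parameters (biases, norm scales) are not. The toy module and the `split_decay_groups` name are illustrative only:

import torch
import torch.nn as nn

def split_decay_groups(model: nn.Module, weight_decay: float) -> list:
    params = {n: p for n, p in model.named_parameters() if p.requires_grad}
    decay = [p for p in params.values() if p.dim() >= 2]
    no_decay = [p for p in params.values() if p.dim() < 2]
    return [
        {"params": decay, "weight_decay": weight_decay},
        {"params": no_decay, "weight_decay": 0.0},
    ]

if __name__ == "__main__":
    toy = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
    groups = split_decay_groups(toy, weight_decay=5e-2)
    optimizer = torch.optim.AdamW(groups, lr=1e-4, betas=(0.9, 0.95))
    # [1, 3]: the linear weight is decayed; the bias and LayerNorm affine parameters are not.
    print([len(g["params"]) for g in groups])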
autoregressive/train/train_c2i_canny.py ADDED
@@ -0,0 +1,306 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/train.py
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT/blob/master/model.py
4
+ import torch
5
+ torch.backends.cuda.matmul.allow_tf32 = True
6
+ torch.backends.cudnn.allow_tf32 = True
7
+ import torch.distributed as dist
8
+ from torch.nn.parallel import DistributedDataParallel as DDP
9
+ from torch.utils.data import DataLoader
10
+ from torch.utils.data.distributed import DistributedSampler
11
+ from glob import glob
12
+ from copy import deepcopy
13
+ import os
14
+ import time
15
+ import inspect
16
+ import argparse
17
+ import sys
18
+ current_directory = os.getcwd()
19
+ sys.path.append(current_directory)
20
+ from utils.logger import create_logger
21
+ from utils.distributed import init_distributed_mode
22
+ from utils.ema import update_ema, requires_grad
23
+ from dataset.build import build_dataset
24
+ from autoregressive.models.gpt import GPT_models
25
+ from tokenizer.tokenizer_image.vq_model import VQ_models
26
+
27
+
28
+ #################################################################################
29
+ # Training Helper Functions #
30
+ #################################################################################
31
+ def creat_optimizer(model, weight_decay, learning_rate, betas, logger):
32
+ # start with all of the candidate parameters
33
+ param_dict = {pn: p for pn, p in model.named_parameters()}
34
+ # filter out those that do not require grad
35
+ param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}
36
+ # create optim groups. Any parameter that is 2-D will be weight decayed; all others will not.
37
+ # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.
38
+ decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
39
+ nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
40
+ optim_groups = [
41
+ {'params': decay_params, 'weight_decay': weight_decay},
42
+ {'params': nodecay_params, 'weight_decay': 0.0}
43
+ ]
44
+ num_decay_params = sum(p.numel() for p in decay_params)
45
+ num_nodecay_params = sum(p.numel() for p in nodecay_params)
46
+ logger.info(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
47
+ logger.info(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
48
+ # Create AdamW optimizer and use the fused version if it is available
49
+ fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
50
+ extra_args = dict(fused=True) if fused_available else dict()
51
+ optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
52
+ logger.info(f"using fused AdamW: {fused_available}")
53
+ return optimizer
54
+
55
+
56
+ #################################################################################
57
+ # Training Loop #
58
+ #################################################################################
59
+ def main(args):
60
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
61
+
62
+ # Setup DDP:
63
+ init_distributed_mode(args)
64
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
65
+ rank = dist.get_rank()
66
+ device = rank % torch.cuda.device_count()
67
+ seed = args.global_seed * dist.get_world_size() + rank
68
+ torch.manual_seed(seed)
69
+ torch.cuda.set_device(device)
70
+
71
+ # Setup an experiment folder:
72
+ if rank == 0:
73
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
74
+ experiment_index = len(glob(f"{args.results_dir}/*"))
75
+ model_string_name = args.gpt_model.replace("/", "-") # e.g., GPT-XL/2 --> GPT-XL-2 (for naming folders)
76
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}" # Create an experiment folder
77
+ checkpoint_dir = f"{experiment_dir}/checkpoints" # Stores saved model checkpoints
78
+ os.makedirs(checkpoint_dir, exist_ok=True)
79
+ logger = create_logger(experiment_dir)
80
+ logger.info(f"Experiment directory created at {experiment_dir}")
81
+
82
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
83
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
84
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
85
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
86
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
87
+
88
+ else:
89
+ logger = create_logger(None)
90
+
91
+ # training args
92
+ logger.info(f"{args}")
93
+
94
+ # training env
95
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
96
+
97
+ # Setup model
98
+ if args.drop_path_rate > 0.0:
99
+ dropout_p = 0.0
100
+ else:
101
+ dropout_p = args.dropout_p
102
+ latent_size = args.image_size // args.downsample_size
103
+ model = GPT_models[args.gpt_model](
104
+ vocab_size=args.vocab_size,
105
+ block_size=latent_size ** 2,
106
+ num_classes=args.num_classes,
107
+ cls_token_num=args.cls_token_num,
108
+ model_type=args.gpt_type,
109
+ resid_dropout_p=dropout_p,
110
+ ffn_dropout_p=dropout_p,
111
+ drop_path_rate=args.drop_path_rate,
112
+ token_dropout_p=args.token_dropout_p,
113
+ condition_token_num=args.condition_token_num,
114
+ ).to(device)
115
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
116
+
117
+ if args.ema:
118
+ ema = deepcopy(model).to(device) # Create an EMA of the model for use after training
119
+ requires_grad(ema, False)
120
+ logger.info(f"EMA Parameters: {sum(p.numel() for p in ema.parameters()):,}")
121
+
122
+ # Setup optimizer
123
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
124
+
125
+ # Setup data:
126
+ dataset = build_dataset(args)
127
+ sampler = DistributedSampler(
128
+ dataset,
129
+ num_replicas=dist.get_world_size(),
130
+ rank=rank,
131
+ shuffle=True,
132
+ seed=args.global_seed
133
+ )
134
+ loader = DataLoader(
135
+ dataset,
136
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
137
+ shuffle=False,
138
+ sampler=sampler,
139
+ num_workers=args.num_workers,
140
+ pin_memory=True,
141
+ drop_last=True
142
+ )
143
+ flip_info = 'with' if dataset.flip else 'without'
144
+ aug_info = 10 if 'ten_crop' in dataset.feature_dir else 1
145
+ aug_info = 2 * aug_info if dataset.aug_feature_dir is not None else aug_info
146
+ logger.info(f"Dataset contains {len(dataset):,} images ({args.code_path}) "
147
+ f"{flip_info} flip augmentation and {aug_info} crop augmentation")
148
+
149
+ # Prepare models for training:
150
+ if args.gpt_ckpt:
151
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
152
+ model.load_state_dict(checkpoint["model"],strict=False)
153
+ if args.ema:
154
+ ema.load_state_dict(checkpoint["ema"] if "ema" in checkpoint else checkpoint["model"])
155
+ #optimizer.load_state_dict(checkpoint["optimizer"])
156
+ train_steps = 0  # checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
157
+ start_epoch = 0  # int(train_steps / int(len(dataset) / args.global_batch_size))
158
+ train_steps = 0  # int(start_epoch * int(len(dataset) / args.global_batch_size))
159
+ del checkpoint
160
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
161
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
162
+ else:
163
+ train_steps = 0
164
+ start_epoch = 0
165
+ if args.ema:
166
+ update_ema(ema, model, decay=0) # Ensure EMA is initialized with synced weights
167
+
168
+ if not args.no_compile:
169
+ logger.info("compiling the model... (may take several minutes)")
170
+ # note: no VQ model is instantiated in this script, so only the GPT model is compiled here
171
+ model = torch.compile(model) # requires PyTorch 2.0
172
+
173
+ model = DDP(model.to(device), device_ids=[args.gpu],find_unused_parameters=True)
174
+ model.train() # important! This enables embedding dropout for classifier-free guidance
175
+ if args.ema:
176
+ ema.eval() # EMA model should always be in eval mode
177
+
178
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
179
+ # initialize a GradScaler. If enabled=False scaler is a no-op
180
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
181
+ # Variables for monitoring/logging purposes:
182
+ log_steps = 0
183
+ running_loss = 0
184
+ start_time = time.time()
185
+
186
+ logger.info(f"Training for {args.epochs} epochs...")
187
+ for epoch in range(start_epoch, args.epochs):
188
+ sampler.set_epoch(epoch)
189
+ logger.info(f"Beginning epoch {epoch}...")
190
+ for batch in loader:
191
+ x = batch['img_code']
192
+ y = batch['labels']
193
+ condition_img = batch['condition_imgs']
194
+ x = x.to(device, non_blocking=True)
195
+ y = y.to(device, non_blocking=True)
196
+ condition_img = condition_img.to(device, non_blocking=True)
197
+ z_indices = x.reshape(x.shape[0], -1)
198
+ c_indices = y.reshape(-1)
199
+ assert z_indices.shape[0] == c_indices.shape[0]
200
+ with torch.cuda.amp.autocast(dtype=ptdtype):
201
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices, condition=condition_img.repeat(1,3,1,1).to(ptdtype))
202
+ # backward pass, with gradient scaling if training in fp16
203
+ scaler.scale(loss).backward()
204
+ if args.max_grad_norm != 0.0:
205
+ scaler.unscale_(optimizer)
206
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
207
+ # step the optimizer and scaler if training in fp16
208
+ scaler.step(optimizer)
209
+ scaler.update()
210
+ # flush the gradients as soon as we can, no need for this memory anymore
211
+ optimizer.zero_grad(set_to_none=True)
212
+ if args.ema:
213
+ update_ema(ema, model.module._orig_mod if not args.no_compile else model.module)
214
+
215
+ # Log loss values:
216
+ running_loss += loss.item()
217
+ log_steps += 1
218
+ train_steps += 1
219
+ if train_steps % args.log_every == 0:
220
+ # Measure training speed:
221
+ torch.cuda.synchronize()
222
+ end_time = time.time()
223
+ steps_per_sec = log_steps / (end_time - start_time)
224
+ # Reduce loss history over all processes:
225
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
226
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
227
+ avg_loss = avg_loss.item() / dist.get_world_size()
228
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
229
+ # Reset monitoring variables:
230
+ running_loss = 0
231
+ log_steps = 0
232
+ start_time = time.time()
233
+
234
+ # Save checkpoint:
235
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
236
+ if rank == 0:
237
+ if not args.no_compile:
238
+ model_weight = model.module._orig_mod.state_dict()
239
+ else:
240
+ model_weight = model.module.state_dict()
241
+ checkpoint = {
242
+ "model": model_weight,
243
+ "steps": train_steps,
244
+ "args": args
245
+ }
246
+ if args.ema:
247
+ checkpoint["ema"] = ema.state_dict()
248
+ # if not args.no_local_save:
249
+ # checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
250
+ # torch.save(checkpoint, checkpoint_path)
251
+ # logger.info(f"Saved checkpoint to {checkpoint_path}")
252
+
253
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
254
+ torch.save(checkpoint, cloud_checkpoint_path)
255
+ logger.info(f"Saved checkpoint to {cloud_checkpoint_path}")
256
+ dist.barrier()
257
+ model.eval() # important! This disables randomized embedding dropout
258
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
259
+
260
+ logger.info("Done!")
261
+ dist.destroy_process_group()
262
+
263
+
264
+
265
+ if __name__ == "__main__":
266
+ parser = argparse.ArgumentParser()
267
+ parser.add_argument("--code-path", type=str, required=True)
268
+ parser.add_argument("--cloud-save-path", type=str, required=True, help='please specify a cloud disk path, if not, local path')
269
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
270
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-B")
271
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
272
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="c2i", help="class-conditional or text-conditional")
273
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
274
+ parser.add_argument("--ema", action='store_true', help="whether using ema training")
275
+ parser.add_argument("--cls-token-num", type=int, default=1, help="max token number of condition input")
276
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
277
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
278
+ parser.add_argument("--drop-path-rate", type=float, default=0.0, help="using stochastic depth decay")
279
+ parser.add_argument("--no-compile", action='store_true', default=True)
280
+ parser.add_argument("--results-dir", type=str, default="results")
281
+ parser.add_argument("--dataset", type=str, default='imagenet_code')
282
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
283
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
284
+ parser.add_argument("--num-classes", type=int, default=1000)
285
+ parser.add_argument("--epochs", type=int, default=20)
286
+ parser.add_argument("--lr", type=float, default=1e-4)
287
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use")
288
+ parser.add_argument("--beta1", type=float, default=0.9, help="beta1 parameter for the Adam optimizer")
289
+ parser.add_argument("--beta2", type=float, default=0.95, help="beta2 parameter for the Adam optimizer")
290
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
291
+ parser.add_argument("--global-batch-size", type=int, default=256)
292
+ parser.add_argument("--global-seed", type=int, default=0)
293
+ parser.add_argument("--num-workers", type=int, default=24)
294
+ parser.add_argument("--log-every", type=int, default=100)
295
+ parser.add_argument("--ckpt-every", type=int, default=25000)
296
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
297
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
298
+ parser.add_argument("--condition-type", type=str, default='canny', choices=["depth", "canny"])
299
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
300
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
301
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
302
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for resume training")
303
+ parser.add_argument("--condition-token-num", type=int, default=0)
304
+ parser.add_argument("--get-condition-img", type=bool, default=False)
305
+ args = parser.parse_args()
306
+ main(args)
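For clarity, a schematic of the mixed-precision step used in the training loops above: autocast forward, scaled backward, unscale before gradient clipping, then step and update. It assumes a CUDA device and uses a toy linear model, not the project's GPT:

import torch
import torch.nn as nn

model = nn.Linear(16, 16).cuda()
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
scaler = torch.cuda.amp.GradScaler(enabled=True)  # a no-op when enabled=False (bf16/fp32 paths)

for _ in range(3):
    x = torch.randn(4, 16, device="cuda")
    with torch.cuda.amp.autocast(dtype=torch.float16):
        loss = model(x).pow(2).mean()
    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)                            # so clipping sees true gradient magnitudes
    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
    scaler.step(optimizer)                                # skips the step if gradients overflowed
    scaler.update()
    optimizer.zero_grad(set_to_none=True)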
autoregressive/train/train_c2i_depth.py ADDED
@@ -0,0 +1,314 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/train.py
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT/blob/master/model.py
4
+ import torch
5
+ torch.backends.cuda.matmul.allow_tf32 = True
6
+ torch.backends.cudnn.allow_tf32 = True
7
+ import torch.distributed as dist
8
+ from torch.nn.parallel import DistributedDataParallel as DDP
9
+ from torch.utils.data import DataLoader
10
+ from torch.utils.data.distributed import DistributedSampler
11
+ from glob import glob
12
+ from copy import deepcopy
13
+ import os
14
+ import time
15
+ import inspect
16
+ import argparse
17
+ import sys
18
+ current_directory = os.getcwd()
19
+ sys.path.append(current_directory)
20
+ from utils.logger import create_logger
21
+ from utils.distributed import init_distributed_mode
22
+ from utils.ema import update_ema, requires_grad
23
+ from dataset.build import build_dataset
24
+ from autoregressive.models.gpt import GPT_models
25
+ # from autoregressive.models.gpt_cross import GPT_models
26
+ from tokenizer.tokenizer_image.vq_model import VQ_models
27
+ from autoregressive.models.generate import sample
28
+ from condition.hed import HEDdetector
29
+ import torch.nn.functional as F
30
+ #################################################################################
31
+ # Training Helper Functions #
32
+ #################################################################################
33
+ def creat_optimizer(model, weight_decay, learning_rate, betas, logger):
34
+ # start with all of the candidate parameters
35
+ param_dict = {pn: p for pn, p in model.named_parameters()}
36
+ # filter out those that do not require grad
37
+ param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}
38
+ # create optim groups. Any parameter that is 2-D will be weight decayed; all others will not.
39
+ # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.
40
+ decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
41
+ nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
42
+ optim_groups = [
43
+ {'params': decay_params, 'weight_decay': weight_decay},
44
+ {'params': nodecay_params, 'weight_decay': 0.0}
45
+ ]
46
+ num_decay_params = sum(p.numel() for p in decay_params)
47
+ num_nodecay_params = sum(p.numel() for p in nodecay_params)
48
+ logger.info(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
49
+ logger.info(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
50
+ # Create AdamW optimizer and use the fused version if it is available
51
+ fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
52
+ extra_args = dict(fused=True) if fused_available else dict()
53
+ optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
54
+ logger.info(f"using fused AdamW: {fused_available}")
55
+ return optimizer
56
+
57
+ #################################################################################
58
+ # Training Loop #
59
+ #################################################################################
60
+ def main(args):
61
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
62
+
63
+ # Setup DDP:
64
+ init_distributed_mode(args)
65
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
66
+ rank = dist.get_rank()
67
+ device = rank % torch.cuda.device_count()
68
+ seed = args.global_seed * dist.get_world_size() + rank
69
+ torch.manual_seed(seed)
70
+ torch.cuda.set_device(device)
71
+
72
+ # Setup an experiment folder:
73
+ if rank == 0:
74
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
75
+ experiment_index = len(glob(f"{args.results_dir}/*"))
76
+ model_string_name = args.gpt_model.replace("/", "-") # e.g., GPT-XL/2 --> GPT-XL-2 (for naming folders)
77
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}" # Create an experiment folder
78
+ checkpoint_dir = f"{experiment_dir}/checkpoints" # Stores saved model checkpoints
79
+ os.makedirs(checkpoint_dir, exist_ok=True)
80
+ logger = create_logger(experiment_dir)
81
+ logger.info(f"Experiment directory created at {experiment_dir}")
82
+
83
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
84
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
85
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
86
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
87
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
88
+
89
+ else:
90
+ logger = create_logger(None)
91
+
92
+ # training args
93
+ logger.info(f"{args}")
94
+
95
+ # training env
96
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
97
+
98
+
99
+ # Setup model
100
+ if args.drop_path_rate > 0.0:
101
+ dropout_p = 0.0
102
+ else:
103
+ dropout_p = args.dropout_p
104
+ latent_size = args.image_size // args.downsample_size
105
+ model = GPT_models[args.gpt_model](
106
+ vocab_size=args.vocab_size,
107
+ block_size=latent_size ** 2,
108
+ num_classes=args.num_classes,
109
+ cls_token_num=args.cls_token_num,
110
+ model_type=args.gpt_type,
111
+ resid_dropout_p=dropout_p,
112
+ ffn_dropout_p=dropout_p,
113
+ drop_path_rate=args.drop_path_rate,
114
+ token_dropout_p=args.token_dropout_p,
115
+ condition_token_num=args.condition_token_num,
116
+ image_size=args.image_size,
117
+ ).to(device)
118
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
119
+
120
+ if args.ema:
121
+ ema = deepcopy(model).to(device) # Create an EMA of the model for use after training
122
+ requires_grad(ema, False)
123
+ logger.info(f"EMA Parameters: {sum(p.numel() for p in ema.parameters()):,}")
124
+
125
+
126
+ # Setup optimizer
127
+ optimizer = creat_optimizer(model, args.weight_decay, args.lr, (args.beta1, args.beta2), logger)
128
+
129
+ # Setup data:
130
+ dataset = build_dataset(args)
131
+ sampler = DistributedSampler(
132
+ dataset,
133
+ num_replicas=dist.get_world_size(),
134
+ rank=rank,
135
+ shuffle=True,
136
+ seed=args.global_seed
137
+ )
138
+ loader = DataLoader(
139
+ dataset,
140
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
141
+ shuffle=False,
142
+ sampler=sampler,
143
+ num_workers=args.num_workers,
144
+ pin_memory=True,
145
+ drop_last=True
146
+ )
147
+ flip_info = 'with' if dataset.flip else 'without'
148
+ aug_info = 10 if 'ten_crop' in dataset.feature_dir else 1
149
+ aug_info = 2 * aug_info if dataset.aug_feature_dir is not None else aug_info
150
+ logger.info(f"Dataset contains {len(dataset):,} images ({args.code_path}) "
151
+ f"{flip_info} flip augmentation and {aug_info} crop augmentation")
152
+
153
+ # Prepare models for training:
154
+ if args.gpt_ckpt:
155
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
156
+ model.load_state_dict(checkpoint["model"],strict=False)
157
+ if args.ema:
158
+ ema.load_state_dict(checkpoint["ema"] if "ema" in checkpoint else checkpoint["model"])
159
+ train_steps = 0#checkpoint["steps"] if "steps" in checkpoint else int(args.gpt_ckpt.split('/')[-1].split('.')[0])
160
+ start_epoch = 0#int(train_steps / int(len(dataset) / args.global_batch_size))
161
+ train_steps = 0#int(start_epoch * int(len(dataset) / args.global_batch_size))
162
+ del checkpoint
163
+ logger.info(f"Resume training from checkpoint: {args.gpt_ckpt}")
164
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
165
+ else:
166
+ train_steps = 0
167
+ start_epoch = 0
168
+ if args.ema:
169
+ update_ema(ema, model, decay=0) # Ensure EMA is initialized with synced weights
170
+
171
+ if not args.no_compile:
172
+ logger.info("compiling the model... (may take several minutes)")
173
+ model = torch.compile(model) # requires PyTorch 2.0
174
+
175
+
176
+
177
+ model = DDP(model.to(device), device_ids=[args.gpu],find_unused_parameters=True)
178
+ model.train() # important! This enables embedding dropout for classifier-free guidance
179
+ if args.ema:
180
+ ema.eval() # EMA model should always be in eval mode
181
+
182
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
183
+ # initialize a GradScaler. If enabled=False scaler is a no-op
184
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
185
+ # Variables for monitoring/logging purposes:
186
+ log_steps = 0
187
+ running_loss = 0
188
+ start_time = time.time()
189
+ initial_params = deepcopy(model.module.condition_embeddings.weight)  # use the imported deepcopy; the bare `copy` module is not imported
190
+ logger.info(f"Training for {args.epochs} epochs...")
191
+
192
+
193
+ for epoch in range(start_epoch, args.epochs):
194
+ sampler.set_epoch(epoch)
195
+ logger.info(f"Beginning epoch {epoch}...")
196
+ for batch in loader:
197
+ x = batch['img_code']
198
+ y = batch['labels']
199
+ condition_img = batch['condition_imgs']
200
+ x = x.to(device, non_blocking=True)
201
+ y = y.to(device, non_blocking=True)
202
+ condition_img = condition_img.to(device, non_blocking=True).repeat(1,3,1,1)
203
+ z_indices = x.reshape(x.shape[0], -1)
204
+ c_indices = y.reshape(-1)
205
+ batchsize = y.shape[0]
206
+ assert z_indices.shape[0] == c_indices.shape[0]
207
+ with torch.cuda.amp.autocast(dtype=ptdtype):
208
+ pred, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices, condition=condition_img.to(ptdtype))
209
+
210
+ # backward pass, with gradient scaling if training in fp16
211
+ scaler.scale(loss).backward()
212
+ if args.max_grad_norm != 0.0:
213
+ scaler.unscale_(optimizer)
214
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
215
+ # step the optimizer and scaler if training in fp16
216
+ scaler.step(optimizer)
217
+ scaler.update()
218
+ # flush the gradients as soon as we can, no need for this memory anymore
219
+ optimizer.zero_grad(set_to_none=True)
220
+ if args.ema:
221
+ update_ema(ema, model.module._orig_mod if not args.no_compile else model.module)
222
+
223
+ # Log loss values:
224
+ running_loss += loss.item()
225
+ log_steps += 1
226
+ train_steps += 1
227
+ if train_steps % args.log_every == 0:
228
+ # Measure training speed:
229
+ torch.cuda.synchronize()
230
+ end_time = time.time()
231
+ steps_per_sec = log_steps / (end_time - start_time)
232
+ # Reduce loss history over all processes:
233
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
234
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
235
+ avg_loss = avg_loss.item() / dist.get_world_size()
236
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
237
+ # Reset monitoring variables:
238
+ running_loss = 0
239
+ log_steps = 0
240
+ start_time = time.time()
241
+
242
+ # Save checkpoint:
243
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
244
+ if rank == 0:
245
+ if not args.no_compile:
246
+ model_weight = model.module._orig_mod.state_dict()
247
+ else:
248
+ model_weight = model.module.state_dict()
249
+ checkpoint = {
250
+ "model": model_weight,
251
+ "steps": train_steps,
252
+ "args": args
253
+ }
254
+ if args.ema:
255
+ checkpoint["ema"] = ema.state_dict()
256
+ # if not args.no_local_save:
257
+ # checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
258
+ # torch.save(checkpoint, checkpoint_path)
259
+ # logger.info(f"Saved checkpoint to {checkpoint_path}")
260
+
261
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
262
+ torch.save(checkpoint, cloud_checkpoint_path)
263
+ logger.info(f"Saved checkpoint to {cloud_checkpoint_path}")
264
+ dist.barrier()
265
+ model.eval() # important! This disables randomized embedding dropout
266
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
267
+
268
+ logger.info("Done!")
269
+ dist.destroy_process_group()
270
+
271
+
272
+
273
+ if __name__ == "__main__":
274
+ parser = argparse.ArgumentParser()
275
+ parser.add_argument("--code-path", type=str, required=True)
276
+ parser.add_argument("--cloud-save-path", type=str, required=True, help='please specify a cloud disk path, if not, local path')
277
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
278
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-B")
279
+ parser.add_argument("--gpt-ckpt", type=str, default=None, help="ckpt path for resume training")
280
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="c2i", help="class-conditional or text-conditional")
281
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
282
+ parser.add_argument("--ema", action='store_true', help="whether using ema training")
283
+ parser.add_argument("--cls-token-num", type=int, default=1, help="max token number of condition input")
284
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
285
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
286
+ parser.add_argument("--drop-path-rate", type=float, default=0.0, help="using stochastic depth decay")
287
+ parser.add_argument("--no-compile", action='store_true', default=True)
288
+ parser.add_argument("--results-dir", type=str, default="results")
289
+ parser.add_argument("--dataset", type=str, default='imagenet_code')
290
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
291
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
292
+ parser.add_argument("--num-classes", type=int, default=1000)
293
+ parser.add_argument("--epochs", type=int, default=15)
294
+ parser.add_argument("--lr", type=float, default=1e-4)
295
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use")
296
+ parser.add_argument("--beta1", type=float, default=0.9, help="beta1 parameter for the Adam optimizer")
297
+ parser.add_argument("--beta2", type=float, default=0.95, help="beta2 parameter for the Adam optimizer")
298
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
299
+ parser.add_argument("--global-batch-size", type=int, default=256)
300
+ parser.add_argument("--global-seed", type=int, default=0)
301
+ parser.add_argument("--num-workers", type=int, default=24)
302
+ parser.add_argument("--log-every", type=int, default=100)
303
+ parser.add_argument("--ckpt-every", type=int, default=25000)
304
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
305
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
306
+ parser.add_argument("--condition-type", type=str, default='depth', choices=["canny", "depth"])
307
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
308
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
309
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
310
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for resume training")
311
+ parser.add_argument("--condition-token-num", type=int, default=0)
312
+ parser.add_argument("--get-condition-img", type=bool, default=False)
313
+ args = parser.parse_args()
314
+ main(args)
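The EMA branch in these training scripts calls `update_ema` from `utils/ema.py`, which is not part of this upload. A sketch of the conventional update it is presumably performing (`decay=0` copies the online weights, a decay close to 1 tracks them slowly); treat this as an assumption about that helper rather than its actual code:

import torch
import torch.nn as nn

@torch.no_grad()
def ema_update(ema_model: nn.Module, model: nn.Module, decay: float = 0.9999):
    ema_params = dict(ema_model.named_parameters())
    for name, param in model.named_parameters():
        ema_params[name].mul_(decay).add_(param.detach(), alpha=1.0 - decay)

if __name__ == "__main__":
    net = nn.Linear(4, 4)
    ema = nn.Linear(4, 4)
    ema_update(ema, net, decay=0.0)                 # decay=0: copy weights, as done right after model creation
    print(torch.allclose(ema.weight, net.weight))   # True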
autoregressive/train/train_c2i_fsdp.py ADDED
@@ -0,0 +1,390 @@
1
+ # Modified from:
2
+ # Large-DiT: https://github.com/Alpha-VLLM/LLaMA2-Accessory/blob/main/Large-DiT-ImageNet/train.py
3
+ import torch
4
+ torch.backends.cuda.matmul.allow_tf32 = True
5
+ torch.backends.cudnn.allow_tf32 = True
6
+ import torch.nn as nn
7
+ import torch.distributed as dist
8
+ from torch.utils.data import DataLoader
9
+ from torch.utils.data.distributed import DistributedSampler
10
+ from torch.distributed.fsdp import (
11
+ FullyShardedDataParallel as FSDP,
12
+ ShardingStrategy, MixedPrecision, StateDictType, FullStateDictConfig
13
+ )
14
+ from torch.distributed.fsdp.wrap import lambda_auto_wrap_policy, size_based_auto_wrap_policy
15
+
16
+ import os
17
+ import time
18
+ import inspect
19
+ import functools
20
+ import argparse
21
+ import contextlib
22
+ from glob import glob
23
+ import wandb
24
+
25
+ from utils.logger import create_logger
26
+ from dataset.build import build_dataset
27
+ from autoregressive.models.gpt import GPT_models
28
+
29
+
30
+
31
+ def setup_fsdp_sync(model: nn.Module, args: argparse.Namespace, device) -> FSDP:
32
+ model = FSDP(
33
+ model,
34
+ auto_wrap_policy=functools.partial(
35
+ lambda_auto_wrap_policy,
36
+ lambda_fn=lambda m: m in model.get_fsdp_wrap_module_list(),
37
+ ),
38
+ # auto_wrap_policy=size_based_auto_wrap_policy,
39
+ # process_group=fs_init.get_data_parallel_group(),
40
+ device_id=device,
41
+ sharding_strategy={
42
+ "fsdp": ShardingStrategy.FULL_SHARD,
43
+ "sdp": ShardingStrategy.SHARD_GRAD_OP,
44
+ "hsdp": ShardingStrategy.HYBRID_SHARD,
45
+ }[args.data_parallel],
46
+ mixed_precision=MixedPrecision(
47
+ param_dtype={
48
+ "fp32": torch.float, "tf32": torch.float,
49
+ "bf16": torch.bfloat16, "fp16": torch.float16,
50
+ }[args.mixed_precision],
51
+ reduce_dtype={
52
+ "fp32": torch.float, "tf32": torch.float,
53
+ "bf16": torch.bfloat16, "fp16": torch.float16,
54
+ }[args.grad_precision or args.mixed_precision],
55
+ ),
56
+ sync_module_states=True,
57
+ limit_all_gathers=True,
58
+ use_orig_params=True,
59
+ )
60
+
61
+ torch.cuda.synchronize()
62
+
63
+ return model
64
+
65
+
66
+
67
+ def creat_optimizer_by_name(model, weight_decay, learning_rate, betas, global_rank, logger):
68
+ # start with all of the candidate parameters
69
+ all_param_dict = {pn: p for pn, p in model.named_parameters()}
70
+ # filter out those that do not require grad
71
+ param_dict = {pn: p for pn, p in all_param_dict.items() if p.requires_grad}
72
+
73
+ # create optim groups.
74
+ # Any parameters that is 2D will be weight decayed, otherwise no.
75
+ # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.
76
+
77
+ # decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
78
+ # nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
79
+
80
+ # model params are flatten by fsdp, we need to set the params by its name
81
+ decay_params = [p for n, p in param_dict.items() if 'norm' not in n]
82
+ nodecay_params = [p for n, p in param_dict.items() if 'norm' in n]
83
+ optim_groups = [
84
+ {'params': decay_params, 'weight_decay': weight_decay},
85
+ {'params': nodecay_params, 'weight_decay': 0.0}
86
+ ]
87
+ num_decay_params = sum(p.numel() for p in decay_params)
88
+ num_nodecay_params = sum(p.numel() for p in nodecay_params)
89
+ logger.info(f"(rank {global_rank}) num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
90
+ logger.info(f"(rank {global_rank}) num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
91
+ print(f"(rank {global_rank}) num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
92
+ print(f"(rank {global_rank}) num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
93
+ # Create AdamW optimizer and use the fused version if it is available
94
+ fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
95
+ extra_args = dict(fused=True) if fused_available else dict()
96
+ optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
97
+ logger.info(f"using fused AdamW: {fused_available}")
98
+ return optimizer
99
+
100
+
101
+
102
+ def main(args):
103
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
104
+ assert args.gpt_type == 'c2i', "FSDP only supports c2i currently."
105
+ # =======================================
106
+ # Initialize Distributed Training
107
+ # =======================================
108
+ dist.init_process_group("nccl")
109
+ # init_distributed_mode(args)
110
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
111
+ global_rank = dist.get_rank()
112
+ device = global_rank % torch.cuda.device_count()
113
+ seed = args.global_seed * dist.get_world_size() + global_rank
114
+ torch.manual_seed(seed)
115
+ torch.cuda.set_device(device)
116
+ print(f"Starting rank={global_rank}, device={device}, seed={seed}, world_size={dist.get_world_size()}.")
117
+
118
+
119
+ # =======================================
120
+ # Initialize logger and wandb
121
+ # =======================================
122
+ timestamp = None
123
+ if global_rank == 0:
124
+ timestamp = time.localtime()
125
+ timestamp = int(time.strftime("%Y%m%d%H%M%S", timestamp))
126
+ # Convert timestamp to a tensor for broadcasting
127
+ timestamp_tensor = torch.tensor([timestamp] if timestamp is not None else [0.0], dtype=torch.double).to(device)
128
+ # Broadcast the timestamp to all processes
129
+ dist.broadcast(timestamp_tensor, src=0)
130
+ # All processes receive the timestamp
131
+ timestamp = int(timestamp_tensor.item())
132
+ model_string_name = args.gpt_model.replace("/", "-") # e.g., GPT/XL --> GPT-XL (for naming folders)
133
+ experiment_dir = f"{args.results_dir}/{timestamp}-{model_string_name}"
134
+ cloud_checkpoint_dir = f"{args.cloud_save_path}/{timestamp}-{model_string_name}"
135
+ if global_rank == 0:
136
+ os.makedirs(experiment_dir, exist_ok=True) # in each local machine
137
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True) # in one shared file storage
138
+ logger = create_logger(experiment_dir)
139
+ else:
140
+ logger = create_logger(None)
141
+ logger.info(f"Experiment directory created at {experiment_dir}")
142
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
143
+
144
+ # training args
145
+ logger.info(f"{args}")
146
+
147
+ # wandb
148
+ if not args.no_wandb and global_rank == 0:
149
+ os.environ["WANDB_DIR"] = experiment_dir
150
+ wandb.init(
151
+ project=args.wandb_project,
152
+ name = f"{timestamp}-{model_string_name}",
153
+ config=vars(args)
154
+ )
155
+
156
+
157
+ # ======================================================
158
+ # Initialize model and resume
159
+ # ======================================================
160
+ if args.drop_path_rate > 0.0:
161
+ dropout_p = 0.0
162
+ else:
163
+ dropout_p = args.dropout_p
164
+ latent_size = args.image_size // args.downsample_size
165
+ model = GPT_models[args.gpt_model](
166
+ vocab_size=args.vocab_size,
167
+ block_size=latent_size ** 2,
168
+ num_classes=args.num_classes,
169
+ cls_token_num=args.cls_token_num,
170
+ model_type=args.gpt_type,
171
+ resid_dropout_p=dropout_p,
172
+ ffn_dropout_p=dropout_p,
173
+ drop_path_rate=args.drop_path_rate,
174
+ token_dropout_p=args.token_dropout_p,
175
+ ).to(device)
176
+ logger.info(f"GPT Parameters: {sum(p.numel() for p in model.parameters()):,}")
177
+
178
+ if args.gpt_resume:
179
+ if global_rank == 0: # other ranks receive weights in setup_fsdp_sync
180
+ logger.info(f"Resuming model weights from: {args.gpt_resume}")
181
+ model.load_state_dict(torch.load(os.path.join(
182
+ args.gpt_resume, "consolidated.pth",
183
+ ), map_location="cpu"), strict=True)
184
+
185
+ model = setup_fsdp_sync(model, args, device)
186
+
187
+
188
+ # ======================================================
189
+ # Initialize optimizer and resume
190
+ # ======================================================
191
+ optimizer = creat_optimizer_by_name(model, args.weight_decay, args.lr, (args.beta1, args.beta2), global_rank, logger)
192
+ if args.gpt_resume:
193
+ opt_state_world_size = len([
194
+ x for x in os.listdir(args.gpt_resume)
195
+ if x.startswith("optimizer.") and x.endswith(".pth")
196
+ ])
197
+ assert opt_state_world_size == dist.get_world_size(), (
198
+ f"Resuming from a checkpoint with unmatched world size "
199
+ f"({dist.get_world_size()} vs. {opt_state_world_size}) "
200
+ f"is currently not supported."
201
+ )
202
+ logger.info(f"Resuming optimizer states from: {args.gpt_resume}")
203
+ optimizer.load_state_dict(torch.load(os.path.join(
204
+ args.gpt_resume,
205
+ f"optimizer.{dist.get_rank():05d}-of-"
206
+ f"{dist.get_world_size():05d}.pth",
207
+ ), map_location="cpu"))
208
+
209
+
210
+
211
+ # ======================================================
212
+ # Initialize Dataloader
213
+ # ======================================================
214
+ dataset = build_dataset(args)
215
+ sampler = DistributedSampler(
216
+ dataset,
217
+ num_replicas=dist.get_world_size(),
218
+ rank=global_rank,
219
+ shuffle=True,
220
+ seed=args.global_seed
221
+ )
222
+ loader = DataLoader(
223
+ dataset,
224
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
225
+ shuffle=False,
226
+ sampler=sampler,
227
+ num_workers=args.num_workers,
228
+ pin_memory=True,
229
+ drop_last=True
230
+ )
231
+ flip_info = 'with' if dataset.flip else 'without'
232
+ aug_info = 10 if 'ten_crop' in dataset.feature_dir else 1
233
+ aug_info = 2 * aug_info if dataset.aug_feature_dir is not None else aug_info
234
+ logger.info(f"Dataset contains {len(dataset):,} images ({args.code_path}) "
235
+ f"{flip_info} flip augmentation and {aug_info} crop augmentation")
236
+
237
+
238
+
239
+ # ======================================================
240
+ # Start training !!!
241
+ # ======================================================
242
+ if args.gpt_resume:
243
+ with open(os.path.join(args.gpt_resume, "resume_step.txt")) as f:
244
+ train_steps = int(f.read().strip())
245
+ start_epoch = int(train_steps / int(len(dataset) / args.global_batch_size))
246
+ train_steps = int(start_epoch * int(len(dataset) / args.global_batch_size))
247
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
248
+ else:
249
+ train_steps = 0
250
+ start_epoch = 0
251
+
252
+ model.train() # important! This enables embedding dropout for classifier-free guidance
253
+
254
+ # Variables for monitoring/logging purposes:
255
+ log_steps = 0
256
+ running_loss = 0
257
+ start_time = time.time()
258
+
259
+ logger.info(f"Training for {args.epochs} epochs...")
260
+ for epoch in range(start_epoch, args.epochs):
261
+ sampler.set_epoch(epoch)
262
+ logger.info(f"Beginning epoch {epoch}...")
263
+ for x, y in loader:
264
+ x = x.to(device, non_blocking=True)
265
+ y = y.to(device, non_blocking=True)
266
+ z_indices = x.reshape(x.shape[0], -1)
267
+ c_indices = y.reshape(-1)
268
+ assert z_indices.shape[0] == c_indices.shape[0]
269
+
270
+ optimizer.zero_grad()
271
+ with {
272
+ "bf16": torch.cuda.amp.autocast(dtype=torch.bfloat16),
273
+ "fp16": torch.cuda.amp.autocast(dtype=torch.float16),
274
+ "fp32": contextlib.nullcontext(),
275
+ "tf32": contextlib.nullcontext(),
276
+ }[args.mixed_precision]:
277
+ _, loss = model(cond_idx=c_indices, idx=z_indices[:,:-1], targets=z_indices)
278
+ loss.backward()
279
+
280
+ if args.max_grad_norm != 0.0:
281
+ # according to https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.FullyShardedDataParallel.clip_grad_norm_
282
+ # torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
283
+ model.clip_grad_norm_(args.max_grad_norm)
284
+ optimizer.step()
285
+
286
+
287
+ # Log loss values:
288
+ running_loss += loss.item()
289
+ log_steps += 1
290
+ train_steps += 1
291
+ if train_steps % args.log_every == 0:
292
+ # Measure training speed:
293
+ torch.cuda.synchronize()
294
+ end_time = time.time()
295
+ steps_per_sec = log_steps / (end_time - start_time)
296
+ # Reduce loss history over all processes:
297
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
298
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
299
+ avg_loss = avg_loss.item() / dist.get_world_size()
300
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
301
+ if not args.no_wandb and global_rank == 0:
302
+ wandb.log({"train_loss": avg_loss}, step=train_steps)
303
+
304
+ # Reset monitoring variables:
305
+ running_loss = 0
306
+ log_steps = 0
307
+ start_time = time.time()
308
+
309
+
310
+ # Save checkpoint:
311
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
312
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}"
313
+ os.makedirs(cloud_checkpoint_path, exist_ok=True)
314
+
315
+ ### saving model parameters
316
+ with FSDP.state_dict_type(
317
+ model,
318
+ StateDictType.FULL_STATE_DICT,
319
+ FullStateDictConfig(rank0_only=True, offload_to_cpu=True),
320
+ ):
321
+ consolidated_model_state_dict = model.state_dict()
322
+ if global_rank == 0:
323
+ consolidated_fn = "consolidated.pth"
324
+ torch.save(consolidated_model_state_dict,
325
+ os.path.join(cloud_checkpoint_path, consolidated_fn))
326
+ dist.barrier()
327
+ del consolidated_model_state_dict
328
+ logger.info(f"Saved consolidated to {cloud_checkpoint_path}")
329
+
330
+ ### saving optimizer
331
+ opt_state_fn = (
332
+ f"optimizer.{dist.get_rank():05d}-of-"
333
+ f"{dist.get_world_size():05d}.pth"
334
+ )
335
+ torch.save(optimizer.state_dict(), os.path.join(cloud_checkpoint_path, opt_state_fn))
336
+ dist.barrier()
337
+ logger.info(f"Saved optimizer to {cloud_checkpoint_path}")
338
+
339
+ ### saving training step
340
+ if global_rank == 0:
341
+ with open(os.path.join(cloud_checkpoint_path, "resume_step.txt"), "w") as f:
342
+ print(train_steps, file=f)
343
+ dist.barrier()
344
+ logger.info(f"Saved training step to {cloud_checkpoint_path}")
345
+
346
+ model.eval() # important! This disables randomized embedding dropout
347
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
348
+
349
+ logger.info("Done!")
350
+
351
+
352
+
353
+ if __name__ == "__main__":
354
+ parser = argparse.ArgumentParser()
355
+ parser.add_argument("--code-path", type=str, required=True)
356
+ parser.add_argument("--cloud-save-path", type=str, required=True, help='please specify a cloud disk path, if not, local path')
357
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
358
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-B")
359
+ parser.add_argument("--gpt-resume", type=str, default=None, help="model, optimizer and argument path for resume training")
360
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="c2i", help="class-conditional or text-conditional")
361
+ parser.add_argument("--vocab-size", type=int, default=16384, help="vocabulary size of visual tokenizer")
362
+ parser.add_argument("--ema", action='store_true', help="whether using ema training")
363
+ parser.add_argument("--cls-token-num", type=int, default=1, help="max token number of condition input")
364
+ parser.add_argument("--dropout-p", type=float, default=0.1, help="dropout_p of resid_dropout_p and ffn_dropout_p")
365
+ parser.add_argument("--token-dropout-p", type=float, default=0.1, help="dropout_p of token_dropout_p")
366
+ parser.add_argument("--drop-path-rate", type=float, default=0.0, help="using stochastic depth decay")
367
+ parser.add_argument("--results-dir", type=str, default="results")
368
+ parser.add_argument("--dataset", type=str, default='imagenet_code')
369
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
370
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
371
+ parser.add_argument("--num-classes", type=int, default=1000)
372
+ parser.add_argument("--epochs", type=int, default=300)
373
+ parser.add_argument("--lr", type=float, default=1e-4)
374
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use")
375
+ parser.add_argument("--beta1", type=float, default=0.9, help="beta1 parameter for the Adam optimizer")
376
+ parser.add_argument("--beta2", type=float, default=0.95, help="beta2 parameter for the Adam optimizer")
377
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
378
+ parser.add_argument("--global-batch-size", type=int, default=256)
379
+ parser.add_argument("--global-seed", type=int, default=0)
380
+ parser.add_argument("--num-workers", type=int, default=24)
381
+ parser.add_argument("--log-every", type=int, default=100)
382
+ parser.add_argument("--ckpt-every", type=int, default=5000)
383
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
384
+ parser.add_argument("--mixed-precision", type=str, choices=["fp32", "tf32", "fp16", "bf16"], default='bf16')
385
+ parser.add_argument("--data-parallel", type=str, choices=["sdp", "fsdp", "hsdp"], default="fsdp")
386
+ parser.add_argument("--grad-precision", type=str, choices=["fp32", "fp16", "bf16"])
387
+ parser.add_argument("--wandb-project", type=str, default='c2i_fsdp')
388
+ parser.add_argument("--no-wandb", action='store_true')
389
+ args = parser.parse_args()
390
+ main(args)
tokenizer/tokenizer_image/__pycache__/vq_model.cpython-310.pyc ADDED
Binary file (12.4 kB).
 
tokenizer/tokenizer_image/cache/vgg.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a78928a0af1e5f0fcb1f3b9e8f8c3a2a5a3de244d830ad5c1feddc79b8432868
+ size 7289
tokenizer/tokenizer_image/vq_model_hf.py ADDED
@@ -0,0 +1,17 @@
+ from huggingface_hub import PyTorchModelHubMixin
+ 
+ from tokenizer.tokenizer_image.vq_model import ModelArgs, VQModel
+ 
+ class VQModelHF(VQModel, PyTorchModelHubMixin, repo_url="https://github.com/FoundationVision/LlamaGen", license="mit", tags=["llamagen", "text-to-image"]):
+     pass
+ 
+ #################################################################################
+ #                               VQ Model Configs                                #
+ #################################################################################
+ def VQ_8(**kwargs):
+     return VQModelHF(ModelArgs(encoder_ch_mult=[1, 2, 2, 4], decoder_ch_mult=[1, 2, 2, 4], **kwargs))
+ 
+ def VQ_16(**kwargs):
+     return VQModelHF(ModelArgs(encoder_ch_mult=[1, 1, 2, 2, 4], decoder_ch_mult=[1, 1, 2, 2, 4], **kwargs))
+ 
+ VQ_models_HF = {'VQ-16': VQ_16, 'VQ-8': VQ_8}
tokenizer/tokenizer_image/vq_train.py ADDED
@@ -0,0 +1,323 @@
1
+ # Modified from:
2
+ # fast-DiT: https://github.com/chuanyangjin/fast-DiT/blob/main/train.py
3
+ # nanoGPT: https://github.com/karpathy/nanoGPT/blob/master/model.py
4
+ import torch
5
+ # the first flag below was False when we tested this script but True makes A100 training a lot faster:
6
+ torch.backends.cuda.matmul.allow_tf32 = True
7
+ torch.backends.cudnn.allow_tf32 = True
8
+ import torch.distributed as dist
9
+ from torch.nn.parallel import DistributedDataParallel as DDP
10
+ from torch.utils.data import Dataset, DataLoader
11
+ from torch.utils.data.distributed import DistributedSampler
12
+ from torchvision.datasets import ImageFolder
13
+ from torchvision import transforms
14
+
15
+ import os
16
+ import time
17
+ import argparse
18
+ from glob import glob
19
+ from copy import deepcopy
20
+ # import sys
21
+ # sys.path.append('/data/vjuicefs_sz_cv_v2/11171709/ControlAR')
22
+ from utils.logger import create_logger
23
+ from utils.distributed import init_distributed_mode
24
+ from utils.ema import update_ema, requires_grad
25
+ from dataset.augmentation import random_crop_arr
26
+ from dataset.build import build_dataset
27
+ from tokenizer.tokenizer_image.vq_model import VQ_models
28
+ from tokenizer.tokenizer_image.vq_loss import VQLoss
29
+
30
+ import warnings
31
+ warnings.filterwarnings('ignore')
32
+
33
+ #################################################################################
34
+ # Training Loop #
35
+ #################################################################################
36
+
37
+ def main(args):
38
+ """
39
+ Trains a new model.
40
+ """
41
+ assert torch.cuda.is_available(), "Training currently requires at least one GPU."
42
+
43
+ # Setup DDP:
44
+ init_distributed_mode(args)
45
+ assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
46
+ rank = dist.get_rank()
47
+ device = rank % torch.cuda.device_count()
48
+ seed = args.global_seed * dist.get_world_size() + rank
49
+ torch.manual_seed(seed)
50
+ torch.cuda.set_device(device)
51
+
52
+ # Setup an experiment folder:
53
+ if rank == 0:
54
+ os.makedirs(args.results_dir, exist_ok=True) # Make results folder (holds all experiment subfolders)
55
+ experiment_index = len(glob(f"{args.results_dir}/*"))
56
+ model_string_name = args.vq_model.replace("/", "-")
57
+ experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}" # Create an experiment folder
58
+ checkpoint_dir = f"{experiment_dir}/checkpoints" # Stores saved model checkpoints
59
+ os.makedirs(checkpoint_dir, exist_ok=True)
60
+ logger = create_logger(experiment_dir)
61
+ logger.info(f"Experiment directory created at {experiment_dir}")
62
+
63
+ time_record = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
64
+ cloud_results_dir = f"{args.cloud_save_path}/{time_record}"
65
+ cloud_checkpoint_dir = f"{cloud_results_dir}/{experiment_index:03d}-{model_string_name}/checkpoints"
66
+ os.makedirs(cloud_checkpoint_dir, exist_ok=True)
67
+ logger.info(f"Experiment directory created in cloud at {cloud_checkpoint_dir}")
68
+
69
+ else:
70
+ logger = create_logger(None)
71
+
72
+ # training args
73
+ logger.info(f"{args}")
74
+
75
+ # training env
76
+ logger.info(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
77
+
78
+ # create and load model
79
+ vq_model = VQ_models[args.vq_model](
80
+ codebook_size=args.codebook_size,
81
+ codebook_embed_dim=args.codebook_embed_dim,
82
+ commit_loss_beta=args.commit_loss_beta,
83
+ entropy_loss_ratio=args.entropy_loss_ratio,
84
+ dropout_p=args.dropout_p,
85
+ )
86
+ logger.info(f"VQ Model Parameters: {sum(p.numel() for p in vq_model.parameters()):,}")
87
+ if args.ema:
88
+ ema = deepcopy(vq_model).to(device) # Create an EMA of the model for use after training
89
+ requires_grad(ema, False)
90
+ logger.info(f"VQ Model EMA Parameters: {sum(p.numel() for p in ema.parameters()):,}")
91
+ vq_model = vq_model.to(device)
92
+
93
+ vq_loss = VQLoss(
94
+ disc_start=args.disc_start,
95
+ disc_weight=args.disc_weight,
96
+ disc_type=args.disc_type,
97
+ disc_loss=args.disc_loss,
98
+ gen_adv_loss=args.gen_loss,
99
+ image_size=args.image_size,
100
+ perceptual_weight=args.perceptual_weight,
101
+ reconstruction_weight=args.reconstruction_weight,
102
+ reconstruction_loss=args.reconstruction_loss,
103
+ codebook_weight=args.codebook_weight,
104
+ ).to(device)
105
+ logger.info(f"Discriminator Parameters: {sum(p.numel() for p in vq_loss.discriminator.parameters()):,}")
106
+
107
+ # initialize a GradScaler. If enabled=False scaler is a no-op
108
+ scaler = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
109
+ scaler_disc = torch.cuda.amp.GradScaler(enabled=(args.mixed_precision =='fp16'))
110
+ # Setup optimizer
111
+ optimizer = torch.optim.Adam(vq_model.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
112
+ optimizer_disc = torch.optim.Adam(vq_loss.discriminator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
113
+
114
+ # Setup data:
115
+ transform = transforms.Compose([
116
+ transforms.Lambda(lambda pil_image: random_crop_arr(pil_image, args.image_size)),
117
+ transforms.RandomHorizontalFlip(),
118
+ transforms.ToTensor(),
119
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
120
+ ])
121
+ if args.dataset == 'imagenet_code':
122
+ dataset = build_dataset(args)
123
+ else:
124
+ dataset = build_dataset(args, transform=transform)
125
+ sampler = DistributedSampler(
126
+ dataset,
127
+ num_replicas=dist.get_world_size(),
128
+ rank=rank,
129
+ shuffle=True,
130
+ seed=args.global_seed
131
+ )
132
+ loader = DataLoader(
133
+ dataset,
134
+ batch_size=int(args.global_batch_size // dist.get_world_size()),
135
+ shuffle=False,
136
+ sampler=sampler,
137
+ num_workers=args.num_workers,
138
+ pin_memory=True,
139
+ drop_last=True
140
+ )
141
+ logger.info(f"Dataset contains {len(dataset):,} images ({args.data_path})")
142
+
143
+
144
+ # Prepare models for training:
145
+ if args.vq_ckpt:
146
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
147
+ vq_model.load_state_dict(checkpoint["model"])
148
+ if args.ema:
149
+ ema.load_state_dict(checkpoint["ema"])
150
+ optimizer.load_state_dict(checkpoint["optimizer"])
151
+ vq_loss.discriminator.load_state_dict(checkpoint["discriminator"])
152
+ optimizer_disc.load_state_dict(checkpoint["optimizer_disc"])
153
+ if not args.finetune:
154
+ train_steps = checkpoint["steps"] if "steps" in checkpoint else int(args.vq_ckpt.split('/')[-1].split('.')[0])
155
+ start_epoch = int(train_steps / int(len(dataset) / args.global_batch_size))
156
+ train_steps = int(start_epoch * int(len(dataset) / args.global_batch_size))
157
+ else:
158
+ train_steps = 0
159
+ start_epoch = 0
160
+ del checkpoint
161
+ logger.info(f"Resume training from checkpoint: {args.vq_ckpt}")
162
+ logger.info(f"Initial state: steps={train_steps}, epochs={start_epoch}")
163
+ else:
164
+ train_steps = 0
165
+ start_epoch = 0
166
+ if args.ema:
167
+ update_ema(ema, vq_model, decay=0) # Ensure EMA is initialized with synced weights
168
+
169
+ if args.compile:
170
+ logger.info("compiling the model... (may take several minutes)")
171
+ vq_model = torch.compile(vq_model) # requires PyTorch 2.0
172
+
173
+ vq_model = DDP(vq_model.to(device), device_ids=[args.gpu])
174
+ vq_model.train()
175
+ if args.ema:
176
+ ema.eval() # EMA model should always be in eval mode
177
+ vq_loss = DDP(vq_loss.to(device), device_ids=[args.gpu])
178
+ vq_loss.train()
179
+
180
+ ptdtype = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.mixed_precision]
181
+
182
+ # Variables for monitoring/logging purposes:
183
+ log_steps = 0
184
+ running_loss = 0
185
+ start_time = time.time()
186
+
187
+ logger.info(f"Training for {args.epochs} epochs...")
188
+ for epoch in range(start_epoch, args.epochs):
189
+ sampler.set_epoch(epoch)
190
+ logger.info(f"Beginning epoch {epoch}...")
191
+ for x, y in loader:
192
+ imgs = x.to(device, non_blocking=True)
193
+
194
+ # generator training
195
+ optimizer.zero_grad()
196
+ with torch.cuda.amp.autocast(dtype=ptdtype):
197
+ recons_imgs, codebook_loss = vq_model(imgs)
198
+ loss_gen = vq_loss(codebook_loss, imgs, recons_imgs, optimizer_idx=0, global_step=train_steps+1,
199
+ last_layer=vq_model.module.decoder.last_layer,
200
+ logger=logger, log_every=args.log_every)
201
+ scaler.scale(loss_gen).backward()
202
+ if args.max_grad_norm != 0.0:
203
+ scaler.unscale_(optimizer)
204
+ torch.nn.utils.clip_grad_norm_(vq_model.parameters(), args.max_grad_norm)
205
+ scaler.step(optimizer)
206
+ scaler.update()
207
+ if args.ema:
208
+ update_ema(ema, vq_model.module._orig_mod if args.compile else vq_model.module)
209
+
210
+ # discriminator training
211
+ optimizer_disc.zero_grad()
212
+ with torch.cuda.amp.autocast(dtype=ptdtype):
213
+ loss_disc = vq_loss(codebook_loss, imgs, recons_imgs, optimizer_idx=1, global_step=train_steps+1,
214
+ logger=logger, log_every=args.log_every)
215
+ scaler_disc.scale(loss_disc).backward()
216
+ if args.max_grad_norm != 0.0:
217
+ scaler_disc.unscale_(optimizer_disc)
218
+ torch.nn.utils.clip_grad_norm_(vq_loss.module.discriminator.parameters(), args.max_grad_norm)
219
+ scaler_disc.step(optimizer_disc)
220
+ scaler_disc.update()
221
+
222
+ # # Log loss values:
223
+ running_loss += loss_gen.item() + loss_disc.item()
224
+
225
+ log_steps += 1
226
+ train_steps += 1
227
+ if train_steps % args.log_every == 0:
228
+ # Measure training speed:
229
+ torch.cuda.synchronize()
230
+ end_time = time.time()
231
+ steps_per_sec = log_steps / (end_time - start_time)
232
+ # Reduce loss history over all processes:
233
+ avg_loss = torch.tensor(running_loss / log_steps, device=device)
234
+ dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
235
+ avg_loss = avg_loss.item() / dist.get_world_size()
236
+ logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
237
+ # Reset monitoring variables:
238
+ running_loss = 0
239
+ log_steps = 0
240
+ start_time = time.time()
241
+
242
+ # Save checkpoint:
243
+ if train_steps % args.ckpt_every == 0 and train_steps > 0:
244
+ if rank == 0:
245
+ if args.compile:
246
+ model_weight = vq_model.module._orig_mod.state_dict()
247
+ else:
248
+ model_weight = vq_model.module.state_dict()
249
+ checkpoint = {
250
+ "model": model_weight,
251
+ "optimizer": optimizer.state_dict(),
252
+ "discriminator": vq_loss.module.discriminator.state_dict(),
253
+ "optimizer_disc": optimizer_disc.state_dict(),
254
+ "steps": train_steps,
255
+ "args": args
256
+ }
257
+ if args.ema:
258
+ checkpoint["ema"] = ema.state_dict()
259
+ if not args.no_local_save:
260
+ checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
261
+ torch.save(checkpoint, checkpoint_path)
262
+ logger.info(f"Saved checkpoint to {checkpoint_path}")
263
+
264
+ cloud_checkpoint_path = f"{cloud_checkpoint_dir}/{train_steps:07d}.pt"
265
+ torch.save(checkpoint, cloud_checkpoint_path)
266
+ logger.info(f"Saved checkpoint in cloud to {cloud_checkpoint_path}")
267
+ dist.barrier()
268
+
269
+ vq_model.eval() # important! This disables randomized embedding dropout
270
+ # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
271
+
272
+ logger.info("Done!")
273
+ dist.destroy_process_group()
274
+
275
+
276
+
277
+ if __name__ == "__main__":
278
+ parser = argparse.ArgumentParser()
279
+ parser.add_argument("--data-path", type=str, default=None)
280
+ parser.add_argument("--code-path", type=str, default=None)
281
+ parser.add_argument("--data-face-path", type=str, default=None, help="face datasets to improve vq model")
282
+ parser.add_argument("--cloud-save-path", type=str, required=True, help='please specify a cloud disk path, if not, local path')
283
+ parser.add_argument("--no-local-save", action='store_true', help='no save checkpoints to local path for limited disk volume')
284
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
285
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for resume training")
286
+ parser.add_argument("--finetune", action='store_true', help="finetune a pre-trained vq model")
287
+ parser.add_argument("--ema", action='store_true', help="whether using ema training")
288
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
289
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
290
+ parser.add_argument("--codebook-l2-norm", action='store_true', default=True, help="l2 norm codebook")
291
+ parser.add_argument("--codebook-weight", type=float, default=1.0, help="codebook loss weight for vector quantization")
292
+ parser.add_argument("--entropy-loss-ratio", type=float, default=0.0, help="entropy loss ratio in codebook loss")
293
+ parser.add_argument("--commit-loss-beta", type=float, default=0.25, help="commit loss beta in codebook loss")
294
+ parser.add_argument("--reconstruction-weight", type=float, default=1.0, help="reconstruction loss weight of image pixel")
295
+ parser.add_argument("--reconstruction-loss", type=str, default='l2', help="reconstruction loss type of image pixel")
296
+ parser.add_argument("--perceptual-weight", type=float, default=1.0, help="perceptual loss weight of LPIPS")
297
+ parser.add_argument("--disc-weight", type=float, default=0.5, help="discriminator loss weight for gan training")
298
+ parser.add_argument("--disc-start", type=int, default=20000, help="iteration to start discriminator training and loss")
299
+ parser.add_argument("--disc-type", type=str, choices=['patchgan', 'stylegan'], default='patchgan', help="discriminator type")
300
+ parser.add_argument("--disc-loss", type=str, choices=['hinge', 'vanilla', 'non-saturating'], default='hinge', help="discriminator loss")
301
+ parser.add_argument("--gen-loss", type=str, choices=['hinge', 'non-saturating'], default='hinge', help="generator loss for gan training")
302
+ parser.add_argument("--compile", action='store_true', default=False)
303
+ parser.add_argument("--dropout-p", type=float, default=0.0, help="dropout_p")
304
+ parser.add_argument("--results-dir", type=str, default="results_tokenizer_image")
305
+ parser.add_argument("--dataset", type=str, default='imagenet')
306
+ parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
307
+ parser.add_argument("--epochs", type=int, default=40)
308
+ parser.add_argument("--lr", type=float, default=1e-4)
309
+ parser.add_argument("--weight-decay", type=float, default=5e-2, help="Weight decay to use.")
310
+ parser.add_argument("--beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
311
+ parser.add_argument("--beta2", type=float, default=0.95, help="The beta2 parameter for the Adam optimizer.")
312
+ parser.add_argument("--max-grad-norm", default=1.0, type=float, help="Max gradient norm.")
313
+ parser.add_argument("--global-batch-size", type=int, default=64)
314
+ parser.add_argument("--global-seed", type=int, default=0)
315
+ parser.add_argument("--num-workers", type=int, default=16)
316
+ parser.add_argument("--log-every", type=int, default=100)
317
+ parser.add_argument("--ckpt-every", type=int, default=5000)
318
+ parser.add_argument("--gradient-accumulation-steps", type=int, default=1)
319
+ parser.add_argument("--mixed-precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
320
+ parser.add_argument("--condition", type=str, default='hed')
321
+ parser.add_argument("--get-condition-img", type=bool, default=False)
322
+ args = parser.parse_args()
323
+ main(args)
tokenizer/vqgan/README.md ADDED
@@ -0,0 +1,21 @@
+ ## Pretrained VQVAE Models
+ 
+ ### install
+ ```
+ pip install omegaconf
+ pip install einops
+ ```
+ * download all needed models from https://github.com/CompVis/taming-transformers and put them in pretrained_models/
+ * pip install pytorch_lightning
+ * python3 tools/convert_pytorch_lightning_to_torch.py
+ * pip uninstall pytorch_lightning
+ 
+ ### demo
+ ```
+ cd ${THIS_REPO_ROOT}
+ python3 tokenizer/vqgan/taming_vqgan_demo.py
+ ```
+ 
+ ### acknowledge
+ Code in this folder is modified from https://github.com/CompVis/taming-transformers
+ 
tokenizer/vqgan/configs/vqgan_imagenet_f16_1024.yaml ADDED
@@ -0,0 +1,32 @@
+ model:
+   base_learning_rate: 4.5e-06
+   target: taming.models.vqgan.VQModel
+   params:
+     embed_dim: 256
+     n_embed: 1024
+     ddconfig:
+       double_z: false
+       z_channels: 256
+       resolution: 256
+       in_channels: 3
+       out_ch: 3
+       ch: 128
+       ch_mult:
+       - 1
+       - 1
+       - 2
+       - 2
+       - 4
+       num_res_blocks: 2
+       attn_resolutions:
+       - 16
+       dropout: 0.0
+     lossconfig:
+       target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
+       params:
+         disc_conditional: false
+         disc_in_channels: 3
+         disc_start: 0
+         disc_weight: 0.8
+         codebook_weight: 1.0
+ 
tokenizer/vqgan/configs/vqgan_imagenet_f16_16384.yaml ADDED
@@ -0,0 +1,34 @@
+ model:
+   base_learning_rate: 4.5e-06
+   target: taming.models.vqgan.VQModel
+   params:
+     embed_dim: 256
+     n_embed: 16384
+     monitor: val/rec_loss
+     ddconfig:
+       double_z: false
+       z_channels: 256
+       resolution: 256
+       in_channels: 3
+       out_ch: 3
+       ch: 128
+       ch_mult:
+       - 1
+       - 1
+       - 2
+       - 2
+       - 4
+       num_res_blocks: 2
+       attn_resolutions:
+       - 16
+       dropout: 0.0
+     lossconfig:
+       target: taming.modules.losses.vqperceptual.VQLPIPSWithDiscriminator
+       params:
+         disc_conditional: false
+         disc_in_channels: 3
+         disc_start: 0
+         disc_weight: 0.75
+         disc_num_layers: 2
+         codebook_weight: 1.0
+ 
tokenizer/vqgan/configs/vqgan_openimage_f8_16384.yaml ADDED
@@ -0,0 +1,20 @@
+ model:
+   params:
+     embed_dim: 4
+     n_embed: 16384
+     ddconfig:
+       double_z: false
+       z_channels: 4
+       resolution: 256
+       in_channels: 3
+       out_ch: 3
+       ch: 128
+       ch_mult:
+       - 1
+       - 2
+       - 2
+       - 4
+       num_res_blocks: 2
+       attn_resolutions:
+       - 32
+       dropout: 0.0
tokenizer/vqgan/configs/vqgan_openimage_f8_256.yaml ADDED
@@ -0,0 +1,20 @@
+ model:
+   params:
+     embed_dim: 4
+     n_embed: 256
+     ddconfig:
+       double_z: false
+       z_channels: 4
+       resolution: 256
+       in_channels: 3
+       out_ch: 3
+       ch: 128
+       ch_mult:
+       - 1
+       - 2
+       - 2
+       - 4
+       num_res_blocks: 2
+       attn_resolutions:
+       - 32
+       dropout: 0.0
tokenizer/vqgan/layer.py ADDED
@@ -0,0 +1,372 @@
1
+ # pytorch_diffusion + derived encoder decoder
2
+ import math
3
+ import torch
4
+ import torch.nn as nn
5
+ import numpy as np
6
+
7
+
8
+ def nonlinearity(x):
9
+ # swish
10
+ return x*torch.sigmoid(x)
11
+
12
+
13
+ def Normalize(in_channels):
14
+ return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
15
+
16
+
17
+ class Upsample(nn.Module):
18
+ def __init__(self, in_channels, with_conv):
19
+ super().__init__()
20
+ self.with_conv = with_conv
21
+ if self.with_conv:
22
+ self.conv = torch.nn.Conv2d(in_channels,
23
+ in_channels,
24
+ kernel_size=3,
25
+ stride=1,
26
+ padding=1)
27
+
28
+ def forward(self, x):
29
+ x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
30
+ if self.with_conv:
31
+ x = self.conv(x)
32
+ return x
33
+
34
+
35
+ class Downsample(nn.Module):
36
+ def __init__(self, in_channels, with_conv):
37
+ super().__init__()
38
+ self.with_conv = with_conv
39
+ if self.with_conv:
40
+ # no asymmetric padding in torch conv, must do it ourselves
41
+ self.conv = torch.nn.Conv2d(in_channels,
42
+ in_channels,
43
+ kernel_size=3,
44
+ stride=2,
45
+ padding=0)
46
+
47
+ def forward(self, x):
48
+ if self.with_conv:
49
+ pad = (0,1,0,1)
50
+ x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
51
+ x = self.conv(x)
52
+ else:
53
+ x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
54
+ return x
55
+
56
+
57
+ class ResnetBlock(nn.Module):
58
+ def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
59
+ dropout, temb_channels=512):
60
+ super().__init__()
61
+ self.in_channels = in_channels
62
+ out_channels = in_channels if out_channels is None else out_channels
63
+ self.out_channels = out_channels
64
+ self.use_conv_shortcut = conv_shortcut
65
+
66
+ self.norm1 = Normalize(in_channels)
67
+ self.conv1 = torch.nn.Conv2d(in_channels,
68
+ out_channels,
69
+ kernel_size=3,
70
+ stride=1,
71
+ padding=1)
72
+ if temb_channels > 0:
73
+ self.temb_proj = torch.nn.Linear(temb_channels,
74
+ out_channels)
75
+ self.norm2 = Normalize(out_channels)
76
+ self.dropout = torch.nn.Dropout(dropout)
77
+ self.conv2 = torch.nn.Conv2d(out_channels,
78
+ out_channels,
79
+ kernel_size=3,
80
+ stride=1,
81
+ padding=1)
82
+ if self.in_channels != self.out_channels:
83
+ if self.use_conv_shortcut:
84
+ self.conv_shortcut = torch.nn.Conv2d(in_channels,
85
+ out_channels,
86
+ kernel_size=3,
87
+ stride=1,
88
+ padding=1)
89
+ else:
90
+ self.nin_shortcut = torch.nn.Conv2d(in_channels,
91
+ out_channels,
92
+ kernel_size=1,
93
+ stride=1,
94
+ padding=0)
95
+
96
+ def forward(self, x, temb):
97
+ h = x
98
+ h = self.norm1(h)
99
+ h = nonlinearity(h)
100
+ h = self.conv1(h)
101
+
102
+ if temb is not None:
103
+ h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
104
+
105
+ h = self.norm2(h)
106
+ h = nonlinearity(h)
107
+ h = self.dropout(h)
108
+ h = self.conv2(h)
109
+
110
+ if self.in_channels != self.out_channels:
111
+ if self.use_conv_shortcut:
112
+ x = self.conv_shortcut(x)
113
+ else:
114
+ x = self.nin_shortcut(x)
115
+
116
+ return x+h
117
+
118
+
119
+ class AttnBlock(nn.Module):
120
+ def __init__(self, in_channels):
121
+ super().__init__()
122
+ self.in_channels = in_channels
123
+
124
+ self.norm = Normalize(in_channels)
125
+ self.q = torch.nn.Conv2d(in_channels,
126
+ in_channels,
127
+ kernel_size=1,
128
+ stride=1,
129
+ padding=0)
130
+ self.k = torch.nn.Conv2d(in_channels,
131
+ in_channels,
132
+ kernel_size=1,
133
+ stride=1,
134
+ padding=0)
135
+ self.v = torch.nn.Conv2d(in_channels,
136
+ in_channels,
137
+ kernel_size=1,
138
+ stride=1,
139
+ padding=0)
140
+ self.proj_out = torch.nn.Conv2d(in_channels,
141
+ in_channels,
142
+ kernel_size=1,
143
+ stride=1,
144
+ padding=0)
145
+
146
+
147
+ def forward(self, x):
148
+ h_ = x
149
+ h_ = self.norm(h_)
150
+ q = self.q(h_)
151
+ k = self.k(h_)
152
+ v = self.v(h_)
153
+
154
+ # compute attention
155
+ b,c,h,w = q.shape
156
+ q = q.reshape(b,c,h*w)
157
+ q = q.permute(0,2,1) # b,hw,c
158
+ k = k.reshape(b,c,h*w) # b,c,hw
159
+ w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
160
+ w_ = w_ * (int(c)**(-0.5))
161
+ w_ = torch.nn.functional.softmax(w_, dim=2)
162
+
163
+ # attend to values
164
+ v = v.reshape(b,c,h*w)
165
+ w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
166
+ h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
167
+ h_ = h_.reshape(b,c,h,w)
168
+
169
+ h_ = self.proj_out(h_)
170
+
171
+ return x+h_
172
+
173
+
174
+
175
+ class Encoder(nn.Module):
176
+ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
177
+ attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
178
+ resolution, z_channels, double_z=True, **ignore_kwargs):
179
+ super().__init__()
180
+ self.ch = ch
181
+ self.temb_ch = 0
182
+ self.num_resolutions = len(ch_mult)
183
+ self.num_res_blocks = num_res_blocks
184
+ self.resolution = resolution
185
+ self.in_channels = in_channels
186
+
187
+ # downsampling
188
+ self.conv_in = torch.nn.Conv2d(in_channels,
189
+ self.ch,
190
+ kernel_size=3,
191
+ stride=1,
192
+ padding=1)
193
+
194
+ curr_res = resolution
195
+ in_ch_mult = (1,)+tuple(ch_mult)
196
+ self.down = nn.ModuleList()
197
+ for i_level in range(self.num_resolutions):
198
+ block = nn.ModuleList()
199
+ attn = nn.ModuleList()
200
+ block_in = ch*in_ch_mult[i_level]
201
+ block_out = ch*ch_mult[i_level]
202
+ for i_block in range(self.num_res_blocks):
203
+ block.append(ResnetBlock(in_channels=block_in,
204
+ out_channels=block_out,
205
+ temb_channels=self.temb_ch,
206
+ dropout=dropout))
207
+ block_in = block_out
208
+ if curr_res in attn_resolutions:
209
+ attn.append(AttnBlock(block_in))
210
+ down = nn.Module()
211
+ down.block = block
212
+ down.attn = attn
213
+ if i_level != self.num_resolutions-1:
214
+ down.downsample = Downsample(block_in, resamp_with_conv)
215
+ curr_res = curr_res // 2
216
+ self.down.append(down)
217
+
218
+ # middle
219
+ self.mid = nn.Module()
220
+ self.mid.block_1 = ResnetBlock(in_channels=block_in,
221
+ out_channels=block_in,
222
+ temb_channels=self.temb_ch,
223
+ dropout=dropout)
224
+ self.mid.attn_1 = AttnBlock(block_in)
225
+ self.mid.block_2 = ResnetBlock(in_channels=block_in,
226
+ out_channels=block_in,
227
+ temb_channels=self.temb_ch,
228
+ dropout=dropout)
229
+
230
+ # end
231
+ self.norm_out = Normalize(block_in)
232
+ self.conv_out = torch.nn.Conv2d(block_in,
233
+ 2*z_channels if double_z else z_channels,
234
+ kernel_size=3,
235
+ stride=1,
236
+ padding=1)
237
+
238
+
239
+ def forward(self, x):
240
+ #assert x.shape[2] == x.shape[3] == self.resolution, "{}, {}, {}".format(x.shape[2], x.shape[3], self.resolution)
241
+
242
+ # timestep embedding
243
+ temb = None
244
+
245
+ # downsampling
246
+ hs = [self.conv_in(x)]
247
+ for i_level in range(self.num_resolutions):
248
+ for i_block in range(self.num_res_blocks):
249
+ h = self.down[i_level].block[i_block](hs[-1], temb)
250
+ if len(self.down[i_level].attn) > 0:
251
+ h = self.down[i_level].attn[i_block](h)
252
+ hs.append(h)
253
+ if i_level != self.num_resolutions-1:
254
+ hs.append(self.down[i_level].downsample(hs[-1]))
255
+
256
+ # middle
257
+ h = hs[-1]
258
+ h = self.mid.block_1(h, temb)
259
+ h = self.mid.attn_1(h)
260
+ h = self.mid.block_2(h, temb)
261
+
262
+ # end
263
+ h = self.norm_out(h)
264
+ h = nonlinearity(h)
265
+ h = self.conv_out(h)
266
+ return h
267
+
268
+
269
+ class Decoder(nn.Module):
270
+ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
271
+ attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
272
+ resolution, z_channels, give_pre_end=False, **ignorekwargs):
273
+ super().__init__()
274
+ self.ch = ch
275
+ self.temb_ch = 0
276
+ self.num_resolutions = len(ch_mult)
277
+ self.num_res_blocks = num_res_blocks
278
+ self.resolution = resolution
279
+ self.in_channels = in_channels
280
+ self.give_pre_end = give_pre_end
281
+
282
+ # compute in_ch_mult, block_in and curr_res at lowest res
283
+ in_ch_mult = (1,)+tuple(ch_mult)
284
+ block_in = ch*ch_mult[self.num_resolutions-1]
285
+ curr_res = resolution // 2**(self.num_resolutions-1)
286
+ self.z_shape = (1,z_channels,curr_res,curr_res)
287
+ print("Working with z of shape {} = {} dimensions.".format(
288
+ self.z_shape, np.prod(self.z_shape)))
289
+
290
+ # z to block_in
291
+ self.conv_in = torch.nn.Conv2d(z_channels,
292
+ block_in,
293
+ kernel_size=3,
294
+ stride=1,
295
+ padding=1)
296
+
297
+ # middle
298
+ self.mid = nn.Module()
299
+ self.mid.block_1 = ResnetBlock(in_channels=block_in,
300
+ out_channels=block_in,
301
+ temb_channels=self.temb_ch,
302
+ dropout=dropout)
303
+ self.mid.attn_1 = AttnBlock(block_in)
304
+ self.mid.block_2 = ResnetBlock(in_channels=block_in,
305
+ out_channels=block_in,
306
+ temb_channels=self.temb_ch,
307
+ dropout=dropout)
308
+
309
+ # upsampling
310
+ self.up = nn.ModuleList()
311
+ for i_level in reversed(range(self.num_resolutions)):
312
+ block = nn.ModuleList()
313
+ attn = nn.ModuleList()
314
+ block_out = ch*ch_mult[i_level]
315
+ for i_block in range(self.num_res_blocks+1):
316
+ block.append(ResnetBlock(in_channels=block_in,
317
+ out_channels=block_out,
318
+ temb_channels=self.temb_ch,
319
+ dropout=dropout))
320
+ block_in = block_out
321
+ if curr_res in attn_resolutions:
322
+ attn.append(AttnBlock(block_in))
323
+ up = nn.Module()
324
+ up.block = block
325
+ up.attn = attn
326
+ if i_level != 0:
327
+ up.upsample = Upsample(block_in, resamp_with_conv)
328
+ curr_res = curr_res * 2
329
+ self.up.insert(0, up) # prepend to get consistent order
330
+
331
+ # end
332
+ self.norm_out = Normalize(block_in)
333
+ self.conv_out = torch.nn.Conv2d(block_in,
334
+ out_ch,
335
+ kernel_size=3,
336
+ stride=1,
337
+ padding=1)
338
+
339
+ def forward(self, z):
340
+ #assert z.shape[1:] == self.z_shape[1:]
341
+ self.last_z_shape = z.shape
342
+
343
+ # timestep embedding
344
+ temb = None
345
+
346
+ # z to block_in
347
+ h = self.conv_in(z)
348
+
349
+ # middle
350
+ h = self.mid.block_1(h, temb)
351
+ h = self.mid.attn_1(h)
352
+ h = self.mid.block_2(h, temb)
353
+
354
+ # upsampling
355
+ for i_level in reversed(range(self.num_resolutions)):
356
+ for i_block in range(self.num_res_blocks+1):
357
+ h = self.up[i_level].block[i_block](h, temb)
358
+ if len(self.up[i_level].attn) > 0:
359
+ h = self.up[i_level].attn[i_block](h)
360
+ if i_level != 0:
361
+ h = self.up[i_level].upsample(h)
362
+
363
+ # end
364
+ if self.give_pre_end:
365
+ return h
366
+
367
+ h = self.norm_out(h)
368
+ h = nonlinearity(h)
369
+ h = self.conv_out(h)
370
+ return h
371
+
372
+
tokenizer/vqgan/model.py ADDED
@@ -0,0 +1,88 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ 
+ from tokenizer.vqgan.layer import Encoder, Decoder
+ from tokenizer.vqgan.quantize import VectorQuantizer2 as VectorQuantizer
+ 
+ 
+ VQGAN_FROM_TAMING = {
+     'vqgan_imagenet_f16_1024': (
+         'tokenizer/vqgan/configs/vqgan_imagenet_f16_1024.yaml',
+         'pretrained_models/vqgan_imagenet_f16_1024/ckpts/last.pth'),
+     'vqgan_imagenet_f16_16384': (
+         'tokenizer/vqgan/configs/vqgan_imagenet_f16_16384.yaml',
+         'pretrained_models/vqgan_imagenet_f16_16384/ckpts/last.pth'),
+     'vqgan_openimage_f8_256': (
+         'tokenizer/vqgan/configs/vqgan_openimage_f8_256.yaml',
+         'pretrained_models/vq-f8-n256/model.pth'),
+     'vqgan_openimage_f8_16384': (
+         'tokenizer/vqgan/configs/vqgan_openimage_f8_16384.yaml',
+         'pretrained_models/vq-f8/model.pth'),
+ }
+ 
+ class VQModel(nn.Module):
+     def __init__(self,
+                  ddconfig,
+                  n_embed,
+                  embed_dim,
+                  ckpt_path=None,
+                  ignore_keys=[],
+                  image_key="image",
+                  colorize_nlabels=None,
+                  monitor=None,
+                  remap=None,
+                  sane_index_shape=False,  # tell vector quantizer to return indices as bhw
+                  **kwargs,
+                  ):
+         super().__init__()
+         self.image_key = image_key
+         self.encoder = Encoder(**ddconfig)
+         self.decoder = Decoder(**ddconfig)
+         self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
+                                         remap=remap, sane_index_shape=sane_index_shape)
+         self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
+         self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
+         if ckpt_path is not None:
+             self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
+         self.image_key = image_key
+         if colorize_nlabels is not None:
+             assert type(colorize_nlabels)==int
+             self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
+         if monitor is not None:
+             self.monitor = monitor
+ 
+     def init_from_ckpt(self, path, ignore_keys=list(), logging=True):
+         model_weight = torch.load(path, map_location="cpu")["state_dict"]
+         keys = list(model_weight.keys())
+         for k in keys:
+             for ik in ignore_keys:
+                 if k.startswith(ik):
+                     print("Deleting key {} from state_dict.".format(k))
+                     del model_weight[k]
+         missing, unexpected = self.load_state_dict(model_weight, strict=False)
+         if logging:
+             print(f"Restored from {path}")
+             print(f"Missing Keys in State Dict: {missing}")
+             print(f"Unexpected Keys in State Dict: {unexpected}")
+ 
+     def encode(self, x):
+         h = self.encoder(x)
+         h = self.quant_conv(h)
+         quant, emb_loss, info = self.quantize(h)
+         return quant, emb_loss, info
+ 
+     def decode(self, quant):
+         quant = self.post_quant_conv(quant)
+         dec = self.decoder(quant)
+         return dec
+ 
+     def decode_code(self, code_b, shape, channel_first=True):
+         quant_b = self.quantize.get_codebook_entry(code_b, shape, channel_first)
+         dec = self.decode(quant_b)
+         return dec
+ 
+     def forward(self, input):
+         quant, diff, _ = self.encode(input)
+         dec = self.decode(quant)
+         return dec, diff
tokenizer/vqgan/quantize.py ADDED
@@ -0,0 +1,229 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ import numpy as np
5
+ from torch import einsum
6
+ from einops import rearrange
7
+
8
+
9
+ class VectorQuantizer(nn.Module):
10
+ """
11
+ see https://github.com/MishaLaskin/vqvae/blob/d761a999e2267766400dc646d82d3ac3657771d4/models/quantizer.py
12
+ ____________________________________________
13
+ Discretization bottleneck part of the VQ-VAE.
14
+ Inputs:
15
+ - n_e : number of embeddings
16
+ - e_dim : dimension of embedding
17
+ - beta : commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
18
+ _____________________________________________
19
+ """
20
+
21
+ # NOTE: this class contains a bug regarding beta; see VectorQuantizer2 for
22
+ # a fix and use legacy=False to apply that fix. VectorQuantizer2 can be
23
+ # used wherever VectorQuantizer has been used before and is additionally
24
+ # more efficient.
25
+ def __init__(self, n_e, e_dim, beta):
26
+ super(VectorQuantizer, self).__init__()
27
+ self.n_e = n_e
28
+ self.e_dim = e_dim
29
+ self.beta = beta
30
+
31
+ self.embedding = nn.Embedding(self.n_e, self.e_dim)
32
+ self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
33
+
34
+ def forward(self, z):
35
+ """
36
+ Inputs the output of the encoder network z and maps it to a discrete
37
+ one-hot vector that is the index of the closest embedding vector e_j
38
+ z (continuous) -> z_q (discrete)
39
+ z.shape = (batch, channel, height, width)
40
+ quantization pipeline:
41
+ 1. get encoder input (B,C,H,W)
42
+ 2. flatten input to (B*H*W,C)
43
+ """
44
+ # reshape z -> (batch, height, width, channel) and flatten
45
+ z = z.permute(0, 2, 3, 1).contiguous()
46
+ z_flattened = z.view(-1, self.e_dim)
47
+ # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
48
+
49
+ d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
50
+ torch.sum(self.embedding.weight**2, dim=1) - 2 * \
51
+ torch.matmul(z_flattened, self.embedding.weight.t())
52
+
53
+ ## could possible replace this here
54
+ # #\start...
55
+ # find closest encodings
56
+ min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1)
57
+
58
+ min_encodings = torch.zeros(
59
+ min_encoding_indices.shape[0], self.n_e).to(z)
60
+ min_encodings.scatter_(1, min_encoding_indices, 1)
61
+
62
+ # dtype min encodings: torch.float32
63
+ # min_encodings shape: torch.Size([2048, 512])
64
+ # min_encoding_indices.shape: torch.Size([2048, 1])
65
+
66
+ # get quantized latent vectors
67
+ z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape)
68
+ #.........\end
69
+
70
+ # with:
71
+ # .........\start
72
+ #min_encoding_indices = torch.argmin(d, dim=1)
73
+ #z_q = self.embedding(min_encoding_indices)
74
+ # ......\end......... (TODO)
75
+
76
+ # compute loss for embedding
77
+ loss = torch.mean((z_q.detach()-z)**2) + self.beta * \
78
+ torch.mean((z_q - z.detach()) ** 2)
79
+
80
+ # preserve gradients
81
+ z_q = z + (z_q - z).detach()
82
+
83
+ # perplexity
84
+ e_mean = torch.mean(min_encodings, dim=0)
85
+ perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))
86
+
87
+ # reshape back to match original input shape
88
+ z_q = z_q.permute(0, 3, 1, 2).contiguous()
89
+
90
+ return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
91
+
92
+ def get_codebook_entry(self, indices, shape):
93
+ # shape specifying (batch, height, width, channel)
94
+ # TODO: check for more easy handling with nn.Embedding
95
+ min_encodings = torch.zeros(indices.shape[0], self.n_e).to(indices)
96
+ min_encodings.scatter_(1, indices[:,None], 1)
97
+
98
+ # get quantized latent vectors
99
+ z_q = torch.matmul(min_encodings.float(), self.embedding.weight)
100
+
101
+ if shape is not None:
102
+ z_q = z_q.view(shape)
103
+
104
+ # reshape back to match original input shape
105
+ z_q = z_q.permute(0, 3, 1, 2).contiguous()
106
+
107
+ return z_q
108
+
109
+
110
+ class VectorQuantizer2(nn.Module):
111
+ """
112
+ Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
113
+ avoids costly matrix multiplications and allows for post-hoc remapping of indices.
114
+ """
115
+ # NOTE: due to a bug the beta term was applied to the wrong term. for
116
+ # backwards compatibility we use the buggy version by default, but you can
117
+ # specify legacy=False to fix it.
118
+ def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
119
+ sane_index_shape=False, legacy=True):
120
+ super().__init__()
121
+ self.n_e = n_e
122
+ self.e_dim = e_dim
123
+ self.beta = beta
124
+ self.legacy = legacy
125
+
126
+ self.embedding = nn.Embedding(self.n_e, self.e_dim)
127
+ self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
128
+
129
+ self.remap = remap
130
+ if self.remap is not None:
131
+ self.register_buffer("used", torch.tensor(np.load(self.remap)))
132
+ self.re_embed = self.used.shape[0]
133
+ self.unknown_index = unknown_index # "random" or "extra" or integer
134
+ if self.unknown_index == "extra":
135
+ self.unknown_index = self.re_embed
136
+ self.re_embed = self.re_embed+1
137
+ print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
138
+ f"Using {self.unknown_index} for unknown indices.")
139
+ else:
140
+ self.re_embed = n_e
141
+
142
+ self.sane_index_shape = sane_index_shape
143
+
144
+ def remap_to_used(self, inds):
145
+ ishape = inds.shape
146
+ assert len(ishape)>1
147
+ inds = inds.reshape(ishape[0],-1)
148
+ used = self.used.to(inds)
149
+ match = (inds[:,:,None]==used[None,None,...]).long()
150
+ new = match.argmax(-1)
151
+ unknown = match.sum(2)<1
152
+ if self.unknown_index == "random":
153
+ new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device)
154
+ else:
155
+ new[unknown] = self.unknown_index
156
+ return new.reshape(ishape)
157
+
158
+ def unmap_to_all(self, inds):
159
+ ishape = inds.shape
160
+ assert len(ishape)>1
161
+ inds = inds.reshape(ishape[0],-1)
162
+ used = self.used.to(inds)
163
+ if self.re_embed > self.used.shape[0]: # extra token
164
+ inds[inds>=self.used.shape[0]] = 0 # simply set to zero
165
+ back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds)
166
+ return back.reshape(ishape)
167
+
168
+ def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
169
+ assert temp is None or temp==1.0, "Only for interface compatible with Gumbel"
170
+ assert rescale_logits==False, "Only for interface compatible with Gumbel"
171
+ assert return_logits==False, "Only for interface compatible with Gumbel"
172
+ # reshape z -> (batch, height, width, channel) and flatten
173
+ z = rearrange(z, 'b c h w -> b h w c').contiguous()
174
+ z_flattened = z.view(-1, self.e_dim)
175
+ # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
176
+
177
+ d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
178
+ torch.sum(self.embedding.weight**2, dim=1) - 2 * \
179
+ torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
180
+
181
+ min_encoding_indices = torch.argmin(d, dim=1)
182
+ z_q = self.embedding(min_encoding_indices).view(z.shape)
183
+ perplexity = None
184
+ min_encodings = None
185
+
186
+ # compute loss for embedding
187
+ if not self.legacy:
188
+ loss = self.beta * torch.mean((z_q.detach()-z)**2) + \
189
+ torch.mean((z_q - z.detach()) ** 2)
190
+ else:
191
+ loss = torch.mean((z_q.detach()-z)**2) + self.beta * \
192
+ torch.mean((z_q - z.detach()) ** 2)
193
+
194
+ # preserve gradients
195
+ z_q = z + (z_q - z).detach()
196
+
197
+ # reshape back to match original input shape
198
+ z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()
199
+
200
+ if self.remap is not None:
201
+ min_encoding_indices = min_encoding_indices.reshape(z.shape[0],-1) # add batch axis
202
+ min_encoding_indices = self.remap_to_used(min_encoding_indices)
203
+ min_encoding_indices = min_encoding_indices.reshape(-1,1) # flatten
204
+
205
+ if self.sane_index_shape:
206
+ min_encoding_indices = min_encoding_indices.reshape(
207
+ z_q.shape[0], z_q.shape[2], z_q.shape[3])
208
+
209
+ return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
210
+
211
+ def get_codebook_entry(self, indices, shape, channel_first=True):
212
+ # shape = (batch, channel, height, width) if channel_first else (batch, height, width, channel)
213
+ if self.remap is not None:
214
+ indices = indices.reshape(shape[0],-1) # add batch axis
215
+ indices = self.unmap_to_all(indices)
216
+ indices = indices.reshape(-1) # flatten again
217
+
218
+ # get quantized latent vectors
219
+ z_q = self.embedding(indices) # (b*h*w, c)
220
+
221
+ if shape is not None:
222
+ if channel_first:
223
+ z_q = z_q.reshape(shape[0], shape[2], shape[3], shape[1])
224
+ # reshape back to match original input shape
225
+ z_q = z_q.permute(0, 3, 1, 2).contiguous()
226
+ else:
227
+ z_q = z_q.view(shape)
228
+
229
+ return z_q
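The two quantizer classes above are self-contained apart from torch/einops/numpy. Below is a minimal usage sketch for VectorQuantizer2, assuming this file is importable as tokenizer.vqgan.quantize (the n_e/e_dim values are illustrative, taken from the codebook defaults used elsewhere in this upload):

    # Sketch: quantize a fake encoder feature map and check that gradients
    # flow straight through the codebook lookup (assumed module path).
    import torch
    from tokenizer.vqgan.quantize import VectorQuantizer2

    quantizer = VectorQuantizer2(n_e=16384, e_dim=8, beta=0.25, legacy=False)
    z = torch.randn(2, 8, 16, 16, requires_grad=True)   # (B, C, H, W) encoder output
    z_q, commit_loss, (_, _, indices) = quantizer(z)

    assert z_q.shape == z.shape        # quantized map keeps the input shape
    z_q.sum().backward()
    assert z.grad is not None          # straight-through estimator at work

The `z_q = z + (z_q - z).detach()` line is what makes this work: the argmin lookup itself is non-differentiable, but the encoder still receives an identity gradient through z.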
tokenizer/vqgan/reconstruction_vqgan_ddp.py ADDED
@@ -0,0 +1,215 @@
1
+ import torch
2
+ torch.backends.cuda.matmul.allow_tf32 = True
3
+ torch.backends.cudnn.allow_tf32 = True
4
+ import torch.distributed as dist
5
+ from torch.utils.data import Dataset, DataLoader
6
+ from torch.utils.data.distributed import DistributedSampler
7
+ from torchvision.datasets import ImageFolder
8
+ from torchvision import transforms
9
+ from tqdm import tqdm
10
+ import os
11
+ from PIL import Image
12
+ import numpy as np
13
+ import itertools
14
+ import argparse
15
+ import random
16
+
17
+ from skimage.metrics import peak_signal_noise_ratio as psnr_loss
18
+ from skimage.metrics import structural_similarity as ssim_loss
19
+ from omegaconf import OmegaConf
20
+ from tokenizer.vqgan.model import VQModel
21
+ from tokenizer.vqgan.model import VQGAN_FROM_TAMING
22
+
23
+
24
+ class SingleFolderDataset(Dataset):
25
+ def __init__(self, directory, transform=None):
26
+ super().__init__()
27
+ self.directory = directory
28
+ self.transform = transform
29
+ self.image_paths = [os.path.join(directory, file_name) for file_name in os.listdir(directory)
30
+ if os.path.isfile(os.path.join(directory, file_name))]
31
+
32
+ def __len__(self):
33
+ return len(self.image_paths)
34
+
35
+ def __getitem__(self, idx):
36
+ image_path = self.image_paths[idx]
37
+ image = Image.open(image_path).convert('RGB')
38
+ if self.transform:
39
+ image = self.transform(image)
40
+ return image, torch.tensor(0)
41
+
42
+
43
+ def create_npz_from_sample_folder(sample_dir, num=50_000):
44
+ """
45
+ Builds a single .npz file from a folder of .png samples.
46
+ """
47
+ samples = []
48
+ for i in tqdm(range(num), desc="Building .npz file from samples"):
49
+ sample_pil = Image.open(f"{sample_dir}/{i:06d}.png")
50
+ sample_np = np.asarray(sample_pil).astype(np.uint8)
51
+ samples.append(sample_np)
52
+
53
+ random.shuffle(samples) # This is very important for IS (Inception Score)!
54
+ samples = np.stack(samples)
55
+ assert samples.shape == (num, samples.shape[1], samples.shape[2], 3)
56
+ npz_path = f"{sample_dir}.npz"
57
+ np.savez(npz_path, arr_0=samples)
58
+ print(f"Saved .npz file to {npz_path} [shape={samples.shape}].")
59
+ return npz_path
60
+
61
+
62
+ def center_crop_arr(pil_image, image_size):
63
+ """
64
+ Center cropping implementation from ADM.
65
+ https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
66
+ """
67
+ while min(*pil_image.size) >= 2 * image_size:
68
+ pil_image = pil_image.resize(
69
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
70
+ )
71
+
72
+ scale = image_size / min(*pil_image.size)
73
+ pil_image = pil_image.resize(
74
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
75
+ )
76
+
77
+ arr = np.array(pil_image)
78
+ crop_y = (arr.shape[0] - image_size) // 2
79
+ crop_x = (arr.shape[1] - image_size) // 2
80
+ return Image.fromarray(arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size])
81
+
82
+
83
+ def main(args):
84
+ # Setup PyTorch:
85
+ assert torch.cuda.is_available(), "Reconstruction with DDP requires at least one GPU."
86
+ torch.set_grad_enabled(False)
87
+
88
+ # Setup DDP:
89
+ dist.init_process_group("nccl")
90
+ rank = dist.get_rank()
91
+ device = rank % torch.cuda.device_count()
92
+ seed = args.global_seed * dist.get_world_size() + rank
93
+ torch.manual_seed(seed)
94
+ torch.cuda.set_device(device)
95
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
96
+
97
+ # create and load vqgan
98
+ cfg, ckpt = VQGAN_FROM_TAMING[args.vqgan]
99
+ config = OmegaConf.load(cfg)
100
+ vq_model = VQModel(**config.model.get("params", dict())).to(device)
101
+ vq_model.init_from_ckpt(ckpt, logging=False)
102
+ vq_model.eval()
103
+
104
+ # Create folder to save samples:
105
+ folder_name = f"{args.vqgan}-{args.dataset}-size-{args.image_size}-seed-{args.global_seed}"
106
+ sample_folder_dir = f"{args.sample_dir}/{folder_name}"
107
+ if rank == 0:
108
+ os.makedirs(sample_folder_dir, exist_ok=True)
109
+ print(f"Saving .png samples at {sample_folder_dir}")
110
+ dist.barrier()
111
+
112
+ # Setup data:
113
+ transform = transforms.Compose([
114
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
115
+ transforms.ToTensor(),
116
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
117
+ ])
118
+
119
+ if args.dataset == 'imagenet':
120
+ dataset = ImageFolder(args.data_path, transform=transform)
121
+ num_fid_samples = 50000
122
+ elif args.dataset == 'coco':
123
+ dataset = SingleFolderDataset(args.data_path, transform=transform)
124
+ num_fid_samples = 5000
125
+ else:
126
+ raise Exception("please check dataset")
127
+
128
+ sampler = DistributedSampler(
129
+ dataset,
130
+ num_replicas=dist.get_world_size(),
131
+ rank=rank,
132
+ shuffle=False,
133
+ seed=args.global_seed
134
+ )
135
+ loader = DataLoader(
136
+ dataset,
137
+ batch_size=args.per_proc_batch_size,
138
+ shuffle=False,
139
+ sampler=sampler,
140
+ num_workers=args.num_workers,
141
+ pin_memory=True,
142
+ drop_last=False
143
+ )
144
+
145
+ # Figure out how many samples we need to generate on each GPU and how many iterations we need to run:
146
+ n = args.per_proc_batch_size
147
+ global_batch_size = n * dist.get_world_size()
148
+
149
+ psnr_val_rgb = []
150
+ ssim_val_rgb = []
151
+ loader = tqdm(loader) if rank == 0 else loader
152
+ total = 0
153
+ for x, _ in loader:
154
+ rgb_gts = x
155
+ rgb_gts = (rgb_gts.permute(0, 2, 3, 1).to("cpu").numpy() + 1.0) / 2.0 # rgb_gt value is between [0, 1]
156
+ x = x.to(device)
157
+ with torch.no_grad():
158
+ latent, _, [_, _, indices] = vq_model.encode(x)
159
+ samples = vq_model.decode_code(indices, latent.shape) # output value is between [-1, 1]
160
+ samples = torch.clamp(127.5 * samples + 128.0, 0, 255).permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()
161
+
162
+ # Save samples to disk as individual .png files
163
+ for i, (sample, rgb_gt) in enumerate(zip(samples, rgb_gts)):
164
+ index = i * dist.get_world_size() + rank + total
165
+ Image.fromarray(sample).save(f"{sample_folder_dir}/{index:06d}.png")
166
+ # metric
167
+ rgb_restored = sample.astype(np.float32) / 255. # rgb_restored value is between [0, 1]
168
+ psnr = psnr_loss(rgb_restored, rgb_gt)
169
+ ssim = ssim_loss(rgb_restored, rgb_gt, multichannel=True, data_range=2.0, channel_axis=-1)
170
+ psnr_val_rgb.append(psnr)
171
+ ssim_val_rgb.append(ssim)
172
+ total += global_batch_size
173
+
174
+ # ------------------------------------
175
+ # Summary
176
+ # ------------------------------------
177
+ # Make sure all processes have finished saving their samples
178
+ dist.barrier()
179
+ world_size = dist.get_world_size()
180
+ gather_psnr_val = [None for _ in range(world_size)]
181
+ gather_ssim_val = [None for _ in range(world_size)]
182
+ dist.all_gather_object(gather_psnr_val, psnr_val_rgb)
183
+ dist.all_gather_object(gather_ssim_val, ssim_val_rgb)
184
+
185
+ if rank == 0:
186
+ gather_psnr_val = list(itertools.chain(*gather_psnr_val))
187
+ gather_ssim_val = list(itertools.chain(*gather_ssim_val))
188
+ psnr_val_rgb = sum(gather_psnr_val) / len(gather_psnr_val)
189
+ ssim_val_rgb = sum(gather_ssim_val) / len(gather_ssim_val)
190
+ print("PSNR: %f, SSIM: %f " % (psnr_val_rgb, ssim_val_rgb))
191
+
192
+ result_file = f"{sample_folder_dir}_results.txt"
193
+ print("writing results to {}".format(result_file))
194
+ with open(result_file, 'w') as f:
195
+ print("PSNR: %f, SSIM: %f " % (psnr_val_rgb, ssim_val_rgb), file=f)
196
+
197
+ create_npz_from_sample_folder(sample_folder_dir, num_fid_samples)
198
+ print("Done.")
199
+
200
+ dist.barrier()
201
+ dist.destroy_process_group()
202
+
203
+
204
+ if __name__ == "__main__":
205
+ parser = argparse.ArgumentParser()
206
+ parser.add_argument("--data-path", type=str, required=True)
207
+ parser.add_argument("--dataset", type=str, choices=['imagenet', 'coco'], default='imagenet')
208
+ parser.add_argument("--vqgan", type=str, choices=list(VQGAN_FROM_TAMING.keys()), default="vqgan_imagenet_f16_16384")
209
+ parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
210
+ parser.add_argument("--sample-dir", type=str, default="reconstructions")
211
+ parser.add_argument("--per-proc-batch-size", type=int, default=32)
212
+ parser.add_argument("--global-seed", type=int, default=0)
213
+ parser.add_argument("--num-workers", type=int, default=4)
214
+ args = parser.parse_args()
215
+ main(args)
tokenizer/vqgan/taming_vqgan_demo.py ADDED
@@ -0,0 +1,68 @@
1
+ import argparse
2
+ import torch
3
+ import torch.nn.functional as F
4
+ import numpy as np
5
+ from PIL import Image
6
+ from omegaconf import OmegaConf
7
+ from tokenizer.vqgan.model import VQModel
8
+ from tokenizer.vqgan.model import VQGAN_FROM_TAMING
9
+
10
+ # before running demo, make sure to:
11
+ # (1) download all needed models from https://github.com/CompVis/taming-transformers and put in pretrained_models/
12
+ # (2) pip install pytorch_lightning
13
+ # (3) python3 tools/convert_pytorch_lightning_to_torch.py
14
+ # (4) pip uninstall pytorch_lightning
15
+
16
+
17
+ def main(args):
18
+ # Setup PyTorch:
19
+ torch.manual_seed(args.seed)
20
+ torch.set_grad_enabled(False)
21
+ device = "cuda" if torch.cuda.is_available() else "cpu"
22
+
23
+ # create and load model
24
+ cfg, ckpt = VQGAN_FROM_TAMING[args.vqgan]
25
+ config = OmegaConf.load(cfg)
26
+ model = VQModel(**config.model.get("params", dict()))
27
+ model.init_from_ckpt(ckpt)
28
+ model.to(device)
29
+ model.eval()
30
+
31
+ # load image
32
+ img_path = args.image_path
33
+ out_path = args.image_path.replace('.jpg', '_vqgan.jpg').replace('.jpeg', '_vqgan.jpeg').replace('.png', '_vqgan.png')
34
+ input_size = args.image_size
35
+ img = Image.open(img_path).convert("RGB")
36
+
37
+ # preprocess
38
+ size_org = img.size
39
+ img = img.resize((input_size, input_size))
40
+ img = np.array(img) / 255.
41
+ x = 2.0 * img - 1.0 # x value is between [-1, 1]
42
+ x = torch.tensor(x)
43
+ x = x.unsqueeze(dim=0)
44
+ x = torch.einsum('nhwc->nchw', x)
45
+ x_input = x.float().to(device)
46
+
47
+ # inference
48
+ with torch.no_grad():
49
+ latent, _, [_, _, indices] = model.encode(x_input)
50
+ output = model.decode_code(indices, latent.shape) # output value is between [-1, 1]
51
+
52
+ # postprocess
53
+ output = F.interpolate(output, size=[size_org[1], size_org[0]], mode='bilinear').permute(0, 2, 3, 1)[0]
54
+ sample = torch.clamp(127.5 * output + 128.0, 0, 255).to("cpu", dtype=torch.uint8).numpy()
55
+
56
+ # save
57
+ Image.fromarray(sample).save(out_path)
58
+ print("Reconstructed image is saved to {}".format(out_path))
59
+
60
+
61
+ if __name__ == "__main__":
62
+ parser = argparse.ArgumentParser()
63
+ parser.add_argument("--image-path", type=str, default="assets/example.jpg")
64
+ parser.add_argument("--vqgan", type=str, choices=list(VQGAN_FROM_TAMING.keys()), default="vqgan_openimage_f8_16384")
65
+ parser.add_argument("--image-size", type=int, choices=[256, 512, 1024], default=512)
66
+ parser.add_argument("--seed", type=int, default=0)
67
+ args = parser.parse_args()
68
+ main(args)
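The demo maps pixels from [0, 255] to [-1, 1] before encoding and inverts that with clamp(127.5 * x + 128.0, 0, 255) afterwards. A small standalone sketch (no model needed) checking that the two mappings round-trip within one gray level:

    import numpy as np
    import torch

    img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
    x = torch.from_numpy(2.0 * (img / 255.0) - 1.0)                  # [0, 255] -> [-1, 1]
    back = torch.clamp(127.5 * x + 128.0, 0, 255).to(torch.uint8).numpy()
    assert np.abs(back.astype(np.int16) - img.astype(np.int16)).max() <= 1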
tools/check_image_codes.py ADDED
@@ -0,0 +1,55 @@
1
+ import argparse
2
+ import torch
3
+ import numpy as np
4
+
5
+ from tokenizer.tokenizer_image.vq_model import VQ_models
6
+ from torchvision.utils import save_image
7
+
8
+
9
+ def main(args):
10
+ # Setup PyTorch:
11
+ torch.manual_seed(args.seed)
12
+ torch.set_grad_enabled(False)
13
+ device = "cuda" if torch.cuda.is_available() else "cpu"
14
+
15
+ # create and load model
16
+ vq_model = VQ_models[args.vq_model](
17
+ codebook_size=args.codebook_size,
18
+ codebook_embed_dim=args.codebook_embed_dim)
19
+ vq_model.to(device)
20
+ vq_model.eval()
21
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
22
+ vq_model.load_state_dict(checkpoint["model"])
23
+ del checkpoint
24
+
25
+ # load image code
26
+ latent_dim = args.codebook_embed_dim
27
+ latent_size = args.image_size // args.downsample_size
28
+ codes = torch.from_numpy(np.load(args.code_path)).to(device)
29
+ if codes.ndim == 3: # flip augmentation
30
+ qzshape = (codes.shape[1], latent_dim, latent_size, latent_size)
31
+ else:
32
+ qzshape = (1, latent_dim, latent_size, latent_size)
33
+ index_sample = codes.reshape(-1)
34
+ samples = vq_model.decode_code(index_sample, qzshape) # output value is between [-1, 1]
35
+
36
+ # save
37
+ out_path = "sample_image_code.png"
38
+ nrow = max(4, int(codes.shape[1]//2))
39
+ save_image(samples, out_path, nrow=nrow, normalize=True, value_range=(-1, 1))
40
+ print("Reconstructed image is saved to {}".format(out_path))
41
+
42
+
43
+
44
+ if __name__ == "__main__":
45
+ parser = argparse.ArgumentParser()
46
+ parser.add_argument("--code-path", type=str, required=True)
47
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
48
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
49
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
50
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
51
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512], default=256)
52
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
53
+ parser.add_argument("--seed", type=int, default=0)
54
+ args = parser.parse_args()
55
+ main(args)
tools/convert_pytorch_lightning_to_torch.py ADDED
@@ -0,0 +1,25 @@
1
+ import os
2
+ import torch
3
+
4
+ MODEL_PATH = 'pretrained_models'
5
+ pt_lightnings = [
6
+ 'vqgan_imagenet_f16_1024/ckpts/last.ckpt',
7
+ 'vqgan_imagenet_f16_16384/ckpts/last.ckpt',
8
+ 'vq-f8-n256/model.ckpt',
9
+ 'vq-f8/model.ckpt',
10
+ ]
11
+ pts = [
12
+ 'vqgan_imagenet_f16_1024/ckpts/last.pth',
13
+ 'vqgan_imagenet_f16_16384/ckpts/last.pth',
14
+ 'vq-f8-n256/model.pth',
15
+ 'vq-f8/model.pth',
16
+ ]
17
+
18
+ for pt_l, pt in zip(pt_lightnings, pts):
19
+ pt_l_weight = torch.load(os.path.join(MODEL_PATH, pt_l), map_location='cpu')
20
+ pt_weight = {
21
+ 'state_dict': pt_l_weight['state_dict']
22
+ }
23
+ pt_path = os.path.join(MODEL_PATH, pt)
24
+ torch.save(pt_weight, pt_path)
25
+ print(f'saving to {pt_path}')
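Only the state_dict entry is copied into the new .pth, so downstream code can open the converted file with plain torch.load and no pytorch_lightning import. A minimal sketch of consuming one of the converted checkpoints (the path is one of those listed above):

    import torch

    # load the converted checkpoint; only plain tensors remain inside
    ckpt = torch.load("pretrained_models/vq-f8/model.pth", map_location="cpu")
    state_dict = ckpt["state_dict"]
    print(f"{len(state_dict)} tensors, first key: {next(iter(state_dict))}")
    # this dict is what gets fed to load_state_dict / init_from_ckpt downstream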
tools/draw_figure.py ADDED
@@ -0,0 +1,141 @@
1
+ import matplotlib.pyplot as plt
2
+ import numpy as np
3
+
4
+ font_size = 14
5
+
6
+ def fid_scaling_law_no_cfg():
7
+ # data
8
+ steps = np.array([50, 100, 200, 300,])
9
+ loss_b = np.array([41.025, 33.442, 32.105, 32.196])
10
+ loss_l = np.array([25.889, 24.654, 19.742, 19.070])
11
+ loss_xl = np.array([19.820, 18.037, 14.772, 15.549])
12
+
13
+ steps_ = np.array([50, 200, 300,])
14
+ loss_xxl = np.array([17.195, 13.997, 14.648])
15
+ loss_3b = np.array([16.431, 9.949, 9.380])
16
+ # Plot
17
+ plt.figure(figsize=(6, 4))
18
+
19
+ plt.plot(steps, loss_b, 'o-', label='B', color='red')
20
+ plt.plot(steps, loss_l, 'o-', label='L', color='orange')
21
+ plt.plot(steps, loss_xl, 'o-', label='XL', color='green')
22
+ plt.plot(steps_, loss_xxl, 'o-', label='XXL', color='blue')
23
+ plt.plot(steps_, loss_3b, 'o-', label='3B', color='purple')
24
+
25
+ plt.xlabel('Training Epochs', fontsize=font_size)
26
+ plt.ylabel('FID', fontsize=font_size)
27
+ # plt.grid(True)
28
+ # plt.yscale('log')
29
+
30
+ # Customize the plot to match the appearance of the provided figure
31
+ plt.legend(loc='upper right', framealpha=0.5, fontsize=font_size, facecolor='white')
32
+
33
+ # Customizing the x and y axis ticks (to match the example's steps)
34
+ # plt.xticks(np.linspace(0, 800000, 5), ['0', '200K', '400K', '600K', '800K'])
35
+ plt.yticks(np.arange(5, 50, step=5))
36
+
37
+ # Show plot
38
+ plt.tight_layout()
39
+ plt.savefig('fid_scaling_law_no_cfg.png', dpi=600)
40
+
41
+
42
+
43
+ def fid_scaling_law_cfg():
44
+ # data
45
+ steps = np.array([50, 100, 200, 300,])
46
+ loss_b_cfg = np.array([8.309, 7.256, 6.542, 6.249])
47
+ loss_l_cfg = np.array([4.240, 3.705, 3.220, 3.075])
48
+ loss_xl_cfg = np.array([3.420, 3.089, 2.617, 2.629])
49
+
50
+ steps_ = np.array([50, 200, 300,])
51
+ loss_xxl_cfg = np.array([2.893, 2.331, 2.340])
52
+ loss_3b_cfg = np.array([2.611, 2.381, 2.329])
53
+ # Plot
54
+ plt.figure(figsize=(6, 4))
55
+
56
+ plt.plot(steps, loss_b_cfg, 'o-', label='B', color='red')
57
+ plt.plot(steps, loss_l_cfg, 'o-', label='L', color='orange')
58
+ plt.plot(steps, loss_xl_cfg, 'o-', label='XL', color='green')
59
+ plt.plot(steps_, loss_xxl_cfg, 'o-', label='XXL', color='blue')
60
+ plt.plot(steps_, loss_3b_cfg, 'o-', label='3B', color='purple')
61
+
62
+ plt.xlabel('Training Epochs', fontsize=font_size)
63
+ plt.ylabel('FID', fontsize=font_size)
64
+ # plt.grid(True)
65
+ # plt.yscale('log')
66
+
67
+ # Customize the plot to match the appearance of the provided figure
68
+ plt.legend(loc='upper right', framealpha=0.5, fontsize=font_size, facecolor='white')
69
+
70
+ # Customizing the x and y axis ticks (to match the example's steps)
71
+ # plt.xticks(np.linspace(0, 800000, 5), ['0', '200K', '400K', '600K', '800K'])
72
+ plt.yticks(np.arange(2, 9, step=1))
73
+
74
+ # Show plot
75
+ plt.tight_layout()
76
+ plt.savefig('fid_scaling_law_cfg.png', dpi=600)
77
+
78
+
79
+
80
+ def sample_topk():
81
+ # Data
82
+ top_k = np.array([16384, 10000, 8000, 6000, 4000, 2000, 1000])
83
+ fid_values = np.array([3.075, 3.369, 3.643, 3.969, 4.635, 5.998, 7.428])
84
+ inception_scores = np.array([256.067, 265.222, 268.237, 270.159, 271.455, 267.278, 251.268])
85
+
86
+ fig, ax1 = plt.subplots()
87
+ # Create first y-axis
88
+ ax1.set_xlabel('top-k', fontsize=font_size)
89
+ ax1.set_ylabel('FID', color='teal', fontsize=font_size)
90
+ ax1.plot(top_k, fid_values, 'o-', color='teal', label="FID")
91
+ ax1.tick_params(axis='y', labelcolor='teal')
92
+ ax1.tick_params(axis='x')
93
+
94
+ # Create second y-axis
95
+ ax2 = ax1.twinx()
96
+ ax2.set_ylabel('Inception Score', color='brown', fontsize=font_size)
97
+ ax2.plot(top_k, inception_scores, 'o-', color='brown', label="Inception Score")
98
+ ax2.tick_params(axis='y', labelcolor='brown')
99
+
100
+ # Adding a legend
101
+ fig.legend(loc='upper right', bbox_to_anchor=(1.0, 1.0), bbox_transform=ax1.transAxes, fontsize=font_size)
102
+
103
+ fig.tight_layout() # Adjust layout to prevent overlap
104
+ plt.savefig('effect_topk.png', dpi=600)
105
+
106
+
107
+
108
+ def sample_cfg():
109
+ # Data
110
+ cfg = np.array([1.5, 1.75, 2.00, 2.25])
111
+ fid_values = np.array([4.743, 3.151, 3.075, 3.620])
112
+ inception_scores = np.array([165.381, 214.152, 256.067, 291.695])
113
+
114
+ plt.figure(figsize=(10, 4))
115
+ fig, ax1 = plt.subplots()
116
+ # Create first y-axis
117
+ ax1.set_xlabel('cfg', fontsize=font_size)
118
+ ax1.set_ylabel('FID', color='teal', fontsize=font_size)
119
+ ax1.plot(cfg, fid_values, 'o-', color='teal', label="FID")
120
+ ax1.tick_params(axis='y', labelcolor='teal')
121
+ ax1.tick_params(axis='x')
122
+
123
+ # Create second y-axis
124
+ ax2 = ax1.twinx()
125
+ ax2.set_ylabel('Inception Score', color='brown', fontsize=font_size)
126
+ ax2.plot(cfg, inception_scores, 'o-', color='brown', label="Inception Score")
127
+ ax2.tick_params(axis='y', labelcolor='brown')
128
+
129
+ # Adding a legend
130
+ fig.legend(loc='upper right', bbox_to_anchor=(1.0, 1.0), bbox_transform=ax1.transAxes, fontsize=font_size)
131
+
132
+ fig.tight_layout() # Adjust layout to prevent overlap
133
+ plt.savefig('effect_cfg.png', dpi=600)
134
+
135
+
136
+
137
+ if __name__ == "__main__":
138
+ fid_scaling_law_no_cfg()
139
+ fid_scaling_law_cfg()
140
+ sample_cfg()
141
+ sample_topk()
tools/imagenet_en_cn.py ADDED
@@ -0,0 +1,1002 @@
1
+ IMAGENET_1K_CLASSES = {
2
+ 0: 'tench, Tinca tinca [丁鲷]',
3
+ 1: 'goldfish, Carassius auratus [金鱼]',
4
+ 2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias [大白鲨]',
5
+ 3: 'tiger shark, Galeocerdo cuvieri [虎鲨]',
6
+ 4: 'hammerhead, hammerhead shark [锤头鲨]',
7
+ 5: 'electric ray, crampfish, numbfish, torpedo [电鳐]',
8
+ 6: 'stingray [黄貂鱼]',
9
+ 7: 'cock [公鸡]',
10
+ 8: 'hen [母鸡]',
11
+ 9: 'ostrich, Struthio camelus [鸵鸟]',
12
+ 10: 'brambling, Fringilla montifringilla [燕雀]',
13
+ 11: 'goldfinch, Carduelis carduelis [金翅雀]',
14
+ 12: 'house finch, linnet, Carpodacus mexicanus [家朱雀]',
15
+ 13: 'junco, snowbird [灯芯草雀]',
16
+ 14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea [靛蓝雀,靛蓝鸟]',
17
+ 15: 'robin, American robin, Turdus migratorius [蓝鹀]',
18
+ 16: 'bulbul [夜莺]',
19
+ 17: 'jay [松鸦]',
20
+ 18: 'magpie [喜鹊]',
21
+ 19: 'chickadee [山雀]',
22
+ 20: 'water ouzel, dipper [河鸟]',
23
+ 21: 'kite [鸢(猛禽)]',
24
+ 22: 'bald eagle, American eagle, Haliaeetus leucocephalus [秃头鹰]',
25
+ 23: 'vulture [秃鹫]',
26
+ 24: 'great grey owl, great gray owl, Strix nebulosa [大灰猫头鹰]',
27
+ 25: 'European fire salamander, Salamandra salamandra [欧洲火蝾螈]',
28
+ 26: 'common newt, Triturus vulgaris [普通蝾螈]',
29
+ 27: 'eft [水蜥]',
30
+ 28: 'spotted salamander, Ambystoma maculatum [斑点蝾螈]',
31
+ 29: 'axolotl, mud puppy, Ambystoma mexicanum [蝾螈,泥狗]',
32
+ 30: 'bullfrog, Rana catesbeiana [牛蛙]',
33
+ 31: 'tree frog, tree-frog [树蛙]',
34
+ 32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui [尾蛙,铃蟾蜍,肋蟾蜍,尾蟾蜍]',
35
+ 33: 'loggerhead, loggerhead turtle, Caretta caretta [红海龟]',
36
+ 34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea [皮革龟]',
37
+ 35: 'mud turtle [泥龟]',
38
+ 36: 'terrapin [淡水龟]',
39
+ 37: 'box turtle, box tortoise [箱龟]',
40
+ 38: 'banded gecko [带状壁虎]',
41
+ 39: 'common iguana, iguana, Iguana iguana [普通鬣蜥]',
42
+ 40: 'American chameleon, anole, Anolis carolinensis [美国变色龙]',
43
+ 41: 'whiptail, whiptail lizard [鞭尾蜥蜴]',
44
+ 42: 'agama [飞龙科蜥蜴]',
45
+ 43: 'frilled lizard, Chlamydosaurus kingi [褶边蜥蜴]',
46
+ 44: 'alligator lizard [鳄鱼蜥蜴]',
47
+ 45: 'Gila monster, Heloderma suspectum [毒蜥]',
48
+ 46: 'green lizard, Lacerta viridis [绿蜥蜴]',
49
+ 47: 'African chameleon, Chamaeleo chamaeleon [非洲变色龙]',
50
+ 48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis [科莫多蜥蜴]',
51
+ 49: 'African crocodile, Nile crocodile, Crocodylus niloticus [非洲鳄,尼罗河鳄鱼]',
52
+ 50: 'American alligator, Alligator mississipiensis [美国鳄鱼,鳄鱼]',
53
+ 51: 'triceratops [三角龙]',
54
+ 52: 'thunder snake, worm snake, Carphophis amoenus [雷蛇,蠕虫蛇]',
55
+ 53: 'ringneck snake, ring-necked snake, ring snake [环蛇,环颈蛇]',
56
+ 54: 'hognose snake, puff adder, sand viper [希腊蛇]',
57
+ 55: 'green snake, grass snake [绿蛇,草蛇]',
58
+ 56: 'king snake, kingsnake [国王蛇]',
59
+ 57: 'garter snake, grass snake [袜带蛇,草蛇]',
60
+ 58: 'water snake [水蛇]',
61
+ 59: 'vine snake [藤蛇]',
62
+ 60: 'night snake, Hypsiglena torquata [夜蛇]',
63
+ 61: 'boa constrictor, Constrictor constrictor [大蟒蛇]',
64
+ 62: 'rock python, rock snake, Python sebae [岩石蟒蛇,岩蛇,蟒蛇]',
65
+ 63: 'Indian cobra, Naja naja [印度眼镜蛇]',
66
+ 64: 'green mamba [绿曼巴]',
67
+ 65: 'sea snake [海蛇]',
68
+ 66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus [角腹蛇]',
69
+ 67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus [菱纹响尾蛇]',
70
+ 68: 'sidewinder, horned rattlesnake, Crotalus cerastes [角响尾蛇]',
71
+ 69: 'trilobite [三叶虫]',
72
+ 70: 'harvestman, daddy longlegs, Phalangium opilio [盲蜘蛛]',
73
+ 71: 'scorpion [蝎子]',
74
+ 72: 'black and gold garden spider, Argiope aurantia [黑金花园蜘蛛]',
75
+ 73: 'barn spider, Araneus cavaticus [谷仓蜘蛛]',
76
+ 74: 'garden spider, Aranea diademata [花园蜘蛛]',
77
+ 75: 'black widow, Latrodectus mactans [黑寡妇蜘蛛]',
78
+ 76: 'tarantula [狼蛛]',
79
+ 77: 'wolf spider, hunting spider [狼蜘蛛,狩猎蜘蛛]',
80
+ 78: 'tick [壁虱]',
81
+ 79: 'centipede [蜈蚣]',
82
+ 80: 'black grouse [黑松鸡]',
83
+ 81: 'ptarmigan [松鸡,雷鸟]',
84
+ 82: 'ruffed grouse, partridge, Bonasa umbellus [披肩鸡,披肩榛鸡]',
85
+ 83: 'prairie chicken, prairie grouse, prairie fowl [草原鸡,草原松鸡]',
86
+ 84: 'peacock [孔雀]',
87
+ 85: 'quail [鹌鹑]',
88
+ 86: 'partridge [鹧鸪]',
89
+ 87: 'African grey, African gray, Psittacus erithacus [非洲灰鹦鹉]',
90
+ 88: 'macaw [金刚鹦鹉]',
91
+ 89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita [硫冠鹦鹉]',
92
+ 90: 'lorikeet [短尾鹦鹉]',
93
+ 91: 'coucal [褐翅鸦鹃]',
94
+ 92: 'bee eater [蜜蜂]',
95
+ 93: 'hornbill [犀鸟]',
96
+ 94: 'hummingbird [蜂鸟]',
97
+ 95: 'jacamar [鹟䴕]',
98
+ 96: 'toucan [犀鸟]',
99
+ 97: 'drake [野鸭]',
100
+ 98: 'red-breasted merganser, Mergus serrator [红胸秋沙鸭]',
101
+ 99: 'goose [鹅]',
102
+ 100: 'black swan, Cygnus atratus [黑天鹅]',
103
+ 101: 'tusker [大象]',
104
+ 102: 'echidna, spiny anteater, anteater [针鼹鼠]',
105
+ 103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus [鸭嘴兽]',
106
+ 104: 'wallaby, brush kangaroo [沙袋鼠]',
107
+ 105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus [考拉,考拉熊]',
108
+ 106: 'wombat [袋熊]',
109
+ 107: 'jellyfish [水母]',
110
+ 108: 'sea anemone, anemone [海葵]',
111
+ 109: 'brain coral [脑珊瑚]',
112
+ 110: 'flatworm, platyhelminth [扁形虫扁虫]',
113
+ 111: 'nematode, nematode worm, roundworm [线虫,蛔虫]',
114
+ 112: 'conch [海螺]',
115
+ 113: 'snail [蜗牛]',
116
+ 114: 'slug [鼻涕虫]',
117
+ 115: 'sea slug, nudibranch [海参]',
118
+ 116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore [石鳖]',
119
+ 117: 'chambered nautilus, pearly nautilus, nautilus [鹦鹉螺]',
120
+ 118: 'Dungeness crab, Cancer magister [珍宝蟹]',
121
+ 119: 'rock crab, Cancer irroratus [石蟹]',
122
+ 120: 'fiddler crab [招潮蟹]',
123
+ 121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica [帝王蟹,阿拉斯加蟹,阿拉斯加帝王蟹]',
124
+ 122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus [美国龙虾,缅因州龙虾]',
125
+ 123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish [大螯虾]',
126
+ 124: 'crayfish, crawfish, crawdad, crawdaddy [小龙虾]',
127
+ 125: 'hermit crab [寄居蟹]',
128
+ 126: 'isopod [等足目动物(明虾和螃蟹近亲)]',
129
+ 127: 'white stork, Ciconia ciconia [白鹳]',
130
+ 128: 'black stork, Ciconia nigra [黑鹳]',
131
+ 129: 'spoonbill [鹭]',
132
+ 130: 'flamingo [火烈鸟]',
133
+ 131: 'little blue heron, Egretta caerulea [小蓝鹭]',
134
+ 132: 'American egret, great white heron, Egretta albus [美国鹭,大白鹭]',
135
+ 133: 'bittern [麻鸦]',
136
+ 134: 'crane [鹤]',
137
+ 135: 'limpkin, Aramus pictus [秧鹤]',
138
+ 136: 'European gallinule, Porphyrio porphyrio [欧洲水鸡,紫水鸡]',
139
+ 137: 'American coot, marsh hen, mud hen, water hen, Fulica americana [沼泽泥母鸡,水母鸡]',
140
+ 138: 'bustard [鸨]',
141
+ 139: 'ruddy turnstone, Arenaria interpres [红翻石鹬]',
142
+ 140: 'red-backed sandpiper, dunlin, Erolia alpina [红背鹬,黑腹滨鹬]',
143
+ 141: 'redshank, Tringa totanus [红脚鹬]',
144
+ 142: 'dowitcher [半蹼鹬]',
145
+ 143: 'oystercatcher, oyster catcher [蛎鹬]',
146
+ 144: 'pelican [鹈鹕]',
147
+ 145: 'king penguin, Aptenodytes patagonica [国王企鹅]',
148
+ 146: 'albatross, mollymawk [信天翁,大海鸟]',
149
+ 147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus [灰鲸]',
150
+ 148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca [杀人鲸,逆戟鲸,虎鲸]',
151
+ 149: 'dugong, Dugong dugon [海牛]',
152
+ 150: 'sea lion [海狮]',
153
+ 151: 'Chihuahua [奇瓦瓦]',
154
+ 152: 'Japanese spaniel [日本猎犬]',
155
+ 153: 'Maltese dog, Maltese terrier, Maltese [马尔济斯犬]',
156
+ 154: 'Pekinese, Pekingese, Peke [狮子狗]',
157
+ 155: 'Shih-Tzu [西施犬]',
158
+ 156: 'Blenheim spaniel [布莱尼姆猎犬]',
159
+ 157: 'papillon [巴比狗]',
160
+ 158: 'toy terrier [玩具犬]',
161
+ 159: 'Rhodesian ridgeback [罗得西亚长背猎狗]',
162
+ 160: 'Afghan hound, Afghan [阿富汗猎犬]',
163
+ 161: 'basset, basset hound [猎犬]',
164
+ 162: 'beagle [比格犬,猎兔犬]',
165
+ 163: 'bloodhound, sleuthhound [侦探犬]',
166
+ 164: 'bluetick [蓝色快狗]',
167
+ 165: 'black-and-tan coonhound [黑褐猎浣熊犬]',
168
+ 166: 'Walker hound, Walker foxhound [沃克猎犬]',
169
+ 167: 'English foxhound [英国猎狐犬]',
170
+ 168: 'redbone [美洲赤狗]',
171
+ 169: 'borzoi, Russian wolfhound [俄罗斯猎狼犬]',
172
+ 170: 'Irish wolfhound [爱尔兰猎狼犬]',
173
+ 171: 'Italian greyhound [意大利灰狗]',
174
+ 172: 'whippet [惠比特犬]',
175
+ 173: 'Ibizan hound, Ibizan Podenco [依比沙猎犬]',
176
+ 174: 'Norwegian elkhound, elkhound [挪威猎犬]',
177
+ 175: 'otterhound, otter hound [奥达猎犬,水獭猎犬]',
178
+ 176: 'Saluki, gazelle hound [沙克犬,瞪羚猎犬]',
179
+ 177: 'Scottish deerhound, deerhound [苏格兰猎鹿犬,猎鹿犬]',
180
+ 178: 'Weimaraner [威玛猎犬]',
181
+ 179: 'Staffordshire bullterrier, Staffordshire bull terrier [斯塔福德郡牛头梗,斯塔福德郡斗牛梗]',
182
+ 180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier [美国斯塔福德郡梗,美国比特斗牛梗,斗牛梗]',
183
+ 181: 'Bedlington terrier [贝德灵顿梗]',
184
+ 182: 'Border terrier [边境梗]',
185
+ 183: 'Kerry blue terrier [凯丽蓝梗]',
186
+ 184: 'Irish terrier [爱尔兰梗]',
187
+ 185: 'Norfolk terrier [诺福克梗]',
188
+ 186: 'Norwich terrier [诺维奇梗]',
189
+ 187: 'Yorkshire terrier [约克郡梗]',
190
+ 188: 'wire-haired fox terrier [刚毛猎狐梗]',
191
+ 189: 'Lakeland terrier [莱克兰梗]',
192
+ 190: 'Sealyham terrier, Sealyham [锡利哈姆梗]',
193
+ 191: 'Airedale, Airedale terrier [艾尔谷犬]',
194
+ 192: 'cairn, cairn terrier [凯恩梗]',
195
+ 193: 'Australian terrier [澳大利亚梗]',
196
+ 194: 'Dandie Dinmont, Dandie Dinmont terrier [丹迪丁蒙梗]',
197
+ 195: 'Boston bull, Boston terrier [波士顿梗]',
198
+ 196: 'miniature schnauzer [迷你雪纳瑞犬]',
199
+ 197: 'giant schnauzer [巨型雪纳瑞犬]',
200
+ 198: 'standard schnauzer [标准雪纳瑞犬]',
201
+ 199: 'Scotch terrier, Scottish terrier, Scottie [苏格兰梗]',
202
+ 200: 'Tibetan terrier, chrysanthemum dog [西藏梗,菊花狗]',
203
+ 201: 'silky terrier, Sydney silky [丝毛梗]',
204
+ 202: 'soft-coated wheaten terrier [软毛麦色梗]',
205
+ 203: 'West Highland white terrier [西高地白梗]',
206
+ 204: 'Lhasa, Lhasa apso [拉萨阿普索犬]',
207
+ 205: 'flat-coated retriever [平毛寻回犬]',
208
+ 206: 'curly-coated retriever [卷毛寻回犬]',
209
+ 207: 'golden retriever [金毛猎犬]',
210
+ 208: 'Labrador retriever [拉布拉多猎犬]',
211
+ 209: 'Chesapeake Bay retriever [乞沙比克猎犬]',
212
+ 210: 'German short-haired pointer [德国短毛猎犬]',
213
+ 211: 'vizsla, Hungarian pointer [维兹拉犬]',
214
+ 212: 'English setter [英国谍犬]',
215
+ 213: 'Irish setter, red setter [爱尔兰雪达犬,红色猎犬]',
216
+ 214: 'Gordon setter [戈登雪达犬]',
217
+ 215: 'Brittany spaniel [布列塔尼犬猎犬]',
218
+ 216: 'clumber, clumber spaniel [黄毛,黄毛猎犬]',
219
+ 217: 'English springer, English springer spaniel [英国史宾格犬]',
220
+ 218: 'Welsh springer spaniel [威尔士史宾格犬]',
221
+ 219: 'cocker spaniel, English cocker spaniel, cocker [可卡犬,英国可卡犬]',
222
+ 220: 'Sussex spaniel [萨塞克斯猎犬]',
223
+ 221: 'Irish water spaniel [爱尔兰水猎犬]',
224
+ 222: 'kuvasz [哥威斯犬]',
225
+ 223: 'schipperke [舒柏奇犬]',
226
+ 224: 'groenendael [比利时牧羊犬]',
227
+ 225: 'malinois [马里努阿犬]',
228
+ 226: 'briard [伯瑞犬]',
229
+ 227: 'kelpie [凯尔皮犬]',
230
+ 228: 'komondor [匈牙利牧羊犬]',
231
+ 229: 'Old English sheepdog, bobtail [老英国牧羊犬]',
232
+ 230: 'Shetland sheepdog, Shetland sheep dog, Shetland [喜乐蒂牧羊犬]',
233
+ 231: 'collie [牧羊犬]',
234
+ 232: 'Border collie [边境牧羊犬]',
235
+ 233: 'Bouvier des Flandres, Bouviers des Flandres [法兰德斯牧牛狗]',
236
+ 234: 'Rottweiler [罗特韦尔犬]',
237
+ 235: 'German shepherd, German shepherd dog, German police dog, alsatian [德国牧羊犬,德国警犬,阿尔萨斯]',
238
+ 236: 'Doberman, Doberman pinscher [多伯曼犬,杜宾犬]',
239
+ 237: 'miniature pinscher [迷你杜宾犬]',
240
+ 238: 'Greater Swiss Mountain dog [大瑞士山地犬]',
241
+ 239: 'Bernese mountain dog [伯恩山犬]',
242
+ 240: 'Appenzeller [Appenzeller狗]',
243
+ 241: 'EntleBucher [EntleBucher狗]',
244
+ 242: 'boxer [拳师狗]',
245
+ 243: 'bull mastiff [斗牛獒]',
246
+ 244: 'Tibetan mastiff [藏獒]',
247
+ 245: 'French bulldog [法国斗牛犬]',
248
+ 246: 'Great Dane [大丹犬]',
249
+ 247: 'Saint Bernard, St Bernard [圣伯纳德狗]',
250
+ 248: 'Eskimo dog, husky [爱斯基摩犬,哈士奇]',
251
+ 249: 'malamute, malemute, Alaskan malamute [雪橇犬,阿拉斯加爱斯基摩狗]',
252
+ 250: 'Siberian husky [哈士奇]',
253
+ 251: 'dalmatian, coach dog, carriage dog [达尔马提亚,教练车狗]',
254
+ 252: 'affenpinscher, monkey pinscher, monkey dog [狮毛狗]',
255
+ 253: 'basenji [巴辛吉狗]',
256
+ 254: 'pug, pug-dog [哈巴狗,狮子狗]',
257
+ 255: 'Leonberg [莱昂贝格狗]',
258
+ 256: 'Newfoundland, Newfoundland dog [纽芬兰岛狗]',
259
+ 257: 'Great Pyrenees [大白熊犬]',
260
+ 258: 'Samoyed, Samoyede [萨摩耶犬]',
261
+ 259: 'Pomeranian [博美犬]',
262
+ 260: 'chow, chow chow [松狮,松狮]',
263
+ 261: 'keeshond [荷兰卷尾狮毛狗]',
264
+ 262: 'Brabancon griffon [布鲁塞尔格林芬犬]',
265
+ 263: 'Pembroke, Pembroke Welsh corgi [彭布洛克威尔士科基犬]',
266
+ 264: 'Cardigan, Cardigan Welsh corgi [威尔士柯基犬]',
267
+ 265: 'toy poodle [玩具贵宾犬]',
268
+ 266: 'miniature poodle [迷你贵宾犬]',
269
+ 267: 'standard poodle [标准贵宾犬]',
270
+ 268: 'Mexican hairless [墨西哥无毛犬]',
271
+ 269: 'timber wolf, grey wolf, gray wolf, Canis lupus [灰狼]',
272
+ 270: 'white wolf, Arctic wolf, Canis lupus tundrarum [白狼,北极狼]',
273
+ 271: 'red wolf, maned wolf, Canis rufus, Canis niger [红太狼,鬃狼,犬犬鲁弗斯]',
274
+ 272: 'coyote, prairie wolf, brush wolf, Canis latrans [狼,草原狼,刷狼,郊狼]',
275
+ 273: 'dingo, warrigal, warragal, Canis dingo [澳洲野狗,澳大利亚野犬]',
276
+ 274: 'dhole, Cuon alpinus [豺]',
277
+ 275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus [非洲猎犬,土狼犬]',
278
+ 276: 'hyena, hyaena [鬣狗]',
279
+ 277: 'red fox, Vulpes vulpes [红狐狸]',
280
+ 278: 'kit fox, Vulpes macrotis [沙狐]',
281
+ 279: 'Arctic fox, white fox, Alopex lagopus [北极狐狸,白狐狸]',
282
+ 280: 'grey fox, gray fox, Urocyon cinereoargenteus [灰狐狸]',
283
+ 281: 'tabby, tabby cat [虎斑猫]',
284
+ 282: 'tiger cat [山猫,虎猫]',
285
+ 283: 'Persian cat [波斯猫]',
286
+ 284: 'Siamese cat, Siamese [暹罗暹罗猫,]',
287
+ 285: 'Egyptian cat [埃及猫]',
288
+ 286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor [美洲狮,美洲豹]',
289
+ 287: 'lynx, catamount [猞猁,山猫]',
290
+ 288: 'leopard, Panthera pardus [豹子]',
291
+ 289: 'snow leopard, ounce, Panthera uncia [雪豹]',
292
+ 290: 'jaguar, panther, Panthera onca, Felis onca [美洲虎]',
293
+ 291: 'lion, king of beasts, Panthera leo [狮子]',
294
+ 292: 'tiger, Panthera tigris [老虎]',
295
+ 293: 'cheetah, chetah, Acinonyx jubatus [猎豹]',
296
+ 294: 'brown bear, bruin, Ursus arctos [棕熊]',
297
+ 295: 'American black bear, black bear, Ursus americanus, Euarctos americanus [美洲黑熊]',
298
+ 296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus [冰熊,北极熊]',
299
+ 297: 'sloth bear, Melursus ursinus, Ursus ursinus [懒熊]',
300
+ 298: 'mongoose [猫鼬]',
301
+ 299: 'meerkat, mierkat [猫鼬,海猫]',
302
+ 300: 'tiger beetle [虎甲虫]',
303
+ 301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle [瓢虫]',
304
+ 302: 'ground beetle, carabid beetle [土鳖虫]',
305
+ 303: 'long-horned beetle, longicorn, longicorn beetle [天牛]',
306
+ 304: 'leaf beetle, chrysomelid [龟甲虫]',
307
+ 305: 'dung beetle [粪甲虫]',
308
+ 306: 'rhinoceros beetle [犀牛甲虫]',
309
+ 307: 'weevil [象甲]',
310
+ 308: 'fly [苍蝇]',
311
+ 309: 'bee [蜜蜂]',
312
+ 310: 'ant, emmet, pismire [蚂蚁]',
313
+ 311: 'grasshopper, hopper [蚱蜢]',
314
+ 312: 'cricket [蟋蟀]',
315
+ 313: 'walking stick, walkingstick, stick insect [竹节虫]',
316
+ 314: 'cockroach, roach [蟑螂]',
317
+ 315: 'mantis, mantid [螳螂]',
318
+ 316: 'cicada, cicala [蝉]',
319
+ 317: 'leafhopper [叶蝉]',
320
+ 318: 'lacewing, lacewing fly [草蜻蛉]',
321
+ 319: 'dragonfly, darning needle, devils darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk [蜻蜓]',
322
+ 320: 'damselfly [豆娘,蜻蛉]',
323
+ 321: 'admiral [优红蛱蝶]',
324
+ 322: 'ringlet, ringlet butterfly [小环蝴蝶]',
325
+ 323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus [君主蝴蝶,大斑蝶]',
326
+ 324: 'cabbage butterfly [菜粉蝶]',
327
+ 325: 'sulphur butterfly, sulfur butterfly [白蝴蝶]',
328
+ 326: 'lycaenid, lycaenid butterfly [灰蝶]',
329
+ 327: 'starfish, sea star [海星]',
330
+ 328: 'sea urchin [海胆]',
331
+ 329: 'sea cucumber, holothurian [海参,海黄瓜]',
332
+ 330: 'wood rabbit, cottontail, cottontail rabbit [野兔]',
333
+ 331: 'hare [兔]',
334
+ 332: 'Angora, Angora rabbit [安哥拉兔]',
335
+ 333: 'hamster [仓鼠]',
336
+ 334: 'porcupine, hedgehog [刺猬,豪猪,]',
337
+ 335: 'fox squirrel, eastern fox squirrel, Sciurus niger [黑松鼠]',
338
+ 336: 'marmot [土拨鼠]',
339
+ 337: 'beaver [海狸]',
340
+ 338: 'guinea pig, Cavia cobaya [豚鼠,豚鼠]',
341
+ 339: 'sorrel [栗色马]',
342
+ 340: 'zebra [斑马]',
343
+ 341: 'hog, pig, grunter, squealer, Sus scrofa [猪]',
344
+ 342: 'wild boar, boar, Sus scrofa [野猪]',
345
+ 343: 'warthog [疣猪]',
346
+ 344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius [河马]',
347
+ 345: 'ox [牛]',
348
+ 346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis [水牛,亚洲水牛]',
349
+ 347: 'bison [野牛]',
350
+ 348: 'ram, tup [公羊]',
351
+ 349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis [大角羊,洛矶山大角羊]',
352
+ 350: 'ibex, Capra ibex [山羊]',
353
+ 351: 'hartebeest [狷羚]',
354
+ 352: 'impala, Aepyceros melampus [黑斑羚]',
355
+ 353: 'gazelle [瞪羚]',
356
+ 354: 'Arabian camel, dromedary, Camelus dromedarius [阿拉伯单峰骆驼,骆驼]',
357
+ 355: 'llama [骆驼]',
358
+ 356: 'weasel [黄鼠狼]',
359
+ 357: 'mink [水貂]',
360
+ 358: 'polecat, fitch, foulmart, foumart, Mustela putorius [臭猫]',
361
+ 359: 'black-footed ferret, ferret, Mustela nigripes [黑足鼬]',
362
+ 360: 'otter [水獭]',
363
+ 361: 'skunk, polecat, wood pussy [臭鼬,木猫]',
364
+ 362: 'badger [獾]',
365
+ 363: 'armadillo [犰狳]',
366
+ 364: 'three-toed sloth, ai, Bradypus tridactylus [树懒]',
367
+ 365: 'orangutan, orang, orangutang, Pongo pygmaeus [猩猩,婆罗洲猩猩]',
368
+ 366: 'gorilla, Gorilla gorilla [大猩猩]',
369
+ 367: 'chimpanzee, chimp, Pan troglodytes [黑猩猩]',
370
+ 368: 'gibbon, Hylobates lar [长臂猿]',
371
+ 369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus [合趾猿长臂猿,合趾猿]',
372
+ 370: 'guenon, guenon monkey [长尾猴]',
373
+ 371: 'patas, hussar monkey, Erythrocebus patas [赤猴]',
374
+ 372: 'baboon [狒狒]',
375
+ 373: 'macaque [恒河猴,猕猴]',
376
+ 374: 'langur [白头叶猴]',
377
+ 375: 'colobus, colobus monkey [疣猴]',
378
+ 376: 'proboscis monkey, Nasalis larvatus [长鼻猴]',
379
+ 377: 'marmoset [狨(美洲产小型长尾猴)]',
380
+ 378: 'capuchin, ringtail, Cebus capucinus [卷尾猴]',
381
+ 379: 'howler monkey, howler [吼猴]',
382
+ 380: 'titi, titi monkey [伶猴]',
383
+ 381: 'spider monkey, Ateles geoffroyi [蜘蛛猴]',
384
+ 382: 'squirrel monkey, Saimiri sciureus [松鼠猴]',
385
+ 383: 'Madagascar cat, ring-tailed lemur, Lemur catta [马达加斯加环尾狐猴,鼠狐猴]',
386
+ 384: 'indri, indris, Indri indri, Indri brevicaudatus [大狐猴,马达加斯加大狐猴]',
387
+ 385: 'Indian elephant, Elephas maximus [印度大象,亚洲象]',
388
+ 386: 'African elephant, Loxodonta africana [非洲象,非洲象]',
389
+ 387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens [小熊猫]',
390
+ 388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca [大熊猫]',
391
+ 389: 'barracouta, snoek [杖鱼]',
392
+ 390: 'eel [鳗鱼]',
393
+ 391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch [银鲑,银鲑鱼]',
394
+ 392: 'rock beauty, Holocanthus tricolor [三色刺蝶鱼]',
395
+ 393: 'anemone fish [海葵鱼]',
396
+ 394: 'sturgeon [鲟鱼]',
397
+ 395: 'gar, garfish, garpike, billfish, Lepisosteus osseus [雀鳝]',
398
+ 396: 'lionfish [狮子鱼]',
399
+ 397: 'puffer, pufferfish, blowfish, globefish [河豚]',
400
+ 398: 'abacus [算盘]',
401
+ 399: 'abaya [长袍]',
402
+ 400: 'academic gown, academic robe, judge robe [学位袍]',
403
+ 401: 'accordion, piano accordion, squeeze box [手风琴]',
404
+ 402: 'acoustic guitar [原声吉他]',
405
+ 403: 'aircraft carrier, carrier, flattop, attack aircraft carrier [航空母舰]',
406
+ 404: 'airliner [客机]',
407
+ 405: 'airship, dirigible [飞艇]',
408
+ 406: 'altar [祭坛]',
409
+ 407: 'ambulance [救护车]',
410
+ 408: 'amphibian, amphibious vehicle [水陆两用车]',
411
+ 409: 'analog clock [模拟时钟]',
412
+ 410: 'apiary, bee house [蜂房]',
413
+ 411: 'apron [围裙]',
414
+ 412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin [垃圾桶]',
415
+ 413: 'assault rifle, assault gun [攻击步枪,枪]',
416
+ 414: 'backpack, back pack, knapsack, packsack, rucksack, haversack [背包]',
417
+ 415: 'bakery, bakeshop, bakehouse [面包店,面包铺,]',
418
+ 416: 'balance beam, beam [平衡木]',
419
+ 417: 'balloon [热气球]',
420
+ 418: 'ballpoint, ballpoint pen, ballpen, Biro [圆珠笔]',
421
+ 419: 'Band Aid [创可贴]',
422
+ 420: 'banjo [班卓琴]',
423
+ 421: 'bannister, banister, balustrade, balusters, handrail [栏杆,楼梯扶手]',
424
+ 422: 'barbell [杠铃]',
425
+ 423: 'barber chair [理发师的椅子]',
426
+ 424: 'barbershop [理发店]',
427
+ 425: 'barn [牲口棚]',
428
+ 426: 'barometer [晴雨表]',
429
+ 427: 'barrel, cask [圆筒]',
430
+ 428: 'barrow, garden cart, lawn cart, wheelbarrow [园地小车,手推车]',
431
+ 429: 'baseball [棒球]',
432
+ 430: 'basketball [篮球]',
433
+ 431: 'bassinet [婴儿床]',
434
+ 432: 'bassoon [巴松管,低音管]',
435
+ 433: 'bathing cap, swimming cap [游泳帽]',
436
+ 434: 'bath towel [沐浴毛巾]',
437
+ 435: 'bathtub, bathing tub, bath, tub [浴缸,澡盆]',
438
+ 436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon [沙滩车,旅行车]',
439
+ 437: 'beacon, lighthouse, beacon light, pharos [灯塔]',
440
+ 438: 'beaker [高脚杯]',
441
+ 439: 'bearskin, busby, shako [熊皮高帽]',
442
+ 440: 'beer bottle [啤酒瓶]',
443
+ 441: 'beer glass [啤酒杯]',
444
+ 442: 'bell cote, bell cot [钟塔]',
445
+ 443: 'bib [(小儿用的)围嘴]',
446
+ 444: 'bicycle-built-for-two, tandem bicycle, tandem [串联自行车,]',
447
+ 445: 'bikini, two-piece [比基尼]',
448
+ 446: 'binder, ring-binder [装订册]',
449
+ 447: 'binoculars, field glasses, opera glasses [双筒望远镜]',
450
+ 448: 'birdhouse [鸟舍]',
451
+ 449: 'boathouse [船库]',
452
+ 450: 'bobsled, bobsleigh, bob [雪橇]',
453
+ 451: 'bolo tie, bolo, bola tie, bola [饰扣式领带]',
454
+ 452: 'bonnet, poke bonnet [阔边女帽]',
455
+ 453: 'bookcase [书橱]',
456
+ 454: 'bookshop, bookstore, bookstall [书店,书摊]',
457
+ 455: 'bottlecap [瓶盖]',
458
+ 456: 'bow [弓箭]',
459
+ 457: 'bow tie, bow-tie, bowtie [蝴蝶结领结]',
460
+ 458: 'brass, memorial tablet, plaque [铜制牌位]',
461
+ 459: 'brassiere, bra, bandeau [奶罩]',
462
+ 460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty [防波堤,海堤]',
463
+ 461: 'breastplate, aegis, egis [铠甲]',
464
+ 462: 'broom [扫帚]',
465
+ 463: 'bucket, pail [桶]',
466
+ 464: 'buckle [扣环]',
467
+ 465: 'bulletproof vest [防弹背心]',
468
+ 466: 'bullet train, bullet [动车,子弹头列车]',
469
+ 467: 'butcher shop, meat market [肉铺,肉菜市场]',
470
+ 468: 'cab, hack, taxi, taxicab [出租车]',
471
+ 469: 'caldron, cauldron [大锅]',
472
+ 470: 'candle, taper, wax light [蜡烛]',
473
+ 471: 'cannon [大炮]',
474
+ 472: 'canoe [独木舟]',
475
+ 473: 'can opener, tin opener [开瓶器,开罐器]',
476
+ 474: 'cardigan [开衫]',
477
+ 475: 'car mirror [车镜]',
478
+ 476: 'carousel, carrousel, merry-go-round, roundabout, whirligig [旋转木马]',
479
+ 477: 'carpenters kit, tool kit [木匠的工具包,工具包]',
480
+ 478: 'carton [纸箱]',
481
+ 479: 'car wheel [车轮]',
482
+ 480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM [取款机,自动取款机]',
483
+ 481: 'cassette [盒式录音带]',
484
+ 482: 'cassette player [卡带播放器]',
485
+ 483: 'castle [城堡]',
486
+ 484: 'catamaran [双体船]',
487
+ 485: 'CD player [CD播放器]',
488
+ 486: 'cello, violoncello [大提琴]',
489
+ 487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone [移动电话,手机]',
490
+ 488: 'chain [铁链]',
491
+ 489: 'chainlink fence [围栏]',
492
+ 490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour [链甲]',
493
+ 491: 'chain saw, chainsaw [电锯,油锯]',
494
+ 492: 'chest [箱子]',
495
+ 493: 'chiffonier, commode [衣柜,洗脸台]',
496
+ 494: 'chime, bell, gong [编钟,钟,锣]',
497
+ 495: 'china cabinet, china closet [中国橱柜]',
498
+ 496: 'Christmas stocking [圣诞袜]',
499
+ 497: 'church, church building [教堂,教堂建筑]',
500
+ 498: 'cinema, movie theater, movie theatre, movie house, picture palace [电影院,剧场]',
501
+ 499: 'cleaver, meat cleaver, chopper [切肉刀,菜刀]',
502
+ 500: 'cliff dwelling [悬崖屋]',
503
+ 501: 'cloak [斗篷]',
504
+ 502: 'clog, geta, patten, sabot [木屐,木鞋]',
505
+ 503: 'cocktail shaker [鸡尾酒调酒器]',
506
+ 504: 'coffee mug [咖啡杯]',
507
+ 505: 'coffeepot [咖啡壶]',
508
+ 506: 'coil, spiral, volute, whorl, helix [螺旋结构(楼梯)]',
509
+ 507: 'combination lock [组合锁]',
510
+ 508: 'computer keyboard, keypad [电脑键盘,键盘]',
511
+ 509: 'confectionery, confectionary, candy store [糖果,糖果店]',
512
+ 510: 'container ship, containership, container vessel [集装箱船]',
513
+ 511: 'convertible [敞篷车]',
514
+ 512: 'corkscrew, bottle screw [开瓶器,瓶螺杆]',
515
+ 513: 'cornet, horn, trumpet, trump [短号,喇叭]',
516
+ 514: 'cowboy boot [牛仔靴]',
517
+ 515: 'cowboy hat, ten-gallon hat [牛仔帽]',
518
+ 516: 'cradle [摇篮]',
519
+ 517: 'crane [起重机]',
520
+ 518: 'crash helmet [头盔]',
521
+ 519: 'crate [板条箱]',
522
+ 520: 'crib, cot [小儿床]',
523
+ 521: 'Crock Pot [砂锅]',
524
+ 522: 'croquet ball [槌球]',
525
+ 523: 'crutch [拐杖]',
526
+ 524: 'cuirass [胸甲]',
527
+ 525: 'dam, dike, dyke [大坝,堤防]',
528
+ 526: 'desk [书桌]',
529
+ 527: 'desktop computer [台式电脑]',
530
+ 528: 'dial telephone, dial phone [有线电话]',
531
+ 529: 'diaper, nappy, napkin [尿布湿]',
532
+ 530: 'digital clock [数字时钟]',
533
+ 531: 'digital watch [数字手表]',
534
+ 532: 'dining table, board [餐桌板]',
535
+ 533: 'dishrag, dishcloth [抹布]',
536
+ 534: 'dishwasher, dish washer, dishwashing machine [洗碗机,洗碟机]',
537
+ 535: 'disk brake, disc brake [盘式制动器]',
538
+ 536: 'dock, dockage, docking facility [码头,船坞,码头设施]',
539
+ 537: 'dogsled, dog sled, dog sleigh [狗拉雪橇]',
540
+ 538: 'dome [圆顶]',
541
+ 539: 'doormat, welcome mat [门垫,垫子]',
542
+ 540: 'drilling platform, offshore rig [钻井平台,海上钻井]',
543
+ 541: 'drum, membranophone, tympan [鼓,乐器,鼓膜]',
544
+ 542: 'drumstick [鼓槌]',
545
+ 543: 'dumbbell [哑铃]',
546
+ 544: 'Dutch oven [荷兰烤箱]',
547
+ 545: 'electric fan, blower [电风扇,鼓风机]',
548
+ 546: 'electric guitar [电吉他]',
549
+ 547: 'electric locomotive [电力机车]',
550
+ 548: 'entertainment center [电视,电视柜]',
551
+ 549: 'envelope [信封]',
552
+ 550: 'espresso maker [浓缩咖啡机]',
553
+ 551: 'face powder [扑面粉]',
554
+ 552: 'feather boa, boa [女用长围巾]',
555
+ 553: 'file, file cabinet, filing cabinet [文件,文件柜,档案柜]',
556
+ 554: 'fireboat [消防船]',
557
+ 555: 'fire engine, fire truck [消防车]',
558
+ 556: 'fire screen, fireguard [火炉栏]',
559
+ 557: 'flagpole, flagstaff [旗杆]',
560
+ 558: 'flute, transverse flute [长笛]',
561
+ 559: 'folding chair [折叠椅]',
562
+ 560: 'football helmet [橄榄球头盔]',
563
+ 561: 'forklift [叉车]',
564
+ 562: 'fountain [喷泉]',
565
+ 563: 'fountain pen [钢笔]',
566
+ 564: 'four-poster [有四根帷柱的床]',
567
+ 565: 'freight car [运货车厢]',
568
+ 566: 'French horn, horn [圆号,喇叭]',
569
+ 567: 'frying pan, frypan, skillet [煎锅]',
570
+ 568: 'fur coat [裘皮大衣]',
571
+ 569: 'garbage truck, dustcart [垃圾车]',
572
+ 570: 'gasmask, respirator, gas helmet [防毒面具,呼吸器]',
573
+ 571: 'gas pump, gasoline pump, petrol pump, island dispenser [汽油泵]',
574
+ 572: 'goblet [高脚杯]',
575
+ 573: 'go-kart [卡丁车]',
576
+ 574: 'golf ball [高尔夫球]',
577
+ 575: 'golfcart, golf cart [高尔夫球车]',
578
+ 576: 'gondola [狭长小船]',
579
+ 577: 'gong, tam-tam [锣]',
580
+ 578: 'gown [礼服]',
581
+ 579: 'grand piano, grand [钢琴]',
582
+ 580: 'greenhouse, nursery, glasshouse [温室,苗圃]',
583
+ 581: 'grille, radiator grille [散热器格栅]',
584
+ 582: 'grocery store, grocery, food market, market [杂货店,食品市场]',
585
+ 583: 'guillotine [断头台]',
586
+ 584: 'hair slide [小发夹]',
587
+ 585: 'hair spray [头发喷雾]',
588
+ 586: 'half track [半履带装甲车]',
589
+ 587: 'hammer [锤子]',
590
+ 588: 'hamper [大篮子]',
591
+ 589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier [手摇鼓风机,吹风机]',
592
+ 590: 'hand-held computer, hand-held microcomputer [手提电脑]',
593
+ 591: 'handkerchief, hankie, hanky, hankey [手帕]',
594
+ 592: 'hard disc, hard disk, fixed disk [硬盘]',
595
+ 593: 'harmonica, mouth organ, harp, mouth harp [口琴,口风琴]',
596
+ 594: 'harp [竖琴]',
597
+ 595: 'harvester, reaper [收割机]',
598
+ 596: 'hatchet [斧头]',
599
+ 597: 'holster [手枪皮套]',
600
+ 598: 'home theater, home theatre [家庭影院]',
601
+ 599: 'honeycomb [蜂窝]',
602
+ 600: 'hook, claw [钩爪]',
603
+ 601: 'hoopskirt, crinoline [衬裙]',
604
+ 602: 'horizontal bar, high bar [单杠]',
605
+ 603: 'horse cart, horse-cart [马车]',
606
+ 604: 'hourglass [沙漏]',
607
+ 605: 'iPod [手机,iPad]',
608
+ 606: 'iron, smoothing iron [熨斗]',
609
+ 607: 'jack-o-lantern [南瓜灯笼]',
610
+ 608: 'jean, blue jean, denim [牛仔裤,蓝色牛仔裤]',
611
+ 609: 'jeep, landrover [吉普车]',
612
+ 610: 'jersey, T-shirt, tee shirt [运动衫,T恤]',
613
+ 611: 'jigsaw puzzle [拼图]',
614
+ 612: 'jinrikisha, ricksha, rickshaw [人力车]',
615
+ 613: 'joystick [操纵杆]',
616
+ 614: 'kimono [和服]',
617
+ 615: 'knee pad [护膝]',
618
+ 616: 'knot [蝴蝶结]',
619
+ 617: 'lab coat, laboratory coat [大褂,实验室外套]',
620
+ 618: 'ladle [长柄勺]',
621
+ 619: 'lampshade, lamp shade [灯罩]',
622
+ 620: 'laptop, laptop computer [笔记本电脑]',
623
+ 621: 'lawn mower, mower [割草机]',
624
+ 622: 'lens cap, lens cover [镜头盖]',
625
+ 623: 'letter opener, paper knife, paperknife [开信刀,裁纸刀]',
626
+ 624: 'library [图书馆]',
627
+ 625: 'lifeboat [救生艇]',
628
+ 626: 'lighter, light, igniter, ignitor [点火器,打火机]',
629
+ 627: 'limousine, limo [豪华轿车]',
630
+ 628: 'liner, ocean liner [远洋班轮]',
631
+ 629: 'lipstick, lip rouge [唇膏,口红]',
632
+ 630: 'Loafer [平底便鞋]',
633
+ 631: 'lotion [洗剂]',
634
+ 632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system [扬声器]',
635
+ 633: 'loupe, jewelers loupe [放大镜]',
636
+ 634: 'lumbermill, sawmill [锯木厂]',
637
+ 635: 'magnetic compass [磁罗盘]',
638
+ 636: 'mailbag, postbag [邮袋]',
639
+ 637: 'mailbox, letter box [信箱]',
640
+ 638: 'maillot [女游泳衣]',
641
+ 639: 'maillot, tank suit [有肩带浴衣]',
642
+ 640: 'manhole cover [窨井盖]',
643
+ 641: 'maraca [沙球(一种打击乐器)]',
644
+ 642: 'marimba, xylophone [马林巴木琴]',
645
+ 643: 'mask [面膜]',
646
+ 644: 'matchstick [火柴]',
647
+ 645: 'maypole [花柱]',
648
+ 646: 'maze, labyrinth [迷宫]',
649
+ 647: 'measuring cup [量杯]',
650
+ 648: 'medicine chest, medicine cabinet [药箱]',
651
+ 649: 'megalith, megalithic structure [巨石,巨石结构]',
652
+ 650: 'microphone, mike [麦克风]',
653
+ 651: 'microwave, microwave oven [微波炉]',
654
+ 652: 'military uniform [军装]',
655
+ 653: 'milk can [奶桶]',
656
+ 654: 'minibus [迷你巴士]',
657
+ 655: 'miniskirt, mini [迷你裙]',
658
+ 656: 'minivan [面包车]',
659
+ 657: 'missile [导弹]',
660
+ 658: 'mitten [连指手套]',
661
+ 659: 'mixing bowl [搅拌钵]',
662
+ 660: 'mobile home, manufactured home [活动房屋(由汽车拖拉的)]',
663
+ 661: 'Model T [T型发动机小汽车]',
664
+ 662: 'modem [调制解调器]',
665
+ 663: 'monastery [修道院]',
666
+ 664: 'monitor [显示器]',
667
+ 665: 'moped [电瓶车]',
668
+ 666: 'mortar [砂浆]',
669
+ 667: 'mortarboard [学士]',
670
+ 668: 'mosque [清真寺]',
671
+ 669: 'mosquito net [蚊帐]',
672
+ 670: 'motor scooter, scooter [摩托车]',
673
+ 671: 'mountain bike, all-terrain bike, off-roader [山地自行车]',
674
+ 672: 'mountain tent [登山帐]',
675
+ 673: 'mouse, computer mouse [鼠标,电脑鼠标]',
676
+ 674: 'mousetrap [捕鼠器]',
677
+ 675: 'moving van [搬家车]',
678
+ 676: 'muzzle [口套]',
679
+ 677: 'nail [钉子]',
680
+ 678: 'neck brace [颈托]',
681
+ 679: 'necklace [项链]',
682
+ 680: 'nipple [乳头(瓶)]',
683
+ 681: 'notebook, notebook computer [笔记本,笔记本电脑]',
684
+ 682: 'obelisk [方尖碑]',
685
+ 683: 'oboe, hautboy, hautbois [双簧管]',
686
+ 684: 'ocarina, sweet potato [陶笛,卵形笛]',
687
+ 685: 'odometer, hodometer, mileometer, milometer [里程表]',
688
+ 686: 'oil filter [滤油器]',
689
+ 687: 'organ, pipe organ [风琴,管风琴]',
690
+ 688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO [示波器]',
691
+ 689: 'overskirt [罩裙]',
692
+ 690: 'oxcart [牛车]',
693
+ 691: 'oxygen mask [氧气面罩]',
694
+ 692: 'packet [包装]',
695
+ 693: 'paddle, boat paddle [船桨]',
696
+ 694: 'paddlewheel, paddle wheel [明轮,桨轮]',
697
+ 695: 'padlock [挂锁,扣锁]',
698
+ 696: 'paintbrush [画笔]',
699
+ 697: 'pajama, pyjama, pjs, jammies [睡衣]',
700
+ 698: 'palace [宫殿]',
701
+ 699: 'panpipe, pandean pipe, syrinx [排箫,鸣管]',
702
+ 700: 'paper towel [纸巾]',
703
+ 701: 'parachute, chute [降落伞]',
704
+ 702: 'parallel bars, bars [双杠]',
705
+ 703: 'park bench [公园长椅]',
706
+ 704: 'parking meter [停车收费表,停车计时器]',
707
+ 705: 'passenger car, coach, carriage [客车,教练车]',
708
+ 706: 'patio, terrace [露台,阳台]',
709
+ 707: 'pay-phone, pay-station [付费电话]',
710
+ 708: 'pedestal, plinth, footstall [基座,基脚]',
711
+ 709: 'pencil box, pencil case [铅笔盒]',
712
+ 710: 'pencil sharpener [卷笔刀]',
713
+ 711: 'perfume, essence [香水(瓶)]',
714
+ 712: 'Petri dish [培养皿]',
715
+ 713: 'photocopier [复印机]',
716
+ 714: 'pick, plectrum, plectron [拨弦片,拨子]',
717
+ 715: 'pickelhaube [尖顶头盔]',
718
+ 716: 'picket fence, paling [栅栏,栅栏]',
719
+ 717: 'pickup, pickup truck [皮卡,皮卡车]',
720
+ 718: 'pier [桥墩]',
721
+ 719: 'piggy bank, penny bank [存钱罐]',
722
+ 720: 'pill bottle [药瓶]',
723
+ 721: 'pillow [枕头]',
724
+ 722: 'ping-pong ball [乒乓球]',
725
+ 723: 'pinwheel [风车]',
726
+ 724: 'pirate, pirate ship [海盗船]',
727
+ 725: 'pitcher, ewer [水罐]',
728
+ 726: 'plane, carpenters plane, woodworking plane [木工刨]',
729
+ 727: 'planetarium [天文馆]',
730
+ 728: 'plastic bag [塑料袋]',
731
+ 729: 'plate rack [板架]',
732
+ 730: 'plow, plough [犁型铲雪机]',
733
+ 731: 'plunger, plumbers helper [手压皮碗泵]',
734
+ 732: 'Polaroid camera, Polaroid Land camera [宝丽来相机]',
735
+ 733: 'pole [电线杆]',
736
+ 734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria [警车,巡逻车]',
737
+ 735: 'poncho [雨披]',
738
+ 736: 'pool table, billiard table, snooker table [台球桌]',
739
+ 737: 'pop bottle, soda bottle [充气饮料瓶]',
740
+ 738: 'pot, flowerpot [花盆]',
741
+ 739: 'potters wheel [陶工旋盘]',
742
+ 740: 'power drill [电钻]',
743
+ 741: 'prayer rug, prayer mat [祈祷垫,地毯]',
744
+ 742: 'printer [打印机]',
745
+ 743: 'prison, prison house [监狱]',
746
+ 744: 'projectile, missile [炮弹,导弹]',
747
+ 745: 'projector [投影仪]',
748
+ 746: 'puck, hockey puck [冰球]',
749
+ 747: 'punching bag, punch bag, punching ball, punchball [沙包,吊球]',
750
+ 748: 'purse [钱包]',
751
+ 749: 'quill, quill pen [羽管笔]',
752
+ 750: 'quilt, comforter, comfort, puff [被子]',
753
+ 751: 'racer, race car, racing car [赛车]',
754
+ 752: 'racket, racquet [球拍]',
755
+ 753: 'radiator [散热器]',
756
+ 754: 'radio, wireless [收音机]',
757
+ 755: 'radio telescope, radio reflector [射电望远镜,无线电反射器]',
758
+ 756: 'rain barrel [雨桶]',
759
+ 757: 'recreational vehicle, RV, R.V. [休闲车,房车]',
760
+ 758: 'reel [卷轴,卷筒]',
761
+ 759: 'reflex camera [反射式照相机]',
762
+ 760: 'refrigerator, icebox [冰箱,冰柜]',
763
+ 761: 'remote control, remote [遥控器]',
764
+ 762: 'restaurant, eating house, eating place, eatery [餐厅,饮食店,食堂]',
765
+ 763: 'revolver, six-gun, six-shooter [左轮手枪]',
766
+ 764: 'rifle [步枪]',
767
+ 765: 'rocking chair, rocker [摇椅]',
768
+ 766: 'rotisserie [电转烤肉架]',
769
+ 767: 'rubber eraser, rubber, pencil eraser [橡皮]',
770
+ 768: 'rugby ball [橄榄球]',
771
+ 769: 'rule, ruler [直尺]',
772
+ 770: 'running shoe [跑步鞋]',
773
+ 771: 'safe [保险柜]',
774
+ 772: 'safety pin [安全别针]',
775
+ 773: 'saltshaker, salt shaker [盐瓶(调味用)]',
776
+ 774: 'sandal [凉鞋]',
777
+ 775: 'sarong [纱笼,围裙]',
778
+ 776: 'sax, saxophone [萨克斯管]',
779
+ 777: 'scabbard [剑鞘]',
780
+ 778: 'scale, weighing machine [秤,称重机]',
781
+ 779: 'school bus [校车]',
782
+ 780: 'schooner [帆船]',
783
+ 781: 'scoreboard [记分牌]',
784
+ 782: 'screen, CRT screen [屏幕]',
785
+ 783: 'screw [螺丝]',
786
+ 784: 'screwdriver [螺丝刀]',
787
+ 785: 'seat belt, seatbelt [安全带]',
788
+ 786: 'sewing machine [缝纫机]',
789
+ 787: 'shield, buckler [盾牌,盾牌]',
790
+ 788: 'shoe shop, shoe-shop, shoe store [皮鞋店,鞋店]',
791
+ 789: 'shoji [障子]',
792
+ 790: 'shopping basket [购物篮]',
793
+ 791: 'shopping cart [购物车]',
794
+ 792: 'shovel [铁锹]',
795
+ 793: 'shower cap [浴帽]',
796
+ 794: 'shower curtain [浴帘]',
797
+ 795: 'ski [滑雪板]',
798
+ 796: 'ski mask [滑雪面罩]',
799
+ 797: 'sleeping bag [睡袋]',
800
+ 798: 'slide rule, slipstick [滑尺]',
801
+ 799: 'sliding door [滑动门]',
802
+ 800: 'slot, one-armed bandit [角子老虎机]',
803
+ 801: 'snorkel [潜水通气管]',
804
+ 802: 'snowmobile [雪橇]',
805
+ 803: 'snowplow, snowplough [扫雪机,扫雪机]',
806
+ 804: 'soap dispenser [皂液器]',
807
+ 805: 'soccer ball [足球]',
808
+ 806: 'sock [袜子]',
809
+ 807: 'solar dish, solar collector, solar furnace [碟式太阳能,太阳能集热器,太阳能炉]',
810
+ 808: 'sombrero [宽边帽]',
811
+ 809: 'soup bowl [汤碗]',
812
+ 810: 'space bar [空格键]',
813
+ 811: 'space heater [空间加热器]',
814
+ 812: 'space shuttle [航天飞机]',
815
+ 813: 'spatula [铲(搅拌或涂敷用的)]',
816
+ 814: 'speedboat [快艇]',
817
+ 815: 'spider web, spiders web [蜘蛛网]',
818
+ 816: 'spindle [纺锤,纱锭]',
819
+ 817: 'sports car, sport car [跑车]',
820
+ 818: 'spotlight, spot [聚光灯]',
821
+ 819: 'stage [舞台]',
822
+ 820: 'steam locomotive [蒸汽机车]',
823
+ 821: 'steel arch bridge [钢拱桥]',
824
+ 822: 'steel drum [钢滚筒]',
825
+ 823: 'stethoscope [听诊器]',
826
+ 824: 'stole [女用披肩]',
827
+ 825: 'stone wall [石头墙]',
828
+ 826: 'stopwatch, stop watch [秒表]',
829
+ 827: 'stove [火炉]',
830
+ 828: 'strainer [过滤器]',
831
+ 829: 'streetcar, tram, tramcar, trolley, trolley car [有轨电车,电车]',
832
+ 830: 'stretcher [担架]',
833
+ 831: 'studio couch, day bed [沙发床]',
834
+ 832: 'stupa, tope [佛塔]',
835
+ 833: 'submarine, pigboat, sub, U-boat [潜艇,潜水艇]',
836
+ 834: 'suit, suit of clothes [套装,衣服]',
837
+ 835: 'sundial [日晷]',
838
+ 836: 'sunglass [太阳镜]',
839
+ 837: 'sunglasses, dark glasses, shades [太阳镜,墨镜]',
840
+ 838: 'sunscreen, sunblock, sun blocker [防晒霜,防晒剂]',
841
+ 839: 'suspension bridge [悬索桥]',
842
+ 840: 'swab, swob, mop [拖把]',
843
+ 841: 'sweatshirt [运动衫]',
844
+ 842: 'swimming trunks, bathing trunks [游泳裤]',
845
+ 843: 'swing [秋千]',
846
+ 844: 'switch, electric switch, electrical switch [开关,电器开关]',
847
+ 845: 'syringe [注射器]',
848
+ 846: 'table lamp [台灯]',
849
+ 847: 'tank, army tank, armored combat vehicle, armoured combat vehicle [坦克,装甲战车,装甲战斗车辆]',
850
+ 848: 'tape player [磁带播放器]',
851
+ 849: 'teapot [茶壶]',
852
+ 850: 'teddy, teddy bear [泰迪,泰迪熊]',
853
+ 851: 'television, television system [电视]',
854
+ 852: 'tennis ball [网球]',
855
+ 853: 'thatch, thatched roof [茅草,茅草屋顶]',
856
+ 854: 'theater curtain, theatre curtain [幕布,剧院的帷幕]',
857
+ 855: 'thimble [顶针]',
858
+ 856: 'thresher, thrasher, threshing machine [脱粒机]',
859
+ 857: 'throne [宝座]',
860
+ 858: 'tile roof [瓦屋顶]',
861
+ 859: 'toaster [烤面包机]',
862
+ 860: 'tobacco shop, tobacconist shop, tobacconist [烟草店,烟草]',
863
+ 861: 'toilet seat [马桶]',
864
+ 862: 'torch [火炬]',
865
+ 863: 'totem pole [图腾柱]',
866
+ 864: 'tow truck, tow car, wrecker [拖车,牵引车,清障车]',
867
+ 865: 'toyshop [玩具店]',
868
+ 866: 'tractor [拖拉机]',
869
+ 867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi [拖车,铰接式卡车]',
870
+ 868: 'tray [托盘]',
871
+ 869: 'trench coat [风衣]',
872
+ 870: 'tricycle, trike, velocipede [三轮车]',
873
+ 871: 'trimaran [三体船]',
874
+ 872: 'tripod [三脚架]',
875
+ 873: 'triumphal arch [凯旋门]',
876
+ 874: 'trolleybus, trolley coach, trackless trolley [无轨电车]',
877
+ 875: 'trombone [长号]',
878
+ 876: 'tub, vat [浴盆,浴缸]',
879
+ 877: 'turnstile [旋转式栅门]',
880
+ 878: 'typewriter keyboard [打字机键盘]',
881
+ 879: 'umbrella [伞]',
882
+ 880: 'unicycle, monocycle [独轮车]',
883
+ 881: 'upright, upright piano [直立式钢琴]',
884
+ 882: 'vacuum, vacuum cleaner [真空吸尘器]',
885
+ 883: 'vase [花瓶]',
886
+ 884: 'vault [拱顶]',
887
+ 885: 'velvet [天鹅绒]',
888
+ 886: 'vending machine [自动售货机]',
889
+ 887: 'vestment [祭服]',
890
+ 888: 'viaduct [高架桥]',
891
+ 889: 'violin, fiddle [小提琴,小提琴]',
892
+ 890: 'volleyball [排球]',
893
+ 891: 'waffle iron [松饼机]',
894
+ 892: 'wall clock [挂钟]',
895
+ 893: 'wallet, billfold, notecase, pocketbook [钱包,皮夹]',
896
+ 894: 'wardrobe, closet, press [衣柜,壁橱]',
897
+ 895: 'warplane, military plane [军用飞机]',
898
+ 896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin [洗脸盆,洗手盆]',
899
+ 897: 'washer, automatic washer, washing machine [洗衣机,自动洗衣机]',
900
+ 898: 'water bottle [水瓶]',
901
+ 899: 'water jug [水壶]',
902
+ 900: 'water tower [水塔]',
903
+ 901: 'whiskey jug [威士忌壶]',
904
+ 902: 'whistle [哨子]',
905
+ 903: 'wig [假发]',
906
+ 904: 'window screen [纱窗]',
907
+ 905: 'window shade [百叶窗]',
908
+ 906: 'Windsor tie [温莎领带]',
909
+ 907: 'wine bottle [葡萄酒瓶]',
910
+ 908: 'wing [飞机翅膀,飞机]',
911
+ 909: 'wok [炒菜锅]',
912
+ 910: 'wooden spoon [木制的勺子]',
913
+ 911: 'wool, woolen, woollen [毛织品,羊绒]',
914
+ 912: 'worm fence, snake fence, snake-rail fence, Virginia fence [栅栏,围栏]',
915
+ 913: 'wreck [沉船]',
916
+ 914: 'yawl [双桅船]',
917
+ 915: 'yurt [蒙古包]',
918
+ 916: 'web site, website, internet site, site [网站,互联网网站]',
919
+ 917: 'comic book [漫画]',
920
+ 918: 'crossword puzzle, crossword [纵横字谜]',
921
+ 919: 'street sign [路标]',
922
+ 920: 'traffic light, traffic signal, stoplight [交通信号灯]',
923
+ 921: 'book jacket, dust cover, dust jacket, dust wrapper [防尘罩,书皮]',
924
+ 922: 'menu [菜单]',
925
+ 923: 'plate [盘子]',
926
+ 924: 'guacamole [鳄梨酱]',
927
+ 925: 'consomme [清汤]',
928
+ 926: 'hot pot, hotpot [罐焖土豆烧肉]',
929
+ 927: 'trifle [蛋糕]',
930
+ 928: 'ice cream, icecream [冰淇淋]',
931
+ 929: 'ice lolly, lolly, lollipop, popsicle [雪糕,冰棍,冰棒]',
932
+ 930: 'French loaf [法式面包]',
933
+ 931: 'bagel, beigel [百吉饼]',
934
+ 932: 'pretzel [椒盐脆饼]',
935
+ 933: 'cheeseburger [芝士汉堡]',
936
+ 934: 'hotdog, hot dog, red hot [热狗]',
937
+ 935: 'mashed potato [土豆泥]',
938
+ 936: 'head cabbage [结球甘蓝]',
939
+ 937: 'broccoli [西兰花]',
940
+ 938: 'cauliflower [菜花]',
941
+ 939: 'zucchini, courgette [绿皮密生西葫芦]',
942
+ 940: 'spaghetti squash [西葫芦]',
943
+ 941: 'acorn squash [小青南瓜]',
944
+ 942: 'butternut squash [南瓜]',
945
+ 943: 'cucumber, cuke [黄瓜]',
946
+ 944: 'artichoke, globe artichoke [朝鲜蓟]',
947
+ 945: 'bell pepper [甜椒]',
948
+ 946: 'cardoon [刺棘蓟]',
949
+ 947: 'mushroom [蘑菇]',
950
+ 948: 'Granny Smith [绿苹果]',
951
+ 949: 'strawberry [草莓]',
952
+ 950: 'orange [橘子]',
953
+ 951: 'lemon [柠檬]',
954
+ 952: 'fig [无花果]',
955
+ 953: 'pineapple, ananas [菠萝]',
956
+ 954: 'banana [香蕉]',
957
+ 955: 'jackfruit, jak, jack [菠萝蜜]',
958
+ 956: 'custard apple [蛋奶冻苹果]',
959
+ 957: 'pomegranate [石榴]',
960
+ 958: 'hay [干草]',
961
+ 959: 'carbonara [烤面条加干酪沙司]',
962
+ 960: 'chocolate sauce, chocolate syrup [巧克力酱,巧克力糖浆]',
963
+ 961: 'dough [面团]',
964
+ 962: 'meat loaf, meatloaf [瑞士肉包,肉饼]',
965
+ 963: 'pizza, pizza pie [披萨,披萨饼]',
966
+ 964: 'potpie [馅饼]',
967
+ 965: 'burrito [卷饼]',
968
+ 966: 'red wine [红葡萄酒]',
969
+ 967: 'espresso [意大利浓咖啡]',
970
+ 968: 'cup [杯子]',
971
+ 969: 'eggnog [蛋酒]',
972
+ 970: 'alp [高山]',
973
+ 971: 'bubble [泡泡]',
974
+ 972: 'cliff, drop, drop-off [悬崖]',
975
+ 973: 'coral reef [珊瑚礁]',
976
+ 974: 'geyser [间歇泉]',
977
+ 975: 'lakeside, lakeshore [湖边,湖岸]',
978
+ 976: 'promontory, headland, head, foreland [海角]',
979
+ 977: 'sandbar, sand bar [沙洲,沙坝]',
980
+ 978: 'seashore, coast, seacoast, sea-coast [海滨,海岸]',
981
+ 979: 'valley, vale [峡谷]',
982
+ 980: 'volcano [火山]',
983
+ 981: 'ballplayer, baseball player [棒球,棒球运动员]',
984
+ 982: 'groom, bridegroom [新郎]',
985
+ 983: 'scuba diver [潜水员]',
986
+ 984: 'rapeseed [油菜]',
987
+ 985: 'daisy [雏菊]',
988
+ 986: 'yellow ladys slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum [杓兰]',
989
+ 987: 'corn [玉米]',
990
+ 988: 'acorn [橡子]',
991
+ 989: 'hip, rose hip, rosehip [玫瑰果]',
992
+ 990: 'buckeye, horse chestnut, conker [七叶树果实]',
993
+ 991: 'coral fungus [珊瑚菌]',
994
+ 992: 'agaric [木耳]',
995
+ 993: 'gyromitra [鹿花菌]',
996
+ 994: 'stinkhorn, carrion fungus [鬼笔菌]',
997
+ 995: 'earthstar [地星(菌类)]',
998
+ 996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa [多叶奇果菌]',
999
+ 997: 'bolete [牛肝菌]',
1000
+ 998: 'ear, spike, capitulum [玉米穗]',
1001
+ 999: 'toilet tissue, toilet paper, bathroom tissue [卫生纸]',
1002
+ }
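A minimal usage sketch for the mapping above, assuming it is exported as a module-level dict named `imagenet_idx2classname` (the name is an assumption; check the top of tools/imagenet_en_cn.py for the actual identifier). Each value packs the English label and a bracketed Chinese gloss into one string, so both parts can be split back out:

# Sketch only: the dict name is assumed; the split follows the "english [中文]" format used above.
from tools.imagenet_en_cn import imagenet_idx2classname

def split_label(class_idx: int):
    """Return (english, chinese) for an ImageNet class index."""
    entry = imagenet_idx2classname[class_idx]   # e.g. "volcano [火山]"
    english, _, rest = entry.partition(" [")
    return english, rest.rstrip("]")

print(split_label(980))  # ('volcano', '火山')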
tools/push_gpt_to_hf.py ADDED
@@ -0,0 +1,71 @@
1
+ # Modified from:
2
+ # DiT: https://github.com/facebookresearch/DiT/blob/main/sample_ddp.py
3
+ import torch
4
+ torch.backends.cuda.matmul.allow_tf32 = True
5
+ torch.backends.cudnn.allow_tf32 = True
6
+ import argparse
7
+
8
+ from tokenizer.tokenizer_image.vq_model import VQ_models
9
+ from autoregressive.models.gpt_hf import GPT_models_HF, TransformerHF
10
+
11
+ device = "cuda" if torch.cuda.is_available() else "cpu"
12
+
13
+ def main(args):
14
+ # Setup PyTorch:
15
+ assert torch.cuda.is_available(), "Converting and pushing the GPT model requires at least one GPU."
16
+ torch.set_grad_enabled(False)
17
+
18
+ # create and load gpt model
19
+ precision = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.precision]
20
+ latent_size = args.image_size // args.downsample_size
21
+ gpt_model = GPT_models_HF[args.gpt_model](
22
+ vocab_size=args.codebook_size,
23
+ block_size=latent_size ** 2,
24
+ num_classes=args.num_classes,
25
+ cls_token_num=args.cls_token_num,
26
+ model_type=args.gpt_type,
27
+ ).to(device=device, dtype=precision)
28
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
29
+ if args.from_fsdp: # fsdp
30
+ model_weight = checkpoint
31
+ elif "model" in checkpoint: # ddp
32
+ model_weight = checkpoint["model"]
33
+ elif "module" in checkpoint: # deepspeed
34
+ model_weight = checkpoint["module"]
35
+ elif "state_dict" in checkpoint:
36
+ model_weight = checkpoint["state_dict"]
37
+ else:
38
+ raise Exception("Unrecognized checkpoint format; please check the model weights, or add --from-fsdp to the run command.")
39
+
40
+ # load weights
41
+ gpt_model.load_state_dict(model_weight, strict=False)
42
+ gpt_model.eval()
43
+ del checkpoint
44
+
45
+ # push to hub
46
+ repo_id = f"FoundationVision/{args.gpt_model}-{args.image_size}"
47
+ gpt_model.push_to_hub(repo_id)
48
+
49
+ # reload from the Hub as a sanity check
50
+ model = TransformerHF.from_pretrained(repo_id)
51
+
52
+
53
+ if __name__ == "__main__":
54
+ parser = argparse.ArgumentParser()
55
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-B")
56
+ parser.add_argument("--gpt-ckpt", type=str, default=None)
57
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="c2i", help="class-conditional or text-conditional")
58
+ parser.add_argument("--from-fsdp", action='store_true')
59
+ parser.add_argument("--cls-token-num", type=int, default=1, help="max token number of condition input")
60
+ parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
61
+ parser.add_argument("--compile", action='store_true', default=True)
62
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
63
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
64
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
65
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
66
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=384)
67
+ parser.add_argument("--image-size-eval", type=int, choices=[256, 384, 512], default=256)
68
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
69
+ parser.add_argument("--num-classes", type=int, default=1000)
70
+ args = parser.parse_args()
71
+ main(args)
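Once the push succeeds, the checkpoint can be pulled back with the same TransformerHF class the script already imports; a minimal sketch, assuming a GPT-B model pushed at image size 384 (the repo id mirrors the f-string in main(), so the exact name depends on the arguments used):

# Sketch only: the repo id format "FoundationVision/<gpt-model>-<image-size>" comes from main() above.
import torch
from autoregressive.models.gpt_hf import TransformerHF

model = TransformerHF.from_pretrained("FoundationVision/GPT-B-384")
model = model.to(device="cuda" if torch.cuda.is_available() else "cpu", dtype=torch.bfloat16)
model.eval()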
utils/data.py ADDED
@@ -0,0 +1,22 @@
1
+ import numpy as np
2
+ from PIL import Image
3
+
4
+ def center_crop_arr(pil_image, image_size):
5
+ """
6
+ Center cropping implementation from ADM.
7
+ https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
8
+ """
9
+ while min(*pil_image.size) >= 2 * image_size:
10
+ pil_image = pil_image.resize(
11
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
12
+ )
13
+
14
+ scale = image_size / min(*pil_image.size)
15
+ pil_image = pil_image.resize(
16
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
17
+ )
18
+
19
+ arr = np.array(pil_image)
20
+ crop_y = (arr.shape[0] - image_size) // 2
21
+ crop_x = (arr.shape[1] - image_size) // 2
22
+ return Image.fromarray(arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size])
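A short usage sketch for center_crop_arr (the image path is a placeholder, and the import assumes the repo root is on PYTHONPATH): the helper halves the image until its short side drops below twice the target, rescales so the short side equals image_size, then takes a center crop.

from PIL import Image
from utils.data import center_crop_arr

img = Image.open("example.jpg").convert("RGB")   # placeholder path
crop = center_crop_arr(img, image_size=256)      # PIL.Image cropped to 256x256
print(crop.size)                                 # (256, 256)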