slz1 committed · Commit 26aa799 · verified · 1 Parent(s): 0f586c0

Add files using upload-large-folder tool

Files changed (50)
  1. condition/example/c2i/depth/4351.npy +3 -0
  2. condition/example/c2i/depth/4351.png +3 -0
  3. dataset/__pycache__/augmentation.cpython-310.pyc +0 -0
  4. dataset/__pycache__/augmentation.cpython-38.pyc +0 -0
  5. dataset/__pycache__/build.cpython-310.pyc +0 -0
  6. dataset/__pycache__/build.cpython-38.pyc +0 -0
  7. dataset/__pycache__/coco.cpython-310.pyc +0 -0
  8. dataset/__pycache__/imagenet.cpython-310.pyc +0 -0
  9. dataset/__pycache__/imagenet.cpython-38.pyc +0 -0
  10. dataset/__pycache__/openimage.cpython-310.pyc +0 -0
  11. dataset/__pycache__/pexels.cpython-310.pyc +0 -0
  12. dataset/__pycache__/t2i.cpython-310.pyc +0 -0
  13. dataset/__pycache__/t2i_control.cpython-310.pyc +0 -0
  14. dataset/__pycache__/utils.cpython-310.pyc +0 -0
  15. dataset/augmentation.py +51 -0
  16. dataset/build.py +29 -0
  17. dataset/coco.py +27 -0
  18. dataset/imagenet.py +120 -0
  19. dataset/make_jsonl.py +73 -0
  20. dataset/openimage.py +42 -0
  21. dataset/pexels.py +4 -0
  22. dataset/t2i.py +167 -0
  23. dataset/t2i_control.py +199 -0
  24. dataset/test_dataset_t2i.py +47 -0
  25. dataset/test_t5_npz.py +41 -0
  26. dataset/utils.py +325 -0
  27. demo/app.py +34 -0
  28. demo/app_depth.py +135 -0
  29. demo/app_edge.py +150 -0
  30. demo/model.py +284 -0
  31. evaluations/ade20k_mIoU.py +68 -0
  32. evaluations/c2i/README.md +96 -0
  33. evaluations/c2i/evaluator.py +665 -0
  34. evaluations/canny_f1score.py +62 -0
  35. evaluations/clean_fid.py +15 -0
  36. evaluations/cocostuff_mIoU.py +72 -0
  37. evaluations/depth_rmse.py +62 -0
  38. evaluations/hed_ssim.py +52 -0
  39. evaluations/lineart_ssim.py +54 -0
  40. evaluations/t2i/PartiPrompts.tsv +0 -0
  41. evaluations/t2i/README.md +21 -0
  42. evaluations/t2i/coco_captions.csv +0 -0
  43. evaluations/t2i/evaluation.py +260 -0
  44. language/README.md +14 -0
  45. language/__init__.py +0 -0
  46. language/__pycache__/extract_t5_feature.cpython-310.pyc +0 -0
  47. language/__pycache__/t5.cpython-310.pyc +0 -0
  48. language/extract_t5_feature.py +129 -0
  49. language/t5.py +201 -0
  50. scripts/autoregressive/extract_codes_c2i.sh +7 -0
condition/example/c2i/depth/4351.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1041de49497a4864fd6325af14e7917d93d9676c2df592ef504bd5140fea9e4
+ size 136
condition/example/c2i/depth/4351.png ADDED

Git LFS Details

  • SHA256: 7b487f2856fa2248fa4dca856a2d7161a0f7880f743c451def34bd81a9da92d2
  • Pointer size: 130 Bytes
  • Size of remote file: 14.5 kB
dataset/__pycache__/augmentation.cpython-310.pyc ADDED
Binary file (1.98 kB)
 
dataset/__pycache__/augmentation.cpython-38.pyc ADDED
Binary file (1.99 kB)
 
dataset/__pycache__/build.cpython-310.pyc ADDED
Binary file (1.03 kB)
 
dataset/__pycache__/build.cpython-38.pyc ADDED
Binary file (983 Bytes)
 
dataset/__pycache__/coco.cpython-310.pyc ADDED
Binary file (1.5 kB)
 
dataset/__pycache__/imagenet.cpython-310.pyc ADDED
Binary file (3.55 kB)
 
dataset/__pycache__/imagenet.cpython-38.pyc ADDED
Binary file (3.61 kB)
 
dataset/__pycache__/openimage.cpython-310.pyc ADDED
Binary file (1.89 kB)
 
dataset/__pycache__/pexels.cpython-310.pyc ADDED
Binary file (363 Bytes)
 
dataset/__pycache__/t2i.cpython-310.pyc ADDED
Binary file (5.02 kB)
 
dataset/__pycache__/t2i_control.cpython-310.pyc ADDED
Binary file (6.66 kB)
 
dataset/__pycache__/utils.cpython-310.pyc ADDED
Binary file (10.3 kB)
 
dataset/augmentation.py ADDED
@@ -0,0 +1,51 @@
1
+ # from https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py
2
+ import math
3
+ import random
4
+ import numpy as np
5
+ from PIL import Image
6
+
7
+
8
+ def center_crop_arr(pil_image, image_size):
9
+ """
10
+ Center cropping implementation from ADM.
11
+ https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
12
+ """
13
+ while min(*pil_image.size) >= 2 * image_size:
14
+ pil_image = pil_image.resize(
15
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
16
+ )
17
+
18
+ scale = image_size / min(*pil_image.size)
19
+ pil_image = pil_image.resize(
20
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
21
+ )
22
+
23
+ arr = np.array(pil_image)
24
+ crop_y = (arr.shape[0] - image_size) // 2
25
+ crop_x = (arr.shape[1] - image_size) // 2
26
+ return Image.fromarray(arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size])
27
+
28
+
29
+ def random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):
30
+ min_smaller_dim_size = math.ceil(image_size / max_crop_frac)
31
+ max_smaller_dim_size = math.ceil(image_size / min_crop_frac)
32
+ smaller_dim_size = random.randrange(min_smaller_dim_size, max_smaller_dim_size + 1)
33
+
34
+ # We are not on a new enough PIL to support the `reducing_gap`
35
+ # argument, which uses BOX downsampling at powers of two first.
36
+ # Thus, we do it by hand to improve downsample quality.
37
+ while min(*pil_image.size) >= 2 * smaller_dim_size:
38
+ pil_image = pil_image.resize(
39
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
40
+ )
41
+
42
+ scale = smaller_dim_size / min(*pil_image.size)
43
+ pil_image = pil_image.resize(
44
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
45
+ )
46
+
47
+ arr = np.array(pil_image)
48
+ crop_y = random.randrange(arr.shape[0] - image_size + 1)
49
+ crop_x = random.randrange(arr.shape[1] - image_size + 1)
50
+ return Image.fromarray(arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size])
51
+
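A quick usage sketch (not part of this commit): how these ADM-style crop helpers are typically wired into a torchvision pipeline. The resolution and normalization constants below are assumptions for illustration only.

# sketch: plugging center_crop_arr / random_crop_arr into torchvision transforms
from functools import partial
from torchvision import transforms
from dataset.augmentation import center_crop_arr, random_crop_arr

image_size = 256  # assumed training resolution
train_transform = transforms.Compose([
    transforms.Lambda(partial(random_crop_arr, image_size=image_size)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
eval_transform = transforms.Compose([
    transforms.Lambda(partial(center_crop_arr, image_size=image_size)),
    transforms.ToTensor(),
])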
dataset/build.py ADDED
@@ -0,0 +1,29 @@
1
+ from dataset.imagenet import build_imagenet, build_imagenet_code
2
+ from dataset.coco import build_coco
3
+ from dataset.openimage import build_openimage
4
+ from dataset.pexels import build_pexels
5
+ from dataset.t2i import build_t2i, build_t2i_code, build_t2i_image
6
+ # from dataset.t2i_control import build_t2i_control
7
+
8
+ def build_dataset(args, **kwargs):
9
+ # images
10
+ if args.dataset == 'imagenet':
11
+ return build_imagenet(args, **kwargs)
12
+ if args.dataset == 'imagenet_code':
13
+ return build_imagenet_code(args, **kwargs)
14
+ if args.dataset == 'coco':
15
+ return build_coco(args, **kwargs)
16
+ if args.dataset == 'openimage':
17
+ return build_openimage(args, **kwargs)
18
+ if args.dataset == 'pexels':
19
+ return build_pexels(args, **kwargs)
20
+ if args.dataset == 't2i_image':
21
+ return build_t2i_image(args, **kwargs)
22
+ if args.dataset == 't2i':
23
+ return build_t2i(args, **kwargs)
24
+ if args.dataset == 't2i_code':
25
+ return build_t2i_code(args, **kwargs)
26
+ # if args.dataset == 't2i_control':
27
+ # return build_t2i_control(args, **kwargs)
28
+
29
+ raise ValueError(f'dataset {args.dataset} is not supported')
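A minimal sketch (not part of this commit) of how build_dataset might be invoked; the dataset choice, path, and transform below are assumptions for illustration.

# sketch: building the single-folder COCO image dataset through build_dataset
import argparse
from torchvision import transforms
from dataset.build import build_dataset

args = argparse.Namespace(dataset='coco', data_path='/path/to/coco/images')  # hypothetical path
transform = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
dataset = build_dataset(args, transform=transform)
print(len(dataset))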
dataset/coco.py ADDED
@@ -0,0 +1,27 @@
1
+ import os
2
+ import torch
3
+ from torch.utils.data import Dataset
4
+ from PIL import Image
5
+
6
+
7
+ class SingleFolderDataset(Dataset):
8
+ def __init__(self, directory, transform=None):
9
+ super().__init__()
10
+ self.directory = directory
11
+ self.transform = transform
12
+ self.image_paths = [os.path.join(directory, file_name) for file_name in os.listdir(directory)
13
+ if os.path.isfile(os.path.join(directory, file_name))]
14
+
15
+ def __len__(self):
16
+ return len(self.image_paths)
17
+
18
+ def __getitem__(self, idx):
19
+ image_path = self.image_paths[idx]
20
+ image = Image.open(image_path).convert('RGB')
21
+ if self.transform:
22
+ image = self.transform(image)
23
+ return image, torch.tensor(0)
24
+
25
+
26
+ def build_coco(args, transform):
27
+ return SingleFolderDataset(args.data_path, transform=transform)
dataset/imagenet.py ADDED
@@ -0,0 +1,120 @@
1
+ import torch
2
+ import numpy as np
3
+ import os
4
+ from torch.utils.data import Dataset
5
+ from torchvision.datasets import ImageFolder
6
+ import cv2
7
+ from datasets import load_dataset
8
+
9
+ class CustomDataset(Dataset):
10
+ def __init__(self, feature_dir, label_dir, condition_dir=None, get_condition_img=False):
11
+ self.feature_dir = feature_dir
12
+ self.label_dir = label_dir
13
+ self.flip = 'flip' in self.feature_dir
14
+ self.get_condition_img = get_condition_img
15
+
16
+ aug_feature_dir = feature_dir.replace('ten_crop/', 'ten_crop_105/')
17
+ aug_label_dir = label_dir.replace('ten_crop/', 'ten_crop_105/')
18
+ if os.path.exists(aug_feature_dir) and os.path.exists(aug_label_dir):
19
+ self.aug_feature_dir = aug_feature_dir
20
+ self.aug_label_dir = aug_label_dir
21
+ else:
22
+ self.aug_feature_dir = None
23
+ self.aug_label_dir = None
24
+
25
+ if condition_dir is not None:
26
+ self.condition_dir = condition_dir
27
+ self.aug_condition_dir = condition_dir.replace('ten_crop/', 'ten_crop_105/')
28
+ if os.path.exists(self.aug_condition_dir):
29
+ self.aug_condition_dir = self.aug_condition_dir
30
+ else:
31
+ self.aug_condition_dir = None
32
+ else:
33
+ self.condition_dir = None
34
+
35
+ # file_num = min(129398,len(os.listdir(feature_dir)))
36
+ file_num = len(os.listdir(feature_dir))
37
+ # file_num = 1000
38
+ self.feature_files = [f"{i}.npy" for i in range(file_num)]
39
+ self.label_files = [f"{i}.npy" for i in range(file_num)]
40
+ self.condition_files = [f"{i}.npy" for i in range(file_num)]
41
+ # self.feature_files = sorted(os.listdir(feature_dir))
42
+ # self.label_files = sorted(os.listdir(label_dir))
43
+ # TODO: make it configurable
44
+ # self.feature_files = [f"{i}.npy" for i in range(1281167)]
45
+ # self.label_files = [f"{i}.npy" for i in range(1281167)]
46
+
47
+ def __len__(self):
48
+ assert len(self.feature_files) == len(self.label_files), \
49
+ "Number of feature files and label files should be same"
50
+ return len(self.feature_files)
51
+
52
+ def __getitem__(self, idx):
53
+ if self.aug_feature_dir is not None and torch.rand(1) < 0.5:
54
+ feature_dir = self.aug_feature_dir
55
+ label_dir = self.aug_label_dir
56
+ else:
57
+ feature_dir = self.feature_dir
58
+ label_dir = self.label_dir
59
+ if self.condition_dir is not None:
60
+ condition_dir = self.condition_dir
61
+
62
+ feature_file = self.feature_files[idx]
63
+ label_file = self.label_files[idx]
64
+ if self.condition_dir is not None:
65
+ condition_file = self.condition_files[idx]
66
+ # condition_code = np.load(os.path.join(condition_dir, condition_file))
67
+ condition_imgs = np.load(os.path.join(os.path.dirname(condition_dir), os.path.basename(condition_dir).replace('codes', 'imagesnpy'), condition_file))/255
68
+ condition_imgs = 2*(condition_imgs-0.5)
69
+ if self.get_condition_img:
70
+ # print(os.path.join(os.path.dirname(condition_dir), os.path.basename(condition_dir).replace('codes', 'images'), condition_file.replace('npy', 'png')))
71
+ condition_img = cv2.imread(os.path.join(os.path.dirname(condition_dir), os.path.basename(condition_dir).replace('codes', 'images'), condition_file.replace('npy', 'png')))/255
72
+ condition_img = 2*(condition_img-0.5)
73
+ #condition = condition[None,...].repeat(3, axis=2)
74
+
75
+ features = np.load(os.path.join(feature_dir, feature_file))
76
+ if self.flip:
77
+ aug_idx = torch.randint(low=0, high=features.shape[1], size=(1,)).item()
78
+ if self.get_condition_img:
79
+ aug_idx = 0
80
+ features = features[:, aug_idx]
81
+ if self.condition_dir is not None:
82
+ # condition_code = condition_code[:, aug_idx]
83
+ condition_imgs = condition_imgs[aug_idx]
84
+
85
+ labels = np.load(os.path.join(label_dir, label_file))
86
+ # if self.condition_dir is not None:
87
+ # if self.get_condition_img:
88
+ # return torch.from_numpy(condition_img.transpose(2,0,1)).to(torch.float32), torch.from_numpy(condition) # (1, 256), (1,1)
89
+ # else:
90
+ # return torch.from_numpy(features), torch.from_numpy(labels), torch.from_numpy(condition) # (1, 256), (1,1)
91
+ # else:
92
+ # return torch.from_numpy(features), torch.from_numpy(labels)
93
+ outputs = {}
94
+ outputs['img_code'] = torch.from_numpy(features)
95
+ outputs['labels'] = torch.from_numpy(labels)
96
+ if self.condition_dir is not None:
97
+ # outputs['condition_code'] = torch.from_numpy(condition_code)
98
+ outputs['condition_imgs'] = torch.from_numpy(condition_imgs)
99
+ if self.get_condition_img:
100
+ outputs['condition_img'] = torch.from_numpy(condition_img.transpose(2,0,1))
101
+ return outputs
102
+
103
+
104
+ def build_imagenet(args, transform):
105
+ return ImageFolder(args.data_path, transform=transform)
106
+
107
+ def build_imagenet_code(args):
108
+ feature_dir = f"{args.code_path}/imagenet{args.image_size}_codes"
109
+ label_dir = f"{args.code_path}/imagenet{args.image_size}_labels"
110
+ if args.condition_type == 'canny':
111
+ condition_dir = f"{args.code_path}/imagenet{args.image_size}_canny_codes"
112
+ elif args.condition_type == 'hed':
113
+ condition_dir = f"{args.code_path}/imagenet{args.image_size}_hed_codes"
114
+ elif args.condition_type == 'depth':
115
+ condition_dir = f"{args.code_path}/imagenet{args.image_size}_depth_codes"
116
+ elif args.condition_type == 'none':
+ condition_dir = None
+ else:
+ raise ValueError(f"unsupported condition_type: {args.condition_type}")
118
+ assert os.path.exists(feature_dir) and os.path.exists(label_dir), \
119
+ f"please first run: bash scripts/autoregressive/extract_codes_c2i.sh ..."
120
+ return CustomDataset(feature_dir, label_dir, condition_dir, args.get_condition_img)
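A sketch (not part of this commit) of consuming the pre-extracted codes with a DataLoader, assuming the directory layout produced by extract_codes_c2i.sh; the path, condition type, and batch size are illustrative.

# sketch: iterating ImageNet code/label/condition files through a DataLoader
import argparse
from torch.utils.data import DataLoader
from dataset.imagenet import build_imagenet_code

args = argparse.Namespace(code_path='/path/to/codes', image_size=256,
                          condition_type='canny', get_condition_img=False)  # hypothetical values
dataset = build_imagenet_code(args)
loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
batch = next(iter(loader))
# keys: 'img_code', 'labels', and 'condition_imgs' when a condition type is set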
dataset/make_jsonl.py ADDED
@@ -0,0 +1,73 @@
1
+ # import os
2
+ # import json
3
+
4
+ # image_dir = "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/image"
5
+ # jsonl_path = "/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train/train.jsonl"
6
+
7
+ # with open(jsonl_path, "w") as f:
8
+ # for filename in sorted(os.listdir(image_dir)):
9
+ # if filename.endswith(".png"):
10
+ # img_path = os.path.join(image_dir, filename)
11
+ # entry = {
12
+ # "image_path": img_path
13
+ # }
14
+ # f.write(json.dumps(entry) + "\n")
15
+ import os
16
+ import json
17
+ from PIL import Image
18
+ from tqdm import tqdm
19
+
20
+ def generate_jsonl_from_folder(
21
+ image_dir,
22
+ output_jsonl_path,
23
+ code_dir_name='code',
24
+ caption_emb_dir_name='caption_emb',
25
+ control_dir_name='control',
26
+ label_dir_name='label'
27
+ ):
28
+ """
29
+ Automatically build a .jsonl file from the image folder, recording image_path and code_name.
+ Matches the data-loading format expected by the Text2ImgDataset class.
31
+ """
32
+ image_files = sorted([f for f in os.listdir(image_dir) if f.endswith('.png')])
33
+ parent_dir = os.path.basename(os.path.normpath(image_dir)) # usually 'train'
34
+
35
+ with open(output_jsonl_path, 'w') as f:
36
+ for file in tqdm(image_files, desc="Generating .jsonl"):
37
+ image_path = os.path.join(image_dir, file)
38
+ code_name = os.path.splitext(file)[0] # file name without the extension
39
+
40
+ # check that all companion files exist
41
+ code_path = os.path.join(os.path.dirname(image_dir), code_dir_name, f"{code_name}.npy")
42
+ caption_emb_path = os.path.join(os.path.dirname(image_dir), caption_emb_dir_name, f"{code_name}.npz")
43
+ control_path = os.path.join(os.path.dirname(image_dir), control_dir_name, f"{code_name}.png")
44
+ label_path = os.path.join(os.path.dirname(image_dir), label_dir_name, f"{code_name}.png")
45
+
46
+ if not (os.path.exists(code_path) and os.path.exists(caption_emb_path)
47
+ and os.path.exists(control_path) and os.path.exists(label_path)):
48
+ print(f"⚠️ missing companion files: {code_name}")
49
+ continue
50
+
51
+ data = {
52
+ "image_path": image_path,
53
+ "code_name": int(code_name) # keep it as a numeric index
54
+ }
55
+ f.write(json.dumps(data) + '\n')
56
+
57
+ print(f"✅ generated: {output_jsonl_path}")
58
+
59
+
60
+ if __name__ == '__main__':
61
+ # example paths (edit as needed)
62
+ root_dir = '/media2/user/data/wxy/ControlAR_old/data/Captioned_ADE20K/train'
63
+ image_dir = os.path.join(root_dir, 'image')
64
+ output_jsonl = os.path.join(root_dir, 'train_to_use.jsonl')
65
+
66
+ generate_jsonl_from_folder(
67
+ image_dir=image_dir,
68
+ output_jsonl_path=output_jsonl,
69
+ code_dir_name='code',
70
+ caption_emb_dir_name='caption_emb',
71
+ control_dir_name='control',
72
+ label_dir_name='label'
73
+ )
dataset/openimage.py ADDED
@@ -0,0 +1,42 @@
1
+ import os
2
+ import json
3
+ import numpy as np
4
+ from PIL import Image
5
+
6
+ import torch
7
+ from torch.utils.data import Dataset
8
+
9
+
10
+ class DatasetJson(Dataset):
11
+ def __init__(self, data_path, transform=None):
12
+ super().__init__()
13
+ self.data_path = data_path
14
+ self.transform = transform
15
+ json_path = os.path.join(data_path, 'image_paths.json')
16
+ assert os.path.exists(json_path), f"please first run: python3 tools/openimage_json.py"
17
+ with open(json_path, 'r') as f:
18
+ self.image_paths = json.load(f)
19
+
20
+ def __len__(self):
21
+ return len(self.image_paths)
22
+
23
+ def __getitem__(self, idx):
24
+ for _ in range(20):
25
+ try:
26
+ return self.getdata(idx)
27
+ except Exception as e:
28
+ print(f"Error details: {str(e)}")
29
+ idx = np.random.randint(len(self))
30
+ raise RuntimeError('Too many bad data.')
31
+
32
+ def getdata(self, idx):
33
+ image_path = self.image_paths[idx]
34
+ image_path_full = os.path.join(self.data_path, image_path)
35
+ image = Image.open(image_path_full).convert('RGB')
36
+ if self.transform:
37
+ image = self.transform(image)
38
+ return image, torch.tensor(0)
39
+
40
+
41
+ def build_openimage(args, transform):
42
+ return DatasetJson(args.data_path, transform=transform)
dataset/pexels.py ADDED
@@ -0,0 +1,4 @@
1
+ from torchvision.datasets import ImageFolder
2
+
3
+ def build_pexels(args, transform):
4
+ return ImageFolder(args.data_path, transform=transform)
dataset/t2i.py ADDED
@@ -0,0 +1,167 @@
1
+ import os
2
+ import json
3
+ import numpy as np
4
+
5
+ import torch
6
+ from torch.utils.data import Dataset
7
+ from PIL import Image
8
+
9
+ # This class only loads images (e.g. from ImageNet) and is used for the C2I task.
10
+ class Text2ImgDatasetImg(Dataset):
11
+ def __init__(self, lst_dir, face_lst_dir, transform):
12
+ img_path_list = []
13
+ valid_file_path = []
14
+ # walk the data path and collect all .jsonl files
15
+ for lst_name in sorted(os.listdir(lst_dir)):
16
+ if not lst_name.endswith('.jsonl'):
17
+ continue
18
+ file_path = os.path.join(lst_dir, lst_name)
19
+ valid_file_path.append(file_path)
20
+
21
+ # collect valid jsonl for face
22
+ if face_lst_dir is not None:
23
+ for lst_name in sorted(os.listdir(face_lst_dir)):
24
+ if not lst_name.endswith('_face.jsonl'):
25
+ continue
26
+ file_path = os.path.join(face_lst_dir, lst_name)
27
+ valid_file_path.append(file_path)
28
+ # read the jsonl files and collect all image paths
29
+ for file_path in valid_file_path:
30
+ with open(file_path, 'r') as file:
31
+ for line_idx, line in enumerate(file):
32
+ data = json.loads(line)
33
+ img_path = data['image_path']
34
+ code_dir = file_path.split('/')[-1].split('.')[0] # code_dir records the folder name so the matching T5 feature file can be located
35
+ img_path_list.append((img_path, code_dir, line_idx))
36
+ self.img_path_list = img_path_list
37
+ self.transform = transform
38
+
39
+ def __len__(self):
40
+ return len(self.img_path_list)
41
+ # preprocess the image: self.transform handles normalization and resizing (a transform must be passed in)
42
+ def __getitem__(self, index):
43
+ img_path, code_dir, code_name = self.img_path_list[index]
44
+ img = Image.open(img_path).convert("RGB")
45
+ if self.transform is not None:
46
+ img = self.transform(img)
47
+ return img, code_name
48
+
49
+
50
+ # This class returns an image tensor [3, H, W] and text features [1, 120, 2048]; it uses the ADE20K dataset for the T2I task.
51
+ class Text2ImgDataset(Dataset):
52
+ def __init__(self, args, transform):
53
+ img_path_list = []
54
+ valid_file_path = []
55
+ # collect valid jsonl file path
56
+ for lst_name in sorted(os.listdir(args.data_path)):
57
+ if not lst_name.endswith('.jsonl'):
58
+ continue
59
+ file_path = os.path.join(args.data_path, lst_name)
60
+ valid_file_path.append(file_path)
61
+
62
+ for file_path in valid_file_path:
63
+ with open(file_path, 'r') as file:
64
+ for line_idx, line in enumerate(file):
65
+ data = json.loads(line)
66
+ img_path = data['image_path']
67
+ code_dir = file_path.split('/')[-1].split('.')[0]
68
+ img_path_list.append((img_path, code_dir, line_idx))
69
+ self.img_path_list = img_path_list
70
+ self.transform = transform
71
+
72
+ self.t5_feat_path = args.t5_feat_path
73
+ self.short_t5_feat_path = args.short_t5_feat_path
74
+ self.t5_feat_path_base = self.t5_feat_path.split('/')[-1]
75
+ if self.short_t5_feat_path is not None:
76
+ self.short_t5_feat_path_base = self.short_t5_feat_path.split('/')[-1]
77
+ else:
78
+ self.short_t5_feat_path_base = self.t5_feat_path_base
79
+ self.image_size = args.image_size
80
+ latent_size = args.image_size // args.downsample_size
81
+ self.code_len = latent_size ** 2
82
+ self.t5_feature_max_len = 120
83
+ self.t5_feature_dim = 2048
84
+ self.max_seq_length = self.t5_feature_max_len + self.code_len
85
+
86
+ def __len__(self):
87
+ return len(self.img_path_list)
88
+
89
+ # return placeholder data when loading fails so the DataLoader does not crash
90
+ def dummy_data(self):
91
+ img = torch.zeros((3, self.image_size, self.image_size), dtype=torch.float32)
92
+ t5_feat_padding = torch.zeros((1, self.t5_feature_max_len, self.t5_feature_dim))
93
+ attn_mask = torch.tril(torch.ones(self.max_seq_length, self.max_seq_length, dtype=torch.bool)).unsqueeze(0)
94
+ valid = 0
95
+ return img, t5_feat_padding, attn_mask, valid
96
+
97
+ def __getitem__(self, index):
98
+ img_path, code_dir, code_name = self.img_path_list[index]
99
+ try:
100
+ img = Image.open(img_path).convert("RGB")
101
+ print(f"✅ opened image: {img_path}")
102
+ except Exception:
+ print(f"❌ failed to open image: {img_path}")
104
+ img, t5_feat_padding, attn_mask, valid = self.dummy_data()
105
+ return img, t5_feat_padding, attn_mask, torch.tensor(valid)
106
+
107
+ if min(img.size) < self.image_size:
108
+ print(f"⚠️ image too small: {img.size} < {self.image_size}")
109
+ img, t5_feat_padding, attn_mask, valid = self.dummy_data()
110
+ return img, t5_feat_padding, attn_mask, torch.tensor(valid)
111
+
112
+ # image preprocessing
113
+ if self.transform is not None:
114
+ img = self.transform(img)
115
+ # load the T5 text features
116
+ # t5_file = os.path.join(self.t5_feat_path, code_dir, f"{code_name}.npy")
117
+ t5_file = os.path.join(self.t5_feat_path, f"{code_name}.npz")
118
+ print(t5_file)
119
+ if torch.rand(1) < 0.3:
120
+ t5_file = t5_file.replace(self.t5_feat_path_base, self.short_t5_feat_path_base)
121
+
122
+ t5_feat_padding = torch.zeros((1, self.t5_feature_max_len, self.t5_feature_dim))
123
+ if os.path.isfile(t5_file):
124
+ try:
125
+ # t5_feat = torch.from_numpy(np.load(t5_file))
126
+ npz_data = np.load(t5_file)
127
+ t5_feat = torch.from_numpy(npz_data["caption_emb"])
128
+ t5_feat_len = t5_feat.shape[1]
129
+ feat_len = min(self.t5_feature_max_len, t5_feat_len)
130
+ t5_feat_padding[:, -feat_len:] = t5_feat[:, :feat_len]
131
+ emb_mask = torch.zeros((self.t5_feature_max_len,))
132
+ emb_mask[-feat_len:] = 1
133
+ attn_mask = torch.tril(torch.ones(self.max_seq_length, self.max_seq_length))
134
+ T = self.t5_feature_max_len
135
+ attn_mask[:, :T] = attn_mask[:, :T] * emb_mask.unsqueeze(0)
136
+ eye_matrix = torch.eye(self.max_seq_length, self.max_seq_length)
137
+ attn_mask = attn_mask * (1 - eye_matrix) + eye_matrix
138
+ attn_mask = attn_mask.unsqueeze(0).to(torch.bool)
139
+ valid = 1
140
+ except Exception:
141
+ img, t5_feat_padding, attn_mask, valid = self.dummy_data()
142
+ else:
143
+ img, t5_feat_padding, attn_mask, valid = self.dummy_data()
144
+ # Returns:
+ # image tensor [3, H, W], text features [1, 120, 2048],
+ # attention mask [1, max_seq_len, max_seq_len],
+ # and the validity flag `valid`.
148
+ return img, t5_feat_padding, attn_mask, torch.tensor(valid)
149
+
150
+
151
+ # This class would return the image tensor, the text features, and a control image (seg, depth, canny, ...)
+ # on datasets such as ADE20K or COCO for control-conditioned T2I; the actual loader is implemented in t2i_control.py.
153
+ class Text2ImgDatasetCode(Dataset):
154
+ def __init__(self, args):
155
+ pass
156
+
157
+
158
+
159
+
160
+ def build_t2i_image(args, transform):
161
+ return Text2ImgDatasetImg(args.data_path, args.data_face_path, transform)
162
+
163
+ def build_t2i(args, transform):
164
+ return Text2ImgDataset(args, transform)
165
+
166
+ def build_t2i_code(args):
167
+ return Text2ImgDatasetCode(args)
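The attention mask built in __getitem__ is the core of the text-prefix setup: padded caption slots are masked out of the causal mask, and the diagonal is kept so every position can still attend to itself. A standalone sketch of that construction (the sequence sizes and feat_len are illustrative assumptions):

# sketch: prefix attention mask for 120 text tokens followed by 32x32 image tokens
import torch

t5_len, code_len = 120, 32 * 32
max_seq = t5_len + code_len
feat_len = 77                                     # assumed true caption length (right-aligned)

emb_mask = torch.zeros(t5_len)
emb_mask[-feat_len:] = 1                          # only the trailing feat_len slots hold real text
attn_mask = torch.tril(torch.ones(max_seq, max_seq))
attn_mask[:, :t5_len] *= emb_mask.unsqueeze(0)    # hide padded text positions from all tokens
eye = torch.eye(max_seq)
attn_mask = attn_mask * (1 - eye) + eye           # restore the diagonal
attn_mask = attn_mask.unsqueeze(0).to(torch.bool)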
dataset/t2i_control.py ADDED
@@ -0,0 +1,199 @@
1
+ from PIL import PngImagePlugin
2
+ MaximumDecompressedSize = 1024
3
+ MegaByte = 2**20
4
+ PngImagePlugin.MAX_TEXT_CHUNK = MaximumDecompressedSize * MegaByte
5
+ import torch
6
+ from datasets import load_dataset, load_from_disk
7
+ import random
8
+ import pickle
9
+ import logging
10
+ from accelerate import Accelerator
11
+ from accelerate.logging import get_logger
12
+ from accelerate.utils import ProjectConfiguration, set_seed
13
+ from datasets import load_dataset, load_from_disk, concatenate_datasets
14
+ from huggingface_hub import create_repo, upload_folder
15
+ from transformers import AutoTokenizer, PretrainedConfig
16
+ import argparse
17
+ from PIL import Image
18
+ from pathlib import Path
19
+ from tqdm.auto import tqdm
20
+ from packaging import version
21
+ from torchvision import transforms
22
+ from torch.cuda.amp import autocast
23
+ from torchvision.transforms.functional import normalize
24
+
25
+ from dataset.utils import group_random_crop
26
+ import numpy as np
27
+ import os
28
+ from language.t5 import T5Embedder
29
+ from torch.utils.data import Dataset
30
+ from condition.canny import CannyDetector
31
+ # from condition.hed import HEDdetector
32
+
33
+
34
+ logger = get_logger(__name__)
35
+
36
+ class T2IControlCode(Dataset):
37
+ def __init__(self, args):
38
+ self.get_image = args.get_image
39
+ self.get_prompt = args.get_prompt
40
+ self.get_label = args.get_label
41
+ self.control_type = args.condition_type
42
+ if self.control_type == 'canny':
43
+ self.get_control = CannyDetector()
44
+
45
+ self.code_path = args.code_path
46
+ code_file_path = os.path.join(self.code_path, 'code')
47
+ file_num = len(os.listdir(code_file_path))
48
+ self.code_files = [os.path.join(code_file_path, f"{i}.npy") for i in range(file_num)]
49
+
50
+ if args.code_path2 is not None:
51
+ self.code_path2 = args.code_path2
52
+ code_file_path2 = os.path.join(self.code_path2, 'code')
53
+ file_num2 = len(os.listdir(code_file_path2))
54
+ self.code_files2 = [os.path.join(code_file_path2, f"{i}.npy") for i in range(file_num2)]
55
+ self.code_files = self.code_files + self.code_files2
56
+
57
+ self.image_size = args.image_size
58
+ latent_size = args.image_size // args.downsample_size
59
+ self.code_len = latent_size ** 2
60
+ self.t5_feature_max_len = 120
61
+ self.t5_feature_dim = 2048
62
+ self.max_seq_length = self.t5_feature_max_len + self.code_len
63
+
64
+ def __len__(self):
65
+ return len(self.code_files)
66
+
67
+ def dummy_data(self):
68
+ img = torch.zeros((3, self.image_size, self.image_size), dtype=torch.float32)
69
+ t5_feat_padding = torch.zeros((1, self.t5_feature_max_len, self.t5_feature_dim))
70
+ attn_mask = torch.tril(torch.ones(self.max_seq_length, self.max_seq_length, dtype=torch.bool)).unsqueeze(0)
71
+ valid = 0
72
+ return img, t5_feat_padding, attn_mask, valid
73
+
74
+ def collate_fn(self, examples):
75
+
76
+ code = torch.stack([example["code"] for example in examples])
77
+ control = torch.stack([example["control"] for example in examples])
78
+ if self.control_type == 'canny':
79
+ control = control.unsqueeze(1).repeat(1,3,1,1)
80
+ caption_emb = torch.stack([example["caption_emb"] for example in examples])
81
+ attn_mask = torch.stack([example["attn_mask"] for example in examples])
82
+ valid = torch.stack([example["valid"] for example in examples])
83
+ if self.get_image:
84
+ image = torch.stack([example["image"] for example in examples])
85
+ if self.get_prompt:
86
+ prompt = [example["prompt"][0] for example in examples]
87
+ if self.control_type == "seg":
88
+ label = torch.stack([example["label"] for example in examples])
89
+
90
+ output = {}
91
+ output['code'] = code
92
+ output['control'] = control
93
+ output['caption_emb'] = caption_emb
94
+ output['attn_mask'] = attn_mask
95
+ output['valid'] = valid
96
+ output['prompt'] = prompt
97
+ if self.get_image:
98
+ output['image'] = image
99
+ if self.get_prompt:
100
+ output['prompt'] = prompt
101
+ if self.control_type == "seg":
102
+ output['label'] = label
103
+ return output
104
+
105
+ def __getitem__(self, index):
106
+
107
+
108
+ code_path = self.code_files[index]
109
+ if self.control_type == 'seg':
110
+ control_path = code_path.replace('code', 'control').replace('npy', 'png')
111
+ control = np.array(Image.open(control_path))/255
112
+ control = 2*(control - 0.5)
113
+ elif self.control_type == 'depth':
114
+ control_path = code_path.replace('code', 'control_depth').replace('npy', 'png')
115
+ control = np.array(Image.open(control_path))/255
116
+ control = 2*(control - 0.5)
117
+ caption_path = code_path.replace('code', 'caption_emb').replace('npy', 'npz')
118
+ image_path = code_path.replace('code', 'image').replace('npy', 'png')
119
+ label_path = code_path.replace('code', 'label').replace('npy', 'png')
120
+
121
+ code = np.load(code_path)
122
+ image = np.array(Image.open(image_path))
123
+
124
+
125
+
126
+ t5_feat_padding = torch.zeros((1, self.t5_feature_max_len, self.t5_feature_dim))
127
+ caption = np.load(caption_path)
128
+
129
+ t5_feat = torch.from_numpy(caption['caption_emb'])
130
+ prompt = caption['prompt']
131
+ t5_feat_len = t5_feat.shape[1]
132
+ feat_len = min(self.t5_feature_max_len, t5_feat_len)
133
+ t5_feat_padding[:, -feat_len:] = t5_feat[:, :feat_len]
134
+ emb_mask = torch.zeros((self.t5_feature_max_len,))
135
+ emb_mask[-feat_len:] = 1
136
+ attn_mask = torch.tril(torch.ones(self.max_seq_length, self.max_seq_length))
137
+ T = self.t5_feature_max_len
138
+ attn_mask[:, :T] = attn_mask[:, :T] * emb_mask.unsqueeze(0)
139
+ eye_matrix = torch.eye(self.max_seq_length, self.max_seq_length)
140
+ attn_mask = attn_mask * (1 - eye_matrix) + eye_matrix
141
+ attn_mask = attn_mask.unsqueeze(0).to(torch.bool)
142
+ valid = 1
143
+
144
+ output = {}
145
+ output['code'] = torch.from_numpy(code)
146
+ if self.control_type == 'canny':
147
+ output['control'] = torch.from_numpy(2*(self.get_control(image)/255 - 0.5))
148
+ elif self.control_type == "seg":
149
+ output['control'] = torch.from_numpy(control.transpose(2,0,1))
150
+ elif self.control_type == "depth":
151
+ output['control'] = torch.from_numpy(control.transpose(2,0,1))
152
+ elif self.control_type == 'hed':
153
+ output['control'] = torch.from_numpy(image.transpose(2,0,1))
154
+ elif self.control_type == 'lineart':
155
+ output['control'] = torch.from_numpy(image.transpose(2,0,1))
156
+ output['caption_emb'] = t5_feat_padding
157
+ output['attn_mask'] = attn_mask
158
+ output['valid'] = torch.tensor(valid)
159
+ output['image'] = torch.from_numpy(image.transpose(2,0,1))
160
+ if self.get_prompt:
161
+ output['prompt'] = prompt
162
+ if self.control_type == "seg":
163
+ output['label'] = torch.from_numpy(np.array(Image.open(label_path)))
164
+ return output
165
+
166
+
167
+ def build_t2i_control_code(args):
168
+ dataset = T2IControlCode(args)
169
+ return dataset
170
+
171
+ if __name__ == '__main__':
172
+
173
+ args = parse_args()
174
+
175
+ logging_dir = Path(args.output_dir, args.logging_dir)
176
+
177
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
178
+
179
+ accelerator = Accelerator(
180
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
181
+ mixed_precision=args.mixed_precision,
182
+ log_with=args.report_to,
183
+ project_config=accelerator_project_config,
184
+ )
185
+
186
+ train_dataset, val_dataset = make_train_dataset(args, None, accelerator)
187
+
188
+
189
+ train_dataloader = torch.utils.data.DataLoader(
190
+ train_dataset,
191
+ shuffle=True,
192
+ collate_fn=collate_fn,
193
+ batch_size=8,
194
+ num_workers=0,
195
+ )
196
+
197
+ from tqdm import tqdm
198
+ for step, batch in tqdm(enumerate(train_dataloader)):
199
+ continue
dataset/test_dataset_t2i.py ADDED
@@ -0,0 +1,47 @@
1
+ import os
2
+ import argparse
3
+ import torch
4
+ from torchvision import transforms
5
+ from torch.utils.data import DataLoader
6
+ from PIL import Image
7
+ from t2i import Text2ImgDataset # make sure the class lives in t2i.py, or adjust the import path
8
+ from tqdm import tqdm
9
+
10
+ def get_args():
11
+ parser = argparse.ArgumentParser()
12
+ parser.add_argument('--data_path', type=str, required=True, help='directory containing the .jsonl files')
+ parser.add_argument('--t5_feat_path', type=str, required=True, help='path to the T5 feature files')
+ parser.add_argument('--short_t5_feat_path', type=str, default=None, help='alternative (short-caption) T5 feature path')
15
+ parser.add_argument('--image_size', type=int, default=512)
16
+ parser.add_argument('--downsample_size', type=int, default=8)
17
+ parser.add_argument('--max_show', type=int, default=5, help='maximum number of samples to display')
18
+ return parser.parse_args()
19
+
20
+ def main():
21
+ args = get_args()
22
+
23
+ transform = transforms.Compose([
24
+ transforms.Resize((args.image_size, args.image_size)),
25
+ transforms.ToTensor()
26
+ ])
27
+
28
+ dataset = Text2ImgDataset(args, transform=transform)
30
+ print(f"📦 dataset size: {len(dataset)}")
31
+
32
+ loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2)
33
+
34
+ for i, (img, t5_feat, attn_mask, valid) in enumerate(tqdm(loader)):
35
+ print(f"\n🟡 Sample #{i}")
36
+ print(f" - image shape: {img.shape}")
+ print(f" - T5 feature shape: {t5_feat.shape}")
+ print(f" - attention mask shape: {attn_mask.shape}")
+ print(f" - valid: {valid.item()}")
40
+ if valid.item() == 0:
41
+ print(" ⚠️ invalid sample: T5 features may be missing or the image failed to load")
42
+ if i + 1 >= args.max_show:
43
+ break
44
+
45
+ if __name__ == "__main__":
46
+ main()
47
+
dataset/test_t5_npz.py ADDED
@@ -0,0 +1,41 @@
1
+ import os
2
+ import json
3
+ import argparse
4
+ from tqdm import tqdm
5
+
6
+ def get_args():
7
+ parser = argparse.ArgumentParser()
8
+ parser.add_argument('--data_path', type=str, default="./data/Captioned_ADE20K/train", required=True, help='directory containing the .jsonl files')
+ parser.add_argument('--t5_feat_path', default="./data/Captioned_ADE20K/train/caption_emb", type=str, required=True, help='primary T5 feature path')
10
+ return parser.parse_args()
11
+
12
+ def main():
13
+ args = get_args()
14
+ error_list = []
15
+
16
+ # iterate over the .jsonl files
17
+ for fname in os.listdir(args.data_path):
18
+ if not fname.endswith('.jsonl'):
19
+ continue
20
+ fpath = os.path.join(args.data_path, fname)
21
+ with open(fpath, 'r') as f:
22
+ for line in tqdm(f, desc=f"Checking {fname}"):
23
+ data = json.loads(line.strip())
24
+ img_path = data['image_path'] # e.g., train/image/0.png
25
+ img_name = os.path.basename(img_path) # e.g., 0.png
26
+ code_name = os.path.splitext(img_name)[0] # e.g., 0
27
+
28
+ # check whether a .npz or .npy file exists
29
+ npz_path = os.path.join(args.t5_feat_path, f"{code_name}.npz")
30
+ npy_path = os.path.join(args.t5_feat_path, f"{code_name}.npy")
31
+ if not os.path.exists(npz_path) and not os.path.exists(npy_path):
32
+ error_list.append(code_name)
33
+
34
+ print(f"\nnumber of samples missing T5 feature files: {len(error_list)}")
+ if error_list:
+ print("missing ids (first 20):")
37
+ for path in error_list[:20]:
38
+ print(path)
39
+
40
+ if __name__ == '__main__':
41
+ main()
dataset/utils.py ADDED
@@ -0,0 +1,325 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import numpy as np
4
+ import torchvision.transforms.functional as F
5
+
6
+ from PIL import Image
7
+ from typing import Optional
8
+ from functools import partial
9
+ from torch import Tensor
10
+ from torchvision import transforms
11
+
12
+ # from canny_tools import Canny # canny edge detection
13
+ # from mmengine.hub import get_model # segmentation
14
+ from transformers import DPTForDepthEstimation # depth estimation
15
+
16
+ # from mmseg.models.losses.silog_loss import silog_loss
17
+ from torchvision.transforms import RandomCrop
18
+
19
+
20
+ def get_reward_model(task='segmentation', model_path='mmseg::upernet/upernet_r50_4xb4-160k_ade20k-512x512.py'):
21
+ """Return reward model for different tasks.
22
+
23
+ Args:
24
+ task (str, optional): Task name. Defaults to 'segmentation'.
25
+ model_path (str, optional): Model name or pre-trained path.
26
+
27
+ """
28
+ if task == 'segmentation':
29
+ return get_model(model_path, pretrained=True)
30
+ elif task == 'canny':
31
+ return Canny()
32
+ elif task == 'depth':
33
+ return DPTForDepthEstimation.from_pretrained(model_path)
34
+ elif task == 'lineart':
35
+ model = LineDrawingModel()
36
+ model.load_state_dict(torch.hub.load_state_dict_from_url(model_path, map_location=torch.device('cpu')))
37
+ return model
38
+ elif task == 'hed':
39
+ return HEDdetector(model_path)
40
+ else:
41
+ raise NotImplementedError("Only segmentation, canny, depth, lineart, and hed are supported for now.")
42
+
43
+
44
+ def get_reward_loss(predictions, labels, task='segmentation', **args):
45
+ """Return reward loss for different tasks.
46
+
47
+ Args:
48
+ task (str, optional): Task name.
49
+
50
+ Returns:
51
+ torch.nn.Module: Loss class.
52
+ """
53
+ if task == 'segmentation':
54
+ return nn.functional.cross_entropy(predictions, labels, ignore_index=255, **args)
55
+ elif task == 'canny':
56
+ loss = nn.functional.mse_loss(predictions, labels, **args).mean(2)
57
+ return loss.mean((-1,-2))
58
+ elif task in ['depth', 'lineart', 'hed']:
59
+ loss = nn.functional.mse_loss(predictions, labels, **args)
60
+ return loss
61
+ else:
62
+ raise NotImplementedError("Only segmentation, canny, depth, lineart, and hed are supported for now.")
63
+
64
+
65
+ def image_grid(imgs, rows, cols):
66
+ """Image grid for visualization."""
67
+ assert len(imgs) == rows * cols
68
+
69
+ w, h = imgs[0].size
70
+ grid = Image.new("RGB", size=(cols * w, rows * h))
71
+
72
+ for i, img in enumerate(imgs):
73
+ grid.paste(img, box=(i % cols * w, i // cols * h))
74
+ return grid
75
+
76
+
77
+ def map_color_to_index(image, dataset='limingcv/Captioned_ADE20K'):
78
+ """Map colored segmentation image (RGB) into original label format (L).
79
+
80
+ Args:
81
+ image (torch.tensor): image tensor with shape (N, 3, H, W).
82
+ dataset (str, optional): Dataset name. Defaults to 'ADE20K'.
83
+
84
+ Returns:
85
+ torch.tensor: mask tensor with shape (N, H, W).
86
+ """
87
+ if dataset == 'limingcv/Captioned_ADE20K':
88
+ palette = np.load('ade20k_palette.npy')
89
+ elif dataset == 'limingcv/Captioned_COCOStuff':
90
+ palette = np.load('coco_stuff_palette.npy')
91
+ else:
92
+ raise NotImplementedError("Only support ADE20K and COCO-Stuff dataset for now.")
93
+
94
+ image = image * 255
95
+ palette_tensor = torch.tensor(palette, dtype=image.dtype, device=image.device)
96
+ reshaped_image = image.permute(0, 2, 3, 1).reshape(-1, 3)
97
+
98
+ # Calculate the difference of colors and find the index of the minimum distance
99
+ indices = torch.argmin(torch.norm(reshaped_image[:, None, :] - palette_tensor, dim=-1), dim=-1)
100
+
101
+ # Transform indices back to original shape
102
+ return indices.view(image.shape[0], image.shape[2], image.shape[3])
103
+
104
+
105
+ def seg_label_transform(
106
+ labels,
107
+ dataset_name='limingcv/Captioned_ADE20K',
108
+ output_size=(64, 64),
109
+ interpolation=transforms.InterpolationMode.NEAREST,
110
+ max_size=None,
111
+ antialias=True):
112
+ """Adapt RGB seg_map into loss computation. \
113
+ (1) Map the RGB seg_map into the original label format (Single Channel). \
114
+ (2) Resize the seg_map into the same size as the output feature map. \
115
+ (3) Remove background class if needed (usually for ADE20K).
116
+
117
+ Args:
118
+ labels (torch.tensor): Segmentation map. (N, 3, H, W) for ADE20K and (N, H, W) for COCO-Stuff.
119
+ dataset_name (string): Dataset name. Default to 'ADE20K'.
120
+ output_size (tuple): Resized image size, should be aligned with the output of segmentation models.
121
+ interpolation (optional): _description_. Defaults to transforms.InterpolationMode.NEAREST.
122
+ max_size (optional): Defaults to None.
123
+ antialias (optional): Defaults to True.
124
+
125
+ Returns:
126
+ torch.tensor: formatted labels for loss computation.
127
+ """
128
+
129
+ if dataset_name == 'limingcv/Captioned_ADE20K':
130
+ labels = map_color_to_index(labels, dataset_name)
131
+ labels = F.resize(labels, output_size, interpolation, max_size, antialias)
132
+
133
+ # 0 means the background class in ADE20K
134
+ # In a unified format, we use 255 to represent the background class for both ADE20K and COCO-Stuff
135
+ labels = labels - 1
136
+ labels[labels == -1] = 255
137
+ elif dataset_name == 'limingcv/Captioned_COCOStuff':
138
+ labels = F.resize(labels, output_size, interpolation, max_size, antialias)
139
+
140
+ return labels.long()
141
+
142
+ def depth_label_transform(
143
+ labels,
144
+ dataset_name,
145
+ output_size=None,
146
+ interpolation=transforms.InterpolationMode.BILINEAR,
147
+ max_size=None,
148
+ antialias=True
149
+ ):
150
+
151
+ if output_size is not None:
152
+ labels = F.resize(labels, output_size, interpolation, max_size, antialias)
153
+ return labels
154
+
155
+
156
+ def edge_label_transform(labels, dataset_name):
157
+ return labels
158
+
159
+
160
+ def label_transform(labels, task, dataset_name, **args):
161
+ if task == 'segmentation':
162
+ return seg_label_transform(labels, dataset_name, **args)
163
+ elif task == 'depth':
164
+ return depth_label_transform(labels, dataset_name, **args)
165
+ elif task in ['canny', 'lineart', 'hed']:
166
+ return edge_label_transform(labels, dataset_name, **args)
167
+ else:
168
+ raise NotImplementedError("Only support segmentation and edge detection for now.")
169
+
170
+
171
+ def group_random_crop(images, resolution):
172
+ """
173
+ Args:
174
+ images (list of PIL Image or Tensor): List of images to be cropped.
175
+
176
+ Returns:
177
+ List of PIL Image or Tensor: List of cropped image.
178
+ """
179
+
180
+ if isinstance(resolution, int):
181
+ resolution = (resolution, resolution)
182
+
183
+ for idx, image in enumerate(images):
184
+ i, j, h, w = RandomCrop.get_params(image, output_size=resolution)
185
+ images[idx] = F.crop(image, i, j, h, w)
186
+
187
+ return images
188
+
189
+
190
+ norm_layer = nn.InstanceNorm2d
191
+ class ResidualBlock(nn.Module):
192
+ def __init__(self, in_features):
193
+ super(ResidualBlock, self).__init__()
194
+
195
+ conv_block = [ nn.ReflectionPad2d(1),
196
+ nn.Conv2d(in_features, in_features, 3),
197
+ norm_layer(in_features),
198
+ nn.ReLU(inplace=True),
199
+ nn.ReflectionPad2d(1),
200
+ nn.Conv2d(in_features, in_features, 3),
201
+ norm_layer(in_features)
202
+ ]
203
+
204
+ self.conv_block = nn.Sequential(*conv_block)
205
+
206
+ def forward(self, x):
207
+ return x + self.conv_block(x)
208
+
209
+
210
+ class LineDrawingModel(nn.Module):
211
+ def __init__(self, input_nc=3, output_nc=1, n_residual_blocks=3, sigmoid=True):
212
+ super(LineDrawingModel, self).__init__()
213
+
214
+ # Initial convolution block
215
+ model0 = [ nn.ReflectionPad2d(3),
216
+ nn.Conv2d(input_nc, 64, 7),
217
+ norm_layer(64),
218
+ nn.ReLU(inplace=True) ]
219
+ self.model0 = nn.Sequential(*model0)
220
+
221
+ # Downsampling
222
+ model1 = []
223
+ in_features = 64
224
+ out_features = in_features*2
225
+ for _ in range(2):
226
+ model1 += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
227
+ norm_layer(out_features),
228
+ nn.ReLU(inplace=True) ]
229
+ in_features = out_features
230
+ out_features = in_features*2
231
+ self.model1 = nn.Sequential(*model1)
232
+
233
+ model2 = []
234
+ # Residual blocks
235
+ for _ in range(n_residual_blocks):
236
+ model2 += [ResidualBlock(in_features)]
237
+ self.model2 = nn.Sequential(*model2)
238
+
239
+ # Upsampling
240
+ model3 = []
241
+ out_features = in_features//2
242
+ for _ in range(2):
243
+ model3 += [ nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),
244
+ norm_layer(out_features),
245
+ nn.ReLU(inplace=True) ]
246
+ in_features = out_features
247
+ out_features = in_features//2
248
+ self.model3 = nn.Sequential(*model3)
249
+
250
+ # Output layer
251
+ model4 = [ nn.ReflectionPad2d(3),
252
+ nn.Conv2d(64, output_nc, 7)]
253
+ if sigmoid:
254
+ model4 += [nn.Sigmoid()]
255
+
256
+ self.model4 = nn.Sequential(*model4)
257
+
258
+ def forward(self, x, cond=None):
259
+ out = self.model0(x)
260
+ out = self.model1(out)
261
+ out = self.model2(out)
262
+ out = self.model3(out)
263
+ out = self.model4(out)
264
+
265
+ return out
266
+
267
+
268
+
269
+ class DoubleConvBlock(torch.nn.Module):
270
+ def __init__(self, input_channel, output_channel, layer_number):
271
+ super().__init__()
272
+ self.convs = torch.nn.Sequential()
273
+ self.convs.append(torch.nn.Conv2d(in_channels=input_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1))
274
+ for i in range(1, layer_number):
275
+ self.convs.append(torch.nn.Conv2d(in_channels=output_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1))
276
+ self.projection = torch.nn.Conv2d(in_channels=output_channel, out_channels=1, kernel_size=(1, 1), stride=(1, 1), padding=0)
277
+
278
+ def __call__(self, x, down_sampling=False):
279
+ h = x
280
+ if down_sampling:
281
+ h = torch.nn.functional.max_pool2d(h, kernel_size=(2, 2), stride=(2, 2))
282
+ for conv in self.convs:
283
+ h = conv(h)
284
+ h = torch.nn.functional.relu(h)
285
+ return h, self.projection(h)
286
+
287
+
288
+ class ControlNetHED_Apache2(torch.nn.Module):
289
+ def __init__(self):
290
+ super().__init__()
291
+ self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1)))
292
+ self.block1 = DoubleConvBlock(input_channel=3, output_channel=64, layer_number=2)
293
+ self.block2 = DoubleConvBlock(input_channel=64, output_channel=128, layer_number=2)
294
+ self.block3 = DoubleConvBlock(input_channel=128, output_channel=256, layer_number=3)
295
+ self.block4 = DoubleConvBlock(input_channel=256, output_channel=512, layer_number=3)
296
+ self.block5 = DoubleConvBlock(input_channel=512, output_channel=512, layer_number=3)
297
+
298
+ def __call__(self, x):
299
+ h = x - self.norm
300
+ h, projection1 = self.block1(h)
301
+ h, projection2 = self.block2(h, down_sampling=True)
302
+ h, projection3 = self.block3(h, down_sampling=True)
303
+ h, projection4 = self.block4(h, down_sampling=True)
304
+ h, projection5 = self.block5(h, down_sampling=True)
305
+ return projection1, projection2, projection3, projection4, projection5
306
+
307
+
308
+ class HEDdetector(nn.Module):
309
+ def __init__(self, model_path):
310
+ super().__init__()
311
+ state_dict = torch.hub.load_state_dict_from_url(model_path, map_location=torch.device('cpu'))
312
+
313
+ self.netNetwork = ControlNetHED_Apache2()
314
+ self.netNetwork.load_state_dict(state_dict)
315
+
316
+ def __call__(self, input_image):
317
+ H, W = input_image.shape[2], input_image.shape[3]
318
+
319
+ edges = self.netNetwork((input_image * 255).clip(0, 255))
320
+ edges = [torch.nn.functional.interpolate(edge, size=(H, W), mode='bilinear') for edge in edges]
321
+ edges = torch.stack(edges, dim=1)
322
+ edge = 1 / (1 + torch.exp(-torch.mean(edges, dim=1)))
323
+ edge = (edge * 255.0).clip(0, 255).to(torch.uint8)
324
+
325
+ return edge / 255.0
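A sketch (not part of this commit) showing how get_reward_model and get_reward_loss pair up for the depth task; the checkpoint id, input resolution, and tensors below are assumptions for illustration.

# sketch: depth reward model plus per-sample MSE reward loss
import torch
from dataset.utils import get_reward_model, get_reward_loss

depth_model = get_reward_model(task='depth', model_path='Intel/dpt-large')  # assumed DPT checkpoint id
depth_model.eval()

images = torch.rand(2, 3, 384, 384)          # generated images in [0, 1] (normalization omitted)
target_depth = torch.rand(2, 384, 384)       # reference depth maps
with torch.no_grad():
    pred_depth = depth_model(images).predicted_depth
loss = get_reward_loss(pred_depth, target_depth, task='depth', reduction='none')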
demo/app.py ADDED
@@ -0,0 +1,34 @@
1
+ import os
2
+ import gradio as gr
3
+ from model import Model
4
+ from huggingface_hub import hf_hub_download
5
+ from app_canny import create_demo as create_demo_canny
6
+ from app_depth import create_demo as create_demo_depth
7
+
8
+ hf_hub_download(repo_id='wondervictor/ControlAR',
9
+ filename='canny_base.safetensors',
10
+ local_dir='./checkpoints/')
11
+ hf_hub_download(repo_id='wondervictor/ControlAR',
12
+ filename='depth_base.safetensors',
13
+ local_dir='./checkpoints/')
14
+ # hf_hub_download('google/flan-t5-xl', cache_dir='./checkpoints/')
15
+
16
+ DESCRIPTION = "# [ControlAR: Controllable Image Generation with Autoregressive Models](https://arxiv.org/abs/2410.02705) \n ### The first row in outputs is the input image and condition. The second row is the images generated by ControlAR. \n ### You can run locally by following the instruction on our [Github Repo](https://github.com/hustvl/ControlAR)."
17
+ SHOW_DUPLICATE_BUTTON = os.getenv("SHOW_DUPLICATE_BUTTON") == "1"
18
+ model = Model()
19
+ device = "cuda"
20
+ with gr.Blocks(css="style.css") as demo:
21
+ gr.Markdown(DESCRIPTION)
22
+ gr.DuplicateButton(
23
+ value="Duplicate Space for private use",
24
+ elem_id="duplicate-button",
25
+ visible=SHOW_DUPLICATE_BUTTON,
26
+ )
27
+ with gr.Tabs():
28
+ with gr.TabItem("Depth"):
29
+ create_demo_depth(model.process_depth)
30
+ with gr.TabItem("Canny"):
31
+ create_demo_canny(model.process_edge)
32
+
33
+ if __name__ == "__main__":
34
+ demo.launch(share=False)
demo/app_depth.py ADDED
@@ -0,0 +1,135 @@
1
+ import gradio as gr
2
+ import random
3
+
4
+
5
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
6
+ if randomize_seed:
7
+ seed = random.randint(0, 100000000)
8
+ return seed
9
+
10
+
11
+ examples = [
12
+ [
13
+ "condition/example/t2i/bird.jpg",
14
+ "A bird made of blue crystal"
15
+ ],
16
+ [
17
+ "condition/example/t2i/sofa.png",
18
+ "The red sofa in the living room has several pillows on it"
19
+ ],
20
+ [
21
+ "condition/example/t2i/house.jpg",
22
+ "A brick house with a chimney under a starry sky.",
23
+ ]
24
+ ]
25
+
26
+
27
+ def create_demo(process):
28
+ with gr.Blocks() as demo:
29
+ with gr.Row():
30
+ with gr.Column():
31
+ image = gr.Image()
32
+ prompt = gr.Textbox(label="Prompt")
33
+ run_button = gr.Button("Run")
34
+ with gr.Accordion("Advanced options", open=False):
35
+ preprocessor_name = gr.Radio(
36
+ label="Preprocessor",
37
+ choices=[
38
+ "depth",
39
+ "No preprocess",
40
+ ],
41
+ type="value",
42
+ value="depth",
43
+ info='depth.',
44
+ )
45
+ cfg_scale = gr.Slider(label="Guidance scale",
46
+ minimum=0.1,
47
+ maximum=30.0,
48
+ value=4,
49
+ step=0.1)
50
+ control_strength = gr.Slider(minimum=0., maximum=1.0, step=0.1, value=0.6, label="control_strength")
51
+ # resolution = gr.Slider(label="(H, W)",
52
+ # minimum=384,
53
+ # maximum=768,
54
+ # value=512,
55
+ # step=16)
56
+ top_k = gr.Slider(minimum=1,
57
+ maximum=16384,
58
+ step=1,
59
+ value=2000,
60
+ label='Top-K')
61
+ top_p = gr.Slider(minimum=0.,
62
+ maximum=1.0,
63
+ step=0.1,
64
+ value=1.0,
65
+ label="Top-P")
66
+ temperature = gr.Slider(minimum=0.,
67
+ maximum=1.0,
68
+ step=0.1,
69
+ value=1.0,
70
+ label='Temperature')
71
+ seed = gr.Slider(label="Seed",
72
+ minimum=0,
73
+ maximum=100000000,
74
+ step=1,
75
+ value=0)
76
+ randomize_seed = gr.Checkbox(label="Randomize seed",
77
+ value=True)
78
+ with gr.Column():
79
+ result = gr.Gallery(label="Output",
80
+ show_label=False,
81
+ height='800px',
82
+ columns=2,
83
+ object_fit="scale-down")
84
+ gr.Examples(
85
+ examples=examples,
86
+ inputs=[
87
+ image,
88
+ prompt,
89
+ # resolution,
90
+ ]
91
+ )
92
+ inputs = [
93
+ image,
94
+ prompt,
95
+ cfg_scale,
96
+ temperature,
97
+ top_k,
98
+ top_p,
99
+ seed,
100
+ control_strength,
101
+ preprocessor_name
102
+ ]
103
+ prompt.submit(
104
+ fn=randomize_seed_fn,
105
+ inputs=[seed, randomize_seed],
106
+ outputs=seed,
107
+ queue=False,
108
+ api_name=False,
109
+ ).then(
110
+ fn=process,
111
+ inputs=inputs,
112
+ outputs=result,
113
+ api_name=False,
114
+ )
115
+ run_button.click(
116
+ fn=randomize_seed_fn,
117
+ inputs=[seed, randomize_seed],
118
+ outputs=seed,
119
+ queue=False,
120
+ api_name=False,
121
+ ).then(
122
+ fn=process,
123
+ inputs=inputs,
124
+ outputs=result,
125
+ api_name="depth",
126
+ )
127
+ return demo
128
+
129
+
130
+ if __name__ == "__main__":
131
+ from model import Model
132
+ model = Model()
133
+ demo = create_demo(model.process_depth)
134
+ demo.queue().launch(share=False, server_name="0.0.0.0")
135
+
demo/app_edge.py ADDED
@@ -0,0 +1,150 @@
1
+ import gradio as gr
2
+ import random
3
+
4
+
5
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
6
+ if randomize_seed:
7
+ seed = random.randint(0, 100000000)
8
+ return seed
9
+
10
+
11
+ examples = [
12
+ [
13
+ "condition/example/t2i/landscape.jpg",
14
+ "Landscape photos with snow on the mountains in the distance and clear reflections in the lake near by",
15
+ ],
16
+ [
17
+ "condition/example/t2i/girl.jpg",
18
+ "A girl with blue hair",
19
+ ],
20
+ [
21
+ "condition/example/t2i/eye.png",
22
+ "A vivid drawing of an eye with a few pencils nearby",
23
+ ],
24
+ ]
25
+
26
+
27
+ def create_demo(process):
28
+ with gr.Blocks() as demo:
29
+ with gr.Row():
30
+ with gr.Column():
31
+ image = gr.Image()
32
+ prompt = gr.Textbox(label="Prompt")
33
+ run_button = gr.Button("Run")
34
+ with gr.Accordion("Advanced options", open=False):
35
+ preprocessor_name = gr.Radio(
36
+ label="Preprocessor",
37
+ choices=[
38
+ "Hed",
39
+ "Canny",
40
+ "Lineart",
41
+ "No preprocess",
42
+ ],
43
+ type="value",
44
+ value="Hed",
45
+ info='Edge type.',
46
+ )
47
+ canny_low_threshold = gr.Slider(
48
+ label="Canny low threshold",
49
+ minimum=0,
50
+ maximum=255,
51
+ value=100,
52
+ step=50)
53
+ canny_high_threshold = gr.Slider(
54
+ label="Canny high threshold",
55
+ minimum=0,
56
+ maximum=255,
57
+ value=200,
58
+ step=50)
59
+ cfg_scale = gr.Slider(label="Guidance scale",
60
+ minimum=0.1,
61
+ maximum=30.0,
62
+ value=4,
63
+ step=0.1)
64
+ control_strength = gr.Slider(minimum=0., maximum=1.0, step=0.1, value=0.6, label="control_strength")
65
+ # relolution = gr.Slider(label="(H, W)",
66
+ # minimum=384,
67
+ # maximum=768,
68
+ # value=512,
69
+ # step=16)
70
+ top_k = gr.Slider(minimum=1,
71
+ maximum=16384,
72
+ step=1,
73
+ value=2000,
74
+ label='Top-K')
75
+ top_p = gr.Slider(minimum=0.,
76
+ maximum=1.0,
77
+ step=0.1,
78
+ value=1.0,
79
+ label="Top-P")
80
+ temperature = gr.Slider(minimum=0.,
81
+ maximum=1.0,
82
+ step=0.1,
83
+ value=1.0,
84
+ label='Temperature')
85
+ seed = gr.Slider(label="Seed",
86
+ minimum=0,
87
+ maximum=100000000,
88
+ step=1,
89
+ value=0)
90
+ randomize_seed = gr.Checkbox(label="Randomize seed",
91
+ value=True)
92
+ with gr.Column():
93
+ result = gr.Gallery(label="Output",
94
+ show_label=False,
95
+ height='800px',
96
+ columns=2,
97
+ object_fit="scale-down")
98
+ gr.Examples(
99
+ examples=examples,
100
+ inputs=[
101
+ image,
102
+ prompt,
103
+ # resolution,
104
+ ]
105
+ )
106
+ inputs = [
107
+ image,
108
+ prompt,
109
+ cfg_scale,
110
+ temperature,
111
+ top_k,
112
+ top_p,
113
+ seed,
114
+ canny_low_threshold,
115
+ canny_high_threshold,
116
+ control_strength,
117
+ preprocessor_name,
118
+ ]
119
+ # prompt.submit(
120
+ # fn=randomize_seed_fn,
121
+ # inputs=[seed, randomize_seed],
122
+ # outputs=seed,
123
+ # queue=False,
124
+ # api_name=False,
125
+ # ).then(
126
+ # fn=process,
127
+ # inputs=inputs,
128
+ # outputs=result,
129
+ # api_name=False,
130
+ # )
131
+ run_button.click(
132
+ fn=randomize_seed_fn,
133
+ inputs=[seed, randomize_seed],
134
+ outputs=seed,
135
+ queue=False,
136
+ api_name=False,
137
+ ).then(
138
+ fn=process,
139
+ inputs=inputs,
140
+ outputs=result,
141
+ api_name="edge",
142
+ )
143
+ return demo
144
+
145
+
146
+ if __name__ == "__main__":
147
+ from model import Model
148
+ model = Model()
149
+ demo = create_demo(model.process_edge)
150
+ demo.queue().launch(share=False, server_name="0.0.0.0")
demo/model.py ADDED
@@ -0,0 +1,284 @@
1
+ import gc
2
+ import spaces
3
+ from safetensors.torch import load_file
4
+ from autoregressive.models.gpt_t2i import GPT_models
5
+ from tokenizer.tokenizer_image.vq_model import VQ_models
6
+ from language.t5 import T5Embedder
7
+ import torch
8
+ import numpy as np
9
+ import PIL
10
+ from PIL import Image
11
+ from condition.canny import CannyDetector
12
+ import time
13
+ from autoregressive.models.generate import generate
14
+ from condition.midas.depth import MidasDetector
15
+ from preprocessor import Preprocessor
16
+
17
+ models = {
18
+ "edge": "checkpoints/edge_base.safetensors",
19
+ "depth": "checkpoints/depth_base.safetensors",
20
+ }
21
+ class Model:
22
+ def __init__(self):
23
+ self.device = torch.device(
24
+ "cuda")
25
+ self.base_model_id = ""
26
+ self.task_name = ""
27
+ self.vq_model = self.load_vq()
28
+ self.t5_model = self.load_t5()
29
+ # self.gpt_model_edge = self.load_gpt(condition_type='edge')
30
+ # self.gpt_model_depth = self.load_gpt(condition_type='depth')
31
+ self.gpt_model = self.load_gpt()
32
+ self.preprocessor = Preprocessor()
33
+
34
+ def to(self, device):
35
+ self.gpt_model.to(device)
36
+
37
+ def load_vq(self):
38
+ vq_model = VQ_models["VQ-16"](codebook_size=16384,
39
+ codebook_embed_dim=8)
40
+ vq_model.eval()
41
+ checkpoint = torch.load(f"checkpoints/vq_ds16_t2i.pt",
42
+ map_location="cpu")
43
+ vq_model.load_state_dict(checkpoint["model"])
44
+ del checkpoint
45
+ print("image tokenizer is loaded")
46
+ return vq_model
47
+
48
+ def load_gpt(self, condition_type='edge'):
49
+ # gpt_ckpt = models[condition_type]
50
+ # precision = torch.bfloat16
51
+ precision = torch.float32
52
+ latent_size = 512 // 16
53
+ gpt_model = GPT_models["GPT-XL"](
54
+ block_size=latent_size**2,
55
+ cls_token_num=120,
56
+ model_type='t2i',
57
+ condition_type=condition_type,
58
+ adapter_size='base',
59
+ ).to(device='cpu', dtype=precision)
60
+ # model_weight = load_file(gpt_ckpt)
61
+ # gpt_model.load_state_dict(model_weight, strict=False)
62
+ # gpt_model.eval()
63
+ # print("gpt model is loaded")
64
+ return gpt_model
65
+
66
+ def load_gpt_weight(self, condition_type='edge'):
67
+ torch.cuda.empty_cache()
68
+ gc.collect()
69
+ gpt_ckpt = models[condition_type]
70
+ model_weight = load_file(gpt_ckpt)
71
+ self.gpt_model.load_state_dict(model_weight, strict=False)
72
+ self.gpt_model.eval()
73
+ torch.cuda.empty_cache()
74
+ gc.collect()
75
+ # print("gpt model is loaded")
76
+
77
+ def load_t5(self):
78
+ # precision = torch.bfloat16
79
+ precision = torch.float32
80
+ t5_model = T5Embedder(
81
+ device=self.device,
82
+ local_cache=True,
83
+ cache_dir='checkpoints/flan-t5-xl',
84
+ dir_or_name='flan-t5-xl',
85
+ torch_dtype=precision,
86
+ model_max_length=120,
87
+ )
88
+ return t5_model
89
+
90
+ @torch.no_grad()
91
+ @spaces.GPU(enable_queue=True)
92
+ def process_edge(
93
+ self,
94
+ image: np.ndarray,
95
+ prompt: str,
96
+ cfg_scale: float,
97
+ temperature: float,
98
+ top_k: int,
99
+ top_p: int,
100
+ seed: int,
101
+ low_threshold: int,
102
+ high_threshold: int,
103
+ control_strength: float,
104
+ preprocessor_name: str,
105
+ ) -> list[PIL.Image.Image]:
106
+
107
+ if isinstance(image, np.ndarray):
108
+ image = Image.fromarray(image)
109
+ origin_W, origin_H = image.size
110
+ if preprocessor_name == 'Canny':
111
+ self.preprocessor.load("Canny")
112
+ condition_img = self.preprocessor(
113
+ image=image, low_threshold=low_threshold, high_threshold=high_threshold, detect_resolution=512)
114
+ elif preprocessor_name == 'Hed':
115
+ self.preprocessor.load("HED")
116
+ condition_img = self.preprocessor(
117
+ image=image,image_resolution=512, detect_resolution=512)
118
+ elif preprocessor_name == 'Lineart':
119
+ self.preprocessor.load("Lineart")
120
+ condition_img = self.preprocessor(
121
+ image=image,image_resolution=512, detect_resolution=512)
122
+ elif preprocessor_name == 'No preprocess':
123
+ condition_img = image
124
+ print('get edge')
125
+ del self.preprocessor.model
126
+ torch.cuda.empty_cache()
127
+ condition_img = condition_img.resize((512,512))
128
+ W, H = condition_img.size
129
+
130
+ self.t5_model.model.to('cuda').to(torch.bfloat16)
131
+ self.load_gpt_weight('edge')
132
+ self.gpt_model.to('cuda').to(torch.bfloat16)
133
+ self.vq_model.to('cuda')
134
+ condition_img = torch.from_numpy(np.array(condition_img)).unsqueeze(0).permute(0,3,1,2).repeat(1,1,1,1)
135
+ condition_img = condition_img.to(self.device)
136
+ condition_img = 2*(condition_img/255 - 0.5)
137
+ prompts = [prompt] * 1
138
+ caption_embs, emb_masks = self.t5_model.get_text_embeddings(prompts)
139
+
140
+ print(f"processing left-padding...")
141
+ new_emb_masks = torch.flip(emb_masks, dims=[-1])
142
+ new_caption_embs = []
143
+ for idx, (caption_emb,
144
+ emb_mask) in enumerate(zip(caption_embs, emb_masks)):
145
+ valid_num = int(emb_mask.sum().item())
146
+ print(f' prompt {idx} token len: {valid_num}')
147
+ new_caption_emb = torch.cat(
148
+ [caption_emb[valid_num:], caption_emb[:valid_num]])
149
+ new_caption_embs.append(new_caption_emb)
150
+ new_caption_embs = torch.stack(new_caption_embs)
151
+ c_indices = new_caption_embs * new_emb_masks[:, :, None]
152
+ c_emb_masks = new_emb_masks
153
+ qzshape = [len(c_indices), 8, H // 16, W // 16]
154
+ t1 = time.time()
155
+ print(caption_embs.device)
156
+ index_sample = generate(
157
+ self.gpt_model,
158
+ c_indices,
159
+ (H // 16) * (W // 16),
160
+ c_emb_masks,
161
+ condition=condition_img,
162
+ cfg_scale=cfg_scale,
163
+ temperature=temperature,
164
+ top_k=top_k,
165
+ top_p=top_p,
166
+ sample_logits=True,
167
+ control_strength=control_strength,
168
+ )
169
+ sampling_time = time.time() - t1
170
+ print(f"Full sampling takes about {sampling_time:.2f} seconds.")
171
+
172
+ t2 = time.time()
173
+ print(index_sample.shape)
174
+ samples = self.vq_model.decode_code(
175
+ index_sample, qzshape) # output value is between [-1, 1]
176
+ decoder_time = time.time() - t2
177
+ print(f"decoder takes about {decoder_time:.2f} seconds.")
178
+ # samples = condition_img[0:1]
179
+ samples = torch.cat((condition_img[0:1], samples), dim=0)
180
+ samples = 255 * (samples * 0.5 + 0.5)
181
+ samples = [
182
+ Image.fromarray(
183
+ sample.permute(1, 2, 0).cpu().detach().numpy().clip(
184
+ 0, 255).astype(np.uint8)) for sample in samples
185
+ ]
186
+ del condition_img
187
+ torch.cuda.empty_cache()
188
+ return samples
189
+
190
+ @torch.no_grad()
191
+ @spaces.GPU(enable_queue=True)
192
+ def process_depth(
193
+ self,
194
+ image: np.ndarray,
195
+ prompt: str,
196
+ cfg_scale: float,
197
+ temperature: float,
198
+ top_k: int,
199
+ top_p: int,
200
+ seed: int,
201
+ control_strength: float,
202
+ preprocessor_name: str
203
+ ) -> list[PIL.Image.Image]:
204
+
205
+ if isinstance(image, np.ndarray):
206
+ image = Image.fromarray(image)
207
+ origin_W, origin_H = image.size
208
+ # print(image)
209
+ if preprocessor_name == 'depth':
210
+ self.preprocessor.load("Depth")
211
+ condition_img = self.preprocessor(
212
+ image=image,
213
+ image_resolution=512,
214
+ detect_resolution=512,
215
+ )
216
+ elif preprocessor_name == 'No preprocess':
217
+ condition_img = image
218
+ print('get depth')
219
+ del self.preprocessor.model
220
+ torch.cuda.empty_cache()
221
+ condition_img = condition_img.resize((512,512))
222
+ W, H = condition_img.size
223
+
224
+ self.t5_model.model.to(self.device).to(torch.bfloat16)
225
+ self.load_gpt_weight('depth')
226
+ self.gpt_model.to('cuda').to(torch.bfloat16)
227
+ self.vq_model.to(self.device)
228
+ condition_img = torch.from_numpy(np.array(condition_img)).unsqueeze(0).permute(0,3,1,2).repeat(1,1,1,1)
229
+ condition_img = condition_img.to(self.device)
230
+ condition_img = 2*(condition_img/255 - 0.5)
231
+ prompts = [prompt] * 1
232
+ caption_embs, emb_masks = self.t5_model.get_text_embeddings(prompts)
233
+
234
+ print(f"processing left-padding...")
235
+ new_emb_masks = torch.flip(emb_masks, dims=[-1])
236
+ new_caption_embs = []
237
+ for idx, (caption_emb,
238
+ emb_mask) in enumerate(zip(caption_embs, emb_masks)):
239
+ valid_num = int(emb_mask.sum().item())
240
+ print(f' prompt {idx} token len: {valid_num}')
241
+ new_caption_emb = torch.cat(
242
+ [caption_emb[valid_num:], caption_emb[:valid_num]])
243
+ new_caption_embs.append(new_caption_emb)
244
+ new_caption_embs = torch.stack(new_caption_embs)
245
+
246
+ c_indices = new_caption_embs * new_emb_masks[:, :, None]
247
+ c_emb_masks = new_emb_masks
248
+ qzshape = [len(c_indices), 8, H // 16, W // 16]
249
+ t1 = time.time()
250
+ index_sample = generate(
251
+ self.gpt_model,
252
+ c_indices,
253
+ (H // 16) * (W // 16),
254
+ c_emb_masks,
255
+ condition=condition_img,
256
+ cfg_scale=cfg_scale,
257
+ temperature=temperature,
258
+ top_k=top_k,
259
+ top_p=top_p,
260
+ sample_logits=True,
261
+ control_strength=control_strength,
262
+ )
263
+ sampling_time = time.time() - t1
264
+ print(f"Full sampling takes about {sampling_time:.2f} seconds.")
265
+
266
+ t2 = time.time()
267
+ print(index_sample.shape)
268
+ samples = self.vq_model.decode_code(index_sample, qzshape)
269
+ decoder_time = time.time() - t2
270
+ print(f"decoder takes about {decoder_time:.2f} seconds.")
271
+ condition_img = condition_img.cpu()
272
+ samples = samples.cpu()
273
+
274
+ # samples = condition_img[0:1]
275
+ samples = torch.cat((condition_img[0:1], samples), dim=0)
276
+ samples = 255 * (samples * 0.5 + 0.5)
277
+ samples = [
278
+ Image.fromarray(
279
+ sample.permute(1, 2, 0).cpu().detach().numpy().clip(0, 255).astype(np.uint8))
280
+ for sample in samples
281
+ ]
282
+ del condition_img
283
+ torch.cuda.empty_cache()
284
+ return samples
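
For reference, a minimal self-contained sketch of the left-padding step used in `process_edge`/`process_depth` above: the valid caption tokens are rotated to the end of the sequence and the embedding mask is flipped to match. Shapes and values are toy assumptions for illustration only.

```
import torch

# Toy caption embedding: 1 prompt, 6 token slots, 4-dim features; only the first 2 tokens are valid.
caption_embs = torch.arange(24, dtype=torch.float32).reshape(1, 6, 4)
emb_masks = torch.tensor([[1, 1, 0, 0, 0, 0]])  # right-padded mask

# Flip the mask so the padding moves to the left.
new_emb_masks = torch.flip(emb_masks, dims=[-1])

# Rotate each prompt's valid tokens to the end of the sequence.
new_caption_embs = []
for caption_emb, emb_mask in zip(caption_embs, emb_masks):
    valid_num = int(emb_mask.sum().item())
    new_caption_embs.append(torch.cat([caption_emb[valid_num:], caption_emb[:valid_num]]))
new_caption_embs = torch.stack(new_caption_embs)

# Zero out the (now left-side) padding, as done before calling generate().
c_indices = new_caption_embs * new_emb_masks[:, :, None]
print(new_emb_masks)      # tensor([[0, 0, 0, 0, 1, 1]])
print(c_indices[0, -2:])  # the two valid token embeddings, now at the end
```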
evaluations/ade20k_mIoU.py ADDED
@@ -0,0 +1,68 @@
1
+ import os
2
+ import numpy as np
3
+ from mmseg.apis import init_model, inference_model, show_result_pyplot#, inference_segmentor
4
+ import torch
5
+ from PIL import Image
6
+ from sklearn.metrics import confusion_matrix
7
+ from torchmetrics import JaccardIndex
8
+
9
+ def main():
10
+ config_file = 'mmsegmentation/configs/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640.py'
11
+ checkpoint_file = 'evaluations/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640_20221203_235933-7120c214.pth'
12
+
13
+ # build the model from a config file and a checkpoint file
14
+ model = init_model(config_file, checkpoint_file, device='cuda:0')
15
+
16
+ # Image and segmentation labels directories
17
+ img_dir = 'sample/ade20k/visualization'
18
+ ann_dir = 'sample/ade20k/annotations'
19
+
20
+ # List all image files
21
+ img_fns = [f for f in sorted(os.listdir(img_dir)) if f.endswith(".png")]
22
+ # ann_fns = [f for f in sorted(os.listdir(ann_dir)) if f.endswith(".png")]
23
+
24
+ total_mIoU = 0
25
+ from tqdm import tqdm
26
+ i = 0
27
+ jaccard_index = JaccardIndex(task="multiclass", num_classes=150)
28
+ num_classes = 150
29
+ conf_matrix = np.zeros((num_classes + 1, num_classes + 1), dtype=np.int64)
30
+ for img_fn in tqdm(img_fns):
31
+ i += 1
32
+ # if i >= 100:
33
+ # break
34
+ # try:
35
+ img_path = os.path.join(img_dir, img_fn)
36
+ ann_path = os.path.join(ann_dir, img_fn)
37
+ result = inference_model(model, img_path)
38
+ # except Exception as e:
39
+ # continue
40
+ # Read ground truth segmentation map
41
+ gt_semantic_seg = np.array(Image.open(ann_path))
42
+
43
+ ignore_label = 0
44
+ gt = gt_semantic_seg.copy()
45
+ pred = result.pred_sem_seg.data[0].cpu().numpy().copy()+1
46
+ gt[gt == ignore_label] = num_classes
47
+ conf_matrix += np.bincount(
48
+ (num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
49
+ minlength=conf_matrix.size,
50
+ ).reshape(conf_matrix.shape)
51
+
52
+
53
+ # calculate miou
54
+ acc = np.full(num_classes, np.nan, dtype=np.float64)
55
+ iou = np.full(num_classes, np.nan, dtype=np.float64)
56
+ tp = conf_matrix.diagonal()[:-1].astype(np.float64)
57
+ pos_gt = np.sum(conf_matrix[:-1, :-1], axis=0).astype(np.float64)
58
+ pos_pred = np.sum(conf_matrix[:-1, :-1], axis=1).astype(np.float64)
59
+ acc_valid = pos_gt > 0
60
+ acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
61
+ iou_valid = (pos_gt + pos_pred) > 0
62
+ union = pos_gt + pos_pred - tp
63
+ iou[acc_valid] = tp[acc_valid] / union[acc_valid]
64
+ miou = np.sum(iou[acc_valid]) / np.sum(iou_valid)
65
+ print(f"mIoU: {miou}")
66
+
67
+ if __name__ == '__main__':
68
+ main()
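
The mIoU computation in `ade20k_mIoU.py` above (and `cocostuff_mIoU.py` below) accumulates a `(num_classes + 1) x (num_classes + 1)` confusion matrix with a single `np.bincount` call and then reads true positives and unions off it. A minimal sketch of that trick on a 3-class toy example (all values are made up for illustration):

```
import numpy as np

num_classes = 3
# Toy prediction / ground-truth label maps (class ids in [0, num_classes - 1]).
pred = np.array([[0, 1], [2, 2]])
gt = np.array([[0, 1], [1, 2]])

# Row = predicted class, column = ground-truth class; the extra slot holds ignored pixels.
conf_matrix = np.bincount(
    (num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
    minlength=(num_classes + 1) ** 2,
).reshape(num_classes + 1, num_classes + 1)

tp = conf_matrix.diagonal()[:-1].astype(np.float64)
pos_gt = conf_matrix[:-1, :-1].sum(axis=0).astype(np.float64)
pos_pred = conf_matrix[:-1, :-1].sum(axis=1).astype(np.float64)
union = pos_gt + pos_pred - tp
valid = union > 0
iou = tp[valid] / union[valid]
print("per-class IoU:", iou, "mIoU:", iou.mean())
```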
evaluations/c2i/README.md ADDED
@@ -0,0 +1,96 @@
1
+ # Evaluations from [OpenAI](https://github.com/openai/guided-diffusion/tree/main/evaluations)
2
+
3
+ To compare different generative models, we use FID, sFID, Precision, Recall, and Inception Score. These metrics can all be calculated using batches of samples, which we store in `.npz` (numpy) files.
4
+
5
+ # Installation
6
+ ### cuda version 11.7
7
+ ```
8
+ pip install tensorflow-gpu==2.5.0
9
+ pip install numpy==1.22.0
10
+ pip install scipy
11
+ pip install pydantic
12
+ ```
13
+ An error like `tensorflow.python.framework.errors_impl.NotFoundError: /usr/local/lib/python3.9/dist-packages/tensorflow/core/kernels/libtfkernel_sobol_op.so: undefined symbol: _ZN10tensorflow...` may occur; deleting `/usr/local/lib/python3.9/dist-packages/tensorflow/core/kernels/libtfkernel_sobol_op.so` fixes it.
14
+
15
+ ### cuda version 12.1
16
+ ```
17
+ pip install tensorflow
18
+ pip install numpy==1.23.5
19
+ pip install scipy
20
+ ```
21
+
22
+ ### H100, cuda version 12.2
23
+ ```
24
+ pip install tensorflow
25
+ pip install numpy==1.26.2
26
+ pip install scipy
27
+ ```
28
+
29
+ # Download batches
30
+
31
+ We provide pre-computed sample batches for the reference datasets, our diffusion models, and several baselines we compare against. These are all stored in `.npz` format.
32
+
33
+ Reference dataset batches contain pre-computed statistics over the whole dataset, as well as 10,000 images for computing Precision and Recall. All other batches contain 50,000 images which can be used to compute statistics and Precision/Recall.
34
+
35
+ Here are links to download all of the sample and reference batches:
36
+
37
+ * LSUN
38
+ * LSUN bedroom: [reference batch](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/lsun/bedroom/VIRTUAL_lsun_bedroom256.npz)
39
+ * [ADM (dropout)](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/lsun/bedroom/admnet_dropout_lsun_bedroom.npz)
40
+ * [DDPM](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/lsun/bedroom/ddpm_lsun_bedroom.npz)
41
+ * [IDDPM](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/lsun/bedroom/iddpm_lsun_bedroom.npz)
42
+ * [StyleGAN](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/lsun/bedroom/stylegan_lsun_bedroom.npz)
43
+ * LSUN cat: [reference batch](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/lsun/cat/VIRTUAL_lsun_cat256.npz)
44
+ * [ADM (dropout)](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/lsun/cat/admnet_dropout_lsun_cat.npz)
45
+ * [StyleGAN2](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/lsun/cat/stylegan2_lsun_cat.npz)
46
+ * LSUN horse: [reference batch](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/lsun/horse/VIRTUAL_lsun_horse256.npz)
47
+ * [ADM (dropout)](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/lsun/horse/admnet_dropout_lsun_horse.npz)
48
+ * [ADM](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/lsun/horse/admnet_lsun_horse.npz)
49
+
50
+ * ImageNet
51
+ * ImageNet 64x64: [reference batch](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/64/VIRTUAL_imagenet64_labeled.npz)
52
+ * [ADM](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/64/admnet_imagenet64.npz)
53
+ * [IDDPM](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/64/iddpm_imagenet64.npz)
54
+ * [BigGAN](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/64/biggan_deep_imagenet64.npz)
55
+ * ImageNet 128x128: [reference batch](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/128/VIRTUAL_imagenet128_labeled.npz)
56
+ * [ADM](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/128/admnet_imagenet128.npz)
57
+ * [ADM-G](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/128/admnet_guided_imagenet128.npz)
58
+ * [ADM-G, 25 steps](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/128/admnet_guided_25step_imagenet128.npz)
59
+ * [BigGAN-deep (trunc=1.0)](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/128/biggan_deep_trunc1_imagenet128.npz)
60
+ * ImageNet 256x256: [reference batch](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/256/VIRTUAL_imagenet256_labeled.npz)
61
+ * [ADM](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/256/admnet_imagenet256.npz)
62
+ * [ADM-G](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/256/admnet_guided_imagenet256.npz)
63
+ * [ADM-G, 25 step](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/256/admnet_guided_25step_imagenet256.npz)
64
+ * [ADM-G + ADM-U](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/256/admnet_guided_upsampled_imagenet256.npz)
65
+ * [ADM-U](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/256/admnet_upsampled_imagenet256.npz)
66
+ * [BigGAN-deep (trunc=1.0)](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/256/biggan_deep_trunc1_imagenet256.npz)
67
+ * ImageNet 512x512: [reference batch](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/512/VIRTUAL_imagenet512.npz)
68
+ * [ADM](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/512/admnet_imagenet512.npz)
69
+ * [ADM-G](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/512/admnet_guided_imagenet512.npz)
70
+ * [ADM-G, 25 step](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/512/admnet_guided_25step_imagenet512.npz)
71
+ * [ADM-G + ADM-U](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/512/admnet_guided_upsampled_imagenet512.npz)
72
+ * [ADM-U](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/512/admnet_upsampled_imagenet512.npz)
73
+ * [BigGAN-deep (trunc=1.0)](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/512/biggan_deep_trunc1_imagenet512.npz)
74
+
75
+ # Run evaluations
76
+
77
+ First, generate or download a batch of samples and download the corresponding reference batch for the given dataset. For this example, we'll use ImageNet 256x256, so the reference batch is `VIRTUAL_imagenet256_labeled.npz` and we can use the sample batch `admnet_guided_upsampled_imagenet256.npz`.
78
+
79
+ Next, run the `evaluator.py` script. The requirements of this script can be found in [requirements.txt](requirements.txt). Pass two arguments to the script: the reference batch and the sample batch. The script will download the InceptionV3 model used for evaluations into the current working directory (if it is not already present). This file is roughly 100MB.
80
+
81
+ The output of the script will look something like this, where the first `...` is a bunch of verbose TensorFlow logging:
82
+
83
+ ```
84
+ $ python evaluator.py VIRTUAL_imagenet256_labeled.npz admnet_guided_upsampled_imagenet256.npz
85
+ ...
86
+ computing reference batch activations...
87
+ computing/reading reference batch statistics...
88
+ computing sample batch activations...
89
+ computing/reading sample batch statistics...
90
+ Computing evaluations...
91
+ Inception Score: 215.8370361328125
92
+ FID: 3.9425574129223264
93
+ sFID: 6.140433703346162
94
+ Precision: 0.8265
95
+ Recall: 0.5309
96
+ ```
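
The evaluator reads each batch from the `arr_0` entry of the `.npz` file as an `[N, H, W, 3]` array of images in `[0, 255]` (see `read_activations` in `evaluator.py` below). A minimal sketch of packing your own generated images into that format; the folder path and output file name are assumptions:

```
import numpy as np
from PIL import Image
from pathlib import Path

# Hypothetical folder of generated 256x256 PNGs.
image_paths = sorted(Path("sample/c2i/generated").glob("*.png"))
samples = np.stack(
    [np.array(Image.open(p).convert("RGB"), dtype=np.uint8) for p in image_paths]
)  # [N, 256, 256, 3], values in [0, 255]
np.savez("my_samples.npz", arr_0=samples)
print(samples.shape)  # sanity check before running evaluator.py
```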
evaluations/c2i/evaluator.py ADDED
@@ -0,0 +1,665 @@
1
+ import argparse
2
+ import io
3
+ import os
4
+ import random
5
+ import warnings
6
+ import zipfile
7
+ from abc import ABC, abstractmethod
8
+ from contextlib import contextmanager
9
+ from functools import partial
10
+ from multiprocessing import cpu_count
11
+ from multiprocessing.pool import ThreadPool
12
+ from typing import Iterable, Optional, Tuple
13
+
14
+ import numpy as np
15
+ import requests
16
+ import tensorflow.compat.v1 as tf
17
+ from scipy import linalg
18
+ from tqdm.auto import tqdm
19
+
20
+ INCEPTION_V3_URL = "https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/classify_image_graph_def.pb"
21
+ INCEPTION_V3_PATH = "classify_image_graph_def.pb"
22
+
23
+ FID_POOL_NAME = "pool_3:0"
24
+ FID_SPATIAL_NAME = "mixed_6/conv:0"
25
+
26
+
27
+ def main():
28
+ parser = argparse.ArgumentParser()
29
+ parser.add_argument("ref_batch", help="path to reference batch npz file")
30
+ parser.add_argument("sample_batch", help="path to sample batch npz file")
31
+ args = parser.parse_args()
32
+
33
+ config = tf.ConfigProto(
34
+ allow_soft_placement=True # allows DecodeJpeg to run on CPU in Inception graph
35
+ )
36
+ config.gpu_options.allow_growth = True
37
+ evaluator = Evaluator(tf.Session(config=config))
38
+
39
+ print("warming up TensorFlow...")
40
+ # This will cause TF to print a bunch of verbose stuff now rather
41
+ # than after the next print(), to help prevent confusion.
42
+ evaluator.warmup()
43
+
44
+ print("computing reference batch activations...")
45
+ ref_acts = evaluator.read_activations(args.ref_batch)
46
+ print("computing/reading reference batch statistics...")
47
+ ref_stats, ref_stats_spatial = evaluator.read_statistics(args.ref_batch, ref_acts)
48
+
49
+ print("computing sample batch activations...")
50
+ sample_acts = evaluator.read_activations(args.sample_batch)
51
+ print("computing/reading sample batch statistics...")
52
+ sample_stats, sample_stats_spatial = evaluator.read_statistics(args.sample_batch, sample_acts)
53
+
54
+ print("Computing evaluations...")
55
+ IS = evaluator.compute_inception_score(sample_acts[0])
56
+ FID = sample_stats.frechet_distance(ref_stats)
57
+ sFID = sample_stats_spatial.frechet_distance(ref_stats_spatial)
58
+ print("Inception Score:", IS)
59
+ print("FID:", FID)
60
+ print("sFID:", sFID)
61
+ prec, recall = evaluator.compute_prec_recall(ref_acts[0], sample_acts[0])
62
+ print("Precision:", prec)
63
+ print("Recall:", recall)
64
+
65
+ txt_path = args.sample_batch.replace('.npz', '.txt')
66
+ print("writing to {}".format(txt_path))
67
+ with open(txt_path, 'w') as f:
68
+ print("Inception Score:", IS, file=f)
69
+ print("FID:", FID, file=f)
70
+ print("sFID:", sFID, file=f)
71
+ print("Precision:", prec, file=f)
72
+ print("Recall:", recall, file=f)
73
+
74
+
75
+ class InvalidFIDException(Exception):
76
+ pass
77
+
78
+
79
+ class FIDStatistics:
80
+ def __init__(self, mu: np.ndarray, sigma: np.ndarray):
81
+ self.mu = mu
82
+ self.sigma = sigma
83
+
84
+ def frechet_distance(self, other, eps=1e-6):
85
+ """
86
+ Compute the Frechet distance between two sets of statistics.
87
+ """
88
+ # https://github.com/bioinf-jku/TTUR/blob/73ab375cdf952a12686d9aa7978567771084da42/fid.py#L132
89
+ mu1, sigma1 = self.mu, self.sigma
90
+ mu2, sigma2 = other.mu, other.sigma
91
+
92
+ mu1 = np.atleast_1d(mu1)
93
+ mu2 = np.atleast_1d(mu2)
94
+
95
+ sigma1 = np.atleast_2d(sigma1)
96
+ sigma2 = np.atleast_2d(sigma2)
97
+
98
+ assert (
99
+ mu1.shape == mu2.shape
100
+ ), f"Training and test mean vectors have different lengths: {mu1.shape}, {mu2.shape}"
101
+ assert (
102
+ sigma1.shape == sigma2.shape
103
+ ), f"Training and test covariances have different dimensions: {sigma1.shape}, {sigma2.shape}"
104
+
105
+ diff = mu1 - mu2
106
+
107
+ # product might be almost singular
108
+ covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
109
+ if not np.isfinite(covmean).all():
110
+ msg = (
111
+ "fid calculation produces singular product; adding %s to diagonal of cov estimates"
112
+ % eps
113
+ )
114
+ warnings.warn(msg)
115
+ offset = np.eye(sigma1.shape[0]) * eps
116
+ covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
117
+
118
+ # numerical error might give slight imaginary component
119
+ if np.iscomplexobj(covmean):
120
+ if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
121
+ m = np.max(np.abs(covmean.imag))
122
+ raise ValueError("Imaginary component {}".format(m))
123
+ covmean = covmean.real
124
+
125
+ tr_covmean = np.trace(covmean)
126
+
127
+ return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
128
+
129
+
130
+ class Evaluator:
131
+ def __init__(
132
+ self,
133
+ session,
134
+ batch_size=64,
135
+ softmax_batch_size=512,
136
+ ):
137
+ self.sess = session
138
+ self.batch_size = batch_size
139
+ self.softmax_batch_size = softmax_batch_size
140
+ self.manifold_estimator = ManifoldEstimator(session)
141
+ with self.sess.graph.as_default():
142
+ self.image_input = tf.placeholder(tf.float32, shape=[None, None, None, 3])
143
+ self.softmax_input = tf.placeholder(tf.float32, shape=[None, 2048])
144
+ self.pool_features, self.spatial_features = _create_feature_graph(self.image_input)
145
+ self.softmax = _create_softmax_graph(self.softmax_input)
146
+
147
+ def warmup(self):
148
+ self.compute_activations(np.zeros([1, 8, 64, 64, 3]))
149
+
150
+ def read_activations(self, npz_path: str) -> Tuple[np.ndarray, np.ndarray]:
151
+ with open_npz_array(npz_path, "arr_0") as reader:
152
+ return self.compute_activations(reader.read_batches(self.batch_size))
153
+
154
+ def compute_activations(self, batches: Iterable[np.ndarray]) -> Tuple[np.ndarray, np.ndarray]:
155
+ """
156
+ Compute image features for downstream evals.
157
+
158
+ :param batches: an iterator over NHWC numpy arrays in [0, 255].
159
+ :return: a tuple of numpy arrays of shape [N x X], where X is a feature
160
+ dimension. The tuple is (pool_3, spatial).
161
+ """
162
+ preds = []
163
+ spatial_preds = []
164
+ for batch in tqdm(batches):
165
+ batch = batch.astype(np.float32)
166
+ pred, spatial_pred = self.sess.run(
167
+ [self.pool_features, self.spatial_features], {self.image_input: batch}
168
+ )
169
+ preds.append(pred.reshape([pred.shape[0], -1]))
170
+ spatial_preds.append(spatial_pred.reshape([spatial_pred.shape[0], -1]))
171
+ return (
172
+ np.concatenate(preds, axis=0),
173
+ np.concatenate(spatial_preds, axis=0),
174
+ )
175
+
176
+ def read_statistics(
177
+ self, npz_path: str, activations: Tuple[np.ndarray, np.ndarray]
178
+ ) -> Tuple[FIDStatistics, FIDStatistics]:
179
+ obj = np.load(npz_path)
180
+ if "mu" in list(obj.keys()):
181
+ return FIDStatistics(obj["mu"], obj["sigma"]), FIDStatistics(
182
+ obj["mu_s"], obj["sigma_s"]
183
+ )
184
+ return tuple(self.compute_statistics(x) for x in activations)
185
+
186
+ def compute_statistics(self, activations: np.ndarray) -> FIDStatistics:
187
+ mu = np.mean(activations, axis=0)
188
+ sigma = np.cov(activations, rowvar=False)
189
+ return FIDStatistics(mu, sigma)
190
+
191
+ def compute_inception_score(self, activations: np.ndarray, split_size: int = 5000) -> float:
192
+ softmax_out = []
193
+ for i in range(0, len(activations), self.softmax_batch_size):
194
+ acts = activations[i : i + self.softmax_batch_size]
195
+ softmax_out.append(self.sess.run(self.softmax, feed_dict={self.softmax_input: acts}))
196
+ preds = np.concatenate(softmax_out, axis=0)
197
+ # https://github.com/openai/improved-gan/blob/4f5d1ec5c16a7eceb206f42bfc652693601e1d5c/inception_score/model.py#L46
198
+ scores = []
199
+ for i in range(0, len(preds), split_size):
200
+ part = preds[i : i + split_size]
201
+ kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
202
+ kl = np.mean(np.sum(kl, 1))
203
+ scores.append(np.exp(kl))
204
+ return float(np.mean(scores))
205
+
206
+ def compute_prec_recall(
207
+ self, activations_ref: np.ndarray, activations_sample: np.ndarray
208
+ ) -> Tuple[float, float]:
209
+ radii_1 = self.manifold_estimator.manifold_radii(activations_ref)
210
+ radii_2 = self.manifold_estimator.manifold_radii(activations_sample)
211
+ pr = self.manifold_estimator.evaluate_pr(
212
+ activations_ref, radii_1, activations_sample, radii_2
213
+ )
214
+ return (float(pr[0][0]), float(pr[1][0]))
215
+
216
+
217
+ class ManifoldEstimator:
218
+ """
219
+ A helper for comparing manifolds of feature vectors.
220
+
221
+ Adapted from https://github.com/kynkaat/improved-precision-and-recall-metric/blob/f60f25e5ad933a79135c783fcda53de30f42c9b9/precision_recall.py#L57
222
+ """
223
+
224
+ def __init__(
225
+ self,
226
+ session,
227
+ row_batch_size=10000,
228
+ col_batch_size=10000,
229
+ nhood_sizes=(3,),
230
+ clamp_to_percentile=None,
231
+ eps=1e-5,
232
+ ):
233
+ """
234
+ Estimate the manifold of given feature vectors.
235
+
236
+ :param session: the TensorFlow session.
237
+ :param row_batch_size: row batch size to compute pairwise distances
238
+ (parameter to trade-off between memory usage and performance).
239
+ :param col_batch_size: column batch size to compute pairwise distances.
240
+ :param nhood_sizes: number of neighbors used to estimate the manifold.
241
+ :param clamp_to_percentile: prune hyperspheres that have radius larger than
242
+ the given percentile.
243
+ :param eps: small number for numerical stability.
244
+ """
245
+ self.distance_block = DistanceBlock(session)
246
+ self.row_batch_size = row_batch_size
247
+ self.col_batch_size = col_batch_size
248
+ self.nhood_sizes = nhood_sizes
249
+ self.num_nhoods = len(nhood_sizes)
250
+ self.clamp_to_percentile = clamp_to_percentile
251
+ self.eps = eps
252
+
253
+ def warmup(self):
254
+ feats, radii = (
255
+ np.zeros([1, 2048], dtype=np.float32),
256
+ np.zeros([1, 1], dtype=np.float32),
257
+ )
258
+ self.evaluate_pr(feats, radii, feats, radii)
259
+
260
+ def manifold_radii(self, features: np.ndarray) -> np.ndarray:
261
+ num_images = len(features)
262
+
263
+ # Estimate manifold of features by calculating distances to k-NN of each sample.
264
+ radii = np.zeros([num_images, self.num_nhoods], dtype=np.float32)
265
+ distance_batch = np.zeros([self.row_batch_size, num_images], dtype=np.float32)
266
+ seq = np.arange(max(self.nhood_sizes) + 1, dtype=np.int32)
267
+
268
+ for begin1 in range(0, num_images, self.row_batch_size):
269
+ end1 = min(begin1 + self.row_batch_size, num_images)
270
+ row_batch = features[begin1:end1]
271
+
272
+ for begin2 in range(0, num_images, self.col_batch_size):
273
+ end2 = min(begin2 + self.col_batch_size, num_images)
274
+ col_batch = features[begin2:end2]
275
+
276
+ # Compute distances between batches.
277
+ distance_batch[
278
+ 0 : end1 - begin1, begin2:end2
279
+ ] = self.distance_block.pairwise_distances(row_batch, col_batch)
280
+
281
+ # Find the k-nearest neighbor from the current batch.
282
+ radii[begin1:end1, :] = np.concatenate(
283
+ [
284
+ x[:, self.nhood_sizes]
285
+ for x in _numpy_partition(distance_batch[0 : end1 - begin1, :], seq, axis=1)
286
+ ],
287
+ axis=0,
288
+ )
289
+
290
+ if self.clamp_to_percentile is not None:
291
+ max_distances = np.percentile(radii, self.clamp_to_percentile, axis=0)
292
+ radii[radii > max_distances] = 0
293
+ return radii
294
+
295
+ def evaluate(self, features: np.ndarray, radii: np.ndarray, eval_features: np.ndarray):
296
+ """
297
+ Evaluate whether new feature vectors lie on the estimated manifold.
298
+ """
299
+ num_eval_images = eval_features.shape[0]
300
+ num_ref_images = radii.shape[0]
301
+ distance_batch = np.zeros([self.row_batch_size, num_ref_images], dtype=np.float32)
302
+ batch_predictions = np.zeros([num_eval_images, self.num_nhoods], dtype=np.int32)
303
+ max_realism_score = np.zeros([num_eval_images], dtype=np.float32)
304
+ nearest_indices = np.zeros([num_eval_images], dtype=np.int32)
305
+
306
+ for begin1 in range(0, num_eval_images, self.row_batch_size):
307
+ end1 = min(begin1 + self.row_batch_size, num_eval_images)
308
+ feature_batch = eval_features[begin1:end1]
309
+
310
+ for begin2 in range(0, num_ref_images, self.col_batch_size):
311
+ end2 = min(begin2 + self.col_batch_size, num_ref_images)
312
+ ref_batch = features[begin2:end2]
313
+
314
+ distance_batch[
315
+ 0 : end1 - begin1, begin2:end2
316
+ ] = self.distance_block.pairwise_distances(feature_batch, ref_batch)
317
+
318
+ # From the minibatch of new feature vectors, determine if they are in the estimated manifold.
319
+ # If a feature vector is inside a hypersphere of some reference sample, then
320
+ # the new sample lies at the estimated manifold.
321
+ # The radii of the hyperspheres are determined from distances of neighborhood size k.
322
+ samples_in_manifold = distance_batch[0 : end1 - begin1, :, None] <= radii
323
+ batch_predictions[begin1:end1] = np.any(samples_in_manifold, axis=1).astype(np.int32)
324
+
325
+ max_realism_score[begin1:end1] = np.max(
326
+ radii[:, 0] / (distance_batch[0 : end1 - begin1, :] + self.eps), axis=1
327
+ )
328
+ nearest_indices[begin1:end1] = np.argmin(distance_batch[0 : end1 - begin1, :], axis=1)
329
+
330
+ return {
331
+ "fraction": float(np.mean(batch_predictions)),
332
+ "batch_predictions": batch_predictions,
333
+ "max_realisim_score": max_realism_score,
334
+ "nearest_indices": nearest_indices,
335
+ }
336
+
337
+ def evaluate_pr(
338
+ self,
339
+ features_1: np.ndarray,
340
+ radii_1: np.ndarray,
341
+ features_2: np.ndarray,
342
+ radii_2: np.ndarray,
343
+ ) -> Tuple[np.ndarray, np.ndarray]:
344
+ """
345
+ Evaluate precision and recall efficiently.
346
+
347
+ :param features_1: [N1 x D] feature vectors for reference batch.
348
+ :param radii_1: [N1 x K1] radii for reference vectors.
349
+ :param features_2: [N2 x D] feature vectors for the other batch.
350
+ :param radii_2: [N x K2] radii for other vectors.
351
+ :return: a tuple of arrays for (precision, recall):
352
+ - precision: an np.ndarray of length K1
353
+ - recall: an np.ndarray of length K2
354
+ """
355
+ features_1_status = np.zeros([len(features_1), radii_2.shape[1]], dtype=bool)
356
+ features_2_status = np.zeros([len(features_2), radii_1.shape[1]], dtype=bool)
357
+ for begin_1 in range(0, len(features_1), self.row_batch_size):
358
+ end_1 = begin_1 + self.row_batch_size
359
+ batch_1 = features_1[begin_1:end_1]
360
+ for begin_2 in range(0, len(features_2), self.col_batch_size):
361
+ end_2 = begin_2 + self.col_batch_size
362
+ batch_2 = features_2[begin_2:end_2]
363
+ batch_1_in, batch_2_in = self.distance_block.less_thans(
364
+ batch_1, radii_1[begin_1:end_1], batch_2, radii_2[begin_2:end_2]
365
+ )
366
+ features_1_status[begin_1:end_1] |= batch_1_in
367
+ features_2_status[begin_2:end_2] |= batch_2_in
368
+ return (
369
+ np.mean(features_2_status.astype(np.float64), axis=0),
370
+ np.mean(features_1_status.astype(np.float64), axis=0),
371
+ )
372
+
373
+
374
+ class DistanceBlock:
375
+ """
376
+ Calculate pairwise distances between vectors.
377
+
378
+ Adapted from https://github.com/kynkaat/improved-precision-and-recall-metric/blob/f60f25e5ad933a79135c783fcda53de30f42c9b9/precision_recall.py#L34
379
+ """
380
+
381
+ def __init__(self, session):
382
+ self.session = session
383
+
384
+ # Initialize TF graph to calculate pairwise distances.
385
+ with session.graph.as_default():
386
+ self._features_batch1 = tf.placeholder(tf.float32, shape=[None, None])
387
+ self._features_batch2 = tf.placeholder(tf.float32, shape=[None, None])
388
+ distance_block_16 = _batch_pairwise_distances(
389
+ tf.cast(self._features_batch1, tf.float16),
390
+ tf.cast(self._features_batch2, tf.float16),
391
+ )
392
+ self.distance_block = tf.cond(
393
+ tf.reduce_all(tf.math.is_finite(distance_block_16)),
394
+ lambda: tf.cast(distance_block_16, tf.float32),
395
+ lambda: _batch_pairwise_distances(self._features_batch1, self._features_batch2),
396
+ )
397
+
398
+ # Extra logic for less thans.
399
+ self._radii1 = tf.placeholder(tf.float32, shape=[None, None])
400
+ self._radii2 = tf.placeholder(tf.float32, shape=[None, None])
401
+ dist32 = tf.cast(self.distance_block, tf.float32)[..., None]
402
+ self._batch_1_in = tf.math.reduce_any(dist32 <= self._radii2, axis=1)
403
+ self._batch_2_in = tf.math.reduce_any(dist32 <= self._radii1[:, None], axis=0)
404
+
405
+ def pairwise_distances(self, U, V):
406
+ """
407
+ Evaluate pairwise distances between two batches of feature vectors.
408
+ """
409
+ return self.session.run(
410
+ self.distance_block,
411
+ feed_dict={self._features_batch1: U, self._features_batch2: V},
412
+ )
413
+
414
+ def less_thans(self, batch_1, radii_1, batch_2, radii_2):
415
+ return self.session.run(
416
+ [self._batch_1_in, self._batch_2_in],
417
+ feed_dict={
418
+ self._features_batch1: batch_1,
419
+ self._features_batch2: batch_2,
420
+ self._radii1: radii_1,
421
+ self._radii2: radii_2,
422
+ },
423
+ )
424
+
425
+
426
+ def _batch_pairwise_distances(U, V):
427
+ """
428
+ Compute pairwise distances between two batches of feature vectors.
429
+ """
430
+ with tf.variable_scope("pairwise_dist_block"):
431
+ # Squared norms of each row in U and V.
432
+ norm_u = tf.reduce_sum(tf.square(U), 1)
433
+ norm_v = tf.reduce_sum(tf.square(V), 1)
434
+
435
+ # norm_u as a column and norm_v as a row vectors.
436
+ norm_u = tf.reshape(norm_u, [-1, 1])
437
+ norm_v = tf.reshape(norm_v, [1, -1])
438
+
439
+ # Pairwise squared Euclidean distances.
440
+ D = tf.maximum(norm_u - 2 * tf.matmul(U, V, False, True) + norm_v, 0.0)
441
+
442
+ return D
443
+
444
+
445
+ class NpzArrayReader(ABC):
446
+ @abstractmethod
447
+ def read_batch(self, batch_size: int) -> Optional[np.ndarray]:
448
+ pass
449
+
450
+ @abstractmethod
451
+ def remaining(self) -> int:
452
+ pass
453
+
454
+ def read_batches(self, batch_size: int) -> Iterable[np.ndarray]:
455
+ def gen_fn():
456
+ while True:
457
+ batch = self.read_batch(batch_size)
458
+ if batch is None:
459
+ break
460
+ yield batch
461
+
462
+ rem = self.remaining()
463
+ num_batches = rem // batch_size + int(rem % batch_size != 0)
464
+ return BatchIterator(gen_fn, num_batches)
465
+
466
+
467
+ class BatchIterator:
468
+ def __init__(self, gen_fn, length):
469
+ self.gen_fn = gen_fn
470
+ self.length = length
471
+
472
+ def __len__(self):
473
+ return self.length
474
+
475
+ def __iter__(self):
476
+ return self.gen_fn()
477
+
478
+
479
+ class StreamingNpzArrayReader(NpzArrayReader):
480
+ def __init__(self, arr_f, shape, dtype):
481
+ self.arr_f = arr_f
482
+ self.shape = shape
483
+ self.dtype = dtype
484
+ self.idx = 0
485
+
486
+ def read_batch(self, batch_size: int) -> Optional[np.ndarray]:
487
+ if self.idx >= self.shape[0]:
488
+ return None
489
+
490
+ bs = min(batch_size, self.shape[0] - self.idx)
491
+ self.idx += bs
492
+
493
+ if self.dtype.itemsize == 0:
494
+ return np.ndarray([bs, *self.shape[1:]], dtype=self.dtype)
495
+
496
+ read_count = bs * np.prod(self.shape[1:])
497
+ read_size = int(read_count * self.dtype.itemsize)
498
+ data = _read_bytes(self.arr_f, read_size, "array data")
499
+ return np.frombuffer(data, dtype=self.dtype).reshape([bs, *self.shape[1:]])
500
+
501
+ def remaining(self) -> int:
502
+ return max(0, self.shape[0] - self.idx)
503
+
504
+
505
+ class MemoryNpzArrayReader(NpzArrayReader):
506
+ def __init__(self, arr):
507
+ self.arr = arr
508
+ self.idx = 0
509
+
510
+ @classmethod
511
+ def load(cls, path: str, arr_name: str):
512
+ with open(path, "rb") as f:
513
+ arr = np.load(f)[arr_name]
514
+ return cls(arr)
515
+
516
+ def read_batch(self, batch_size: int) -> Optional[np.ndarray]:
517
+ if self.idx >= self.arr.shape[0]:
518
+ return None
519
+
520
+ res = self.arr[self.idx : self.idx + batch_size]
521
+ self.idx += batch_size
522
+ return res
523
+
524
+ def remaining(self) -> int:
525
+ return max(0, self.arr.shape[0] - self.idx)
526
+
527
+
528
+ @contextmanager
529
+ def open_npz_array(path: str, arr_name: str) -> NpzArrayReader:
530
+ with _open_npy_file(path, arr_name) as arr_f:
531
+ version = np.lib.format.read_magic(arr_f)
532
+ if version == (1, 0):
533
+ header = np.lib.format.read_array_header_1_0(arr_f)
534
+ elif version == (2, 0):
535
+ header = np.lib.format.read_array_header_2_0(arr_f)
536
+ else:
537
+ yield MemoryNpzArrayReader.load(path, arr_name)
538
+ return
539
+ shape, fortran, dtype = header
540
+ if fortran or dtype.hasobject:
541
+ yield MemoryNpzArrayReader.load(path, arr_name)
542
+ else:
543
+ yield StreamingNpzArrayReader(arr_f, shape, dtype)
544
+
545
+
546
+ def _read_bytes(fp, size, error_template="ran out of data"):
547
+ """
548
+ Copied from: https://github.com/numpy/numpy/blob/fb215c76967739268de71aa4bda55dd1b062bc2e/numpy/lib/format.py#L788-L886
549
+
550
+ Read from file-like object until size bytes are read.
551
+ Raises ValueError if not EOF is encountered before size bytes are read.
552
+ Non-blocking objects only supported if they derive from io objects.
553
+ Required as e.g. ZipExtFile in python 2.6 can return less data than
554
+ requested.
555
+ """
556
+ data = bytes()
557
+ while True:
558
+ # io files (default in python3) return None or raise on
559
+ # would-block, python2 file will truncate, probably nothing can be
560
+ # done about that. note that regular files can't be non-blocking
561
+ try:
562
+ r = fp.read(size - len(data))
563
+ data += r
564
+ if len(r) == 0 or len(data) == size:
565
+ break
566
+ except io.BlockingIOError:
567
+ pass
568
+ if len(data) != size:
569
+ msg = "EOF: reading %s, expected %d bytes got %d"
570
+ raise ValueError(msg % (error_template, size, len(data)))
571
+ else:
572
+ return data
573
+
574
+
575
+ @contextmanager
576
+ def _open_npy_file(path: str, arr_name: str):
577
+ with open(path, "rb") as f:
578
+ with zipfile.ZipFile(f, "r") as zip_f:
579
+ if f"{arr_name}.npy" not in zip_f.namelist():
580
+ raise ValueError(f"missing {arr_name} in npz file")
581
+ with zip_f.open(f"{arr_name}.npy", "r") as arr_f:
582
+ yield arr_f
583
+
584
+
585
+ def _download_inception_model():
586
+ if os.path.exists(INCEPTION_V3_PATH):
587
+ return
588
+ print("downloading InceptionV3 model...")
589
+ with requests.get(INCEPTION_V3_URL, stream=True) as r:
590
+ r.raise_for_status()
591
+ tmp_path = INCEPTION_V3_PATH + ".tmp"
592
+ with open(tmp_path, "wb") as f:
593
+ for chunk in tqdm(r.iter_content(chunk_size=8192)):
594
+ f.write(chunk)
595
+ os.rename(tmp_path, INCEPTION_V3_PATH)
596
+
597
+
598
+ def _create_feature_graph(input_batch):
599
+ _download_inception_model()
600
+ prefix = f"{random.randrange(2**32)}_{random.randrange(2**32)}"
601
+ with open(INCEPTION_V3_PATH, "rb") as f:
602
+ graph_def = tf.GraphDef()
603
+ graph_def.ParseFromString(f.read())
604
+ pool3, spatial = tf.import_graph_def(
605
+ graph_def,
606
+ input_map={f"ExpandDims:0": input_batch},
607
+ return_elements=[FID_POOL_NAME, FID_SPATIAL_NAME],
608
+ name=prefix,
609
+ )
610
+ _update_shapes(pool3)
611
+ spatial = spatial[..., :7]
612
+ return pool3, spatial
613
+
614
+
615
+ def _create_softmax_graph(input_batch):
616
+ _download_inception_model()
617
+ prefix = f"{random.randrange(2**32)}_{random.randrange(2**32)}"
618
+ with open(INCEPTION_V3_PATH, "rb") as f:
619
+ graph_def = tf.GraphDef()
620
+ graph_def.ParseFromString(f.read())
621
+ (matmul,) = tf.import_graph_def(
622
+ graph_def, return_elements=[f"softmax/logits/MatMul"], name=prefix
623
+ )
624
+ w = matmul.inputs[1]
625
+ logits = tf.matmul(input_batch, w)
626
+ return tf.nn.softmax(logits)
627
+
628
+
629
+ def _update_shapes(pool3):
630
+ # https://github.com/bioinf-jku/TTUR/blob/73ab375cdf952a12686d9aa7978567771084da42/fid.py#L50-L63
631
+ ops = pool3.graph.get_operations()
632
+ for op in ops:
633
+ for o in op.outputs:
634
+ shape = o.get_shape()
635
+ if shape._dims is not None: # pylint: disable=protected-access
636
+ # shape = [s.value for s in shape] TF 1.x
637
+ shape = [s for s in shape] # TF 2.x
638
+ new_shape = []
639
+ for j, s in enumerate(shape):
640
+ if s == 1 and j == 0:
641
+ new_shape.append(None)
642
+ else:
643
+ new_shape.append(s)
644
+ o.__dict__["_shape_val"] = tf.TensorShape(new_shape)
645
+ return pool3
646
+
647
+
648
+ def _numpy_partition(arr, kth, **kwargs):
649
+ num_workers = min(cpu_count(), len(arr))
650
+ chunk_size = len(arr) // num_workers
651
+ extra = len(arr) % num_workers
652
+
653
+ start_idx = 0
654
+ batches = []
655
+ for i in range(num_workers):
656
+ size = chunk_size + (1 if i < extra else 0)
657
+ batches.append(arr[start_idx : start_idx + size])
658
+ start_idx += size
659
+
660
+ with ThreadPool(num_workers) as pool:
661
+ return list(pool.map(partial(np.partition, kth=kth, **kwargs), batches))
662
+
663
+
664
+ if __name__ == "__main__":
665
+ main()
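
`FIDStatistics.frechet_distance` above implements the closed-form Frechet distance between two Gaussians, `||mu1 - mu2||^2 + Tr(S1 + S2 - 2*(S1 S2)^(1/2))`. A minimal numeric sketch with tiny made-up feature sets (not real Inception activations):

```
import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
acts_ref = rng.normal(0.0, 1.0, size=(1000, 4))     # toy "reference" features
acts_sample = rng.normal(0.1, 1.1, size=(1000, 4))  # toy "sample" features

mu1, sigma1 = acts_ref.mean(axis=0), np.cov(acts_ref, rowvar=False)
mu2, sigma2 = acts_sample.mean(axis=0), np.cov(acts_sample, rowvar=False)

diff = mu1 - mu2
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if np.iscomplexobj(covmean):
    covmean = covmean.real
fid = diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)
print(f"toy FID: {fid:.4f}")
```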
evaluations/canny_f1score.py ADDED
@@ -0,0 +1,62 @@
1
+ import matplotlib.pyplot as plt
2
+ from tqdm import tqdm
3
+ from transformers import DPTImageProcessor, DPTForDepthEstimation
4
+ from PIL import Image
5
+ import os
6
+ import sys
7
+ import torch
8
+ import numpy as np
9
+ from torch.utils.data import DataLoader, Dataset
10
+ current_directory = os.getcwd()
11
+ sys.path.append(current_directory)
12
+ from autoregressive.test.metric import RMSE, SSIM, F1score
13
+ import torch.nn.functional as F
14
+ from condition.hed import HEDdetector
15
+ from condition.canny import CannyDetector
16
+ from torchmetrics.classification import BinaryF1Score
17
+ # Define a dataset class for loading image and label pairs
18
+ class ImageDataset(Dataset):
19
+ def __init__(self, img_dir, label_dir):
20
+ self.img_dir = img_dir
21
+ self.label_dir = label_dir
22
+ self.images = os.listdir(img_dir)
23
+
24
+ def __len__(self):
25
+ return len(self.images)
26
+
27
+ def __getitem__(self, idx):
28
+ img_path = os.path.join(self.img_dir, self.images[idx])
29
+ label_path = os.path.join(self.label_dir, self.images[idx])
30
+
31
+ image = np.array(Image.open(img_path).convert("RGB"))
32
+ label = np.array(Image.open(label_path))
33
+ return torch.from_numpy(image), torch.from_numpy(label).permute(2, 0, 1)
34
+
35
+ model = CannyDetector()
36
+ # Define the dataset and data loader
37
+ img_dir = 'sample/multigen/canny/visualization'
38
+ label_dir = 'sample/multigen/canny/annotations'
39
+ dataset = ImageDataset(img_dir, label_dir)
40
+ data_loader = DataLoader(dataset, batch_size=16, shuffle=False, num_workers=4)
41
+
42
+ # Instantiate the metric
43
+ f1score = BinaryF1Score()
44
+ f1 = []
45
+ i = 0
46
+ with torch.no_grad():
47
+ for images, labels in tqdm(data_loader):
48
+ i += 1
49
+ images = images
50
+ outputs = []
51
+ for img in images:
52
+ outputs.append(model(img))
53
+ # Move predictions and labels to numpy for RMSE calculation
54
+ predicted_canny = outputs
55
+ labels = labels[:, 0, :, :].numpy() # Assuming labels are in Bx1xHxW format
56
+
57
+ for pred, label in zip(predicted_canny, labels):
58
+ pred[pred == 255] = 1
59
+ label[label == 255] = 1
60
+ f1.append(f1score(torch.from_numpy(pred).flatten(), torch.from_numpy(label).flatten()).item())
61
+
62
+ print(f'f1score: {np.array(f1).mean()}')
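
The F1 computation above treats the Canny maps as binary masks: 255-valued edge pixels become 1 and everything else stays 0 before flattening. A minimal sketch of that step with toy edge maps (assumed values):

```
import torch
from torchmetrics.classification import BinaryF1Score

f1score = BinaryF1Score()

# Toy 4x4 edge maps with values in {0, 255}.
pred = torch.tensor([[0, 255], [255, 0]], dtype=torch.uint8).repeat(2, 2)
label = torch.tensor([[0, 255], [0, 0]], dtype=torch.uint8).repeat(2, 2)

# Same binarization as in the loop above.
pred = (pred == 255).long()
label = (label == 255).long()
print(f1score(pred.flatten(), label.flatten()))
```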
evaluations/clean_fid.py ADDED
@@ -0,0 +1,15 @@
1
+ from cleanfid import fid
2
+ import argparse
3
+
4
+ def main(args):
5
+ real_data_path = args.val_images
6
+ gen_data_path = args.generated_images
7
+ cleanfid_score = fid.compute_fid(gen_data_path, real_data_path)
8
+ print(f"The Clean-FID score is {cleanfid_score}")
9
+
10
+ if __name__ == "__main__":
11
+ parser = argparse.ArgumentParser()
12
+ parser.add_argument("--val-images", type=str, required=True)
13
+ parser.add_argument("--generated-images", type=str, required=True)
14
+ args = parser.parse_args()
15
+ main(args)
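
For reference, a minimal way to run the same computation without the argparse wrapper; the two directory paths are assumptions and should point at folders of images:

```
from cleanfid import fid

# Equivalent to: python evaluations/clean_fid.py --val-images <real_dir> --generated-images <gen_dir>
score = fid.compute_fid("sample/t2i/generated", "data/val_images")
print(f"The Clean-FID score is {score}")
```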
evaluations/cocostuff_mIoU.py ADDED
@@ -0,0 +1,72 @@
1
+ import os
2
+ import numpy as np
3
+ from mmseg.apis import init_model, inference_model, show_result_pyplot#, inference_segmentor
4
+ import torch
5
+ from PIL import Image
6
+ from sklearn.metrics import confusion_matrix
7
+ from torchmetrics import JaccardIndex
8
+
9
+ def main():
10
+ config_file = 'mmsegmentation/configs/deeplabv3/deeplabv3_r101-d8_4xb4-320k_coco-stuff164k-512x512.py'
11
+ checkpoint_file = 'evaluations/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k_20210709_155402-3cbca14d.pth'
12
+
13
+ # build the model from a config file and a checkpoint file
14
+ model = init_model(config_file, checkpoint_file, device='cuda:1')
15
+
16
+ # Image and segmentation labels directories
17
+ img_dir = 'sample/cocostuff/visualization'
18
+ ann_dir = 'sample/cocostuff/annotations'
19
+
20
+ # List all image files
21
+ img_fns = [f for f in sorted(os.listdir(img_dir)) if f.endswith(".png")]
22
+
23
+
24
+ total_mIoU = 0
25
+ from tqdm import tqdm
26
+ i = 0
27
+ num_classes = 171
28
+ jaccard_index = JaccardIndex(task="multiclass", num_classes=num_classes)
29
+
30
+ conf_matrix = np.zeros((num_classes+1, num_classes+1), dtype=np.int64)
31
+ for img_fn in tqdm(img_fns):
32
+ ann_fn = img_fn
33
+ i += 1
34
+ # if i == 4891:
35
+ # continue
36
+ try:
37
+ img_path = os.path.join(img_dir, img_fn)
38
+ ann_path = os.path.join(ann_dir, img_fn)
39
+ result = inference_model(model, img_path)
40
+ except Exception as e:
41
+ continue
42
+ # Read ground truth segmentation map
43
+ gt_semantic_seg = np.array(Image.open(ann_path))
44
+
45
+ ignore_label = 255
46
+ gt = gt_semantic_seg.copy()
47
+ # import pdb;pdb.set_trace()
48
+ # print(np.unique(gt), np.unique(result.pred_sem_seg.data[0].cpu().numpy()))
49
+ pred = result.pred_sem_seg.data[0].cpu().numpy().copy()#+1
50
+ gt[gt == ignore_label] = num_classes
51
+ conf_matrix += np.bincount(
52
+ (num_classes+1) * pred.reshape(-1) + gt.reshape(-1),
53
+ minlength=conf_matrix.size,
54
+ ).reshape(conf_matrix.shape)
55
+
56
+
57
+ # calculate miou
58
+ acc = np.full(num_classes, np.nan, dtype=np.float64)
59
+ iou = np.full(num_classes, np.nan, dtype=np.float64)
60
+ tp = conf_matrix.diagonal()[:-1].astype(np.float64)
61
+ pos_gt = np.sum(conf_matrix[:-1, :-1], axis=0).astype(np.float64)
62
+ pos_pred = np.sum(conf_matrix[:-1, :-1], axis=1).astype(np.float64)
63
+ acc_valid = pos_gt > 0
64
+ acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
65
+ iou_valid = (pos_gt + pos_pred) > 0
66
+ union = pos_gt + pos_pred - tp
67
+ iou[acc_valid] = tp[acc_valid] / union[acc_valid]
68
+ miou = np.sum(iou[acc_valid]) / np.sum(iou_valid)
69
+ print(f"mIoU: {miou}")
70
+
71
+ if __name__ == '__main__':
72
+ main()
evaluations/depth_rmse.py ADDED
@@ -0,0 +1,62 @@
1
+ import matplotlib.pyplot as plt
2
+ from tqdm import tqdm
3
+ from transformers import DPTImageProcessor, DPTForDepthEstimation
4
+ from PIL import Image
5
+ import os
6
+ import torch
7
+ import numpy as np
8
+ from torch.utils.data import DataLoader, Dataset
9
+ import sys
10
+ current_directory = os.getcwd()
11
+ sys.path.append(current_directory)
12
+ from autoregressive.test.metric import RMSE
13
+ import torch.nn.functional as F
14
+ # Define a dataset class for loading image and label pairs
15
+ class ImageDataset(Dataset):
16
+ def __init__(self, img_dir, label_dir):
17
+ self.img_dir = img_dir
18
+ self.label_dir = label_dir
19
+ self.images = os.listdir(img_dir)
20
+
21
+ def __len__(self):
22
+ return len(self.images)
23
+
24
+ def __getitem__(self, idx):
25
+ img_path = os.path.join(self.img_dir, self.images[idx])
26
+ label_path = os.path.join(self.label_dir, self.images[idx])
27
+ image = Image.open(img_path).convert("RGB")
28
+ label = np.array(Image.open(label_path))
29
+
30
+ return np.array(image), torch.from_numpy(label).permute(2, 0, 1)
31
+
32
+ # Instantiate the model and processor
33
+ processor = DPTImageProcessor.from_pretrained("condition/ckpts/dpt_large")
34
+ model = DPTForDepthEstimation.from_pretrained("condition/ckpts/dpt_large").cuda()
35
+
36
+ # Define the dataset and data loader
37
+ img_dir = 'sample/multigen/depth/visualization'
38
+ label_dir = 'sample/multigen/depth/annotations'
39
+ dataset = ImageDataset(img_dir, label_dir)
40
+ data_loader = DataLoader(dataset, batch_size=16, shuffle=False, num_workers=4)
41
+
42
+ # Instantiate the metric
43
+ metric = RMSE()
44
+
45
+ # Perform inference on batches and calculate RMSE
46
+ model.eval()
47
+ rmse = []
48
+ with torch.no_grad():
49
+ for images, labels in tqdm(data_loader):
50
+ inputs = processor(images=images, return_tensors="pt", size=(512,512)).to('cuda:0')
51
+ outputs = model(**inputs)
52
+
53
+ predicted_depth = outputs.predicted_depth
54
+ predicted_depth = predicted_depth.squeeze().cpu()
55
+ labels = labels[:, 0, :, :]
56
+
57
+ for pred, label in zip(predicted_depth, labels):
58
+ # Preprocess predicted depth for fair comparison
59
+ pred = (pred * 255 / pred.max())
60
+ per_pixel_mse = torch.sqrt(F.mse_loss(pred.float(), label.float()))
61
+ rmse.append(per_image_rmse)
62
+ print(f"RMSE: {torch.stack(rmse).mean().item()}")
evaluations/hed_ssim.py ADDED
@@ -0,0 +1,52 @@
1
+ import matplotlib.pyplot as plt
2
+ from tqdm import tqdm
3
+ from transformers import DPTImageProcessor, DPTForDepthEstimation
4
+ from PIL import Image
5
+ import os
6
+ import torch
7
+ import numpy as np
8
+ from torch.utils.data import DataLoader, Dataset
9
+ import sys
10
+ current_directory = os.getcwd()
11
+ sys.path.append(current_directory)
12
+ from autoregressive.test.metric import RMSE, SSIM
13
+ import torch.nn.functional as F
14
+ from condition.hed import HEDdetector
15
+ from torchmetrics.image import MultiScaleStructuralSimilarityIndexMeasure
16
+ # Define a dataset class for loading image and label pairs
17
+ class ImageDataset(Dataset):
18
+ def __init__(self, img_dir, label_dir):
19
+ self.img_dir = img_dir
20
+ self.label_dir = label_dir
21
+ self.images = os.listdir(img_dir)
22
+
23
+ def __len__(self):
24
+ return len(self.images)
25
+
26
+ def __getitem__(self, idx):
27
+ img_path = os.path.join(self.img_dir, self.images[idx])
28
+ label_path = os.path.join(self.label_dir, self.images[idx])
29
+ image = np.array(Image.open(img_path).convert("RGB"))
30
+ label = np.array(Image.open(label_path))
31
+ return torch.from_numpy(image), torch.from_numpy(label).permute(2, 0, 1)
32
+
33
+ model = HEDdetector().cuda().eval()
34
+
35
+ # Define the dataset and data loader
36
+ img_dir = 'sample/multigen/hed/visualization'
37
+ label_dir = 'sample/multigen/hed/annotations'
38
+ dataset = ImageDataset(img_dir, label_dir)
39
+ data_loader = DataLoader(dataset, batch_size=16, shuffle=False, num_workers=4)
40
+
41
+ model.eval()
42
+ ssim = MultiScaleStructuralSimilarityIndexMeasure(data_range=1.0).cuda()
43
+ ssim_score = []
44
+ with torch.no_grad():
45
+ for images, labels in tqdm(data_loader):
46
+ images = images.permute(0,3,1,2).cuda()
47
+ outputs = model(images)
48
+ predicted_hed = outputs.unsqueeze(1)
49
+ labels = labels[:, 0:1, :, :].cuda()
50
+ ssim_score.append(ssim((predicted_hed/255.0).clip(0,1), (labels/255.0).clip(0,1)))
51
+
52
+ print(f'ssim: {torch.stack(ssim_score).mean()}')
evaluations/lineart_ssim.py ADDED
@@ -0,0 +1,54 @@
1
+ import matplotlib.pyplot as plt
2
+ from tqdm import tqdm
3
+ from transformers import DPTImageProcessor, DPTForDepthEstimation
4
+ from PIL import Image
5
+ import os
6
+ import torch
7
+ import numpy as np
8
+ from torch.utils.data import DataLoader, Dataset
9
+ import sys
10
+ current_directory = os.getcwd()
11
+ sys.path.append(current_directory)
12
+ from autoregressive.test.metric import RMSE, SSIM
13
+ import torch.nn.functional as F
14
+ from condition.hed import HEDdetector
15
+ from torchmetrics.image import MultiScaleStructuralSimilarityIndexMeasure
16
+ from condition.lineart import LineArt
17
+ # Define a dataset class for loading image and label pairs
18
+ class ImageDataset(Dataset):
19
+ def __init__(self, img_dir, label_dir):
20
+ self.img_dir = img_dir
21
+ self.label_dir = label_dir
22
+ self.images = os.listdir(img_dir)
23
+
24
+ def __len__(self):
25
+ return len(self.images)
26
+
27
+ def __getitem__(self, idx):
28
+ img_path = os.path.join(self.img_dir, self.images[idx])
29
+ label_path = os.path.join(self.label_dir, self.images[idx])
30
+
31
+ image = np.array(Image.open(img_path).convert("RGB"))
32
+ label = np.array(Image.open(label_path))
33
+ return torch.from_numpy(image), torch.from_numpy(label).permute(2, 0, 1)
34
+
35
+ model = LineArt()
36
+ model.load_state_dict(torch.load('condition/ckpts/model.pth', map_location=torch.device('cpu')))
37
+ model.cuda().eval()
38
+ # Define the dataset and data loader
39
+ img_dir = 'sample/multigen/lineart/visualization'
40
+ label_dir = 'sample/multigen/lineart/annotations'
41
+ dataset = ImageDataset(img_dir, label_dir)
42
+ data_loader = DataLoader(dataset, batch_size=16, shuffle=False, num_workers=4)
43
+
44
+ ssim = MultiScaleStructuralSimilarityIndexMeasure(data_range=1.0).cuda()
45
+ ssim_score = []
46
+ with torch.no_grad():
47
+ for images, labels in tqdm(data_loader):
48
+ images = images.permute(0,3,1,2).cuda()
49
+ outputs = model(images.float())*255
50
+ predicted_lineart = outputs
51
+ labels = labels[:, 0:1, :, :].cuda()
52
+ ssim_score.append(ssim((predicted_lineart/255.0).clip(0,1), (labels/255.0).clip(0,1)))
53
+
54
+ print(f'ssim: {torch.stack(ssim_score).mean()}')
evaluations/t2i/PartiPrompts.tsv ADDED
The diff for this file is too large to render. See raw diff
 
evaluations/t2i/README.md ADDED
@@ -0,0 +1,21 @@
1
+ # Evaluations from [GigaGAN](https://github.com/mingukkang/GigaGAN/tree/main/evaluation)
2
+
3
+ ```
4
+ pip install git+https://github.com/openai/CLIP.git
5
+ pip install open_clip_torch
6
+ pip install clean_fid
7
+ ```
8
+
9
+ ```
10
+ python3 evaluations/t2i/evaluation.py \
11
+ --eval_res 256 \
12
+ --batch_size 256 \
13
+ --how_many 30000 \
14
+ --ref_data "coco2014" \
15
+ --ref_type "val2014" \
18
+ --ref_dir "/path/to/coco" \
19
+ --fake_dir "/path/to/generation" \
20
+ $@
21
+ ```
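+ 
+ For reference, the sketch below shows how the helpers defined in `evaluations/t2i/evaluation.py` fit together when called directly. It assumes the script is importable from the repo root and that the generated images live in `<fake_dir>/images` next to a `captions.txt` with one prompt per line (the layout `EvalDataset` expects); all paths are placeholders.
+ 
+ ```python
+ from torch.utils.data import DataLoader
+ from evaluations.t2i.evaluation import EvalDataset, compute_clip_score, compute_fid
+ 
+ # Generated images paired with the prompts they were sampled from.
+ fake_set = EvalDataset(data_name="coco2014", data_dir="/path/to/generation",
+                        data_type="images", resize_size=256, resizer="lanczos",
+                        normalize=True, load_txt_from_file=True)
+ loader = DataLoader(fake_set, batch_size=32, shuffle=False)
+ 
+ clip_score = compute_clip_score(loader, clip_model="ViT-B/32", how_many=30000)
+ fid = compute_fid("/path/to/generation/images", "/path/to/coco/val2014",
+                   resize_size=256, feature_extractor="inception")
+ print(f"CLIP score: {clip_score}, FID: {fid}")
+ ```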
evaluations/t2i/coco_captions.csv ADDED
The diff for this file is too large to render. See raw diff
 
evaluations/t2i/evaluation.py ADDED
@@ -0,0 +1,260 @@
1
+ # Modified from:
2
+ # GigaGAN: https://github.com/mingukkang/GigaGAN
3
+ import os
4
+ import torch
5
+ import numpy as np
6
+ import re
7
+ import io
8
+ import random
9
+
10
+ from pathlib import Path
11
+ from tqdm import tqdm
12
+ from PIL import Image
13
+ import torch.nn.functional as F
14
+ from torch.utils.data import Dataset, DataLoader
15
+ from torchvision.datasets import CocoCaptions
16
+ from torchvision.datasets import ImageFolder
17
+ from torchvision.transforms import InterpolationMode
19
+ import torchvision.transforms as transforms
20
+ import glob
21
+
22
+
23
+ resizer_collection = {"nearest": InterpolationMode.NEAREST,
24
+ "box": InterpolationMode.BOX,
25
+ "bilinear": InterpolationMode.BILINEAR,
26
+ "hamming": InterpolationMode.HAMMING,
27
+ "bicubic": InterpolationMode.BICUBIC,
28
+ "lanczos": InterpolationMode.LANCZOS}
29
+
30
+
31
+ class CenterCropLongEdge(object):
32
+ """
33
+ this code is borrowed from https://github.com/ajbrock/BigGAN-PyTorch
34
+ MIT License
35
+ Copyright (c) 2019 Andy Brock
36
+ """
37
+ def __call__(self, img):
38
+ return transforms.functional.center_crop(img, min(img.size))
39
+
40
+ def __repr__(self):
41
+ return self.__class__.__name__
42
+
43
+
44
+ class EvalDataset(Dataset):
45
+ def __init__(self,
46
+ data_name,
47
+ data_dir,
48
+ data_type,
49
+ crop_long_edge=False,
50
+ resize_size=None,
51
+ resizer="lanczos",
52
+ normalize=True,
53
+ load_txt_from_file=False,
54
+ ):
55
+ super(EvalDataset, self).__init__()
56
+ self.data_name = data_name
57
+ self.data_dir = data_dir
58
+ self.data_type = data_type
59
+ self.resize_size = resize_size
60
+ self.normalize = normalize
61
+ self.load_txt_from_file = load_txt_from_file
62
+
63
+ self.trsf_list = [CenterCropLongEdge()]
64
+ if isinstance(self.resize_size, int):
65
+ self.trsf_list += [transforms.Resize(self.resize_size,
66
+ interpolation=resizer_collection[resizer])]
67
+ if self.normalize:
68
+ self.trsf_list += [transforms.ToTensor()]
69
+ self.trsf_list += [transforms.Normalize([0.5, 0.5, 0.5],
70
+ [0.5, 0.5, 0.5])]
71
+ else:
72
+ self.trsf_list += [transforms.PILToTensor()]
73
+ self.trsf = transforms.Compose(self.trsf_list)
74
+
75
+ self.load_dataset()
76
+
77
+ def natural_sort(self, l):
78
+ convert = lambda text: int(text) if text.isdigit() else text.lower()
79
+ alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
80
+ return sorted(l, key=alphanum_key)
81
+
82
+ def load_dataset(self):
83
+ if self.data_name == "coco2014":
84
+ if self.load_txt_from_file:
85
+ self.imagelist = self.natural_sort(glob.glob(os.path.join(self.data_dir, self.data_type, "*.%s" % "png")))
86
+ captionfile = os.path.join(self.data_dir, "captions.txt")
87
+ with io.open(captionfile, 'r', encoding="utf-8") as f:
88
+ self.captions = f.read().splitlines()
89
+ self.data = list(zip(self.imagelist, self.captions))
90
+ else:
91
+ self.data = CocoCaptions(root=os.path.join(self.data_dir,
92
+ "val2014"),
93
+ annFile=os.path.join(self.data_dir,
94
+ "annotations",
95
+ "captions_val2014.json"))
96
+ else:
97
+ root = os.path.join(self.data_dir, self.data_type)
98
+ self.data = ImageFolder(root=root)
99
+
100
+ def __len__(self):
101
+ num_dataset = len(self.data)
102
+ return num_dataset
103
+
104
+ def __getitem__(self, index):
105
+ if self.data_name == "coco2014":
106
+ img, txt = self.data[index]
107
+ if isinstance(img, str):
108
+ img = Image.open(img).convert("RGB")
109
+ if isinstance(txt, list):
110
+ txt = txt[random.randint(0, 4)]
111
+ return self.trsf(img), txt
112
+ else:
113
+ img, label = self.data[index]
114
+ return self.trsf(img), int(label)
115
+
116
+
117
+ def tensor2pil(image: torch.Tensor):
118
+ ''' output image : tensor to PIL
119
+ '''
120
+ if isinstance(image, list) or image.ndim == 4:
121
+ return [tensor2pil(im) for im in image]
122
+
123
+ assert image.ndim == 3
124
+ output_image = Image.fromarray(((image + 1.0) * 127.5).clamp(
125
+ 0.0, 255.0).to(torch.uint8).permute(1, 2, 0).detach().cpu().numpy())
126
+ return output_image
127
+
128
+
129
+ @torch.no_grad()
130
+ def compute_clip_score(
131
+ dataset: DataLoader, clip_model="ViT-B/32", device="cuda", how_many=5000):
132
+ print("Computing CLIP score")
133
+ import clip as openai_clip
134
+ if clip_model == "ViT-B/32":
135
+ clip, clip_preprocessor = openai_clip.load("ViT-B/32", device=device)
136
+ clip = clip.eval()
137
+ elif clip_model == "ViT-G/14":
138
+ import open_clip
139
+ clip, _, clip_preprocessor = open_clip.create_model_and_transforms("ViT-g-14", pretrained="laion2b_s12b_b42k")
140
+ clip = clip.to(device)
141
+ clip = clip.eval()
142
+ clip = clip.float()
143
+ else:
144
+ raise NotImplementedError
145
+
146
+ cos_sims = []
147
+ count = 0
148
+ for imgs, txts in tqdm(dataset):
149
+ imgs_pil = [clip_preprocessor(tensor2pil(img)) for img in imgs]
150
+ imgs = torch.stack(imgs_pil, dim=0).to(device)
151
+ tokens = openai_clip.tokenize(txts, truncate=True).to(device)
152
+ # Prepending text prompts with "A photo depicts "
153
+ # https://arxiv.org/abs/2104.08718
154
+ prepend_text = "A photo depicts "
155
+ prepend_text_token = openai_clip.tokenize(prepend_text)[:, 1:4].to(device)
156
+ prepend_text_tokens = prepend_text_token.expand(tokens.shape[0], -1)
157
+
158
+ start_tokens = tokens[:, :1]
159
+ new_text_tokens = torch.cat(
160
+ [start_tokens, prepend_text_tokens, tokens[:, 1:]], dim=1)[:, :77]
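+ # Each caption now reads "[SOT] A photo depicts <caption> ...", truncated to CLIP's 77-token context; any non-padding token left in the last column is replaced by the EOT id below.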
161
+ last_cols = new_text_tokens[:, 77 - 1:77]
162
+ last_cols[last_cols > 0] = 49407 # eot token
163
+ new_text_tokens = torch.cat([new_text_tokens[:, :76], last_cols], dim=1)
164
+
165
+ img_embs = clip.encode_image(imgs)
166
+ text_embs = clip.encode_text(new_text_tokens)
167
+
168
+ similarities = F.cosine_similarity(img_embs, text_embs, dim=1)
169
+ cos_sims.append(similarities)
170
+ count += similarities.shape[0]
171
+ if count >= how_many:
172
+ break
173
+
174
+ clip_score = torch.cat(cos_sims, dim=0)[:how_many].mean()
175
+ clip_score = clip_score.detach().cpu().numpy()
176
+ return clip_score
177
+
178
+
179
+ @torch.no_grad()
180
+ def compute_fid(fake_dir: Path, gt_dir: Path,
181
+ resize_size=None, feature_extractor="clip"):
182
+ from cleanfid import fid
183
+ center_crop_trsf = CenterCropLongEdge()
184
+ def resize_and_center_crop(image_np):
185
+ image_pil = Image.fromarray(image_np)
186
+ image_pil = center_crop_trsf(image_pil)
187
+
188
+ if resize_size is not None:
189
+ image_pil = image_pil.resize((resize_size, resize_size),
190
+ Image.LANCZOS)
191
+ return np.array(image_pil)
192
+
193
+ if feature_extractor == "inception":
194
+ model_name = "inception_v3"
195
+ elif feature_extractor == "clip":
196
+ model_name = "clip_vit_b_32"
197
+ else:
198
+ raise ValueError(
199
+ "Unrecognized feature extractor [%s]" % feature_extractor)
200
+ fid = fid.compute_fid(gt_dir,
201
+ fake_dir,
202
+ model_name=model_name,
203
+ custom_image_tranform=resize_and_center_crop)
204
+ return fid
205
+
206
+
207
+ def evaluate_model(opt):
208
+ ### Generated images
209
+ dset2 = EvalDataset(data_name=opt.ref_data,
210
+ data_dir=opt.fake_dir,
211
+ data_type="images",
212
+ crop_long_edge=True,
213
+ resize_size=opt.eval_res,
214
+ resizer="lanczos",
215
+ normalize=True,
216
+ load_txt_from_file=True if opt.ref_data == "coco2014" else False)
217
+
218
+ dset2_dataloader = DataLoader(dataset=dset2,
219
+ batch_size=opt.batch_size,
220
+ shuffle=False,
221
+ pin_memory=True,
222
+ drop_last=False)
223
+
224
+ if opt.ref_data == "coco2014":
225
+ clip_score = compute_clip_score(dset2_dataloader, clip_model=opt.clip_model4eval, how_many=opt.how_many)
226
+ print(f"CLIP score: {clip_score}")
227
+
228
+ ref_sub_folder_name = "val2014" if opt.ref_data == "coco2014" else opt.ref_type
229
+ fake_sub_folder_name = "images"
230
+ fid = compute_fid(
231
+ os.path.join(opt.ref_dir, ref_sub_folder_name),
232
+ os.path.join(opt.fake_dir, fake_sub_folder_name),
233
+ resize_size=opt.eval_res,
234
+ feature_extractor="inception")
235
+ print(f"FID_{opt.eval_res}px: {fid}")
236
+
237
+ txt_path = opt.fake_dir + '/score.txt'
238
+ print("writing to {}".format(txt_path))
239
+ with open(txt_path, 'w') as f:
240
+ print(f"CLIP score: {clip_score}", file=f)
241
+ print(f"FID_{opt.eval_res}px: {fid}", file=f)
242
+
243
+ return
244
+
245
+
246
+ if __name__ == "__main__":
247
+ import argparse
248
+ parser = argparse.ArgumentParser()
249
+ parser.add_argument("--fake_dir", required=True, default="/home/GigaGAN_images/", help="location of fake images for evaluation")
250
+ parser.add_argument("--ref_dir", required=True, default="/home/COCO/", help="location of the reference images for evaluation")
251
+ parser.add_argument("--ref_data", default="coco2014", type=str, help="in [imagenet2012, coco2014, laion4k]")
252
+ parser.add_argument("--ref_type", default="train/valid/test", help="Type of reference dataset")
253
+
254
+ parser.add_argument("--how_many", default=30000, type=int)
255
+ parser.add_argument("--clip_model4eval", default="ViT-B/32", type=str, help="[WO, ViT-B/32, ViT-G/14]")
256
+ parser.add_argument("--eval_res", default=256, type=int)
257
+ parser.add_argument("--batch_size", default=8, type=int)
258
+
259
+ opt, _ = parser.parse_known_args()
260
+ evaluate_model(opt)
language/README.md ADDED
@@ -0,0 +1,14 @@
1
+ ## Language models for text-conditional image generation
2
+
3
+ ### Requirements
4
+ ```
5
+ pip install ftfy
6
+ pip install transformers
7
+ pip install accelerate
8
+ pip install sentencepiece
9
+ pip install pandas
10
+ pip install bs4
11
+ ```
12
+
13
+ ### Language Models
14
+ Download the flan-t5-xl checkpoint from [flan-t5-xl](https://huggingface.co/google/flan-t5-xl) and place it in `./pretrained_models/t5-ckpt/`.
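+ 
+ A minimal usage sketch of the `T5Embedder` wrapper defined in `language/t5.py`, assuming the checkpoint sits under `./pretrained_models/t5-ckpt/flan-t5-xl` as described above (the caption is a placeholder):
+ 
+ ```python
+ import torch
+ from language.t5 import T5Embedder
+ 
+ t5 = T5Embedder(
+     device="cuda",
+     dir_or_name="flan-t5-xl",
+     local_cache=True,
+     cache_dir="./pretrained_models/t5-ckpt",
+     torch_dtype=torch.bfloat16,
+     model_max_length=120,
+ )
+ # Returns (batch, 120, hidden) embeddings plus the (batch, 120) attention mask.
+ embs, mask = t5.get_text_embeddings(["a photo of a corgi wearing sunglasses"])
+ print(embs.shape, mask.sum(dim=1))
+ ```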
language/__init__.py ADDED
File without changes
language/__pycache__/extract_t5_feature.cpython-310.pyc ADDED
Binary file (4.01 kB). View file
 
language/__pycache__/t5.cpython-310.pyc ADDED
Binary file (5.51 kB). View file
 
language/extract_t5_feature.py ADDED
@@ -0,0 +1,129 @@
1
+ import torch
2
+ torch.backends.cuda.matmul.allow_tf32 = True
3
+ torch.backends.cudnn.allow_tf32 = True
4
+ import torch.distributed as dist
5
+ from torch.utils.data import Dataset, DataLoader
6
+ from torch.utils.data.distributed import DistributedSampler
7
+ import numpy as np
8
+ import argparse
9
+ import os
10
+ import json
11
+
12
+ from utils.distributed import init_distributed_mode
13
+ from language.t5 import T5Embedder
14
+
15
+ CAPTION_KEY = {
16
+ 'blip': 0,
17
+ 'llava': 1,
18
+ 'llava_first': 2,
19
+ }
20
+ #################################################################################
21
+ # Training Helper Functions #
22
+ #################################################################################
23
+ class CustomDataset(Dataset):
24
+ def __init__(self, lst_dir, start, end, caption_key, trunc_caption=False):
25
+ img_path_list = []
26
+ for lst_name in sorted(os.listdir(lst_dir))[start: end+1]:
27
+ if not lst_name.endswith('.jsonl'):
28
+ continue
29
+ file_path = os.path.join(lst_dir, lst_name)
30
+ with open(file_path, 'r') as file:
31
+ for line_idx, line in enumerate(file):
32
+ data = json.loads(line)
33
+ # caption = data[caption_key]
34
+ caption = data['text'][CAPTION_KEY[caption_key]]
35
+ code_dir = file_path.split('/')[-1].split('.')[0]
36
+ if trunc_caption:
37
+ caption = caption.split('.')[0]
38
+ img_path_list.append((caption, code_dir, line_idx))
39
+ self.img_path_list = img_path_list
40
+
41
+ def __len__(self):
42
+ return len(self.img_path_list)
43
+
44
+ def __getitem__(self, index):
45
+ caption, code_dir, code_name = self.img_path_list[index]
46
+ return caption, code_dir, code_name
47
+
48
+
49
+
50
+ #################################################################################
51
+ # Training Loop #
52
+ #################################################################################
53
+ def main(args):
54
+ """
55
+ Extracts T5 text embeddings for captions and saves them as .npy files.
56
+ """
57
+ assert torch.cuda.is_available(), "Feature extraction currently requires at least one GPU."
58
+
59
+ # Setup DDP:
60
+ # dist.init_process_group("nccl")
61
+ init_distributed_mode(args)
62
+ rank = dist.get_rank()
63
+ device = rank % torch.cuda.device_count()
64
+ seed = args.global_seed * dist.get_world_size() + rank
65
+ torch.manual_seed(seed)
66
+ torch.cuda.set_device(device)
67
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
68
+
69
+ # Setup a feature folder:
70
+ if rank == 0:
71
+ os.makedirs(args.t5_path, exist_ok=True)
72
+
73
+ # Setup data:
74
+ print(f"Dataset is preparing...")
75
+ dataset = CustomDataset(args.data_path, args.data_start, args.data_end, args.caption_key, args.trunc_caption)
76
+ sampler = DistributedSampler(
77
+ dataset,
78
+ num_replicas=dist.get_world_size(),
79
+ rank=rank,
80
+ shuffle=False,
81
+ seed=args.global_seed
82
+ )
83
+ loader = DataLoader(
84
+ dataset,
85
+ batch_size=1, # important!
86
+ shuffle=False,
87
+ sampler=sampler,
88
+ num_workers=args.num_workers,
89
+ pin_memory=True,
90
+ drop_last=False
91
+ )
92
+ print(f"Dataset contains {len(dataset):,} images")
93
+
94
+ precision = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.precision]
95
+ assert os.path.exists(args.t5_model_path)
96
+ t5_xxl = T5Embedder(
97
+ device=device,
98
+ local_cache=True,
99
+ cache_dir=args.t5_model_path,
100
+ dir_or_name=args.t5_model_type,
101
+ torch_dtype=precision
102
+ )
103
+
104
+ for caption, code_dir, code_name in loader:
105
+ caption_embs, emb_masks = t5_xxl.get_text_embeddings(caption)
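+ # batch_size is 1, so emb_masks.sum() equals this caption's token count; the padded positions are sliced off before saving.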
106
+ valid_caption_embs = caption_embs[:, :emb_masks.sum()]
107
+ x = valid_caption_embs.to(torch.float32).detach().cpu().numpy()
108
+ os.makedirs(os.path.join(args.t5_path, code_dir[0]), exist_ok=True)
109
+ np.save(os.path.join(args.t5_path, code_dir[0], '{}.npy'.format(code_name.item())), x)
110
+ print(code_name.item())
111
+
112
+ dist.destroy_process_group()
113
+
114
+
115
+ if __name__ == "__main__":
116
+ parser = argparse.ArgumentParser()
117
+ parser.add_argument("--data-path", type=str, required=True)
118
+ parser.add_argument("--t5-path", type=str, required=True)
119
+ parser.add_argument("--data-start", type=int, required=True)
120
+ parser.add_argument("--data-end", type=int, required=True)
121
+ parser.add_argument("--caption-key", type=str, default='blip', choices=list(CAPTION_KEY.keys()))
122
+ parser.add_argument("--trunc-caption", action='store_true', default=False)
123
+ parser.add_argument("--t5-model-path", type=str, default='./pretrained_models/t5-ckpt')
124
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
125
+ parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
126
+ parser.add_argument("--global-seed", type=int, default=0)
127
+ parser.add_argument("--num-workers", type=int, default=24)
128
+ args = parser.parse_args()
129
+ main(args)
language/t5.py ADDED
@@ -0,0 +1,201 @@
1
+ # Modified from:
2
+ # PixArt: https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/t5.py
3
+ import os
4
+ import re
5
+ import html
6
+ import urllib.parse as ul
7
+
8
+ import ftfy
9
+ import torch
10
+ from bs4 import BeautifulSoup
11
+ from transformers import T5EncoderModel, AutoTokenizer
12
+ from huggingface_hub import hf_hub_download
13
+
14
+
15
+ class T5Embedder:
16
+ available_models = ['t5-v1_1-xxl', 't5-v1_1-xl', 'flan-t5-xl']
17
+ bad_punct_regex = re.compile(r'['+'#®•©™&@·º½¾¿¡§~'+'\)'+'\('+'\]'+'\['+'\}'+'\{'+'\|'+'\\'+'\/'+'\*' + r']{1,}') # noqa
18
+
19
+ def __init__(self, device, dir_or_name='t5-v1_1-xxl', *, local_cache=False, cache_dir=None, hf_token=None, use_text_preprocessing=True,
20
+ t5_model_kwargs=None, torch_dtype=None, use_offload_folder=None, model_max_length=120):
21
+ self.device = torch.device(device)
22
+ self.torch_dtype = torch_dtype or torch.bfloat16
23
+ if t5_model_kwargs is None:
24
+ t5_model_kwargs = {'low_cpu_mem_usage': True, 'torch_dtype': self.torch_dtype}
25
+ t5_model_kwargs['device_map'] = {'shared': self.device, 'encoder': self.device}
26
+
27
+ self.use_text_preprocessing = use_text_preprocessing
28
+ self.hf_token = hf_token
29
+ self.cache_dir = cache_dir or os.path.expanduser('~/.cache/IF_')
30
+ self.dir_or_name = dir_or_name
31
+ tokenizer_path, path = dir_or_name, dir_or_name
32
+ if local_cache:
33
+ cache_dir = os.path.join(self.cache_dir, dir_or_name)
34
+ tokenizer_path, path = cache_dir, cache_dir
35
+ elif dir_or_name in self.available_models:
36
+ cache_dir = os.path.join(self.cache_dir, dir_or_name)
37
+ for filename in [
38
+ 'config.json', 'special_tokens_map.json', 'spiece.model', 'tokenizer_config.json',
39
+ 'pytorch_model.bin.index.json', 'pytorch_model-00001-of-00002.bin', 'pytorch_model-00002-of-00002.bin'
40
+ ]:
41
+ hf_hub_download(repo_id=f'DeepFloyd/{dir_or_name}', filename=filename, cache_dir=cache_dir,
42
+ force_filename=filename, token=self.hf_token)
43
+ tokenizer_path, path = cache_dir, cache_dir
44
+ else:
45
+ cache_dir = os.path.join(self.cache_dir, 't5-v1_1-xxl')
46
+ for filename in [
47
+ 'config.json', 'special_tokens_map.json', 'spiece.model', 'tokenizer_config.json',
48
+ ]:
49
+ hf_hub_download(repo_id='DeepFloyd/t5-v1_1-xxl', filename=filename, cache_dir=cache_dir,
50
+ force_filename=filename, token=self.hf_token)
51
+ tokenizer_path = cache_dir
52
+
53
+ print(tokenizer_path)
54
+ self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
55
+ self.model = T5EncoderModel.from_pretrained(path, **t5_model_kwargs).eval()
56
+ self.model_max_length = model_max_length
57
+
58
+ def get_text_embeddings(self, texts):
59
+ texts = [self.text_preprocessing(text) for text in texts]
60
+
61
+ text_tokens_and_mask = self.tokenizer(
62
+ texts,
63
+ max_length=self.model_max_length,
64
+ padding='max_length',
65
+ truncation=True,
66
+ return_attention_mask=True,
67
+ add_special_tokens=True,
68
+ return_tensors='pt'
69
+ )
70
+
71
+ text_tokens_and_mask['input_ids'] = text_tokens_and_mask['input_ids']
72
+ text_tokens_and_mask['attention_mask'] = text_tokens_and_mask['attention_mask']
73
+
74
+ with torch.no_grad():
75
+ text_encoder_embs = self.model(
76
+ input_ids=text_tokens_and_mask['input_ids'].to(self.device),
77
+ attention_mask=text_tokens_and_mask['attention_mask'].to(self.device),
78
+ )['last_hidden_state'].detach()
79
+ return text_encoder_embs, text_tokens_and_mask['attention_mask'].to(self.device)
80
+
81
+ def text_preprocessing(self, text):
82
+ if self.use_text_preprocessing:
83
+ # The exact text cleaning as was in the training stage:
84
+ text = self.clean_caption(text)
85
+ text = self.clean_caption(text)
86
+ return text
87
+ else:
88
+ return text.lower().strip()
89
+
90
+ @staticmethod
91
+ def basic_clean(text):
92
+ text = ftfy.fix_text(text)
93
+ text = html.unescape(html.unescape(text))
94
+ return text.strip()
95
+
96
+ def clean_caption(self, caption):
97
+ caption = str(caption)
98
+ caption = ul.unquote_plus(caption)
99
+ caption = caption.strip().lower()
100
+ caption = re.sub('<person>', 'person', caption)
101
+ # urls:
102
+ caption = re.sub(
103
+ r'\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))', # noqa
104
+ '', caption) # regex for urls
105
+ caption = re.sub(
106
+ r'\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))', # noqa
107
+ '', caption) # regex for urls
108
+ # html:
109
+ caption = BeautifulSoup(caption, features='html.parser').text
110
+
111
+ # @<nickname>
112
+ caption = re.sub(r'@[\w\d]+\b', '', caption)
113
+
114
+ # 31C0—31EF CJK Strokes
115
+ # 31F0—31FF Katakana Phonetic Extensions
116
+ # 3200—32FF Enclosed CJK Letters and Months
117
+ # 3300—33FF CJK Compatibility
118
+ # 3400—4DBF CJK Unified Ideographs Extension A
119
+ # 4DC0—4DFF Yijing Hexagram Symbols
120
+ # 4E00—9FFF CJK Unified Ideographs
121
+ caption = re.sub(r'[\u31c0-\u31ef]+', '', caption)
122
+ caption = re.sub(r'[\u31f0-\u31ff]+', '', caption)
123
+ caption = re.sub(r'[\u3200-\u32ff]+', '', caption)
124
+ caption = re.sub(r'[\u3300-\u33ff]+', '', caption)
125
+ caption = re.sub(r'[\u3400-\u4dbf]+', '', caption)
126
+ caption = re.sub(r'[\u4dc0-\u4dff]+', '', caption)
127
+ caption = re.sub(r'[\u4e00-\u9fff]+', '', caption)
128
+ #######################################################
129
+
130
+ # all kinds of dashes --> "-"
131
+ caption = re.sub(
132
+ r'[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+', # noqa
133
+ '-', caption)
134
+
135
+ # normalize quotation marks to a single standard
136
+ caption = re.sub(r'[`´«»“”¨]', '"', caption)
137
+ caption = re.sub(r'[‘’]', "'", caption)
138
+
139
+ # &quot;
140
+ caption = re.sub(r'&quot;?', '', caption)
141
+ # &amp
142
+ caption = re.sub(r'&amp', '', caption)
143
+
144
+ # ip addresses:
145
+ caption = re.sub(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', ' ', caption)
146
+
147
+ # article ids:
148
+ caption = re.sub(r'\d:\d\d\s+$', '', caption)
149
+
150
+ # \n
151
+ caption = re.sub(r'\\n', ' ', caption)
152
+
153
+ # "#123"
154
+ caption = re.sub(r'#\d{1,3}\b', '', caption)
155
+ # "#12345.."
156
+ caption = re.sub(r'#\d{5,}\b', '', caption)
157
+ # "123456.."
158
+ caption = re.sub(r'\b\d{6,}\b', '', caption)
159
+ # filenames:
160
+ caption = re.sub(r'[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption)
161
+
162
+ #
163
+ caption = re.sub(r'[\"\']{2,}', r'"', caption) # """AUSVERKAUFT"""
164
+ caption = re.sub(r'[\.]{2,}', r' ', caption) # """AUSVERKAUFT"""
165
+
166
+ caption = re.sub(self.bad_punct_regex, r' ', caption) # ***AUSVERKAUFT***, #AUSVERKAUFT
167
+ caption = re.sub(r'\s+\.\s+', r' ', caption) # " . "
168
+
169
+ # this-is-my-cute-cat / this_is_my_cute_cat
170
+ regex2 = re.compile(r'(?:\-|\_)')
171
+ if len(re.findall(regex2, caption)) > 3:
172
+ caption = re.sub(regex2, ' ', caption)
173
+
174
+ caption = self.basic_clean(caption)
175
+
176
+ caption = re.sub(r'\b[a-zA-Z]{1,3}\d{3,15}\b', '', caption) # jc6640
177
+ caption = re.sub(r'\b[a-zA-Z]+\d+[a-zA-Z]+\b', '', caption) # jc6640vc
178
+ caption = re.sub(r'\b\d+[a-zA-Z]+\d+\b', '', caption) # 6640vc231
179
+
180
+ caption = re.sub(r'(worldwide\s+)?(free\s+)?shipping', '', caption)
181
+ caption = re.sub(r'(free\s)?download(\sfree)?', '', caption)
182
+ caption = re.sub(r'\bclick\b\s(?:for|on)\s\w+', '', caption)
183
+ caption = re.sub(r'\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?', '', caption)
184
+ caption = re.sub(r'\bpage\s+\d+\b', '', caption)
185
+
186
+ caption = re.sub(r'\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b', r' ', caption) # j2d1a2a...
187
+
188
+ caption = re.sub(r'\b\d+\.?\d*[xх×]\d+\.?\d*\b', '', caption)
189
+
190
+ caption = re.sub(r'\b\s+\:\s+', r': ', caption)
191
+ caption = re.sub(r'(\D[,\./])\b', r'\1 ', caption)
192
+ caption = re.sub(r'\s+', ' ', caption)
193
+
194
+ caption = caption.strip()
195
+
196
+ caption = re.sub(r'^[\"\']([\w\W]+)[\"\']$', r'\1', caption)
197
+ caption = re.sub(r'^[\'\_,\-\:;]', r'', caption)
198
+ caption = re.sub(r'[\'\_,\-\:\-\+]$', r'', caption)
199
+ caption = re.sub(r'^\.\S+$', '', caption)
200
+
201
+ return caption.strip()
scripts/autoregressive/extract_codes_c2i.sh ADDED
@@ -0,0 +1,7 @@
1
+ #!/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=2 --node_rank=0 \
6
+ --master_port=12335 \
7
+ autoregressive/train/extract_codes_c2i.py "$@"
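+ # "$@" forwards any additional command-line flags (e.g. data and output paths) to extract_codes_c2i.py.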