Emotion-Director committed on
Commit 017bf8e · verified · 1 Parent(s): 95fe79b

Upload folder using huggingface_hub

.DS_Store ADDED
Binary file (6.15 kB).
 
MMP_Diffusion_Lora_config.py ADDED
@@ -0,0 +1,368 @@
1
+ import os
2
+ import argparse
3
+ from transformers import PretrainedConfig
4
+
5
+
6
+ def import_model_class_from_model_name_or_path(
7
+ pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
8
+ ):
9
+ text_encoder_config = PretrainedConfig.from_pretrained(
10
+ pretrained_model_name_or_path, subfolder=subfolder, revision=revision
11
+ )
12
+ model_class = text_encoder_config.architectures[0]
13
+
14
+ if model_class == "CLIPTextModel":
15
+ from transformers import CLIPTextModel
16
+
17
+ return CLIPTextModel
18
+ elif model_class == "CLIPTextModelWithProjection":
19
+ from transformers import CLIPTextModelWithProjection
20
+
21
+ return CLIPTextModelWithProjection
22
+ else:
23
+ raise ValueError(f"{model_class} is not supported.")
24
+
25
+
26
+ def parse_args():
27
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
28
+ parser.add_argument(
29
+ "--input_perturbation", type=float, default=0, help="The scale of input perturbation. Recommended 0.1."
30
+ )
31
+ parser.add_argument(
32
+ "--pretrained_model_name_or_path",
33
+ type=str,
34
+ default='/cpfs04/user/liudawei/jgl/projects/download_models/stable-diffusion/SDXL',
35
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
36
+ )
37
+ parser.add_argument(
38
+ "--revision",
39
+ type=str,
40
+ default=None,
41
+ required=False,
42
+ help="Revision of pretrained model identifier from huggingface.co/models.",
43
+ )
44
+ parser.add_argument(
45
+ "--dataset_name",
46
+ type=str,
47
+ default='custom',
48
+ help=(
49
+ "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
50
+ " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
51
+ " or to a folder containing files that 🤗 Datasets can understand."
52
+ ),
53
+ )
54
+ parser.add_argument(
55
+ "--dataset_path",
56
+ type=str,
57
+ default='/cpfs02/shared/llmit6/liudawei/jgl/EmotionDPO/data/ETI_emotion.parquet',
58
+ help=(
59
+ "The path of ETI dataset"
60
+ ),
61
+ )
62
+ parser.add_argument(
63
+ "--dataset_config_name",
64
+ type=str,
65
+ default=None,
66
+ help="The config of the Dataset, leave as None if there's only one config.",
67
+ )
68
+ parser.add_argument(
69
+ "--train_data_dir",
70
+ type=str,
71
+ default=None,
72
+ help=(
73
+ "A folder containing the training data. Folder contents must follow the structure described in"
74
+ " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
75
+ " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
76
+ ),
77
+ )
78
+ parser.add_argument(
79
+ "--visual_prompts_dir",
80
+ type=str,
81
+ default='features/origin',
82
+ help="Path to initial visual prompts",
83
+ )
84
+ parser.add_argument(
85
+ "--prompt_len",
86
+ type=int,
87
+ default=16,
88
+ help="visual prompts length",
89
+ )
90
+ parser.add_argument(
91
+ "--image_column", type=str, default="image", help="The column of the dataset containing an image."
92
+ )
93
+ parser.add_argument(
94
+ "--caption_column",
95
+ type=str,
96
+ default="caption",
97
+ help="The column of the dataset containing a caption or a list of captions.",
98
+ )
99
+ parser.add_argument(
100
+ "--max_train_samples",
101
+ type=int,
102
+ default=None,
103
+ # default=256,
104
+ help=(
105
+ "For debugging purposes or quicker training, truncate the number of training examples to this "
106
+ "value if set."
107
+ ),
108
+ )
109
+ parser.add_argument(
110
+ "--cache_dir",
111
+ type=str,
112
+ default='/cpfs04/user/liudawei/jgl/projects/download_models',
113
+ help="The directory where the downloaded models and datasets will be stored.",
114
+ )
115
+ parser.add_argument("--seed",
116
+ type=int,
117
+ # default=None,
118
+ default=42,
119
+ # was random for submission, need to test that not distributing same noise etc across devices
120
+ help="A seed for reproducible training."
121
+ )
122
+ parser.add_argument(
123
+ "--resolution",
124
+ type=int,
125
+ default=512,
126
+ help=(
127
+ "The resolution for input images, all the images in the dataset will be resized to this"
128
+ " resolution"
129
+ ),
130
+ )
131
+ parser.add_argument(
132
+ "--random_crop",
133
+ default=False,
134
+ action="store_true",
135
+ help=(
136
+ "If set the images will be randomly"
137
+ " cropped (instead of center). The images will be resized to the resolution first before cropping."
138
+ ),
139
+ )
140
+ parser.add_argument(
141
+ "--no_hflip",
142
+ action="store_true",
143
+ help="whether to supress horizontal flipping",
144
+ )
145
+ parser.add_argument(
146
+ "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
147
+ )
148
+ parser.add_argument(
149
+ "--num_train_epochs", type=int, default=3
150
+ )
151
+ parser.add_argument(
152
+ "--max_train_steps",
153
+ type=int,
154
+ default=2000,
155
+ help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
156
+ )
157
+ parser.add_argument(
158
+ "--gradient_accumulation_steps",
159
+ type=int,
160
+ default=1,
161
+ help="Number of updates steps to accumulate before performing a backward/update pass.",
162
+ )
163
+ parser.add_argument(
164
+ "--gradient_checkpointing",
165
+ action="store_true",
166
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
167
+ )
168
+ parser.add_argument(
169
+ "--learning_rate_unet",
170
+ type=float,
171
+ default=1e-6,
172
+ help="Initial learning rate (after the potential warmup period) to use.",
173
+ )
174
+ parser.add_argument(
175
+ "--learning_rate_lora",
176
+ type=float,
177
+ default=1e-5,
178
+ help="Initial learning rate (after the potential warmup period) to use.",
179
+ )
180
+ parser.add_argument(
181
+ "--learning_rate_prompts",
182
+ type=float,
183
+ default=1e-5,
184
+ help="Initial learning rate (after the potential warmup period) to use.",
185
+ )
186
+ parser.add_argument(
187
+ "--scale_lr",
188
+ action="store_false",
189
+ default=False,
190
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
191
+ )
192
+ parser.add_argument(
193
+ "--lr_scheduler",
194
+ type=str,
195
+ default="constant_with_warmup",
196
+ help=(
197
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
198
+ ' "constant", "constant_with_warmup"]'
199
+ ),
200
+ )
201
+ parser.add_argument(
202
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
203
+ )
204
+ parser.add_argument(
205
+ "--use_adafactor", action="store_true", help="Whether or not to use adafactor (should save mem)"
206
+ )
207
+ # Bram Note: Haven't looked @ this yet
208
+ parser.add_argument(
209
+ "--allow_tf32",
210
+ action="store_true",
211
+ help=(
212
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
213
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
214
+ ),
215
+ )
216
+ parser.add_argument(
217
+ "--dataloader_num_workers",
218
+ type=int,
219
+ default=16,
220
+ help=(
221
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
222
+ ),
223
+ )
224
+ parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
225
+ parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
226
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
227
+ parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
228
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
229
+ parser.add_argument(
230
+ "--hub_model_id",
231
+ type=str,
232
+ default=None,
233
+ help="The name of the repository to keep in sync with the local `output_dir`.",
234
+ )
235
+ parser.add_argument(
236
+ "--logging_dir",
237
+ type=str,
238
+ default="logs",
239
+ help=(
240
+ "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
241
+ " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
242
+ ),
243
+ )
244
+ parser.add_argument(
245
+ "--mixed_precision",
246
+ type=str,
247
+ default="no",
248
+ choices=["no", "fp16", "bf16"],
249
+ help=(
250
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
251
+ " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
252
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
253
+ ),
254
+ )
255
+ parser.add_argument(
256
+ "--report_to",
257
+ type=str,
258
+ default="wandb",
259
+ help=(
260
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
261
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
262
+ ),
263
+ )
264
+ parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
265
+ parser.add_argument(
266
+ "--checkpointing_steps",
267
+ type=int,
268
+ default=100,
269
+ help=(
270
+ "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
271
+ " training using `--resume_from_checkpoint`."
272
+ ),
273
+ )
274
+ parser.add_argument(
275
+ "--resume_from_checkpoint",
276
+ type=str,
277
+ default='latest',
278
+ help=(
279
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
280
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
281
+ ),
282
+ )
283
+ parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
284
+ parser.add_argument(
285
+ "--tracker_project_name",
286
+ type=str,
287
+ default="EmotionDPO",
288
+ help="exp group name",
289
+ )
290
+ parser.add_argument(
291
+ "--tracker_run_name",
292
+ type=str,
293
+ default="emotion_dpo_lora_v0_0_3_sdxl_2",
294
+ help="exp name",
295
+ )
296
+ parser.add_argument(
297
+ "--output_dir",
298
+ type=str,
299
+ default="log_training/emotion_dpo_lora_v0_0_3_sdxl_2",
300
+ help="The output directory where the model predictions and checkpoints will be written.",
301
+ )
302
+
303
+ ## SDXL
304
+ parser.add_argument(
305
+ "--pretrained_vae_model_name_or_path",
306
+ type=str,
307
+ default=None,
308
+ help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.",
309
+ )
310
+ parser.add_argument("--sdxl", action='store_false', help="Train sdxl")
311
+
312
+ ## DPO
313
+ parser.add_argument("--sft", action='store_true', help="Run Supervised Fine-Tuning instead of Direct Preference Optimization")
314
+ parser.add_argument("--beta_dpo", type=float, default=5000, help="The beta DPO temperature controlling strength of KL penalty")
315
+ parser.add_argument(
316
+ "--hard_skip_resume", action="store_true", help="Load weights etc. but don't iter through loader for loader resume, useful b/c resume takes forever"
317
+ )
318
+ parser.add_argument(
319
+ "--unet_init", type=str, default='', help="Initialize start of run from unet (not compatible w/ checkpoint load)"
320
+ )
321
+ parser.add_argument(
322
+ "--proportion_empty_prompts",
323
+ type=float,
324
+ default=0.2,
325
+ help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).",
326
+ )
327
+ parser.add_argument(
328
+ "--split", type=str, default='train', help="Datasplit"
329
+ )
330
+ parser.add_argument(
331
+ "--choice_model", type=str, default='', help="Model to use for ranking (override dataset PS label_0/1). choices: aes, clip, hps, pickscore"
332
+ )
333
+ parser.add_argument(
334
+ "--dreamlike_pairs_only", action="store_true", help="Only train on pairs where both generations are from dreamlike"
335
+ )
336
+ parser.add_argument(
337
+ "--use_lora",
338
+ action="store_true",
339
+ default=True,
340
+ help="Whether or not to use LoRA (Low-Rank Adaptation).",
341
+ )
342
+ parser.add_argument(
343
+ "--lora_rank",
344
+ type=int,
345
+ default=64,
346
+ help="Rank parameter for LoRA (Low-Rank Adaptation).",
347
+ )
348
+
349
+ args = parser.parse_args()
350
+ env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
351
+ if env_local_rank != -1 and env_local_rank != args.local_rank:
352
+ args.local_rank = env_local_rank
353
+
354
+ # Sanity checks
355
+ if args.dataset_name is None and args.train_data_dir is None:
356
+ raise ValueError("Need either a dataset name or a training folder.")
357
+
358
+ ## SDXL
359
+ if args.sdxl:
360
+ print("Running SDXL")
361
+ if args.resolution is None:
362
+ if args.sdxl:
363
+ args.resolution = 512
364
+ else:
365
+ args.resolution = 512
366
+
367
+ args.train_method = 'sft' if args.sft else 'dpo'
368
+ return args
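A quick way to sanity-check this config module is to import parse_args() and feed it a simulated command line; the sketch below is illustrative only and not part of the commit — the model ID and dataset path are placeholders, not the authors' cluster paths.

# Illustrative usage sketch (assumes MMP_Diffusion_Lora_config.py is importable).
import sys
from MMP_Diffusion_Lora_config import parse_args

# Simulate a command line; values here are placeholders.
sys.argv = [
    "MMP_Diffusion_Lora_train.py",
    "--pretrained_model_name_or_path", "stabilityai/stable-diffusion-xl-base-1.0",
    "--dataset_path", "data/ETI_emotion.parquet",
    "--train_batch_size", "4",
]
args = parse_args()
print(args.train_method)  # 'dpo' unless --sft is passed
print(args.resolution)    # 512 by default
print(args.lora_rank)     # 64 by default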
MMP_Diffusion_Lora_train.py ADDED
@@ -0,0 +1,861 @@
1
+ import io
2
+ import logging
3
+ import math
4
+ import os
5
+ os.environ["CUDA_VISIBLE_DEVICES"] = "1"
6
+ import random
7
+ import shutil
8
+ import sys
9
+ sys.path.append('./')
10
+ from pathlib import Path
11
+
12
+ import accelerate
13
+ import datasets
14
+ import numpy as np
15
+ from PIL import Image
16
+ import torch
17
+ import torch.nn.functional as F
18
+ import torch.utils.checkpoint
19
+ import transformers
20
+ from accelerate import Accelerator
21
+ from accelerate.logging import get_logger
22
+ from accelerate.state import AcceleratorState
23
+ from accelerate.utils import ProjectConfiguration, set_seed
24
+ from datasets import load_dataset
25
+ from huggingface_hub import create_repo, upload_folder
26
+ from packaging import version
27
+ from torchvision import transforms
28
+ from tqdm.auto import tqdm
29
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
30
+ from transformers.utils import ContextManagers
31
+
32
+ import diffusers
33
+ from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, StableDiffusionXLPipeline, UNet2DConditionModel
34
+ from models.unet_2d_condition import UNet2DLoRAConditionModel
35
+ from models.lora import add_lora_to_model
36
+ from diffusers.optimization import get_scheduler
37
+ from diffusers.utils import check_min_version, deprecate, is_wandb_available, make_image_grid
38
+ from diffusers.utils.import_utils import is_xformers_available
39
+ from MMP_Diffusion_Lora_config import parse_args, import_model_class_from_model_name_or_path
40
+
41
+ from peft.utils import get_peft_model_state_dict
42
+ from diffusers.utils import convert_state_dict_to_diffusers
43
+ from models.visual_prompts import EmotionEmbedding, EmotionEmbedding2
44
+ import copy
45
+
46
+ if is_wandb_available():
47
+ import wandb
48
+
49
+
50
+ ## SDXL
51
+ import functools
52
+ import gc
53
+ from torchvision.transforms.functional import crop
54
+ from transformers import AutoTokenizer
55
+
56
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risk.
57
+ check_min_version("0.20.0")
58
+
59
+ logger = get_logger(__name__, log_level="INFO")
60
+
61
+ # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
62
+ def encode_prompt_sdxl(batch, text_encoders, tokenizers, proportion_empty_prompts, caption_column, is_train=True):
63
+ prompt_embeds_list = []
64
+ prompt_batch = batch[caption_column]
65
+
66
+ captions = []
67
+ for caption in prompt_batch:
68
+ if random.random() < proportion_empty_prompts:
69
+ captions.append("")
70
+ elif isinstance(caption, str):
71
+ captions.append(caption)
72
+ elif isinstance(caption, (list, np.ndarray)):
73
+ # take a random caption if there are multiple
74
+ captions.append(random.choice(caption) if is_train else caption[0])
75
+
76
+ for tokenizer, text_encoder in zip(tokenizers, text_encoders):
77
+
78
+ text_input_ids = tokenizer(
79
+ captions,
80
+ padding="max_length",
81
+ max_length=tokenizer.model_max_length,
82
+ truncation=True,
83
+ return_tensors="pt",
84
+ ).input_ids
85
+
86
+ with torch.no_grad():
87
+ prompt_embeds = text_encoder(
88
+ text_input_ids.to('cuda'),
89
+ output_hidden_states=True,
90
+ )
91
+
92
+ # We are only ALWAYS interested in the pooled output of the final text encoder
93
+ # torch.Size([32, 1280]) this
94
+ # odict_keys(['text_embeds', 'last_hidden_state', 'hidden_states'])
95
+ if isinstance(text_encoder, CLIPTextModel):
96
+ pass
97
+ elif isinstance(text_encoder, CLIPTextModelWithProjection):
98
+ pooled_prompt_embeds = prompt_embeds[0]
99
+
100
+ # "2" because SDXL always indexes from the penultimate layer.
101
+ # torch.Size([32, 77, 768/1280])
102
+ prompt_embeds = prompt_embeds.hidden_states[-2]
103
+ bs_embed, seq_len, _ = prompt_embeds.shape
104
+ prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
105
+ prompt_embeds_list.append(prompt_embeds)
106
+
107
+ # torch.Size([32, 77, 768+1280=2048])
108
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
109
+ # torch.Size([32, 1280])
110
+ pooled_prompt_embeds = pooled_prompt_embeds.view(bs_embed, -1)
111
+
112
+ return {
113
+ "prompt_embeds": prompt_embeds,
114
+ "pooled_prompt_embeds": pooled_prompt_embeds,
115
+ }
116
+
117
+
118
+ def init_emotion_prompts(visual_prompts_dir, is_sdxl=True, prompt_len=16):
119
+ emotions = ["amusement", "anger", "awe", "contentment",
120
+ "disgust", "excitement", "fear", "sadness"]
121
+ if is_sdxl:
122
+ output_dim = 2048
123
+ else:
124
+ output_dim = 768
125
+ feature_names = ["clip", "vgg", "dinov2"]
126
+ visual_prompts = EmotionEmbedding(emotions, visual_prompts_dir,
127
+ feature_names, output_dim=output_dim, prompt_len=prompt_len)
128
+ return visual_prompts
129
+
130
+ def init_emotion_prompts2(is_sdxl=True):
131
+ emotions = ["amusement", "anger", "awe", "contentment",
132
+ "disgust", "excitement", "fear", "sadness"]
133
+ if is_sdxl:
134
+ output_dim = 2048
135
+ else:
136
+ output_dim = 768
137
+ input_dim = 2048
138
+ visual_prompts = EmotionEmbedding2(emotions, input_dim, output_dim=output_dim)
139
+ return visual_prompts
140
+
141
+ def random_sample_emotions(anchor_emotions):
142
+ emotions = ["amusement", "anger", "awe", "contentment", "disgust",
143
+ "excitement", "fear", "sadness"]
144
+ random_emotions = []
145
+ for anchor in anchor_emotions:
146
+ available_emotions = [emotion for emotion in emotions if emotion != anchor]
147
+ random_choice = random.choice(available_emotions)
148
+ random_emotions.append(random_choice)
149
+ return random_emotions
150
+
151
+
152
+ def main():
153
+ args = parse_args()
154
+ #### START ACCELERATOR BOILERPLATE ###
155
+ logging_dir = os.path.join(args.output_dir, args.logging_dir)
156
+
157
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
158
+
159
+ accelerator = Accelerator(
160
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
161
+ mixed_precision=args.mixed_precision,
162
+ log_with=args.report_to,
163
+ project_config=accelerator_project_config,
164
+ )
165
+
166
+ # Make one log on every process with the configuration for debugging.
167
+ logging.basicConfig(
168
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
169
+ datefmt="%m/%d/%Y %H:%M:%S",
170
+ level=logging.INFO,
171
+ )
172
+ logger.info(accelerator.state, main_process_only=False)
173
+ if accelerator.is_local_main_process:
174
+ datasets.utils.logging.set_verbosity_warning()
175
+ transformers.utils.logging.set_verbosity_warning()
176
+ diffusers.utils.logging.set_verbosity_info()
177
+ else:
178
+ datasets.utils.logging.set_verbosity_error()
179
+ transformers.utils.logging.set_verbosity_error()
180
+ diffusers.utils.logging.set_verbosity_error()
181
+
182
+ # If passed along, set the training seed now.
183
+ if args.seed is not None:
184
+ set_seed(args.seed + accelerator.process_index) # added in + term, untested
185
+
186
+ # Handle the repository creation
187
+ if accelerator.is_main_process:
188
+ if args.output_dir is not None:
189
+ os.makedirs(args.output_dir, exist_ok=True)
190
+ ### END ACCELERATOR BOILERPLATE
191
+
192
+
193
+ ### START DIFFUSION BOILERPLATE ###
194
+ # Load scheduler, tokenizer and models.
195
+ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path,
196
+ subfolder="scheduler")
197
+
198
+ # SDXL has two text encoders
199
+ if args.sdxl:
200
+ tokenizer_and_encoder_name = args.pretrained_model_name_or_path
201
+ tokenizer_one = AutoTokenizer.from_pretrained(tokenizer_and_encoder_name, subfolder="tokenizer", revision=args.revision, use_fast=False)
202
+ tokenizer_two = AutoTokenizer.from_pretrained(tokenizer_and_encoder_name, subfolder="tokenizer_2", revision=args.revision, use_fast=False)
203
+ else:
204
+ tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision)
205
+
206
+ # Not sure if we're hitting this at all
207
+ def deepspeed_zero_init_disabled_context_manager():
208
+ """
209
+ returns either a context list that includes one that will disable zero.Init or an empty context list
210
+ """
211
+ deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None
212
+ if deepspeed_plugin is None:
213
+ return []
214
+
215
+ return [deepspeed_plugin.zero3_init_context_manager(enable=False)]
216
+
217
+ with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
218
+ # SDXL has two text encoders
219
+ if args.sdxl:
220
+ # import correct text encoder classes
221
+ text_encoder_cls_one = import_model_class_from_model_name_or_path(tokenizer_and_encoder_name, args.revision, subfolder="text_encoder")
222
+ text_encoder_cls_two = import_model_class_from_model_name_or_path(tokenizer_and_encoder_name, args.revision, subfolder="text_encoder_2")
223
+ text_encoder_one = text_encoder_cls_one.from_pretrained(tokenizer_and_encoder_name, revision=args.revision, subfolder="text_encoder")
224
+ text_encoder_two = text_encoder_cls_two.from_pretrained(tokenizer_and_encoder_name, revision=args.revision, subfolder="text_encoder_2")
225
+ text_encoders = [text_encoder_one, text_encoder_two]
226
+ tokenizers = [tokenizer_one, tokenizer_two]
227
+ else:
228
+ text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision)
229
+ # Can custom-select VAE (used in original SDXL tuning)
230
+ vae_path = (
231
+ args.pretrained_model_name_or_path
232
+ if args.pretrained_vae_model_name_or_path is None
233
+ else args.pretrained_vae_model_name_or_path
234
+ )
235
+ vae = AutoencoderKL.from_pretrained(
236
+ vae_path, subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None, revision=args.revision
237
+ )
238
+ # clone of model
239
+ ref_unet = UNet2DConditionModel.from_pretrained(
240
+ args.pretrained_model_name_or_path,
241
+ subfolder="unet", revision=args.revision
242
+ )
243
+
244
+ unet = UNet2DLoRAConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision)
245
+
246
+ print("======== init_emotion_prompts ================")
247
+ visual_prompts = init_emotion_prompts(args.visual_prompts_dir, is_sdxl=args.sdxl, prompt_len=args.prompt_len).to(accelerator.device)
248
+ # visual_prompts = init_emotion_prompts2(is_sdxl=args.sdxl).to(accelerator.device)
249
+ print("======== init_emotion_prompts done ================")
250
+
251
+ # Freeze vae, text_encoder(s), reference unet
252
+ vae.requires_grad_(False)
253
+ if args.sdxl:
254
+ text_encoder_one.requires_grad_(False)
255
+ text_encoder_two.requires_grad_(False)
256
+ else:
257
+ text_encoder.requires_grad_(False)
258
+ if args.train_method == 'dpo':
259
+ ref_unet.requires_grad_(False)
260
+
261
+ # if args.use_lora:
262
+ # unet.requires_grad_(False)
263
+ # args.lora_rank default 32
264
+ lora_p, negation = add_lora_to_model(unet, dropout=0.1, lora_rank=args.lora_rank, scale=1.0)
265
+
266
+ # xformers efficient attention
267
+ if is_xformers_available():
268
+ import xformers
269
+
270
+ xformers_version = version.parse(xformers.__version__)
271
+ if xformers_version == version.parse("0.0.16"):
272
+ logger.warning(
273
+ "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
274
+ )
275
+ unet.enable_xformers_memory_efficient_attention()
276
+ else:
277
+ raise ValueError("xformers is not available. Make sure it is installed correctly")
278
+
279
+ # BRAM NOTE: We're using >=0.16.0. Below was a bit of a bug hive. I hacked around it, but ideally ref_unet wouldn't
280
+ # be getting passed here
281
+ #
282
+ # `accelerate` 0.16.0 will have better support for customized saving
283
+ if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
284
+ # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
285
+ def save_model_hook(models, weights, output_dir):
286
+
287
+ print("save_model_hook")
288
+ for i in range(len(models)):
289
+ print(models[i].__class__.__name__)
290
+
291
+ if len(models) > 1:
292
+ assert args.train_method == 'dpo' # 2nd model is just ref_unet in DPO case
293
+
294
+ if args.sdxl:
295
+
296
+ # UNet2DLoRAConditionModel
297
+ models[0].save_pretrained(os.path.join(output_dir, 'unet_with_lora'))
298
+ weights.pop()
299
+
300
+ # EmotionEmbedding
301
+ torch.save(models[1].state_dict(), os.path.join(output_dir, "EmotionEmbedding.pth"))
302
+ weights.pop()
303
+
304
+ def load_model_hook(models, input_dir):
305
+
306
+ print("load_model_hook")
307
+ for i in range(len(models)):
308
+ print(models[i].__class__.__name__)
309
+
310
+ if len(models) > 1:
311
+ assert args.train_method == 'dpo' # 2nd model is just ref_unet in DPO case
312
+
313
+ if args.sdxl:
314
+
315
+ # UNet2DLoRAConditionModel
316
+ model = models.pop(0)
317
+ from safetensors.torch import load_file
318
+ # Load the two safetensors files
319
+ state_dict_1 = load_file(os.path.join(input_dir, 'unet_with_lora', 'diffusion_pytorch_model-00001-of-00002.safetensors'))
320
+ state_dict_2 = load_file(os.path.join(input_dir, 'unet_with_lora', 'diffusion_pytorch_model-00002-of-00002.safetensors'))
321
+ # Merge the state dicts
322
+ state_dict = {**state_dict_1, **state_dict_2}
323
+ model.load_state_dict(state_dict)
324
+
325
+ # EmotionEmbedding
326
+ model = models.pop(0)
327
+ state_dict = torch.load(os.path.join(input_dir, "EmotionEmbedding.pth"), weights_only=True)
328
+ model.load_state_dict(state_dict)
329
+
330
+ accelerator.register_save_state_pre_hook(save_model_hook)
331
+ accelerator.register_load_state_pre_hook(load_model_hook)
332
+
333
+ if args.gradient_checkpointing or args.sdxl: # (args.sdxl and ('turbo' not in args.pretrained_model_name_or_path) ):
334
+ print("Enabling gradient checkpointing, either because you asked for this or because you're using SDXL")
335
+ unet.enable_gradient_checkpointing()
336
+
337
+ # Bram Note: haven't touched
338
+ # Enable TF32 for faster training on Ampere GPUs,
339
+ # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
340
+ if args.allow_tf32:
341
+ torch.backends.cuda.matmul.allow_tf32 = True
342
+
343
+ if args.scale_lr:
344
+ args.learning_rate = (
345
+ args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
346
+ )
347
+
348
+ unet_params = []
349
+ lora_params = []
350
+ for name, param in unet.named_parameters():
351
+ if 'lora' in name.lower():
352
+ lora_params.append(param)
353
+ else:
354
+ if param.requires_grad:
355
+ unet_params.append(param)
356
+
357
+ # if args.use_adafactor or args.sdxl:
358
+ print("Using Adafactor either because you asked for it or you're using SDXL")
359
+ param_groups = [
360
+ {
361
+ "params": unet_params,
362
+ "lr": args.learning_rate_unet,
363
+ },
364
+ {
365
+ "params": lora_params,
366
+ "lr": args.learning_rate_lora,
367
+ },
368
+ {
369
+ "params": visual_prompts.parameters(),
370
+ "lr": args.learning_rate_prompts,
371
+ }
372
+ ]
373
+ optimizer = transformers.Adafactor(
374
+ param_groups,
375
+ weight_decay=args.adam_weight_decay,
376
+ clip_threshold=1.0,
377
+ scale_parameter=False,
378
+ relative_step=False
379
+ )
380
+
381
+ # else:
382
+ # optimizer = torch.optim.AdamW([
383
+ # {"params": unet_params, "lr": args.learning_rate,
384
+ # "beta": (args.adam_beta1, args.adam_beta2), "weight_decay": args.adam_weight_decay,
385
+ # "eps": args.adam_epsilon},
386
+ # {"params": lora_params, "lr": args.learning_rate*5,
387
+ # "beta": (args.adam_beta1, args.adam_beta2), "weight_decay": args.adam_weight_decay,
388
+ # "eps": args.adam_epsilon},
389
+ # {"params": visual_prompts.parameters(), "lr": args.learning_rate*5,
390
+ # "beta": (args.adam_beta1, args.adam_beta2), "weight_decay": args.adam_weight_decay,
391
+ # "eps": args.adam_epsilon}
392
+ # ])
393
+
394
+
395
+ # In distributed training, the load_dataset function guarantees that only one local process can concurrently
396
+ # download the dataset.
397
+ dataset = load_dataset(path='parquet', data_files=args.dataset_path)
398
+ caption_column = args.caption_column
399
+
400
+ def tokenize_captions(examples, is_train=True):
401
+ captions = []
402
+ for caption in examples[caption_column]:
403
+ if random.random() < args.proportion_empty_prompts:
404
+ captions.append("")
405
+ elif isinstance(caption, str):
406
+ captions.append(caption)
407
+ elif isinstance(caption, (list, np.ndarray)):
408
+ # take a random caption if there are multiple
409
+ captions.append(random.choice(caption) if is_train else caption[0])
410
+ else:
411
+ raise ValueError(
412
+ f"Caption column `{caption_column}` should contain either strings or lists of strings."
413
+ )
414
+ inputs = tokenizer(
415
+ captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
416
+ )
417
+ return inputs.input_ids
418
+
419
+ # Preprocessing the datasets.
420
+ train_transforms = transforms.Compose(
421
+ [
422
+ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
423
+ transforms.RandomCrop(args.resolution) if args.random_crop else transforms.CenterCrop(args.resolution),
424
+ transforms.Lambda(lambda x: x) if args.no_hflip else transforms.RandomHorizontalFlip(),
425
+ transforms.ToTensor(),
426
+ transforms.Normalize([0.5], [0.5]),
427
+ ]
428
+ )
429
+
430
+ #### START PREPROCESSING/COLLATION ####
431
+ if args.train_method == 'dpo':
432
+ print("Ignoring image_column variable, reading from jpg_0 and jpg_1")
433
+ def preprocess_train(examples):
434
+ all_pixel_values = []
435
+ for col_name in ['jpg_0', 'jpg_1']:
436
+ images = [Image.open(io.BytesIO(im_bytes)).convert("RGB")
437
+ for im_bytes in examples[col_name]]
438
+ pixel_values = [train_transforms(image) for image in images]
439
+ all_pixel_values.append(pixel_values)
440
+ # DOUBLE win images for visual prompts optimization
441
+ # all_pixel_values
442
+ # [[jpg_0,...],[jpg_1,...]]
443
+ # => [[jpg_0,...],[jpg_1,...],[jpg_0,...]]
444
+ all_pixel_values.append(copy.deepcopy(all_pixel_values[0]))
445
+
446
+ # Triple on channel dim, jpg_y then jpg_w and jpg_y
447
+ # im_tup_iterator = [(jpg_0,jpg_1,jpg_0),...]
448
+ im_tup_iterator = zip(*all_pixel_values)
449
+ combined_pixel_values = []
450
+ # item = (jpg_0,jpg_1,jpg_0), label
451
+ for im_tup, label_0 in zip(im_tup_iterator, examples['label_0']):
452
+ # print(len(im_tup), im_tup[0].shape)
453
+ # 3 torch.Size([3, 512, 512])
454
+ if label_0==0 and (not args.choice_model): # don't want to flip things if using choice_model for AI feedback
455
+ im_tup = im_tup[::-1]
456
+ # [3+3+3, 512, 512]
457
+ combined_im = torch.cat(im_tup, dim=0) # no batch dim
458
+ combined_pixel_values.append(combined_im)
459
+ # [[9, 512, 512],...]
460
+ examples["pixel_values"] = combined_pixel_values
461
+ # SDXL takes raw prompts
462
+ if not args.sdxl:
463
+ examples["input_ids"] = tokenize_captions(examples)
464
+ return examples
465
+
466
+ def collate_fn(examples):
467
+ # [bs, 9, 512, 512]
468
+ pixel_values = torch.stack([example["pixel_values"] for example in examples])
469
+ pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
470
+ return_d = {"pixel_values": pixel_values}
471
+ return_d["emotions"] = [example["emotion"] for example in examples]
472
+
473
+ # SDXL takes raw prompts
474
+ if args.sdxl:
475
+ return_d["caption"] = [example["caption"] for example in examples]
476
+ else:
477
+ return_d["input_ids"] = torch.stack([example["input_ids"] for example in examples])
478
+
479
+ if args.choice_model:
480
+ # If using AIF then deliver image data for choice model to determine if should flip pixel values
481
+ for k in ['jpg_0', 'jpg_1']:
482
+ return_d[k] = [Image.open(io.BytesIO( example[k])).convert("RGB")
483
+ for example in examples]
484
+ return_d["caption"] = [example["caption"] for example in examples]
485
+ return return_d
486
+
487
+ ### DATASET #####
488
+ with accelerator.main_process_first():
489
+ if args.max_train_samples is not None:
490
+ dataset[args.split] = dataset[args.split].shuffle(seed=args.seed).select(range(args.max_train_samples))
491
+ train_dataset = dataset[args.split].with_transform(preprocess_train)
492
+
493
+ # DataLoaders creation:
494
+ train_dataloader = torch.utils.data.DataLoader(
495
+ train_dataset,
496
+ shuffle=(args.split=='train'),
497
+ collate_fn=collate_fn,
498
+ batch_size=args.train_batch_size,
499
+ num_workers=args.dataloader_num_workers,
500
+ drop_last=True
501
+ )
502
+ ##### END BIG OLD DATASET BLOCK #####
503
+
504
+ # Scheduler and math around the number of training steps.
505
+ overrode_max_train_steps = False
506
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
507
+ if args.max_train_steps is None:
508
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
509
+ overrode_max_train_steps = True
510
+
511
+ lr_scheduler = get_scheduler(
512
+ args.lr_scheduler,
513
+ optimizer=optimizer,
514
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
515
+ num_training_steps=args.max_train_steps * accelerator.num_processes,
516
+ )
517
+
518
+ #### START ACCELERATOR PREP ####
519
+ unet, visual_prompts, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
520
+ unet, visual_prompts, optimizer, train_dataloader, lr_scheduler
521
+ )
522
+
523
+ weight_dtype = torch.float32
524
+
525
+ # Move text_encode and vae to gpu and cast to weight_dtype
526
+ vae.to(accelerator.device, dtype=weight_dtype)
527
+ if args.sdxl:
528
+ text_encoder_one.to(accelerator.device, dtype=weight_dtype)
529
+ text_encoder_two.to(accelerator.device, dtype=weight_dtype)
530
+ # print("offload vae (this actually stays as CPU)")
531
+ # vae = accelerate.cpu_offload(vae)
532
+ # print("Offloading text encoders to cpu")
533
+ text_encoder_one = accelerate.cpu_offload(text_encoder_one)
534
+ text_encoder_two = accelerate.cpu_offload(text_encoder_two)
535
+ if args.train_method == 'dpo':
536
+ ref_unet.to(accelerator.device, dtype=weight_dtype)
537
+ # print("offload ref_unet")
538
+ # ref_unet = accelerate.cpu_offload(ref_unet)
539
+ else:
540
+ text_encoder.to(accelerator.device, dtype=weight_dtype)
541
+ if args.train_method == 'dpo':
542
+ ref_unet.to(accelerator.device, dtype=weight_dtype)
543
+ ### END ACCELERATOR PREP ###
544
+
545
+
546
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
547
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
548
+ if overrode_max_train_steps:
549
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
550
+ # Afterwards we recalculate our number of training epochs
551
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
552
+
553
+ # We need to initialize the trackers we use, and also store our configuration.
554
+ # The trackers initializes automatically on the main process.
555
+ if accelerator.is_main_process:
556
+ tracker_config = dict(vars(args))
557
+ init_kwargs = {
558
+ "wandb": {
559
+ "name": args.tracker_run_name,
560
+ }
561
+ }
562
+ accelerator.init_trackers(args.tracker_project_name, tracker_config, init_kwargs)
563
+
564
+ # Training initialization
565
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
566
+
567
+ logger.info("***** Running training *****")
568
+ logger.info(f" Num examples = {len(train_dataset)}")
569
+ logger.info(f" Num Epochs = {args.num_train_epochs}")
570
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
571
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
572
+ logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
573
+ logger.info(f" Total optimization steps = {args.max_train_steps}")
574
+ global_step = 0
575
+ first_epoch = 0
576
+
577
+ # Potentially load in the weights and states from a previous save
578
+ if args.resume_from_checkpoint:
579
+ if args.resume_from_checkpoint != "latest":
580
+ path = os.path.basename(args.resume_from_checkpoint)
581
+ else:
582
+ # Get the most recent checkpoint
583
+ dirs = os.listdir(args.output_dir)
584
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
585
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
586
+ path = dirs[-1] if len(dirs) > 0 else None
587
+
588
+ if path is None:
589
+ accelerator.print(
590
+ f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
591
+ )
592
+ args.resume_from_checkpoint = None
593
+ else:
594
+ accelerator.print(f"Resuming from checkpoint {path}")
595
+ accelerator.load_state(os.path.join(args.output_dir, path))
596
+ global_step = int(path.split("-")[1])
597
+
598
+ resume_global_step = global_step * args.gradient_accumulation_steps
599
+ first_epoch = global_step // num_update_steps_per_epoch
600
+ resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
601
+
602
+
603
+ # Bram Note: This was pretty janky to wrangle to look proper but works to my liking now
604
+ progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
605
+ progress_bar.set_description("Steps")
606
+
607
+
608
+ #### START MAIN TRAINING LOOP #####
609
+ for epoch in range(first_epoch, args.num_train_epochs):
610
+ unet.train()
611
+ train_loss = 0.0
612
+ implicit_acc_accumulated_d, implicit_acc_accumulated_c = 0.0, 0.0
613
+ for step, batch in enumerate(train_dataloader):
614
+ # Skip steps until we reach the resumed step
615
+ if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step and (not args.hard_skip_resume):
616
+ if step % args.gradient_accumulation_steps == 0:
617
+ print(f"Dummy processing step {step}, will start training at {resume_step}")
618
+ continue
619
+ with accelerator.accumulate(unet):
620
+ # Convert images to latent space
621
+ if args.train_method == 'dpo':
622
+ # [bs, 9, 512, 512] =>
623
+ # [[bs, 3, 512, 512]*3] =>
624
+ # [bs*3, 3, 512, 512]
625
+ feed_pixel_values = torch.cat(batch["pixel_values"].chunk(3, dim=1))
626
+ elif args.train_method == 'sft':
627
+ feed_pixel_values = batch["pixel_values"]
628
+
629
+ #### Diffusion Stuff ####
630
+ # encode pixels --> latents
631
+ with torch.no_grad():
632
+ latents = vae.encode(feed_pixel_values.to(weight_dtype)).latent_dist.sample()
633
+ latents = latents * vae.config.scaling_factor
634
+
635
+ # Sample noise that we'll add to the latents
636
+ noise = torch.randn_like(latents)
637
+
638
+ bsz = latents.shape[0]
639
+ # Sample a random timestep for each image
640
+ timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
641
+ timesteps = timesteps.long()
642
+
643
+ if args.train_method == 'dpo':
644
+ # make timesteps and noise same for pairs in DPO
645
+ # [bs] => [1/3bs, 1/3bs, 1/3bs] => [1/3bs] => [bs]
646
+ timesteps = timesteps.chunk(3)[0].repeat(3)
647
+ noise = noise.chunk(3)[0].repeat(3, 1, 1, 1)
648
+
649
+ # Add noise to the latents according to the noise magnitude at each timestep
650
+ # (this is the forward diffusion process)
651
+
652
+ noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
653
+ ### START PREP BATCH ###
654
+ if args.sdxl:
655
+ # Get the text embedding for conditioning
656
+ with torch.no_grad():
657
+ # Need to compute "time_ids" https://github.com/huggingface/diffusers/blob/v0.20.0-release/examples/text_to_image/train_text_to_image_sdxl.py#L969
658
+ # for SDXL-base these are torch.tensor([args.resolution, args.resolution, *crop_coords_top_left, *target_size))
659
+ add_time_ids = torch.tensor([args.resolution,
660
+ args.resolution,
661
+ 0,
662
+ 0,
663
+ args.resolution,
664
+ args.resolution],
665
+ dtype=weight_dtype,
666
+ device=accelerator.device)[None, :].repeat(timesteps.size(0), 1)
667
+ prompt_batch = encode_prompt_sdxl(batch,
668
+ text_encoders,
669
+ tokenizers,
670
+ args.proportion_empty_prompts,
671
+ caption_column,
672
+ is_train=True,
673
+ )
674
+ if args.train_method == 'dpo':
675
+ prompt_batch["prompt_embeds"] = prompt_batch["prompt_embeds"].repeat(3, 1, 1)
676
+ prompt_batch["pooled_prompt_embeds"] = prompt_batch["pooled_prompt_embeds"].repeat(3, 1)
677
+ unet_added_conditions = {"time_ids": add_time_ids,
678
+ "text_embeds": prompt_batch["pooled_prompt_embeds"]}
679
+ else: # sd1.5
680
+ # Get the text embedding for conditioning
681
+ encoder_hidden_states = text_encoder(batch["input_ids"])[0]
682
+ if args.train_method == 'dpo':
683
+ encoder_hidden_states = encoder_hidden_states.repeat(2, 1, 1)
684
+
685
+ emotion_visual_prompts = visual_prompts(batch['emotions'])
686
+
687
+ if args.train_method == 'dpo':
688
+ random_emotions = random_sample_emotions(batch['emotions'])
689
+ random_emotion_visual_prompts = visual_prompts(random_emotions)
690
+ emotion_visual_prompts = torch.cat([emotion_visual_prompts, emotion_visual_prompts, random_emotion_visual_prompts], dim=0)
691
+
692
+ #### END PREP BATCH ####
693
+
694
+ assert noise_scheduler.config.prediction_type == "epsilon"
695
+ target = noise
696
+
697
+ # Make the prediction from the model we're learning
698
+ model_batch_args = (
699
+ noisy_latents,
700
+ timesteps,
701
+ prompt_batch["prompt_embeds"] if args.sdxl else encoder_hidden_states
702
+ )
703
+ lora_model_batch_args = (
704
+ noisy_latents,
705
+ timesteps,
706
+ prompt_batch["prompt_embeds"] if args.sdxl else encoder_hidden_states,
707
+ emotion_visual_prompts
708
+ )
709
+ added_cond_kwargs = unet_added_conditions if args.sdxl else None
710
+
711
+ model_pred = unet(
712
+ *lora_model_batch_args,
713
+ added_cond_kwargs = added_cond_kwargs
714
+ ).sample
715
+ #### START LOSS COMPUTATION ####
716
+ if args.train_method == 'sft': # SFT, casting for F.mse_loss
717
+ loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
718
+ elif args.train_method == 'dpo':
719
+ # model_pred and ref_pred will be (2 * LBS) x 4 x latent_spatial_dim x latent_spatial_dim
720
+ # losses are both 2 * LBS
721
+ # 1st half of tensors is preferred (y_w), second half is unpreferred
722
+ model_losses = (model_pred - target).pow(2).mean(dim=[1,2,3])
723
+ model_losses_w, model_losses_l_d, model_losses_l_c = model_losses.chunk(3)
724
+ # below for logging purposes
725
+ raw_model_loss = (model_losses_w.mean() + model_losses_l_d.mean() + model_losses_l_c.mean()) / 3
726
+
727
+ model_diff_d = model_losses_w - model_losses_l_d # These are both LBS (as is t)
728
+ model_diff_c = model_losses_w - model_losses_l_c
729
+
730
+ with torch.no_grad(): # Get the reference policy (unet) prediction
731
+ ref_pred = ref_unet(
732
+ *model_batch_args,
733
+ added_cond_kwargs = added_cond_kwargs
734
+ ).sample.detach()
735
+ ref_losses = (ref_pred - target).pow(2).mean(dim=[1,2,3])
736
+ ref_losses_w, ref_losses_l_d, ref_losses_l_c = ref_losses.chunk(3)
737
+ ref_diff = ref_losses_w - ref_losses_l_d
738
+ raw_ref_loss = ref_losses.mean()
739
+
740
+ scale_term = -0.5 * args.beta_dpo # beta_dpo = 5000
741
+ inside_term_d = scale_term * (model_diff_d - ref_diff)
742
+ implicit_acc_d = (inside_term_d > 0).sum().float() / inside_term_d.size(0)
743
+ # the scale_term may need to be adjusted
744
+ # inside_term_c = -1 * model_diff_c
745
+ inside_term_c = scale_term * model_diff_c
746
+ implicit_acc_c = (inside_term_c > 0).sum().float() / inside_term_c.size(0)
747
+ loss = -1 * 0.5 * (F.logsigmoid(inside_term_d).mean() + F.logsigmoid(inside_term_c).mean())
748
+ #### END LOSS COMPUTATION ###
749
+
750
+ # Gather the losses across all processes for logging
751
+ avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
752
+ train_loss += avg_loss.item() / args.gradient_accumulation_steps
753
+ # Also gather:
754
+ # - model MSE vs reference MSE (useful to observe divergent behavior)
755
+ # - Implicit accuracy
756
+ if args.train_method == 'dpo':
757
+ avg_model_mse = accelerator.gather(raw_model_loss.repeat(args.train_batch_size)).mean().item()
758
+ avg_ref_mse = accelerator.gather(raw_ref_loss.repeat(args.train_batch_size)).mean().item()
759
+ avg_acc_d = accelerator.gather(implicit_acc_d).mean().item()
760
+ avg_acc_c = accelerator.gather(implicit_acc_c).mean().item()
761
+ implicit_acc_accumulated_d += avg_acc_d / args.gradient_accumulation_steps
762
+ implicit_acc_accumulated_c += avg_acc_c / args.gradient_accumulation_steps
763
+
764
+ # Backpropagate
765
+ accelerator.backward(loss)
766
+ if accelerator.sync_gradients:
767
+ if not args.use_adafactor: # Adafactor does itself, maybe could do here to cut down on code
768
+ accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm)
769
+
770
+ # # Print gradients for inspection
771
+ # for name, param in unet.named_parameters():
772
+ # # if "mid_block.attentions.0.transformer_blocks" in name and "lora" in name:
773
+ # if param.grad is not None:
774
+ # print(f"{name} has gradient ✅, grad mean: {param.grad.mean().item()}")
775
+ # else:
776
+ # print(f"{name} has NO gradient ❌")
777
+ # for name, param in visual_prompts.named_parameters():
778
+ # if param.grad is not None:
779
+ # print(f"{name} has gradient ✅, grad mean: {param.grad.mean().item()}")
780
+ # else:
781
+ # print(f"{name} has NO gradient ❌")
782
+
783
+ optimizer.step()
784
+ lr_scheduler.step()
785
+ optimizer.zero_grad()
786
+
787
+ # Checks if the accelerator has just performed an optimization step, if so do "end of batch" logging
788
+ if accelerator.sync_gradients:
789
+ progress_bar.update(1)
790
+ global_step += 1
791
+ accelerator.log({"train_loss": train_loss}, step=global_step)
792
+ if args.train_method == 'dpo':
793
+ accelerator.log({"model_mse_unaccumulated": avg_model_mse}, step=global_step)
794
+ accelerator.log({"ref_mse_unaccumulated": avg_ref_mse}, step=global_step)
795
+ accelerator.log({"avg_acc_d": implicit_acc_accumulated_d}, step=global_step)
796
+ accelerator.log({"avg_acc_c": implicit_acc_accumulated_c}, step=global_step)
797
+ train_loss = 0.0
798
+ implicit_acc_accumulated_d, implicit_acc_accumulated_c = 0.0, 0.0
799
+
800
+ if global_step % args.checkpointing_steps == 0:
801
+ if accelerator.is_main_process:
802
+ save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
803
+ accelerator.save_state(save_path)
804
+ logger.info(f"Saved state to {save_path}")
805
+ logger.info("Pretty sure saving/loading is fixed but proceed cautiously")
806
+
807
+ logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
808
+ if args.train_method == 'dpo':
809
+ logs["implicit_acc_d"] = avg_acc_d
810
+ logs["implicit_acc_c"] = avg_acc_c
811
+ progress_bar.set_postfix(**logs)
812
+
813
+ if global_step >= args.max_train_steps:
814
+ break
815
+
816
+
817
+ # Create the pipeline using the trained modules and save it.
818
+ # This will save to top level of output_dir instead of a checkpoint directory
819
+ accelerator.wait_for_everyone()
820
+ if accelerator.is_main_process:
821
+ unet = accelerator.unwrap_model(unet)
822
+ if args.sdxl:
823
+ # Serialize pipeline.
824
+ if args.use_lora:
825
+ unet_lora_state_dict = convert_state_dict_to_diffusers(
826
+ get_peft_model_state_dict(unet)
827
+ )
828
+ StableDiffusionXLPipeline.save_lora_weights(
829
+ save_directory=os.path.join(args.output_dir, 'lora_weights_64'),
830
+ unet_lora_layers=unet_lora_state_dict,
831
+ safe_serialization=True,
832
+ )
833
+ logger.info("Saved LoRA Model to {}".format(os.path.join(args.output_dir, 'lora_weights_64')))
834
+ else:
835
+ vae = AutoencoderKL.from_pretrained(
836
+ vae_path,
837
+ subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
838
+ revision=args.revision,
839
+ torch_dtype=weight_dtype,
840
+ )
841
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
842
+ args.pretrained_model_name_or_path, unet=unet, vae=vae, revision=args.revision, torch_dtype=weight_dtype
843
+ )
844
+ pipeline.save_pretrained(args.output_dir)
845
+ logger.info("Saved Model to {}".format(args.output_dir))
846
+ else:
847
+ pipeline = StableDiffusionPipeline.from_pretrained(
848
+ args.pretrained_model_name_or_path,
849
+ text_encoder=text_encoder,
850
+ vae=vae,
851
+ unet=unet,
852
+ revision=args.revision,
853
+ )
854
+ if not args.use_lora: pipeline.save_pretrained(args.output_dir)
855
+
856
+
857
+ accelerator.end_training()
858
+
859
+
860
+ if __name__ == "__main__":
861
+ main()
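For readers skimming the training loop above, the DPO-style objective can be restated as a stand-alone function. The sketch below is illustrative only and mirrors the loss already computed in the script (the function name and defaults are mine, not part of the commit); predictions are stacked along the batch dimension as [preferred | dispreferred | wrong-emotion], and beta_dpo defaults to 5000 in the config.

# Illustrative restatement of the preference loss used in the loop above.
import torch
import torch.nn.functional as F

def emotion_dpo_loss(model_pred, ref_pred, target, beta_dpo=5000.0):
    # Per-sample MSE against the noise target, then split into the three stacked groups.
    model_losses = (model_pred - target).pow(2).mean(dim=[1, 2, 3])
    model_w, model_l_d, model_l_c = model_losses.chunk(3)

    ref_losses = (ref_pred - target).pow(2).mean(dim=[1, 2, 3])
    ref_w, ref_l_d, _ = ref_losses.chunk(3)

    scale = -0.5 * beta_dpo
    inside_d = scale * ((model_w - model_l_d) - (ref_w - ref_l_d))  # Diffusion-DPO term vs. the frozen reference UNet
    inside_c = scale * (model_w - model_l_c)                        # emotion-contrast term (no reference difference)
    return -0.5 * (F.logsigmoid(inside_d).mean() + F.logsigmoid(inside_c).mean())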
models/.DS_Store ADDED
Binary file (6.15 kB).
 
models/__init__.py ADDED
File without changes
models/attention_processor.py ADDED
The diff for this file is too large to render.
 
models/lora.py ADDED
@@ -0,0 +1,246 @@
1
+ import math
2
+ import torch
3
+ import torch.nn as nn
4
+ from typing import Set, List, Optional, Type
5
+
6
+ UNET_DEFAULT_TARGET_REPLACE = {"CrossAttention", "Attention", "GEGLU"}
7
+ DEFAULT_TARGET_REPLACE = UNET_DEFAULT_TARGET_REPLACE
8
+
9
+
10
+ class LoraInjectedLinear(nn.Module):
11
+ def __init__(
12
+ self, in_features, out_features, bias=False, r=4, dropout_p=0.1, scale=1.0
13
+ ):
14
+ super().__init__()
15
+
16
+ if r > min(in_features, out_features):
17
+ #raise ValueError(
18
+ # f"LoRA rank {r} must be less or equal than {min(in_features, out_features)}"
19
+ #)
20
+ print(f"LoRA rank {r} is too large. setting to: {min(in_features, out_features)}")
21
+ r = min(in_features, out_features)
22
+
23
+ self.r = r
24
+ self.linear = nn.Linear(in_features, out_features, bias)
25
+ self.lora_down = nn.Linear(in_features, r, bias=False)
26
+ self.dropout = nn.Dropout(dropout_p)
27
+ self.lora_up = nn.Linear(r, out_features, bias=False)
28
+ self.scale = scale
29
+ self.selector = nn.Identity()
30
+
31
+ nn.init.normal_(self.lora_down.weight, std=1 / r)
32
+ nn.init.zeros_(self.lora_up.weight)
33
+
34
+ def update_step(self, cur_step):
35
+ self.cur_step = cur_step
36
+
37
+ def forward(self, input, return_format='linear'):
38
+ assert return_format in ['linear', 'lora', 'added', 'full']
39
+ if return_format == 'linear': return self.linear(input)
40
+ elif return_format == 'lora': return self.dropout(self.lora_up(self.selector(self.lora_down(input))))
41
+ elif return_format == 'added':
42
+ return (
43
+ self.linear(input)
44
+ + self.dropout(self.lora_up(self.selector(self.lora_down(input))))
45
+ * self.scale
46
+ )
47
+ linear_res = self.linear(input)
48
+ lora_res = self.dropout(self.lora_up(self.selector(self.lora_down(input))))
49
+ return linear_res, lora_res, self.scale
50
+
51
+ def realize_as_lora(self):
52
+ return self.lora_up.weight.data * self.scale, self.lora_down.weight.data
53
+
54
+ def set_selector_from_diag(self, diag: torch.Tensor):
55
+ # diag is a 1D tensor of size (r,)
56
+ assert diag.shape == (self.r,)
57
+ self.selector = nn.Linear(self.r, self.r, bias=False)
58
+ self.selector.weight.data = torch.diag(diag)
59
+ self.selector.weight.data = self.selector.weight.data.to(
60
+ self.lora_up.weight.device
61
+ ).to(self.lora_up.weight.dtype)
62
+
63
+
64
+ def _find_modules_v2(
65
+ model,
66
+ ancestor_class: Optional[Set[str]] = None,
67
+ search_name = 'attn2',
68
+ include_names = ['to_q', 'to_k', 'to_v'],
69
+ search_class: List[Type[nn.Module]] = [nn.Linear],
70
+ exclude_children_of: Optional[List[Type[nn.Module]]] = [
71
+ LoraInjectedLinear,
72
+ ],
73
+ ):
74
+ """
75
+ Find all modules of a certain class (or union of classes) that are direct or
76
+ indirect descendants of other modules of a certain class (or union of classes).
77
+
78
+ Returns all matching modules, along with the parent of those moduless and the
79
+ names they are referenced by.
80
+ """
81
+
82
+ # Get the targets we should replace all linears under
83
+ if ancestor_class is not None:
84
+ ancestors = (
85
+ module
86
+ for module in model.modules()
87
+ if module.__class__.__name__ in ancestor_class
88
+ )
89
+ else:
90
+ # this, incase you want to naively iterate over all modules.
91
+ ancestors = [module for module in model.modules()]
92
+
93
+ # For each target find every linear_class module that isn't a child of a LoraInjectedLinear
94
+ for ancestor in ancestors:
95
+ for fullname, module in ancestor.named_modules():
96
+ if search_name in fullname:
97
+ *path, base_name = fullname.split('.')
98
+ parent = ancestor
99
+ while path:
100
+ parent = parent.get_submodule(path.pop(0))
101
+ if base_name in include_names:
102
+ assert isinstance(module, search_class[0])
103
+ yield parent, base_name, module
104
+ # if any([isinstance(module, _class) for _class in search_class]):
105
+ # # Find the direct parent if this is a descendant, not a child, of target
106
+ # *path, name = fullname.split(".")
107
+ # parent = ancestor
108
+ # while path:
109
+ # parent = parent.get_submodule(path.pop(0))
110
+ # # Skip this linear if it's a child of a LoraInjectedLinear
111
+ # if exclude_children_of and any(
112
+ # [isinstance(parent, _class) for _class in exclude_children_of]
113
+ # ):
114
+ # continue
115
+ # # Otherwise, yield it
116
+ # yield parent, name, module
117
+
118
+
119
+ def inject_trainable_lora(
120
+ model: nn.Module,
121
+ target_replace_module: Set[str] = DEFAULT_TARGET_REPLACE,
122
+ lora_rank: int = 4,
123
+ loras=None, # path to lora .pt
124
+ verbose: bool = False,
125
+ dropout: float = 0.0,
126
+ scale: float = 1.0,
127
+ ):
128
+ """
129
+ Inject LoRA layers into the model and return the LoRA parameter groups together with the replaced module names.
130
+ """
131
+
132
+ require_grad_params = []
133
+ names = []
134
+
135
+ if loras is not None:
136
+ loras = torch.load(loras)
137
+
138
+ for _module, name, _child_module in _find_modules_v2(
139
+ model, target_replace_module, search_class=[nn.Linear]
140
+ ):
141
+ weight = _child_module.weight
142
+ bias = _child_module.bias
143
+ if verbose:
144
+ print("LoRA Injection : injecting lora into ", name)
145
+ print("LoRA Injection : weight shape", weight.shape)
146
+ _tmp = LoraInjectedLinear(
147
+ _child_module.in_features,
148
+ _child_module.out_features,
149
+ _child_module.bias is not None,
150
+ r=lora_rank,
151
+ dropout_p=dropout,
152
+ scale=scale,
153
+ )
154
+ _tmp.linear.weight = weight
155
+ if bias is not None:
156
+ _tmp.linear.bias = bias
157
+
158
+ # switch the module
159
+ _tmp.to(_child_module.weight.device).to(_child_module.weight.dtype)
160
+ _module._modules[name] = _tmp
161
+
162
+ require_grad_params.append(_module._modules[name].lora_up.parameters())
163
+ require_grad_params.append(_module._modules[name].lora_down.parameters())
164
+
165
+ if loras is not None:
166
+ _module._modules[name].lora_up.weight = loras.pop(0)
167
+ _module._modules[name].lora_down.weight = loras.pop(0)
168
+
169
+ _module._modules[name].lora_up.weight.requires_grad = True
170
+ _module._modules[name].lora_down.weight.requires_grad = True
171
+ names.append(name)
172
+
173
+ return require_grad_params, names
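+ # Hedged usage sketch (illustrative; `unet` is a stand-in for any nn.Module with attention layers,
+ # not something defined in this file). The returned parameter generators can be chained directly
+ # into an optimizer:
+ #
+ #   import itertools
+ #   lora_params, lora_names = inject_trainable_lora(unet, lora_rank=4, scale=1.0)
+ #   optimizer = torch.optim.AdamW(itertools.chain(*lora_params), lr=1e-4)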
174
+
175
+
176
+ lora_args = dict(
177
+ model = None,
178
+ loras = None,
179
+ target_replace_module = [],
180
+ lora_rank = 4,
181
+ dropout = 0,
182
+ scale = 0
183
+ )
184
+
185
+
186
+ def extract_lora_ups_down(model, target_replace_module=DEFAULT_TARGET_REPLACE):
187
+ loras = []
188
+ for _m, _n, _child_module in _find_modules_v2(
189
+ model,
190
+ target_replace_module,
191
+ search_class=[LoraInjectedLinear],
192
+ ):
193
+ loras.append((_child_module.lora_up, _child_module.lora_down))
194
+
195
+ if len(loras) == 0:
196
+ raise ValueError("No lora injected.")
197
+
198
+ return loras
199
+
200
+
201
+ def do_lora_injection(model, replace_modules, lora_loader_args=None):
202
+ REPLACE_MODULES = replace_modules
203
+
204
+ params = None
205
+ negation = None
206
+ injector_args = lora_loader_args
207
+
208
+ params, negation = inject_trainable_lora(**injector_args)
209
+
210
+ success_inject = True
211
+ for _up, _down in extract_lora_ups_down(model, target_replace_module=REPLACE_MODULES):
212
+
213
+ if not all(x is not None for x in [_up, _down]): success_inject = False
214
+
215
+ if success_inject:
216
+ print(f"Lora successfully injected into {model.__class__.__name__}.")
217
+ else:
218
+ print(f'Failed to inject LoRA into {model.__class__.__name__}')
219
+ exit(-1)
220
+
221
+ return params, negation
222
+
223
+
224
+ def add_lora_to_model(model, dropout=0.0, lora_rank=16,
225
+ scale=0, replace_modules: List[str] = ["Transformer2DModel"]):
226
+ '''
227
+ replace_modules needs to be set to the proper block class names for the target model
228
+ '''
229
+ params = None
230
+ negation = None
231
+
232
+ lora_loader_args = lora_args.copy()
233
+ lora_loader_args.update({
234
+ "model": model,
235
+ "loras": None,
236
+ "target_replace_module": replace_modules,
237
+ "lora_rank": lora_rank,
238
+ "dropout": dropout,
239
+ "scale": scale
240
+ })
241
+
242
+ params, negation = do_lora_injection(model, replace_modules, lora_loader_args=lora_loader_args)
243
+
244
+ params = model if params is None else params
245
+ return params, negation
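+ # Hedged end-to-end sketch (illustrative only; `model` is assumed to be a diffusers-style
+ # transformer or UNet, and the output file name is arbitrary):
+ #
+ #   params, _ = add_lora_to_model(model, dropout=0.0, lora_rank=16, scale=1.0,
+ #                                 replace_modules=["Transformer2DModel"])
+ #   # After training, the up/down pairs can be collected and saved in the same alternating
+ #   # order that inject_trainable_lora expects when reloading through its `loras=` argument:
+ #   weights = []
+ #   for up, down in extract_lora_ups_down(model, target_replace_module=["Transformer2DModel"]):
+ #       weights += [up.weight, down.weight]
+ #   torch.save(weights, "lora_weights.pt")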
246
+
models/mm_attention.py ADDED
@@ -0,0 +1,1254 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Dict, List, Optional, Tuple
15
+
16
+ import torch
17
+ import torch.nn.functional as F
18
+ from torch import nn
19
+
20
+ from diffusers.utils import deprecate, logging
21
+ from diffusers.utils.torch_utils import maybe_allow_in_graph
22
+ from diffusers.models.activations import GEGLU, GELU, ApproximateGELU, FP32SiLU, LinearActivation, SwiGLU
23
+ from models.attention_processor import Attention, JointAttnProcessor2_0
24
+ from diffusers.models.embeddings import SinusoidalPositionalEmbedding
25
+ from diffusers.models.normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm, SD35AdaLayerNormZeroX
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ def _chunked_feed_forward(ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int):
32
+ # "feed_forward_chunk_size" can be used to save memory
33
+ if hidden_states.shape[chunk_dim] % chunk_size != 0:
34
+ raise ValueError(
35
+ f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
36
+ )
37
+
38
+ num_chunks = hidden_states.shape[chunk_dim] // chunk_size
39
+ ff_output = torch.cat(
40
+ [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
41
+ dim=chunk_dim,
42
+ )
43
+ return ff_output
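+ # For example, hidden_states of shape (2, 4096, 320) with chunk_dim=1 and chunk_size=1024 is
+ # processed as four (2, 1024, 320) feed-forward passes and concatenated back along dim 1.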
44
+
45
+
46
+ @maybe_allow_in_graph
47
+ class GatedSelfAttentionDense(nn.Module):
48
+ r"""
49
+ A gated self-attention dense layer that combines visual features and object features.
50
+
51
+ Parameters:
52
+ query_dim (`int`): The number of channels in the query.
53
+ context_dim (`int`): The number of channels in the context.
54
+ n_heads (`int`): The number of heads to use for attention.
55
+ d_head (`int`): The number of channels in each head.
56
+ """
57
+
58
+ def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int):
59
+ super().__init__()
60
+
61
+ # we need a linear projection since we need cat visual feature and obj feature
62
+ self.linear = nn.Linear(context_dim, query_dim)
63
+
64
+ self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
65
+ self.ff = FeedForward(query_dim, activation_fn="geglu")
66
+
67
+ self.norm1 = nn.LayerNorm(query_dim)
68
+ self.norm2 = nn.LayerNorm(query_dim)
69
+
70
+ self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0)))
71
+ self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0)))
72
+
73
+ self.enabled = True
74
+
75
+ def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor:
76
+ if not self.enabled:
77
+ return x
78
+
79
+ n_visual = x.shape[1]
80
+ objs = self.linear(objs)
81
+
82
+ x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :]
83
+ x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))
84
+
85
+ return x
86
+
87
+
88
+ @maybe_allow_in_graph
89
+ class JointTransformerBlock(nn.Module):
90
+ r"""
91
+ A Transformer block following the MMDiT architecture, introduced in Stable Diffusion 3.
92
+
93
+ Reference: https://arxiv.org/abs/2403.03206
94
+
95
+ Parameters:
96
+ dim (`int`): The number of channels in the input and output.
97
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
98
+ attention_head_dim (`int`): The number of channels in each head.
99
+ context_pre_only (`bool`): Boolean to determine if we should add some blocks associated with the
100
+ processing of `context` conditions.
101
+ """
102
+
103
+ def __init__(
104
+ self,
105
+ dim: int,
106
+ num_attention_heads: int,
107
+ attention_head_dim: int,
108
+ context_pre_only: bool = False,
109
+ qk_norm: Optional[str] = None,
110
+ use_dual_attention: bool = False,
111
+ ):
112
+ super().__init__()
113
+
114
+ self.use_dual_attention = use_dual_attention
115
+ self.context_pre_only = context_pre_only
116
+ context_norm_type = "ada_norm_continous" if context_pre_only else "ada_norm_zero"
117
+
118
+ if use_dual_attention:
119
+ self.norm1 = SD35AdaLayerNormZeroX(dim)
120
+ else:
121
+ self.norm1 = AdaLayerNormZero(dim)
122
+
123
+ if context_norm_type == "ada_norm_continous":
124
+ self.norm1_context = AdaLayerNormContinuous(
125
+ dim, dim, elementwise_affine=False, eps=1e-6, bias=True, norm_type="layer_norm"
126
+ )
127
+ elif context_norm_type == "ada_norm_zero":
128
+ self.norm1_context = AdaLayerNormZero(dim)
129
+ else:
130
+ raise ValueError(
131
+ f"Unknown context_norm_type: {context_norm_type}, currently only support `ada_norm_continous`, `ada_norm_zero`"
132
+ )
133
+
134
+ if hasattr(F, "scaled_dot_product_attention"):
135
+ processor = JointAttnProcessor2_0()
136
+ else:
137
+ raise ValueError(
138
+ "The current PyTorch version does not support the `scaled_dot_product_attention` function."
139
+ )
140
+
141
+ self.attn = Attention(
142
+ query_dim=dim,
143
+ cross_attention_dim=None,
144
+ added_kv_proj_dim=dim,
145
+ dim_head=attention_head_dim,
146
+ heads=num_attention_heads,
147
+ out_dim=dim,
148
+ context_pre_only=context_pre_only,
149
+ bias=True,
150
+ processor=processor,
151
+ qk_norm=qk_norm,
152
+ eps=1e-6,
153
+ )
154
+
155
+ if use_dual_attention:
156
+ self.attn2 = Attention(
157
+ query_dim=dim,
158
+ cross_attention_dim=None,
159
+ dim_head=attention_head_dim,
160
+ heads=num_attention_heads,
161
+ out_dim=dim,
162
+ bias=True,
163
+ processor=processor,
164
+ qk_norm=qk_norm,
165
+ eps=1e-6,
166
+ )
167
+ else:
168
+ self.attn2 = None
169
+
170
+ self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
171
+ self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
172
+
173
+ if not context_pre_only:
174
+ self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
175
+ self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
176
+ else:
177
+ self.norm2_context = None
178
+ self.ff_context = None
179
+
180
+ # let chunk size default to None
181
+ self._chunk_size = None
182
+ self._chunk_dim = 0
183
+
184
+ # Copied from diffusers.models.attention.BasicTransformerBlock.set_chunk_feed_forward
185
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
186
+ # Sets chunk feed-forward
187
+ self._chunk_size = chunk_size
188
+ self._chunk_dim = dim
189
+
190
+ def forward(
191
+ self,
192
+ hidden_states: torch.FloatTensor,
193
+ encoder_hidden_states: torch.FloatTensor,
194
+ temb: torch.FloatTensor,
195
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
196
+ ):
197
+ joint_attention_kwargs = joint_attention_kwargs or {}
198
+ if self.use_dual_attention:
199
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp, norm_hidden_states2, gate_msa2 = self.norm1(
200
+ hidden_states, emb=temb
201
+ )
202
+ else:
203
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
204
+
205
+ if self.context_pre_only:
206
+ norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states, temb)
207
+ else:
208
+ norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
209
+ encoder_hidden_states, emb=temb
210
+ )
211
+
212
+ # Attention.
213
+ attn_output, context_attn_output = self.attn(
214
+ hidden_states=norm_hidden_states,
215
+ encoder_hidden_states=norm_encoder_hidden_states,
216
+ **joint_attention_kwargs,
217
+ )
218
+
219
+ # Process attention outputs for the `hidden_states`.
220
+ attn_output = gate_msa.unsqueeze(1) * attn_output
221
+ hidden_states = hidden_states + attn_output
222
+
223
+ if self.use_dual_attention:
224
+ attn_output2 = self.attn2(hidden_states=norm_hidden_states2, **joint_attention_kwargs)
225
+ attn_output2 = gate_msa2.unsqueeze(1) * attn_output2
226
+ hidden_states = hidden_states + attn_output2
227
+
228
+ norm_hidden_states = self.norm2(hidden_states)
229
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
230
+ if self._chunk_size is not None:
231
+ # "feed_forward_chunk_size" can be used to save memory
232
+ ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
233
+ else:
234
+ ff_output = self.ff(norm_hidden_states)
235
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
236
+
237
+ hidden_states = hidden_states + ff_output
238
+
239
+ # Process attention outputs for the `encoder_hidden_states`.
240
+ if self.context_pre_only:
241
+ encoder_hidden_states = None
242
+ else:
243
+ context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output
244
+ encoder_hidden_states = encoder_hidden_states + context_attn_output
245
+
246
+ norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
247
+ norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
248
+ if self._chunk_size is not None:
249
+ # "feed_forward_chunk_size" can be used to save memory
250
+ context_ff_output = _chunked_feed_forward(
251
+ self.ff_context, norm_encoder_hidden_states, self._chunk_dim, self._chunk_size
252
+ )
253
+ else:
254
+ context_ff_output = self.ff_context(norm_encoder_hidden_states)
255
+ encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output
256
+
257
+ return encoder_hidden_states, hidden_states
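+ # The block returns (encoder_hidden_states, hidden_states) in that order; encoder_hidden_states
+ # is None when context_pre_only is True, since the context stream is not updated in that case.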
258
+
259
+
260
+ @maybe_allow_in_graph
261
+ class BasicTransformerBlock(nn.Module):
262
+ r"""
263
+ A basic Transformer block.
264
+
265
+ Parameters:
266
+ dim (`int`): The number of channels in the input and output.
267
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
268
+ attention_head_dim (`int`): The number of channels in each head.
269
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
270
+ cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
271
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
272
+ num_embeds_ada_norm (:
273
+ obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
274
+ attention_bias (:
275
+ obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
276
+ only_cross_attention (`bool`, *optional*):
277
+ Whether to use only cross-attention layers. In this case two cross attention layers are used.
278
+ double_self_attention (`bool`, *optional*):
279
+ Whether to use two self-attention layers. In this case no cross attention layers are used.
280
+ upcast_attention (`bool`, *optional*):
281
+ Whether to upcast the attention computation to float32. This is useful for mixed precision training.
282
+ norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
283
+ Whether to use learnable elementwise affine parameters for normalization.
284
+ norm_type (`str`, *optional*, defaults to `"layer_norm"`):
285
+ The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`.
286
+ final_dropout (`bool` *optional*, defaults to False):
287
+ Whether to apply a final dropout after the last feed-forward layer.
288
+ attention_type (`str`, *optional*, defaults to `"default"`):
289
+ The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`.
290
+ positional_embeddings (`str`, *optional*, defaults to `None`):
291
+ The type of positional embeddings to apply to.
292
+ num_positional_embeddings (`int`, *optional*, defaults to `None`):
293
+ The maximum number of positional embeddings to apply.
294
+ """
295
+
296
+ def __init__(
297
+ self,
298
+ dim: int,
299
+ num_attention_heads: int,
300
+ attention_head_dim: int,
301
+ dropout=0.0,
302
+ cross_attention_dim: Optional[int] = None,
303
+ activation_fn: str = "geglu",
304
+ num_embeds_ada_norm: Optional[int] = None,
305
+ attention_bias: bool = False,
306
+ only_cross_attention: bool = False,
307
+ double_self_attention: bool = False,
308
+ upcast_attention: bool = False,
309
+ norm_elementwise_affine: bool = True,
310
+ norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen'
311
+ norm_eps: float = 1e-5,
312
+ final_dropout: bool = False,
313
+ attention_type: str = "default",
314
+ positional_embeddings: Optional[str] = None,
315
+ num_positional_embeddings: Optional[int] = None,
316
+ ada_norm_continous_conditioning_embedding_dim: Optional[int] = None,
317
+ ada_norm_bias: Optional[int] = None,
318
+ ff_inner_dim: Optional[int] = None,
319
+ ff_bias: bool = True,
320
+ attention_out_bias: bool = True,
321
+ ):
322
+ super().__init__()
323
+ self.dim = dim
324
+ self.num_attention_heads = num_attention_heads
325
+ self.attention_head_dim = attention_head_dim
326
+ self.dropout = dropout
327
+ self.cross_attention_dim = cross_attention_dim
328
+ self.activation_fn = activation_fn
329
+ self.attention_bias = attention_bias
330
+ self.double_self_attention = double_self_attention
331
+ self.norm_elementwise_affine = norm_elementwise_affine
332
+ self.positional_embeddings = positional_embeddings
333
+ self.num_positional_embeddings = num_positional_embeddings
334
+ self.only_cross_attention = only_cross_attention
335
+
336
+ # We keep these boolean flags for backward-compatibility.
337
+ self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
338
+ self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
339
+ self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
340
+ self.use_layer_norm = norm_type == "layer_norm"
341
+ self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous"
342
+
343
+ if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
344
+ raise ValueError(
345
+ f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
346
+ f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
347
+ )
348
+
349
+ self.norm_type = norm_type
350
+ self.num_embeds_ada_norm = num_embeds_ada_norm
351
+
352
+ if positional_embeddings and (num_positional_embeddings is None):
353
+ raise ValueError(
354
+ "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined."
355
+ )
356
+
357
+ if positional_embeddings == "sinusoidal":
358
+ self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
359
+ else:
360
+ self.pos_embed = None
361
+
362
+ # Define 3 blocks. Each block has its own normalization layer.
363
+ # 1. Self-Attn
364
+ if norm_type == "ada_norm":
365
+ self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
366
+ elif norm_type == "ada_norm_zero":
367
+ self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
368
+ elif norm_type == "ada_norm_continuous":
369
+ self.norm1 = AdaLayerNormContinuous(
370
+ dim,
371
+ ada_norm_continous_conditioning_embedding_dim,
372
+ norm_elementwise_affine,
373
+ norm_eps,
374
+ ada_norm_bias,
375
+ "rms_norm",
376
+ )
377
+ else:
378
+ self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
379
+
380
+ self.attn1 = Attention(
381
+ query_dim=dim,
382
+ heads=num_attention_heads,
383
+ dim_head=attention_head_dim,
384
+ dropout=dropout,
385
+ bias=attention_bias,
386
+ cross_attention_dim=cross_attention_dim if only_cross_attention else None,
387
+ upcast_attention=upcast_attention,
388
+ out_bias=attention_out_bias,
389
+ )
390
+
391
+ # 2. Cross-Attn
392
+ if cross_attention_dim is not None or double_self_attention:
393
+ # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
394
+ # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
395
+ # the second cross attention block.
396
+ if norm_type == "ada_norm":
397
+ self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm)
398
+ elif norm_type == "ada_norm_continuous":
399
+ self.norm2 = AdaLayerNormContinuous(
400
+ dim,
401
+ ada_norm_continous_conditioning_embedding_dim,
402
+ norm_elementwise_affine,
403
+ norm_eps,
404
+ ada_norm_bias,
405
+ "rms_norm",
406
+ )
407
+ else:
408
+ self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
409
+
410
+ self.attn2 = Attention(
411
+ query_dim=dim,
412
+ cross_attention_dim=cross_attention_dim if not double_self_attention else None,
413
+ heads=num_attention_heads,
414
+ dim_head=attention_head_dim,
415
+ dropout=dropout,
416
+ bias=attention_bias,
417
+ upcast_attention=upcast_attention,
418
+ out_bias=attention_out_bias,
419
+ ) # is self-attn if encoder_hidden_states is none
420
+ else:
421
+ if norm_type == "ada_norm_single": # For Latte
422
+ self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
423
+ else:
424
+ self.norm2 = None
425
+ self.attn2 = None
426
+
427
+ # 3. Feed-forward
428
+ if norm_type == "ada_norm_continuous":
429
+ self.norm3 = AdaLayerNormContinuous(
430
+ dim,
431
+ ada_norm_continous_conditioning_embedding_dim,
432
+ norm_elementwise_affine,
433
+ norm_eps,
434
+ ada_norm_bias,
435
+ "layer_norm",
436
+ )
437
+
438
+ elif norm_type in ["ada_norm_zero", "ada_norm", "layer_norm"]:
439
+ self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
440
+ elif norm_type == "layer_norm_i2vgen":
441
+ self.norm3 = None
442
+
443
+ self.ff = FeedForward(
444
+ dim,
445
+ dropout=dropout,
446
+ activation_fn=activation_fn,
447
+ final_dropout=final_dropout,
448
+ inner_dim=ff_inner_dim,
449
+ bias=ff_bias,
450
+ )
451
+
452
+ # 4. Fuser
453
+ if attention_type == "gated" or attention_type == "gated-text-image":
454
+ self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim)
455
+
456
+ # 5. Scale-shift for PixArt-Alpha.
457
+ if norm_type == "ada_norm_single":
458
+ self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)
459
+
460
+ # let chunk size default to None
461
+ self._chunk_size = None
462
+ self._chunk_dim = 0
463
+
464
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
465
+ # Sets chunk feed-forward
466
+ self._chunk_size = chunk_size
467
+ self._chunk_dim = dim
468
+
469
+ def forward(
470
+ self,
471
+ hidden_states: torch.Tensor,
472
+ attention_mask: Optional[torch.Tensor] = None,
473
+ encoder_hidden_states: Optional[torch.Tensor] = None,
474
+ encoder_lora_states: Optional[torch.Tensor] = None,
475
+ encoder_attention_mask: Optional[torch.Tensor] = None,
476
+ timestep: Optional[torch.LongTensor] = None,
477
+ cross_attention_kwargs: Dict[str, Any] = None,
478
+ class_labels: Optional[torch.LongTensor] = None,
479
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
480
+ ) -> torch.Tensor:
481
+ if cross_attention_kwargs is not None:
482
+ if cross_attention_kwargs.get("scale", None) is not None:
483
+ logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
484
+
485
+ # Notice that normalization is always applied before the real computation in the following blocks.
486
+ # 0. Self-Attention
487
+ batch_size = hidden_states.shape[0]
488
+
489
+ if self.norm_type == "ada_norm":
490
+ norm_hidden_states = self.norm1(hidden_states, timestep)
491
+ elif self.norm_type == "ada_norm_zero":
492
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
493
+ hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
494
+ )
495
+ elif self.norm_type in ["layer_norm", "layer_norm_i2vgen"]:
496
+ norm_hidden_states = self.norm1(hidden_states)
497
+ elif self.norm_type == "ada_norm_continuous":
498
+ norm_hidden_states = self.norm1(hidden_states, added_cond_kwargs["pooled_text_emb"])
499
+ elif self.norm_type == "ada_norm_single":
500
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
501
+ self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
502
+ ).chunk(6, dim=1)
503
+ norm_hidden_states = self.norm1(hidden_states)
504
+ norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
505
+ else:
506
+ raise ValueError("Incorrect norm used")
507
+
508
+ if self.pos_embed is not None:
509
+ norm_hidden_states = self.pos_embed(norm_hidden_states)
510
+
511
+ # 1. Prepare GLIGEN inputs
512
+ cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
513
+ gligen_kwargs = cross_attention_kwargs.pop("gligen", None)
514
+
515
+ attn_output = self.attn1(
516
+ norm_hidden_states,
517
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
518
+ attention_mask=attention_mask,
519
+ **cross_attention_kwargs,
520
+ )
521
+
522
+ if self.norm_type == "ada_norm_zero":
523
+ attn_output = gate_msa.unsqueeze(1) * attn_output
524
+ elif self.norm_type == "ada_norm_single":
525
+ attn_output = gate_msa * attn_output
526
+
527
+ hidden_states = attn_output + hidden_states
528
+ if hidden_states.ndim == 4:
529
+ hidden_states = hidden_states.squeeze(1)
530
+
531
+ # 1.2 GLIGEN Control
532
+ if gligen_kwargs is not None:
533
+ hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
534
+
535
+ # 3. Cross-Attention
536
+ if self.attn2 is not None:
537
+ if self.norm_type == "ada_norm":
538
+ norm_hidden_states = self.norm2(hidden_states, timestep)
539
+ elif self.norm_type in ["ada_norm_zero", "layer_norm", "layer_norm_i2vgen"]:
540
+ norm_hidden_states = self.norm2(hidden_states)
541
+ elif self.norm_type == "ada_norm_single":
542
+ # For PixArt norm2 isn't applied here:
543
+ # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
544
+ norm_hidden_states = hidden_states
545
+ elif self.norm_type == "ada_norm_continuous":
546
+ norm_hidden_states = self.norm2(hidden_states, added_cond_kwargs["pooled_text_emb"])
547
+ else:
548
+ raise ValueError("Incorrect norm")
549
+
550
+ if self.pos_embed is not None and self.norm_type != "ada_norm_single":
551
+ norm_hidden_states = self.pos_embed(norm_hidden_states)
552
+
553
+ attn_output = self.attn2(
554
+ norm_hidden_states,
555
+ encoder_hidden_states=encoder_hidden_states,
556
+ encoder_lora_states=encoder_lora_states,
557
+ attention_mask=encoder_attention_mask,
558
+ **cross_attention_kwargs,
559
+ )
560
+ hidden_states = attn_output + hidden_states
561
+
562
+ # 4. Feed-forward
563
+ # i2vgen doesn't have this norm 🤷‍♂️
564
+ if self.norm_type == "ada_norm_continuous":
565
+ norm_hidden_states = self.norm3(hidden_states, added_cond_kwargs["pooled_text_emb"])
566
+ elif not self.norm_type == "ada_norm_single":
567
+ norm_hidden_states = self.norm3(hidden_states)
568
+
569
+ if self.norm_type == "ada_norm_zero":
570
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
571
+
572
+ if self.norm_type == "ada_norm_single":
573
+ norm_hidden_states = self.norm2(hidden_states)
574
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
575
+
576
+ if self._chunk_size is not None:
577
+ # "feed_forward_chunk_size" can be used to save memory
578
+ ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
579
+ else:
580
+ ff_output = self.ff(norm_hidden_states)
581
+
582
+ if self.norm_type == "ada_norm_zero":
583
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
584
+ elif self.norm_type == "ada_norm_single":
585
+ ff_output = gate_mlp * ff_output
586
+
587
+ hidden_states = ff_output + hidden_states
588
+ if hidden_states.ndim == 4:
589
+ hidden_states = hidden_states.squeeze(1)
590
+
591
+ return hidden_states
592
+
593
+
594
+ class LuminaFeedForward(nn.Module):
595
+ r"""
596
+ A feed-forward layer.
597
+
598
+ Parameters:
599
+ hidden_size (`int`):
600
+ The dimensionality of the hidden layers in the model. This parameter determines the width of the model's
601
+ hidden representations.
602
+ intermediate_size (`int`): The intermediate dimension of the feedforward layer.
603
+ multiple_of (`int`, *optional*): Value to ensure hidden dimension is a multiple
604
+ of this value.
605
+ ffn_dim_multiplier (float, *optional*): Custom multiplier for hidden
606
+ dimension. Defaults to None.
607
+ """
608
+
609
+ def __init__(
610
+ self,
611
+ dim: int,
612
+ inner_dim: int,
613
+ multiple_of: Optional[int] = 256,
614
+ ffn_dim_multiplier: Optional[float] = None,
615
+ ):
616
+ super().__init__()
617
+ inner_dim = int(2 * inner_dim / 3)
618
+ # custom hidden_size factor multiplier
619
+ if ffn_dim_multiplier is not None:
620
+ inner_dim = int(ffn_dim_multiplier * inner_dim)
621
+ inner_dim = multiple_of * ((inner_dim + multiple_of - 1) // multiple_of)
622
+
623
+ self.linear_1 = nn.Linear(
624
+ dim,
625
+ inner_dim,
626
+ bias=False,
627
+ )
628
+ self.linear_2 = nn.Linear(
629
+ inner_dim,
630
+ dim,
631
+ bias=False,
632
+ )
633
+ self.linear_3 = nn.Linear(
634
+ dim,
635
+ inner_dim,
636
+ bias=False,
637
+ )
638
+ self.silu = FP32SiLU()
639
+
640
+ def forward(self, x):
641
+ return self.linear_2(self.silu(self.linear_1(x)) * self.linear_3(x))
642
+
643
+
644
+ @maybe_allow_in_graph
645
+ class TemporalBasicTransformerBlock(nn.Module):
646
+ r"""
647
+ A basic Transformer block for video like data.
648
+
649
+ Parameters:
650
+ dim (`int`): The number of channels in the input and output.
651
+ time_mix_inner_dim (`int`): The number of channels for temporal attention.
652
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
653
+ attention_head_dim (`int`): The number of channels in each head.
654
+ cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
655
+ """
656
+
657
+ def __init__(
658
+ self,
659
+ dim: int,
660
+ time_mix_inner_dim: int,
661
+ num_attention_heads: int,
662
+ attention_head_dim: int,
663
+ cross_attention_dim: Optional[int] = None,
664
+ ):
665
+ super().__init__()
666
+ self.is_res = dim == time_mix_inner_dim
667
+
668
+ self.norm_in = nn.LayerNorm(dim)
669
+
670
+ # Define 3 blocks. Each block has its own normalization layer.
671
+ # 1. Self-Attn
672
+ self.ff_in = FeedForward(
673
+ dim,
674
+ dim_out=time_mix_inner_dim,
675
+ activation_fn="geglu",
676
+ )
677
+
678
+ self.norm1 = nn.LayerNorm(time_mix_inner_dim)
679
+ self.attn1 = Attention(
680
+ query_dim=time_mix_inner_dim,
681
+ heads=num_attention_heads,
682
+ dim_head=attention_head_dim,
683
+ cross_attention_dim=None,
684
+ )
685
+
686
+ # 2. Cross-Attn
687
+ if cross_attention_dim is not None:
688
+ # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
689
+ # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
690
+ # the second cross attention block.
691
+ self.norm2 = nn.LayerNorm(time_mix_inner_dim)
692
+ self.attn2 = Attention(
693
+ query_dim=time_mix_inner_dim,
694
+ cross_attention_dim=cross_attention_dim,
695
+ heads=num_attention_heads,
696
+ dim_head=attention_head_dim,
697
+ ) # is self-attn if encoder_hidden_states is none
698
+ else:
699
+ self.norm2 = None
700
+ self.attn2 = None
701
+
702
+ # 3. Feed-forward
703
+ self.norm3 = nn.LayerNorm(time_mix_inner_dim)
704
+ self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu")
705
+
706
+ # let chunk size default to None
707
+ self._chunk_size = None
708
+ self._chunk_dim = None
709
+
710
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs):
711
+ # Sets chunk feed-forward
712
+ self._chunk_size = chunk_size
713
+ # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off
714
+ self._chunk_dim = 1
715
+
716
+ def forward(
717
+ self,
718
+ hidden_states: torch.Tensor,
719
+ num_frames: int,
720
+ encoder_hidden_states: Optional[torch.Tensor] = None,
721
+ ) -> torch.Tensor:
722
+ # Notice that normalization is always applied before the real computation in the following blocks.
723
+ # 0. Self-Attention
724
+ batch_size = hidden_states.shape[0]
725
+
726
+ batch_frames, seq_length, channels = hidden_states.shape
727
+ batch_size = batch_frames // num_frames
728
+
729
+ hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels)
730
+ hidden_states = hidden_states.permute(0, 2, 1, 3)
731
+ hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels)
732
+
733
+ residual = hidden_states
734
+ hidden_states = self.norm_in(hidden_states)
735
+
736
+ if self._chunk_size is not None:
737
+ hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size)
738
+ else:
739
+ hidden_states = self.ff_in(hidden_states)
740
+
741
+ if self.is_res:
742
+ hidden_states = hidden_states + residual
743
+
744
+ norm_hidden_states = self.norm1(hidden_states)
745
+ attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None)
746
+ hidden_states = attn_output + hidden_states
747
+
748
+ # 3. Cross-Attention
749
+ if self.attn2 is not None:
750
+ norm_hidden_states = self.norm2(hidden_states)
751
+ attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states)
752
+ hidden_states = attn_output + hidden_states
753
+
754
+ # 4. Feed-forward
755
+ norm_hidden_states = self.norm3(hidden_states)
756
+
757
+ if self._chunk_size is not None:
758
+ ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
759
+ else:
760
+ ff_output = self.ff(norm_hidden_states)
761
+
762
+ if self.is_res:
763
+ hidden_states = ff_output + hidden_states
764
+ else:
765
+ hidden_states = ff_output
766
+
767
+ hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels)
768
+ hidden_states = hidden_states.permute(0, 2, 1, 3)
769
+ hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels)
770
+
771
+ return hidden_states
772
+
773
+
774
+ class SkipFFTransformerBlock(nn.Module):
775
+ def __init__(
776
+ self,
777
+ dim: int,
778
+ num_attention_heads: int,
779
+ attention_head_dim: int,
780
+ kv_input_dim: int,
781
+ kv_input_dim_proj_use_bias: bool,
782
+ dropout=0.0,
783
+ cross_attention_dim: Optional[int] = None,
784
+ attention_bias: bool = False,
785
+ attention_out_bias: bool = True,
786
+ ):
787
+ super().__init__()
788
+ if kv_input_dim != dim:
789
+ self.kv_mapper = nn.Linear(kv_input_dim, dim, kv_input_dim_proj_use_bias)
790
+ else:
791
+ self.kv_mapper = None
792
+
793
+ self.norm1 = RMSNorm(dim, 1e-06)
794
+
795
+ self.attn1 = Attention(
796
+ query_dim=dim,
797
+ heads=num_attention_heads,
798
+ dim_head=attention_head_dim,
799
+ dropout=dropout,
800
+ bias=attention_bias,
801
+ cross_attention_dim=cross_attention_dim,
802
+ out_bias=attention_out_bias,
803
+ )
804
+
805
+ self.norm2 = RMSNorm(dim, 1e-06)
806
+
807
+ self.attn2 = Attention(
808
+ query_dim=dim,
809
+ cross_attention_dim=cross_attention_dim,
810
+ heads=num_attention_heads,
811
+ dim_head=attention_head_dim,
812
+ dropout=dropout,
813
+ bias=attention_bias,
814
+ out_bias=attention_out_bias,
815
+ )
816
+
817
+ def forward(self, hidden_states, encoder_hidden_states, cross_attention_kwargs):
818
+ cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
819
+
820
+ if self.kv_mapper is not None:
821
+ encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states))
822
+
823
+ norm_hidden_states = self.norm1(hidden_states)
824
+
825
+ attn_output = self.attn1(
826
+ norm_hidden_states,
827
+ encoder_hidden_states=encoder_hidden_states,
828
+ **cross_attention_kwargs,
829
+ )
830
+
831
+ hidden_states = attn_output + hidden_states
832
+
833
+ norm_hidden_states = self.norm2(hidden_states)
834
+
835
+ attn_output = self.attn2(
836
+ norm_hidden_states,
837
+ encoder_hidden_states=encoder_hidden_states,
838
+ **cross_attention_kwargs,
839
+ )
840
+
841
+ hidden_states = attn_output + hidden_states
842
+
843
+ return hidden_states
844
+
845
+
846
+ @maybe_allow_in_graph
847
+ class FreeNoiseTransformerBlock(nn.Module):
848
+ r"""
849
+ A FreeNoise Transformer block.
850
+
851
+ Parameters:
852
+ dim (`int`):
853
+ The number of channels in the input and output.
854
+ num_attention_heads (`int`):
855
+ The number of heads to use for multi-head attention.
856
+ attention_head_dim (`int`):
857
+ The number of channels in each head.
858
+ dropout (`float`, *optional*, defaults to 0.0):
859
+ The dropout probability to use.
860
+ cross_attention_dim (`int`, *optional*):
861
+ The size of the encoder_hidden_states vector for cross attention.
862
+ activation_fn (`str`, *optional*, defaults to `"geglu"`):
863
+ Activation function to be used in feed-forward.
864
+ num_embeds_ada_norm (`int`, *optional*):
865
+ The number of diffusion steps used during training. See `Transformer2DModel`.
866
+ attention_bias (`bool`, defaults to `False`):
867
+ Configure if the attentions should contain a bias parameter.
868
+ only_cross_attention (`bool`, defaults to `False`):
869
+ Whether to use only cross-attention layers. In this case two cross attention layers are used.
870
+ double_self_attention (`bool`, defaults to `False`):
871
+ Whether to use two self-attention layers. In this case no cross attention layers are used.
872
+ upcast_attention (`bool`, defaults to `False`):
873
+ Whether to upcast the attention computation to float32. This is useful for mixed precision training.
874
+ norm_elementwise_affine (`bool`, defaults to `True`):
875
+ Whether to use learnable elementwise affine parameters for normalization.
876
+ norm_type (`str`, defaults to `"layer_norm"`):
877
+ The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`.
878
+ final_dropout (`bool` defaults to `False`):
879
+ Whether to apply a final dropout after the last feed-forward layer.
880
+ attention_type (`str`, defaults to `"default"`):
881
+ The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`.
882
+ positional_embeddings (`str`, *optional*):
883
+ The type of positional embeddings to apply to.
884
+ num_positional_embeddings (`int`, *optional*, defaults to `None`):
885
+ The maximum number of positional embeddings to apply.
886
+ ff_inner_dim (`int`, *optional*):
887
+ Hidden dimension of feed-forward MLP.
888
+ ff_bias (`bool`, defaults to `True`):
889
+ Whether or not to use bias in feed-forward MLP.
890
+ attention_out_bias (`bool`, defaults to `True`):
891
+ Whether or not to use bias in attention output project layer.
892
+ context_length (`int`, defaults to `16`):
893
+ The maximum number of frames that the FreeNoise block processes at once.
894
+ context_stride (`int`, defaults to `4`):
895
+ The number of frames to be skipped before starting to process a new batch of `context_length` frames.
896
+ weighting_scheme (`str`, defaults to `"pyramid"`):
897
+ The weighting scheme to use for weighting averaging of processed latent frames. As described in the
898
+ Equation 9. of the [FreeNoise](https://arxiv.org/abs/2310.15169) paper, "pyramid" is the default setting
899
+ used.
900
+ """
901
+
902
+ def __init__(
903
+ self,
904
+ dim: int,
905
+ num_attention_heads: int,
906
+ attention_head_dim: int,
907
+ dropout: float = 0.0,
908
+ cross_attention_dim: Optional[int] = None,
909
+ activation_fn: str = "geglu",
910
+ num_embeds_ada_norm: Optional[int] = None,
911
+ attention_bias: bool = False,
912
+ only_cross_attention: bool = False,
913
+ double_self_attention: bool = False,
914
+ upcast_attention: bool = False,
915
+ norm_elementwise_affine: bool = True,
916
+ norm_type: str = "layer_norm",
917
+ norm_eps: float = 1e-5,
918
+ final_dropout: bool = False,
919
+ positional_embeddings: Optional[str] = None,
920
+ num_positional_embeddings: Optional[int] = None,
921
+ ff_inner_dim: Optional[int] = None,
922
+ ff_bias: bool = True,
923
+ attention_out_bias: bool = True,
924
+ context_length: int = 16,
925
+ context_stride: int = 4,
926
+ weighting_scheme: str = "pyramid",
927
+ ):
928
+ super().__init__()
929
+ self.dim = dim
930
+ self.num_attention_heads = num_attention_heads
931
+ self.attention_head_dim = attention_head_dim
932
+ self.dropout = dropout
933
+ self.cross_attention_dim = cross_attention_dim
934
+ self.activation_fn = activation_fn
935
+ self.attention_bias = attention_bias
936
+ self.double_self_attention = double_self_attention
937
+ self.norm_elementwise_affine = norm_elementwise_affine
938
+ self.positional_embeddings = positional_embeddings
939
+ self.num_positional_embeddings = num_positional_embeddings
940
+ self.only_cross_attention = only_cross_attention
941
+
942
+ self.set_free_noise_properties(context_length, context_stride, weighting_scheme)
943
+
944
+ # We keep these boolean flags for backward-compatibility.
945
+ self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
946
+ self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
947
+ self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
948
+ self.use_layer_norm = norm_type == "layer_norm"
949
+ self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous"
950
+
951
+ if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
952
+ raise ValueError(
953
+ f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
954
+ f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
955
+ )
956
+
957
+ self.norm_type = norm_type
958
+ self.num_embeds_ada_norm = num_embeds_ada_norm
959
+
960
+ if positional_embeddings and (num_positional_embeddings is None):
961
+ raise ValueError(
962
+ "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined."
963
+ )
964
+
965
+ if positional_embeddings == "sinusoidal":
966
+ self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
967
+ else:
968
+ self.pos_embed = None
969
+
970
+ # Define 3 blocks. Each block has its own normalization layer.
971
+ # 1. Self-Attn
972
+ self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
973
+
974
+ self.attn1 = Attention(
975
+ query_dim=dim,
976
+ heads=num_attention_heads,
977
+ dim_head=attention_head_dim,
978
+ dropout=dropout,
979
+ bias=attention_bias,
980
+ cross_attention_dim=cross_attention_dim if only_cross_attention else None,
981
+ upcast_attention=upcast_attention,
982
+ out_bias=attention_out_bias,
983
+ )
984
+
985
+ # 2. Cross-Attn
986
+ if cross_attention_dim is not None or double_self_attention:
987
+ self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
988
+
989
+ self.attn2 = Attention(
990
+ query_dim=dim,
991
+ cross_attention_dim=cross_attention_dim if not double_self_attention else None,
992
+ heads=num_attention_heads,
993
+ dim_head=attention_head_dim,
994
+ dropout=dropout,
995
+ bias=attention_bias,
996
+ upcast_attention=upcast_attention,
997
+ out_bias=attention_out_bias,
998
+ ) # is self-attn if encoder_hidden_states is none
999
+
1000
+ # 3. Feed-forward
1001
+ self.ff = FeedForward(
1002
+ dim,
1003
+ dropout=dropout,
1004
+ activation_fn=activation_fn,
1005
+ final_dropout=final_dropout,
1006
+ inner_dim=ff_inner_dim,
1007
+ bias=ff_bias,
1008
+ )
1009
+
1010
+ self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
1011
+
1012
+ # let chunk size default to None
1013
+ self._chunk_size = None
1014
+ self._chunk_dim = 0
1015
+
1016
+ def _get_frame_indices(self, num_frames: int) -> List[Tuple[int, int]]:
1017
+ frame_indices = []
1018
+ for i in range(0, num_frames - self.context_length + 1, self.context_stride):
1019
+ window_start = i
1020
+ window_end = min(num_frames, i + self.context_length)
1021
+ frame_indices.append((window_start, window_end))
1022
+ return frame_indices
1023
+
1024
+ def _get_frame_weights(self, num_frames: int, weighting_scheme: str = "pyramid") -> List[float]:
1025
+ if weighting_scheme == "flat":
1026
+ weights = [1.0] * num_frames
1027
+
1028
+ elif weighting_scheme == "pyramid":
1029
+ if num_frames % 2 == 0:
1030
+ # num_frames = 4 => [1, 2, 2, 1]
1031
+ mid = num_frames // 2
1032
+ weights = list(range(1, mid + 1))
1033
+ weights = weights + weights[::-1]
1034
+ else:
1035
+ # num_frames = 5 => [1, 2, 3, 2, 1]
1036
+ mid = (num_frames + 1) // 2
1037
+ weights = list(range(1, mid))
1038
+ weights = weights + [mid] + weights[::-1]
1039
+
1040
+ elif weighting_scheme == "delayed_reverse_sawtooth":
1041
+ if num_frames % 2 == 0:
1042
+ # num_frames = 4 => [0.01, 2, 2, 1]
1043
+ mid = num_frames // 2
1044
+ weights = [0.01] * (mid - 1) + [mid]
1045
+ weights = weights + list(range(mid, 0, -1))
1046
+ else:
1047
+ # num_frames = 5 => [0.01, 0.01, 3, 2, 1]
1048
+ mid = (num_frames + 1) // 2
1049
+ weights = [0.01] * mid
1050
+ weights = weights + list(range(mid, 0, -1))
1051
+ else:
1052
+ raise ValueError(f"Unsupported value for weighting_scheme={weighting_scheme}")
1053
+
1054
+ return weights
1055
+
1056
+ def set_free_noise_properties(
1057
+ self, context_length: int, context_stride: int, weighting_scheme: str = "pyramid"
1058
+ ) -> None:
1059
+ self.context_length = context_length
1060
+ self.context_stride = context_stride
1061
+ self.weighting_scheme = weighting_scheme
1062
+
1063
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0) -> None:
1064
+ # Sets chunk feed-forward
1065
+ self._chunk_size = chunk_size
1066
+ self._chunk_dim = dim
1067
+
1068
+ def forward(
1069
+ self,
1070
+ hidden_states: torch.Tensor,
1071
+ attention_mask: Optional[torch.Tensor] = None,
1072
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1073
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1074
+ cross_attention_kwargs: Dict[str, Any] = None,
1075
+ *args,
1076
+ **kwargs,
1077
+ ) -> torch.Tensor:
1078
+ if cross_attention_kwargs is not None:
1079
+ if cross_attention_kwargs.get("scale", None) is not None:
1080
+ logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
1081
+
1082
+ cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
1083
+
1084
+ # hidden_states: [B x H x W, F, C]
1085
+ device = hidden_states.device
1086
+ dtype = hidden_states.dtype
1087
+
1088
+ num_frames = hidden_states.size(1)
1089
+ frame_indices = self._get_frame_indices(num_frames)
1090
+ frame_weights = self._get_frame_weights(self.context_length, self.weighting_scheme)
1091
+ frame_weights = torch.tensor(frame_weights, device=device, dtype=dtype).unsqueeze(0).unsqueeze(-1)
1092
+ is_last_frame_batch_complete = frame_indices[-1][1] == num_frames
1093
+
1094
+ # Handle out-of-bounds case if num_frames isn't perfectly divisible by context_length
1095
+ # For example, num_frames=25, context_length=16, context_stride=4, then we expect the ranges:
1096
+ # [(0, 16), (4, 20), (8, 24), (10, 26)]
1097
+ if not is_last_frame_batch_complete:
1098
+ if num_frames < self.context_length:
1099
+ raise ValueError(f"Expected {num_frames=} to be greater or equal than {self.context_length=}")
1100
+ last_frame_batch_length = num_frames - frame_indices[-1][1]
1101
+ frame_indices.append((num_frames - self.context_length, num_frames))
1102
+
1103
+ num_times_accumulated = torch.zeros((1, num_frames, 1), device=device)
1104
+ accumulated_values = torch.zeros_like(hidden_states)
1105
+
1106
+ for i, (frame_start, frame_end) in enumerate(frame_indices):
1107
+ # The reason for slicing here is to handle cases like frame_indices=[(0, 16), (16, 20)],
1108
+ # i.e. when the user provides a video whose frame count (e.g. 19 frames) is not a multiple
1109
+ # of `context_length`, so the final window can be shorter than the others.
1110
+ weights = torch.ones_like(num_times_accumulated[:, frame_start:frame_end])
1111
+ weights *= frame_weights
1112
+
1113
+ hidden_states_chunk = hidden_states[:, frame_start:frame_end]
1114
+
1115
+ # Notice that normalization is always applied before the real computation in the following blocks.
1116
+ # 1. Self-Attention
1117
+ norm_hidden_states = self.norm1(hidden_states_chunk)
1118
+
1119
+ if self.pos_embed is not None:
1120
+ norm_hidden_states = self.pos_embed(norm_hidden_states)
1121
+
1122
+ attn_output = self.attn1(
1123
+ norm_hidden_states,
1124
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
1125
+ attention_mask=attention_mask,
1126
+ **cross_attention_kwargs,
1127
+ )
1128
+
1129
+ hidden_states_chunk = attn_output + hidden_states_chunk
1130
+ if hidden_states_chunk.ndim == 4:
1131
+ hidden_states_chunk = hidden_states_chunk.squeeze(1)
1132
+
1133
+ # 2. Cross-Attention
1134
+ if self.attn2 is not None:
1135
+ norm_hidden_states = self.norm2(hidden_states_chunk)
1136
+
1137
+ if self.pos_embed is not None and self.norm_type != "ada_norm_single":
1138
+ norm_hidden_states = self.pos_embed(norm_hidden_states)
1139
+
1140
+ attn_output = self.attn2(
1141
+ norm_hidden_states,
1142
+ encoder_hidden_states=encoder_hidden_states,
1143
+ attention_mask=encoder_attention_mask,
1144
+ **cross_attention_kwargs,
1145
+ )
1146
+ hidden_states_chunk = attn_output + hidden_states_chunk
1147
+
1148
+ if i == len(frame_indices) - 1 and not is_last_frame_batch_complete:
1149
+ accumulated_values[:, -last_frame_batch_length:] += (
1150
+ hidden_states_chunk[:, -last_frame_batch_length:] * weights[:, -last_frame_batch_length:]
1151
+ )
1152
+ num_times_accumulated[:, -last_frame_batch_length:] += weights[:, -last_frame_batch_length:]
1153
+ else:
1154
+ accumulated_values[:, frame_start:frame_end] += hidden_states_chunk * weights
1155
+ num_times_accumulated[:, frame_start:frame_end] += weights
1156
+
1157
+ # TODO(aryan): Maybe this could be done in a better way.
1158
+ #
1159
+ # Previously, this was:
1160
+ # hidden_states = torch.where(
1161
+ # num_times_accumulated > 0, accumulated_values / num_times_accumulated, accumulated_values
1162
+ # )
1163
+ #
1164
+ # The reasoning for the change here is `torch.where` became a bottleneck at some point when golfing memory
1165
+ # spikes. It is particularly noticeable when the number of frames is high. My understanding is that this comes
1166
+ # from tensors being copied - which is why we resort to spliting and concatenating here. I've not particularly
1167
+ # looked into this deeply because other memory optimizations led to more pronounced reductions.
1168
+ hidden_states = torch.cat(
1169
+ [
1170
+ torch.where(num_times_split > 0, accumulated_split / num_times_split, accumulated_split)
1171
+ for accumulated_split, num_times_split in zip(
1172
+ accumulated_values.split(self.context_length, dim=1),
1173
+ num_times_accumulated.split(self.context_length, dim=1),
1174
+ )
1175
+ ],
1176
+ dim=1,
1177
+ ).to(dtype)
1178
+
1179
+ # 3. Feed-forward
1180
+ norm_hidden_states = self.norm3(hidden_states)
1181
+
1182
+ if self._chunk_size is not None:
1183
+ ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
1184
+ else:
1185
+ ff_output = self.ff(norm_hidden_states)
1186
+
1187
+ hidden_states = ff_output + hidden_states
1188
+ if hidden_states.ndim == 4:
1189
+ hidden_states = hidden_states.squeeze(1)
1190
+
1191
+ return hidden_states
1192
+
1193
+
1194
+ class FeedForward(nn.Module):
1195
+ r"""
1196
+ A feed-forward layer.
1197
+
1198
+ Parameters:
1199
+ dim (`int`): The number of channels in the input.
1200
+ dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
1201
+ mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
1202
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
1203
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
1204
+ final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
1205
+ bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
1206
+ """
1207
+
1208
+ def __init__(
1209
+ self,
1210
+ dim: int,
1211
+ dim_out: Optional[int] = None,
1212
+ mult: int = 4,
1213
+ dropout: float = 0.0,
1214
+ activation_fn: str = "geglu",
1215
+ final_dropout: bool = False,
1216
+ inner_dim=None,
1217
+ bias: bool = True,
1218
+ ):
1219
+ super().__init__()
1220
+ if inner_dim is None:
1221
+ inner_dim = int(dim * mult)
1222
+ dim_out = dim_out if dim_out is not None else dim
1223
+
1224
+ if activation_fn == "gelu":
1225
+ act_fn = GELU(dim, inner_dim, bias=bias)
1226
+ if activation_fn == "gelu-approximate":
1227
+ act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias)
1228
+ elif activation_fn == "geglu":
1229
+ act_fn = GEGLU(dim, inner_dim, bias=bias)
1230
+ elif activation_fn == "geglu-approximate":
1231
+ act_fn = ApproximateGELU(dim, inner_dim, bias=bias)
1232
+ elif activation_fn == "swiglu":
1233
+ act_fn = SwiGLU(dim, inner_dim, bias=bias)
1234
+ elif activation_fn == "linear-silu":
1235
+ act_fn = LinearActivation(dim, inner_dim, bias=bias, activation="silu")
1236
+
1237
+ self.net = nn.ModuleList([])
1238
+ # project in
1239
+ self.net.append(act_fn)
1240
+ # project dropout
1241
+ self.net.append(nn.Dropout(dropout))
1242
+ # project out
1243
+ self.net.append(nn.Linear(inner_dim, dim_out, bias=bias))
1244
+ # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
1245
+ if final_dropout:
1246
+ self.net.append(nn.Dropout(dropout))
1247
+
1248
+ def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor:
1249
+ if len(args) > 0 or kwargs.get("scale", None) is not None:
1250
+ deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
1251
+ deprecate("scale", "1.0.0", deprecation_message)
1252
+ for module in self.net:
1253
+ hidden_states = module(hidden_states)
1254
+ return hidden_states
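The accumulation block above normalizes the accumulated hidden states chunk by chunk (split along the frame dimension, `torch.where` per chunk, then concatenate) instead of applying one `torch.where` over the full tensor, trading a single large temporary for several context-length-sized ones. The two formulations produce the same result; the following is a minimal, self-contained sketch of that equivalence, where the batch size, frame count, feature width, and context length are made-up values for illustration rather than anything taken from this repository:

import torch

batch, frames, dim, context_length = 2, 8, 4, 3
accumulated_values = torch.randn(batch, frames, dim)
num_times_accumulated = torch.randint(0, 3, (batch, frames, 1)).float()

# Previous formulation: one torch.where over the full accumulated tensor.
reference = torch.where(
    num_times_accumulated > 0, accumulated_values / num_times_accumulated, accumulated_values
)

# Chunked formulation: normalize each context-length window separately, then concatenate.
chunked = torch.cat(
    [
        torch.where(counts > 0, values / counts, values)
        for values, counts in zip(
            accumulated_values.split(context_length, dim=1),
            num_times_accumulated.split(context_length, dim=1),
        )
    ],
    dim=1,
)

assert torch.allclose(reference, chunked)

Each temporary in the chunked version is at most one context window long along the frame axis, which is what keeps the peak memory lower when the number of frames is large.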
models/transformers_2d.py ADDED
@@ -0,0 +1,569 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import Any, Dict, Optional
15
+
16
+ import torch
17
+ import torch.nn.functional as F
18
+ from torch import nn
19
+
20
+ from diffusers.configuration_utils import LegacyConfigMixin, register_to_config
21
+ from diffusers.utils import deprecate, is_torch_version, logging
22
+ from models.mm_attention import BasicTransformerBlock
23
+ from diffusers.models.embeddings import ImagePositionalEmbeddings, PatchEmbed, PixArtAlphaTextProjection
24
+ from diffusers.models.modeling_outputs import Transformer2DModelOutput
25
+ from diffusers.models.modeling_utils import LegacyModelMixin
26
+ from diffusers.models.normalization import AdaLayerNormSingle
27
+
28
+
29
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
30
+
31
+
32
+ class Transformer2DModelOutput(Transformer2DModelOutput):
33
+ def __init__(self, *args, **kwargs):
34
+ deprecation_message = "Importing `Transformer2DModelOutput` from `diffusers.models.transformer_2d` is deprecated and this will be removed in a future version. Please use `from diffusers.models.modeling_outputs import Transformer2DModelOutput`, instead."
35
+ deprecate("Transformer2DModelOutput", "1.0.0", deprecation_message)
36
+ super().__init__(*args, **kwargs)
37
+
38
+
39
+ class Transformer2DModel(LegacyModelMixin, LegacyConfigMixin):
40
+ """
41
+ A 2D Transformer model for image-like data.
42
+
43
+ Parameters:
44
+ num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
45
+ attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
46
+ in_channels (`int`, *optional*):
47
+ The number of channels in the input and output (specify if the input is **continuous**).
48
+ num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
49
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
50
+ cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
51
+ sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).
52
+ This is fixed during training since it is used to learn a number of position embeddings.
53
+ num_vector_embeds (`int`, *optional*):
54
+ The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**).
55
+ Includes the class for the masked latent pixel.
56
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward.
57
+ num_embeds_ada_norm ( `int`, *optional*):
58
+ The number of diffusion steps used during training. Pass if at least one of the norm_layers is
59
+ `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are
60
+ added to the hidden states.
61
+
62
+ During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`.
63
+ attention_bias (`bool`, *optional*):
64
+ Configure if the `TransformerBlocks` attention should contain a bias parameter.
65
+ """
66
+
67
+ _supports_gradient_checkpointing = True
68
+ _no_split_modules = ["BasicTransformerBlock"]
69
+
70
+ @register_to_config
71
+ def __init__(
72
+ self,
73
+ num_attention_heads: int = 16,
74
+ attention_head_dim: int = 88,
75
+ in_channels: Optional[int] = None,
76
+ out_channels: Optional[int] = None,
77
+ num_layers: int = 1,
78
+ dropout: float = 0.0,
79
+ norm_num_groups: int = 32,
80
+ cross_attention_dim: Optional[int] = None,
81
+ attention_bias: bool = False,
82
+ sample_size: Optional[int] = None,
83
+ num_vector_embeds: Optional[int] = None,
84
+ patch_size: Optional[int] = None,
85
+ activation_fn: str = "geglu",
86
+ num_embeds_ada_norm: Optional[int] = None,
87
+ use_linear_projection: bool = False,
88
+ only_cross_attention: bool = False,
89
+ double_self_attention: bool = False,
90
+ upcast_attention: bool = False,
91
+ norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen'
92
+ norm_elementwise_affine: bool = True,
93
+ norm_eps: float = 1e-5,
94
+ attention_type: str = "default",
95
+ caption_channels: int = None,
96
+ interpolation_scale: float = None,
97
+ use_additional_conditions: Optional[bool] = None,
98
+ ):
99
+ super().__init__()
100
+
101
+ # Validate inputs.
102
+ if patch_size is not None:
103
+ if norm_type not in ["ada_norm", "ada_norm_zero", "ada_norm_single"]:
104
+ raise NotImplementedError(
105
+ f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'."
106
+ )
107
+ elif norm_type in ["ada_norm", "ada_norm_zero"] and num_embeds_ada_norm is None:
108
+ raise ValueError(
109
+ f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None."
110
+ )
111
+
112
+ # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)`
113
+ # Define whether input is continuous or discrete depending on configuration
114
+ self.is_input_continuous = (in_channels is not None) and (patch_size is None)
115
+ self.is_input_vectorized = num_vector_embeds is not None
116
+ self.is_input_patches = in_channels is not None and patch_size is not None
117
+
118
+ if self.is_input_continuous and self.is_input_vectorized:
119
+ raise ValueError(
120
+ f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make"
121
+ " sure that either `in_channels` or `num_vector_embeds` is None."
122
+ )
123
+ elif self.is_input_vectorized and self.is_input_patches:
124
+ raise ValueError(
125
+ f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make"
126
+ " sure that either `num_vector_embeds` or `num_patches` is None."
127
+ )
128
+ elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches:
129
+ raise ValueError(
130
+ f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:"
131
+ f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None."
132
+ )
133
+
134
+ if norm_type == "layer_norm" and num_embeds_ada_norm is not None:
135
+ deprecation_message = (
136
+ f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or"
137
+ " incorrectly set to `'layer_norm'`. Make sure to set `norm_type` to `'ada_norm'` in the config."
138
+ " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect"
139
+ " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it"
140
+ " would be very nice if you could open a Pull request for the `transformer/config.json` file"
141
+ )
142
+ deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False)
143
+ norm_type = "ada_norm"
144
+
145
+ # Set some common variables used across the board.
146
+ self.use_linear_projection = use_linear_projection
147
+ self.interpolation_scale = interpolation_scale
148
+ self.caption_channels = caption_channels
149
+ self.num_attention_heads = num_attention_heads
150
+ self.attention_head_dim = attention_head_dim
151
+ self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
152
+ self.in_channels = in_channels
153
+ self.out_channels = in_channels if out_channels is None else out_channels
154
+ self.gradient_checkpointing = False
155
+
156
+ if use_additional_conditions is None:
157
+ if norm_type == "ada_norm_single" and sample_size == 128:
158
+ use_additional_conditions = True
159
+ else:
160
+ use_additional_conditions = False
161
+ self.use_additional_conditions = use_additional_conditions
162
+
163
+ # 2. Initialize the right blocks.
164
+ # These functions follow a common structure:
165
+ # a. Initialize the input blocks. b. Initialize the transformer blocks.
166
+ # c. Initialize the output blocks and other projection blocks when necessary.
167
+ if self.is_input_continuous:
168
+ self._init_continuous_input(norm_type=norm_type)
169
+ elif self.is_input_vectorized:
170
+ self._init_vectorized_inputs(norm_type=norm_type)
171
+ elif self.is_input_patches:
172
+ self._init_patched_inputs(norm_type=norm_type)
173
+
174
+ def _init_continuous_input(self, norm_type):
175
+ self.norm = torch.nn.GroupNorm(
176
+ num_groups=self.config.norm_num_groups, num_channels=self.in_channels, eps=1e-6, affine=True
177
+ )
178
+ if self.use_linear_projection:
179
+ self.proj_in = torch.nn.Linear(self.in_channels, self.inner_dim)
180
+ else:
181
+ self.proj_in = torch.nn.Conv2d(self.in_channels, self.inner_dim, kernel_size=1, stride=1, padding=0)
182
+
183
+ self.transformer_blocks = nn.ModuleList(
184
+ [
185
+ BasicTransformerBlock(
186
+ self.inner_dim,
187
+ self.config.num_attention_heads,
188
+ self.config.attention_head_dim,
189
+ dropout=self.config.dropout,
190
+ cross_attention_dim=self.config.cross_attention_dim,
191
+ activation_fn=self.config.activation_fn,
192
+ num_embeds_ada_norm=self.config.num_embeds_ada_norm,
193
+ attention_bias=self.config.attention_bias,
194
+ only_cross_attention=self.config.only_cross_attention,
195
+ double_self_attention=self.config.double_self_attention,
196
+ upcast_attention=self.config.upcast_attention,
197
+ norm_type=norm_type,
198
+ norm_elementwise_affine=self.config.norm_elementwise_affine,
199
+ norm_eps=self.config.norm_eps,
200
+ attention_type=self.config.attention_type,
201
+ )
202
+ for _ in range(self.config.num_layers)
203
+ ]
204
+ )
205
+
206
+ if self.use_linear_projection:
207
+ self.proj_out = torch.nn.Linear(self.inner_dim, self.out_channels)
208
+ else:
209
+ self.proj_out = torch.nn.Conv2d(self.inner_dim, self.out_channels, kernel_size=1, stride=1, padding=0)
210
+
211
+ def _init_vectorized_inputs(self, norm_type):
212
+ assert self.config.sample_size is not None, "Transformer2DModel over discrete input must provide sample_size"
213
+ assert (
214
+ self.config.num_vector_embeds is not None
215
+ ), "Transformer2DModel over discrete input must provide num_embed"
216
+
217
+ self.height = self.config.sample_size
218
+ self.width = self.config.sample_size
219
+ self.num_latent_pixels = self.height * self.width
220
+
221
+ self.latent_image_embedding = ImagePositionalEmbeddings(
222
+ num_embed=self.config.num_vector_embeds, embed_dim=self.inner_dim, height=self.height, width=self.width
223
+ )
224
+
225
+ self.transformer_blocks = nn.ModuleList(
226
+ [
227
+ BasicTransformerBlock(
228
+ self.inner_dim,
229
+ self.config.num_attention_heads,
230
+ self.config.attention_head_dim,
231
+ dropout=self.config.dropout,
232
+ cross_attention_dim=self.config.cross_attention_dim,
233
+ activation_fn=self.config.activation_fn,
234
+ num_embeds_ada_norm=self.config.num_embeds_ada_norm,
235
+ attention_bias=self.config.attention_bias,
236
+ only_cross_attention=self.config.only_cross_attention,
237
+ double_self_attention=self.config.double_self_attention,
238
+ upcast_attention=self.config.upcast_attention,
239
+ norm_type=norm_type,
240
+ norm_elementwise_affine=self.config.norm_elementwise_affine,
241
+ norm_eps=self.config.norm_eps,
242
+ attention_type=self.config.attention_type,
243
+ )
244
+ for _ in range(self.config.num_layers)
245
+ ]
246
+ )
247
+
248
+ self.norm_out = nn.LayerNorm(self.inner_dim)
249
+ self.out = nn.Linear(self.inner_dim, self.config.num_vector_embeds - 1)
250
+
251
+ def _init_patched_inputs(self, norm_type):
252
+ assert self.config.sample_size is not None, "Transformer2DModel over patched input must provide sample_size"
253
+
254
+ self.height = self.config.sample_size
255
+ self.width = self.config.sample_size
256
+
257
+ self.patch_size = self.config.patch_size
258
+ interpolation_scale = (
259
+ self.config.interpolation_scale
260
+ if self.config.interpolation_scale is not None
261
+ else max(self.config.sample_size // 64, 1)
262
+ )
263
+ self.pos_embed = PatchEmbed(
264
+ height=self.config.sample_size,
265
+ width=self.config.sample_size,
266
+ patch_size=self.config.patch_size,
267
+ in_channels=self.in_channels,
268
+ embed_dim=self.inner_dim,
269
+ interpolation_scale=interpolation_scale,
270
+ )
271
+
272
+ self.transformer_blocks = nn.ModuleList(
273
+ [
274
+ BasicTransformerBlock(
275
+ self.inner_dim,
276
+ self.config.num_attention_heads,
277
+ self.config.attention_head_dim,
278
+ dropout=self.config.dropout,
279
+ cross_attention_dim=self.config.cross_attention_dim,
280
+ activation_fn=self.config.activation_fn,
281
+ num_embeds_ada_norm=self.config.num_embeds_ada_norm,
282
+ attention_bias=self.config.attention_bias,
283
+ only_cross_attention=self.config.only_cross_attention,
284
+ double_self_attention=self.config.double_self_attention,
285
+ upcast_attention=self.config.upcast_attention,
286
+ norm_type=norm_type,
287
+ norm_elementwise_affine=self.config.norm_elementwise_affine,
288
+ norm_eps=self.config.norm_eps,
289
+ attention_type=self.config.attention_type,
290
+ )
291
+ for _ in range(self.config.num_layers)
292
+ ]
293
+ )
294
+
295
+ if self.config.norm_type != "ada_norm_single":
296
+ self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6)
297
+ self.proj_out_1 = nn.Linear(self.inner_dim, 2 * self.inner_dim)
298
+ self.proj_out_2 = nn.Linear(
299
+ self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels
300
+ )
301
+ elif self.config.norm_type == "ada_norm_single":
302
+ self.norm_out = nn.LayerNorm(self.inner_dim, elementwise_affine=False, eps=1e-6)
303
+ self.scale_shift_table = nn.Parameter(torch.randn(2, self.inner_dim) / self.inner_dim**0.5)
304
+ self.proj_out = nn.Linear(
305
+ self.inner_dim, self.config.patch_size * self.config.patch_size * self.out_channels
306
+ )
307
+
308
+ # PixArt-Alpha blocks.
309
+ self.adaln_single = None
310
+ if self.config.norm_type == "ada_norm_single":
311
+ # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use
312
+ # additional conditions until we find better name
313
+ self.adaln_single = AdaLayerNormSingle(
314
+ self.inner_dim, use_additional_conditions=self.use_additional_conditions
315
+ )
316
+
317
+ self.caption_projection = None
318
+ if self.caption_channels is not None:
319
+ self.caption_projection = PixArtAlphaTextProjection(
320
+ in_features=self.caption_channels, hidden_size=self.inner_dim
321
+ )
322
+
323
+ def _set_gradient_checkpointing(self, module, value=False):
324
+ if hasattr(module, "gradient_checkpointing"):
325
+ module.gradient_checkpointing = value
326
+
327
+ def forward(
328
+ self,
329
+ hidden_states: torch.Tensor,
330
+ encoder_hidden_states: Optional[torch.Tensor] = None,
331
+ encoder_lora_states: Optional[torch.Tensor] = None,
332
+ timestep: Optional[torch.LongTensor] = None,
333
+ added_cond_kwargs: Dict[str, torch.Tensor] = None,
334
+ class_labels: Optional[torch.LongTensor] = None,
335
+ cross_attention_kwargs: Dict[str, Any] = None,
336
+ attention_mask: Optional[torch.Tensor] = None,
337
+ encoder_attention_mask: Optional[torch.Tensor] = None,
338
+ return_dict: bool = True,
339
+ ):
340
+ """
341
+ The [`Transformer2DModel`] forward method.
342
+
343
+ Args:
344
+ hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.Tensor` of shape `(batch size, channel, height, width)` if continuous):
345
+ Input `hidden_states`.
346
+ encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
347
+ Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
348
+ self-attention.
349
+ timestep ( `torch.LongTensor`, *optional*):
350
+ Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
351
+ class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
352
+ Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
353
+ `AdaLayerZeroNorm`.
354
+ cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
355
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
356
+ `self.processor` in
357
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
358
+ attention_mask ( `torch.Tensor`, *optional*):
359
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
360
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
361
+ negative values to the attention scores corresponding to "discard" tokens.
362
+ encoder_attention_mask ( `torch.Tensor`, *optional*):
363
+ Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
364
+
365
+ * Mask `(batch, sequence_length)` True = keep, False = discard.
366
+ * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
367
+
368
+ If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
369
+ above. This bias will be added to the cross-attention scores.
370
+ return_dict (`bool`, *optional*, defaults to `True`):
371
+ Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
372
+ tuple.
373
+
374
+ Returns:
375
+ If `return_dict` is True, an [`~models.transformers.transformer_2d.Transformer2DModelOutput`] is returned,
376
+ otherwise a `tuple` where the first element is the sample tensor.
377
+ """
378
+ if cross_attention_kwargs is not None:
379
+ if cross_attention_kwargs.get("scale", None) is not None:
380
+ logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
381
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
382
+ # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
383
+ # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
384
+ # expects mask of shape:
385
+ # [batch, key_tokens]
386
+ # adds singleton query_tokens dimension:
387
+ # [batch, 1, key_tokens]
388
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
389
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
390
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
391
+ if attention_mask is not None and attention_mask.ndim == 2:
392
+ # assume that mask is expressed as:
393
+ # (1 = keep, 0 = discard)
394
+ # convert mask into a bias that can be added to attention scores:
395
+ # (keep = +0, discard = -10000.0)
396
+ attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
397
+ attention_mask = attention_mask.unsqueeze(1)
398
+
399
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
400
+ if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
401
+ encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
402
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
403
+
404
+ # 1. Input
405
+ if self.is_input_continuous:
406
+ batch_size, _, height, width = hidden_states.shape
407
+ residual = hidden_states
408
+ hidden_states, inner_dim = self._operate_on_continuous_inputs(hidden_states)
409
+ elif self.is_input_vectorized:
410
+ hidden_states = self.latent_image_embedding(hidden_states)
411
+ elif self.is_input_patches:
412
+ height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size
413
+ hidden_states, encoder_hidden_states, timestep, embedded_timestep = self._operate_on_patched_inputs(
414
+ hidden_states, encoder_hidden_states, timestep, added_cond_kwargs
415
+ )
416
+
417
+ # 2. Blocks
418
+ for block in self.transformer_blocks:
419
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
420
+
421
+ def create_custom_forward(module, return_dict=None):
422
+ def custom_forward(*inputs):
423
+ if return_dict is not None:
424
+ return module(*inputs, return_dict=return_dict)
425
+ else:
426
+ return module(*inputs)
427
+
428
+ return custom_forward
429
+
430
+ ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
431
+ hidden_states = torch.utils.checkpoint.checkpoint(
432
+ create_custom_forward(block),
433
+ hidden_states,
434
+ attention_mask,
435
+ encoder_hidden_states,
436
+ encoder_lora_states,
437
+ encoder_attention_mask,
438
+ timestep,
439
+ cross_attention_kwargs,
440
+ class_labels,
441
+ **ckpt_kwargs,
442
+ )
443
+ else:
444
+ hidden_states = block(
445
+ hidden_states,
446
+ attention_mask=attention_mask,
447
+ encoder_hidden_states=encoder_hidden_states,
448
+ encoder_lora_states=encoder_lora_states,
449
+ encoder_attention_mask=encoder_attention_mask,
450
+ timestep=timestep,
451
+ cross_attention_kwargs=cross_attention_kwargs,
452
+ class_labels=class_labels,
453
+ )
454
+
455
+ # 3. Output
456
+ if self.is_input_continuous:
457
+ output = self._get_output_for_continuous_inputs(
458
+ hidden_states=hidden_states,
459
+ residual=residual,
460
+ batch_size=batch_size,
461
+ height=height,
462
+ width=width,
463
+ inner_dim=inner_dim,
464
+ )
465
+ elif self.is_input_vectorized:
466
+ output = self._get_output_for_vectorized_inputs(hidden_states)
467
+ elif self.is_input_patches:
468
+ output = self._get_output_for_patched_inputs(
469
+ hidden_states=hidden_states,
470
+ timestep=timestep,
471
+ class_labels=class_labels,
472
+ embedded_timestep=embedded_timestep,
473
+ height=height,
474
+ width=width,
475
+ )
476
+
477
+ if not return_dict:
478
+ return (output,)
479
+
480
+ return Transformer2DModelOutput(sample=output)
481
+
482
+ def _operate_on_continuous_inputs(self, hidden_states):
483
+ batch, _, height, width = hidden_states.shape
484
+ hidden_states = self.norm(hidden_states)
485
+
486
+ if not self.use_linear_projection:
487
+ hidden_states = self.proj_in(hidden_states)
488
+ inner_dim = hidden_states.shape[1]
489
+ hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
490
+ else:
491
+ inner_dim = hidden_states.shape[1]
492
+ hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
493
+ hidden_states = self.proj_in(hidden_states)
494
+
495
+ return hidden_states, inner_dim
496
+
497
+ def _operate_on_patched_inputs(self, hidden_states, encoder_hidden_states, timestep, added_cond_kwargs):
498
+ batch_size = hidden_states.shape[0]
499
+ hidden_states = self.pos_embed(hidden_states)
500
+ embedded_timestep = None
501
+
502
+ if self.adaln_single is not None:
503
+ if self.use_additional_conditions and added_cond_kwargs is None:
504
+ raise ValueError(
505
+ "`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`."
506
+ )
507
+ timestep, embedded_timestep = self.adaln_single(
508
+ timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype
509
+ )
510
+
511
+ if self.caption_projection is not None:
512
+ encoder_hidden_states = self.caption_projection(encoder_hidden_states)
513
+ encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1])
514
+
515
+ return hidden_states, encoder_hidden_states, timestep, embedded_timestep
516
+
517
+ def _get_output_for_continuous_inputs(self, hidden_states, residual, batch_size, height, width, inner_dim):
518
+ if not self.use_linear_projection:
519
+ hidden_states = (
520
+ hidden_states.reshape(batch_size, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
521
+ )
522
+ hidden_states = self.proj_out(hidden_states)
523
+ else:
524
+ hidden_states = self.proj_out(hidden_states)
525
+ hidden_states = (
526
+ hidden_states.reshape(batch_size, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
527
+ )
528
+
529
+ output = hidden_states + residual
530
+ return output
531
+
532
+ def _get_output_for_vectorized_inputs(self, hidden_states):
533
+ hidden_states = self.norm_out(hidden_states)
534
+ logits = self.out(hidden_states)
535
+ # (batch, self.num_vector_embeds - 1, self.num_latent_pixels)
536
+ logits = logits.permute(0, 2, 1)
537
+ # log(p(x_0))
538
+ output = F.log_softmax(logits.double(), dim=1).float()
539
+ return output
540
+
541
+ def _get_output_for_patched_inputs(
542
+ self, hidden_states, timestep, class_labels, embedded_timestep, height=None, width=None
543
+ ):
544
+ if self.config.norm_type != "ada_norm_single":
545
+ conditioning = self.transformer_blocks[0].norm1.emb(
546
+ timestep, class_labels, hidden_dtype=hidden_states.dtype
547
+ )
548
+ shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)
549
+ hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]
550
+ hidden_states = self.proj_out_2(hidden_states)
551
+ elif self.config.norm_type == "ada_norm_single":
552
+ shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)
553
+ hidden_states = self.norm_out(hidden_states)
554
+ # Modulation
555
+ hidden_states = hidden_states * (1 + scale) + shift
556
+ hidden_states = self.proj_out(hidden_states)
557
+ hidden_states = hidden_states.squeeze(1)
558
+
559
+ # unpatchify
560
+ if self.adaln_single is None:
561
+ height = width = int(hidden_states.shape[1] ** 0.5)
562
+ hidden_states = hidden_states.reshape(
563
+ shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
564
+ )
565
+ hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
566
+ output = hidden_states.reshape(
567
+ shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
568
+ )
569
+ return output
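As a quick illustration of the mask handling documented in `Transformer2DModel.forward` above: a 2D `(batch, key_tokens)` attention mask with 1 = keep and 0 = discard is converted into an additive bias (0.0 for kept tokens, -10000.0 for discarded ones) with a singleton query-token dimension so it broadcasts over the attention scores. The sketch below is illustrative only; the mask values and tensor sizes are invented for the example and are not taken from this repository:

import torch

attention_mask = torch.tensor([[1, 1, 0, 1]])  # (batch, key_tokens): 1 = keep, 0 = discard
hidden_states = torch.zeros(1, 4, 8)           # only used here to pick the bias dtype

if attention_mask is not None and attention_mask.ndim == 2:
    # keep -> 0.0, discard -> -10000.0
    attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
    # add a singleton query_tokens dimension: (batch, 1, key_tokens)
    attention_mask = attention_mask.unsqueeze(1)

print(attention_mask.shape)  # torch.Size([1, 1, 4])
print(attention_mask)        # [[[-0., -0., -10000., -0.]]]

The same conversion is applied to `encoder_attention_mask` for cross-attention; masks that already arrive as 3D biases are passed through unchanged.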
models/unet_2d_blocks.py ADDED
The diff for this file is too large to render. See raw diff
 
models/unet_2d_condition.py ADDED
@@ -0,0 +1,1316 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from dataclasses import dataclass
15
+ from typing import Any, Dict, List, Optional, Tuple, Union
16
+
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.utils.checkpoint
20
+
21
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
22
+ from diffusers.loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin
23
+ from diffusers.loaders.single_file_model import FromOriginalModelMixin
24
+ from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
25
+ from diffusers.models.activations import get_activation
26
+ from diffusers.models.attention_processor import (
27
+ ADDED_KV_ATTENTION_PROCESSORS,
28
+ CROSS_ATTENTION_PROCESSORS,
29
+ Attention,
30
+ AttentionProcessor,
31
+ AttnAddedKVProcessor,
32
+ AttnProcessor,
33
+ FusedAttnProcessor2_0,
34
+ )
35
+ from diffusers.models.embeddings import (
36
+ GaussianFourierProjection,
37
+ GLIGENTextBoundingboxProjection,
38
+ ImageHintTimeEmbedding,
39
+ ImageProjection,
40
+ ImageTimeEmbedding,
41
+ TextImageProjection,
42
+ TextImageTimeEmbedding,
43
+ TextTimeEmbedding,
44
+ TimestepEmbedding,
45
+ Timesteps,
46
+ )
47
+ from diffusers.models.modeling_utils import ModelMixin
48
+ from models.unet_2d_blocks import (
49
+ get_down_block,
50
+ get_mid_block,
51
+ get_up_block,
52
+ )
53
+
54
+
55
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
56
+
57
+
58
+ @dataclass
59
+ class UNet2DConditionOutput(BaseOutput):
60
+ """
61
+ The output of [`UNet2DConditionModel`].
62
+
63
+ Args:
64
+ sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
65
+ The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
66
+ """
67
+
68
+ sample: torch.Tensor = None
69
+
70
+
71
+ class UNet2DLoRAConditionModel(
72
+ ModelMixin, ConfigMixin, FromOriginalModelMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin
73
+ ):
74
+ r"""
75
+ A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
76
+ shaped output.
77
+
78
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
79
+ for all models (such as downloading or saving).
80
+
81
+ Parameters:
82
+ sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
83
+ Height and width of input/output sample.
84
+ in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
85
+ out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
86
+ center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
87
+ flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
88
+ Whether to flip the sin to cos in the time embedding.
89
+ freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
90
+ down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
91
+ The tuple of downsample blocks to use.
92
+ mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
93
+ Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or
94
+ `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
95
+ up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
96
+ The tuple of upsample blocks to use.
97
+ only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):
98
+ Whether to include self-attention in the basic transformer blocks, see
99
+ [`~models.attention.BasicTransformerBlock`].
100
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
101
+ The tuple of output channels for each block.
102
+ layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
103
+ downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
104
+ mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
105
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
106
+ act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
107
+ norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
108
+ If `None`, normalization and activation layers are skipped in post-processing.
109
+ norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
110
+ cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
111
+ The dimension of the cross attention features.
112
+ transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1):
113
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
114
+ [`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
115
+ [`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
116
+ reverse_transformer_layers_per_block : (`Tuple[Tuple]`, *optional*, defaults to None):
117
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
118
+ blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
119
+ [`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
120
+ [`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
121
+ encoder_hid_dim (`int`, *optional*, defaults to None):
122
+ If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
123
+ dimension to `cross_attention_dim`.
124
+ encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
125
+ If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
126
+ embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`.
127
+ attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
128
+ num_attention_heads (`int`, *optional*):
129
+ The number of attention heads. If not defined, defaults to `attention_head_dim`
130
+ resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
131
+ for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
132
+ class_embed_type (`str`, *optional*, defaults to `None`):
133
+ The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
134
+ `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
135
+ addition_embed_type (`str`, *optional*, defaults to `None`):
136
+ Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
137
+ "text". "text" will use the `TextTimeEmbedding` layer.
138
+ addition_time_embed_dim: (`int`, *optional*, defaults to `None`):
139
+ Dimension for the timestep embeddings.
140
+ num_class_embeds (`int`, *optional*, defaults to `None`):
141
+ Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
142
+ class conditioning with `class_embed_type` equal to `None`.
143
+ time_embedding_type (`str`, *optional*, defaults to `positional`):
144
+ The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
145
+ time_embedding_dim (`int`, *optional*, defaults to `None`):
146
+ An optional override for the dimension of the projected time embedding.
147
+ time_embedding_act_fn (`str`, *optional*, defaults to `None`):
148
+ Optional activation function to use only once on the time embeddings before they are passed to the rest of
149
+ the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
150
+ timestep_post_act (`str`, *optional*, defaults to `None`):
151
+ The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
152
+ time_cond_proj_dim (`int`, *optional*, defaults to `None`):
153
+ The dimension of `cond_proj` layer in the timestep embedding.
154
+ conv_in_kernel (`int`, *optional*, defaults to `3`): The kernel size of `conv_in` layer.
+ conv_out_kernel (`int`, *optional*, defaults to `3`): The kernel size of `conv_out` layer.
156
+ projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
157
+ `class_embed_type="projection"`. Required when `class_embed_type="projection"`.
158
+ class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
159
+ embeddings with the class embeddings.
160
+ mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
161
+ Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
162
+ `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
163
+ `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False`
164
+ otherwise.
165
+ """
166
+
167
+ _supports_gradient_checkpointing = True
168
+ _no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D", "CrossAttnUpBlock2D"]
169
+
170
+ @register_to_config
171
+ def __init__(
172
+ self,
173
+ sample_size: Optional[Union[int, Tuple[int, int]]] = None,
174
+ in_channels: int = 4,
175
+ out_channels: int = 4,
176
+ center_input_sample: bool = False,
177
+ flip_sin_to_cos: bool = True,
178
+ freq_shift: int = 0,
179
+ down_block_types: Tuple[str] = (
180
+ "CrossAttnDownBlock2D",
181
+ "CrossAttnDownBlock2D",
182
+ "CrossAttnDownBlock2D",
183
+ "DownBlock2D",
184
+ ),
185
+ mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
186
+ up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
187
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
188
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
189
+ layers_per_block: Union[int, Tuple[int]] = 2,
190
+ downsample_padding: int = 1,
191
+ mid_block_scale_factor: float = 1,
192
+ dropout: float = 0.0,
193
+ act_fn: str = "silu",
194
+ norm_num_groups: Optional[int] = 32,
195
+ norm_eps: float = 1e-5,
196
+ cross_attention_dim: Union[int, Tuple[int]] = 1280,
197
+ transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
198
+ reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
199
+ encoder_hid_dim: Optional[int] = None,
200
+ encoder_hid_dim_type: Optional[str] = None,
201
+ attention_head_dim: Union[int, Tuple[int]] = 8,
202
+ num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
203
+ dual_cross_attention: bool = False,
204
+ use_linear_projection: bool = False,
205
+ class_embed_type: Optional[str] = None,
206
+ addition_embed_type: Optional[str] = None,
207
+ addition_time_embed_dim: Optional[int] = None,
208
+ num_class_embeds: Optional[int] = None,
209
+ upcast_attention: bool = False,
210
+ resnet_time_scale_shift: str = "default",
211
+ resnet_skip_time_act: bool = False,
212
+ resnet_out_scale_factor: float = 1.0,
213
+ time_embedding_type: str = "positional",
214
+ time_embedding_dim: Optional[int] = None,
215
+ time_embedding_act_fn: Optional[str] = None,
216
+ timestep_post_act: Optional[str] = None,
217
+ time_cond_proj_dim: Optional[int] = None,
218
+ conv_in_kernel: int = 3,
219
+ conv_out_kernel: int = 3,
220
+ projection_class_embeddings_input_dim: Optional[int] = None,
221
+ attention_type: str = "default",
222
+ class_embeddings_concat: bool = False,
223
+ mid_block_only_cross_attention: Optional[bool] = None,
224
+ cross_attention_norm: Optional[str] = None,
225
+ addition_embed_type_num_heads: int = 64,
226
+ ):
227
+ super().__init__()
228
+
229
+ self.sample_size = sample_size
230
+
231
+ if num_attention_heads is not None:
232
+ raise ValueError(
233
+ "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
234
+ )
235
+
236
+ # If `num_attention_heads` is not defined (which is the case for most models)
237
+ # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
238
+ # The reason for this behavior is to correct for incorrectly named variables that were introduced
239
+ # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
240
+ # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
241
+ # which is why we correct for the naming here.
242
+ num_attention_heads = num_attention_heads or attention_head_dim
243
+
244
+ # Check inputs
245
+ self._check_config(
246
+ down_block_types=down_block_types,
247
+ up_block_types=up_block_types,
248
+ only_cross_attention=only_cross_attention,
249
+ block_out_channels=block_out_channels,
250
+ layers_per_block=layers_per_block,
251
+ cross_attention_dim=cross_attention_dim,
252
+ transformer_layers_per_block=transformer_layers_per_block,
253
+ reverse_transformer_layers_per_block=reverse_transformer_layers_per_block,
254
+ attention_head_dim=attention_head_dim,
255
+ num_attention_heads=num_attention_heads,
256
+ )
257
+
258
+ # input
259
+ conv_in_padding = (conv_in_kernel - 1) // 2
260
+ self.conv_in = nn.Conv2d(
261
+ in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
262
+ )
263
+
264
+ # time
265
+ time_embed_dim, timestep_input_dim = self._set_time_proj(
266
+ time_embedding_type,
267
+ block_out_channels=block_out_channels,
268
+ flip_sin_to_cos=flip_sin_to_cos,
269
+ freq_shift=freq_shift,
270
+ time_embedding_dim=time_embedding_dim,
271
+ )
272
+
273
+ self.time_embedding = TimestepEmbedding(
274
+ timestep_input_dim,
275
+ time_embed_dim,
276
+ act_fn=act_fn,
277
+ post_act_fn=timestep_post_act,
278
+ cond_proj_dim=time_cond_proj_dim,
279
+ )
280
+
281
+ self._set_encoder_hid_proj(
282
+ encoder_hid_dim_type,
283
+ cross_attention_dim=cross_attention_dim,
284
+ encoder_hid_dim=encoder_hid_dim,
285
+ )
286
+
287
+ # class embedding
288
+ self._set_class_embedding(
289
+ class_embed_type,
290
+ act_fn=act_fn,
291
+ num_class_embeds=num_class_embeds,
292
+ projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
293
+ time_embed_dim=time_embed_dim,
294
+ timestep_input_dim=timestep_input_dim,
295
+ )
296
+
297
+ self._set_add_embedding(
298
+ addition_embed_type,
299
+ addition_embed_type_num_heads=addition_embed_type_num_heads,
300
+ addition_time_embed_dim=addition_time_embed_dim,
301
+ cross_attention_dim=cross_attention_dim,
302
+ encoder_hid_dim=encoder_hid_dim,
303
+ flip_sin_to_cos=flip_sin_to_cos,
304
+ freq_shift=freq_shift,
305
+ projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
306
+ time_embed_dim=time_embed_dim,
307
+ )
308
+
309
+ if time_embedding_act_fn is None:
310
+ self.time_embed_act = None
311
+ else:
312
+ self.time_embed_act = get_activation(time_embedding_act_fn)
313
+
314
+ self.down_blocks = nn.ModuleList([])
315
+ self.up_blocks = nn.ModuleList([])
316
+
317
+ if isinstance(only_cross_attention, bool):
318
+ if mid_block_only_cross_attention is None:
319
+ mid_block_only_cross_attention = only_cross_attention
320
+
321
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
322
+
323
+ if mid_block_only_cross_attention is None:
324
+ mid_block_only_cross_attention = False
325
+
326
+ if isinstance(num_attention_heads, int):
327
+ num_attention_heads = (num_attention_heads,) * len(down_block_types)
328
+
329
+ if isinstance(attention_head_dim, int):
330
+ attention_head_dim = (attention_head_dim,) * len(down_block_types)
331
+
332
+ if isinstance(cross_attention_dim, int):
333
+ cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
334
+
335
+ if isinstance(layers_per_block, int):
336
+ layers_per_block = [layers_per_block] * len(down_block_types)
337
+
338
+ if isinstance(transformer_layers_per_block, int):
339
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
340
+
341
+ if class_embeddings_concat:
342
+ # The time embeddings are concatenated with the class embeddings. The dimension of the
343
+ # time embeddings passed to the down, middle, and up blocks is twice the dimension of the
344
+ # regular time embeddings
345
+ blocks_time_embed_dim = time_embed_dim * 2
346
+ else:
347
+ blocks_time_embed_dim = time_embed_dim
348
+
349
+ # down
350
+ output_channel = block_out_channels[0]
351
+ for i, down_block_type in enumerate(down_block_types):
352
+ input_channel = output_channel
353
+ output_channel = block_out_channels[i]
354
+ is_final_block = i == len(block_out_channels) - 1
355
+
356
+ down_block = get_down_block(
357
+ down_block_type,
358
+ num_layers=layers_per_block[i],
359
+ transformer_layers_per_block=transformer_layers_per_block[i],
360
+ in_channels=input_channel,
361
+ out_channels=output_channel,
362
+ temb_channels=blocks_time_embed_dim,
363
+ add_downsample=not is_final_block,
364
+ resnet_eps=norm_eps,
365
+ resnet_act_fn=act_fn,
366
+ resnet_groups=norm_num_groups,
367
+ cross_attention_dim=cross_attention_dim[i],
368
+ num_attention_heads=num_attention_heads[i],
369
+ downsample_padding=downsample_padding,
370
+ dual_cross_attention=dual_cross_attention,
371
+ use_linear_projection=use_linear_projection,
372
+ only_cross_attention=only_cross_attention[i],
373
+ upcast_attention=upcast_attention,
374
+ resnet_time_scale_shift=resnet_time_scale_shift,
375
+ attention_type=attention_type,
376
+ resnet_skip_time_act=resnet_skip_time_act,
377
+ resnet_out_scale_factor=resnet_out_scale_factor,
378
+ cross_attention_norm=cross_attention_norm,
379
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
380
+ dropout=dropout,
381
+ )
382
+ self.down_blocks.append(down_block)
383
+
384
+ # mid
385
+ self.mid_block = get_mid_block(
386
+ mid_block_type,
387
+ temb_channels=blocks_time_embed_dim,
388
+ in_channels=block_out_channels[-1],
389
+ resnet_eps=norm_eps,
390
+ resnet_act_fn=act_fn,
391
+ resnet_groups=norm_num_groups,
392
+ output_scale_factor=mid_block_scale_factor,
393
+ transformer_layers_per_block=transformer_layers_per_block[-1],
394
+ num_attention_heads=num_attention_heads[-1],
395
+ cross_attention_dim=cross_attention_dim[-1],
396
+ dual_cross_attention=dual_cross_attention,
397
+ use_linear_projection=use_linear_projection,
398
+ mid_block_only_cross_attention=mid_block_only_cross_attention,
399
+ upcast_attention=upcast_attention,
400
+ resnet_time_scale_shift=resnet_time_scale_shift,
401
+ attention_type=attention_type,
402
+ resnet_skip_time_act=resnet_skip_time_act,
403
+ cross_attention_norm=cross_attention_norm,
404
+ attention_head_dim=attention_head_dim[-1],
405
+ dropout=dropout,
406
+ )
407
+
408
+ # count how many layers upsample the images
409
+ self.num_upsamplers = 0
410
+
411
+ # up
412
+ reversed_block_out_channels = list(reversed(block_out_channels))
413
+ reversed_num_attention_heads = list(reversed(num_attention_heads))
414
+ reversed_layers_per_block = list(reversed(layers_per_block))
415
+ reversed_cross_attention_dim = list(reversed(cross_attention_dim))
416
+ reversed_transformer_layers_per_block = (
417
+ list(reversed(transformer_layers_per_block))
418
+ if reverse_transformer_layers_per_block is None
419
+ else reverse_transformer_layers_per_block
420
+ )
421
+ only_cross_attention = list(reversed(only_cross_attention))
422
+
423
+ output_channel = reversed_block_out_channels[0]
424
+ for i, up_block_type in enumerate(up_block_types):
425
+ is_final_block = i == len(block_out_channels) - 1
426
+
427
+ prev_output_channel = output_channel
428
+ output_channel = reversed_block_out_channels[i]
429
+ input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
430
+
431
+ # add upsample block for all BUT final layer
432
+ if not is_final_block:
433
+ add_upsample = True
434
+ self.num_upsamplers += 1
435
+ else:
436
+ add_upsample = False
437
+
438
+ up_block = get_up_block(
439
+ up_block_type,
440
+ num_layers=reversed_layers_per_block[i] + 1,
441
+ transformer_layers_per_block=reversed_transformer_layers_per_block[i],
442
+ in_channels=input_channel,
443
+ out_channels=output_channel,
444
+ prev_output_channel=prev_output_channel,
445
+ temb_channels=blocks_time_embed_dim,
446
+ add_upsample=add_upsample,
447
+ resnet_eps=norm_eps,
448
+ resnet_act_fn=act_fn,
449
+ resolution_idx=i,
450
+ resnet_groups=norm_num_groups,
451
+ cross_attention_dim=reversed_cross_attention_dim[i],
452
+ num_attention_heads=reversed_num_attention_heads[i],
453
+ dual_cross_attention=dual_cross_attention,
454
+ use_linear_projection=use_linear_projection,
455
+ only_cross_attention=only_cross_attention[i],
456
+ upcast_attention=upcast_attention,
457
+ resnet_time_scale_shift=resnet_time_scale_shift,
458
+ attention_type=attention_type,
459
+ resnet_skip_time_act=resnet_skip_time_act,
460
+ resnet_out_scale_factor=resnet_out_scale_factor,
461
+ cross_attention_norm=cross_attention_norm,
462
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
463
+ dropout=dropout,
464
+ )
465
+ self.up_blocks.append(up_block)
466
+
467
+ # out
468
+ if norm_num_groups is not None:
469
+ self.conv_norm_out = nn.GroupNorm(
470
+ num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
471
+ )
472
+
473
+ self.conv_act = get_activation(act_fn)
474
+
475
+ else:
476
+ self.conv_norm_out = None
477
+ self.conv_act = None
478
+
479
+ conv_out_padding = (conv_out_kernel - 1) // 2
480
+ self.conv_out = nn.Conv2d(
481
+ block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
482
+ )
483
+
484
+ self._set_pos_net_if_use_gligen(attention_type=attention_type, cross_attention_dim=cross_attention_dim)
485
+
486
+ def _check_config(
487
+ self,
488
+ down_block_types: Tuple[str],
489
+ up_block_types: Tuple[str],
490
+ only_cross_attention: Union[bool, Tuple[bool]],
491
+ block_out_channels: Tuple[int],
492
+ layers_per_block: Union[int, Tuple[int]],
493
+ cross_attention_dim: Union[int, Tuple[int]],
494
+ transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple[int]]],
495
+ reverse_transformer_layers_per_block: bool,
496
+ attention_head_dim: int,
497
+ num_attention_heads: Optional[Union[int, Tuple[int]]],
498
+ ):
499
+ if len(down_block_types) != len(up_block_types):
500
+ raise ValueError(
501
+ f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
502
+ )
503
+
504
+ if len(block_out_channels) != len(down_block_types):
505
+ raise ValueError(
506
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
507
+ )
508
+
509
+ if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
510
+ raise ValueError(
511
+ f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
512
+ )
513
+
514
+ if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
515
+ raise ValueError(
516
+ f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
517
+ )
518
+
519
+ if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
520
+ raise ValueError(
521
+ f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
522
+ )
523
+
524
+ if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
525
+ raise ValueError(
526
+ f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
527
+ )
528
+
529
+ if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
530
+ raise ValueError(
531
+ f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
532
+ )
533
+ if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None:
534
+ for layer_number_per_block in transformer_layers_per_block:
535
+ if isinstance(layer_number_per_block, list):
536
+ raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.")
537
+
538
+ def _set_time_proj(
539
+ self,
540
+ time_embedding_type: str,
541
+ block_out_channels: int,
542
+ flip_sin_to_cos: bool,
543
+ freq_shift: float,
544
+ time_embedding_dim: int,
545
+ ) -> Tuple[int, int]:
546
+ if time_embedding_type == "fourier":
547
+ time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
548
+ if time_embed_dim % 2 != 0:
549
+ raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
550
+ self.time_proj = GaussianFourierProjection(
551
+ time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
552
+ )
553
+ timestep_input_dim = time_embed_dim
554
+ elif time_embedding_type == "positional":
555
+ time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
556
+
557
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
558
+ timestep_input_dim = block_out_channels[0]
559
+ else:
560
+ raise ValueError(
561
+ f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
562
+ )
563
+
564
+ return time_embed_dim, timestep_input_dim
565
+
566
+ def _set_encoder_hid_proj(
567
+ self,
568
+ encoder_hid_dim_type: Optional[str],
569
+ cross_attention_dim: Union[int, Tuple[int]],
570
+ encoder_hid_dim: Optional[int],
571
+ ):
572
+ if encoder_hid_dim_type is None and encoder_hid_dim is not None:
573
+ encoder_hid_dim_type = "text_proj"
574
+ self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
575
+ logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
576
+
577
+ if encoder_hid_dim is None and encoder_hid_dim_type is not None:
578
+ raise ValueError(
579
+ f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
580
+ )
581
+
582
+ if encoder_hid_dim_type == "text_proj":
583
+ self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
584
+ elif encoder_hid_dim_type == "text_image_proj":
585
+ # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
586
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
587
+ # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)`
588
+ self.encoder_hid_proj = TextImageProjection(
589
+ text_embed_dim=encoder_hid_dim,
590
+ image_embed_dim=cross_attention_dim,
591
+ cross_attention_dim=cross_attention_dim,
592
+ )
593
+ elif encoder_hid_dim_type == "image_proj":
594
+ # Kandinsky 2.2
595
+ self.encoder_hid_proj = ImageProjection(
596
+ image_embed_dim=encoder_hid_dim,
597
+ cross_attention_dim=cross_attention_dim,
598
+ )
599
+ elif encoder_hid_dim_type is not None:
600
+ raise ValueError(
601
+ f"`encoder_hid_dim_type`: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj', or 'image_proj'."
602
+ )
603
+ else:
604
+ self.encoder_hid_proj = None
605
+
606
+ def _set_class_embedding(
607
+ self,
608
+ class_embed_type: Optional[str],
609
+ act_fn: str,
610
+ num_class_embeds: Optional[int],
611
+ projection_class_embeddings_input_dim: Optional[int],
612
+ time_embed_dim: int,
613
+ timestep_input_dim: int,
614
+ ):
615
+ if class_embed_type is None and num_class_embeds is not None:
616
+ self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
617
+ elif class_embed_type == "timestep":
618
+ self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
619
+ elif class_embed_type == "identity":
620
+ self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
621
+ elif class_embed_type == "projection":
622
+ if projection_class_embeddings_input_dim is None:
623
+ raise ValueError(
624
+ "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
625
+ )
626
+ # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
627
+ # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
628
+ # 2. it projects from an arbitrary input dimension.
629
+ #
630
+ # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
631
+ # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
632
+ # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
633
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
634
+ elif class_embed_type == "simple_projection":
635
+ if projection_class_embeddings_input_dim is None:
636
+ raise ValueError(
637
+ "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
638
+ )
639
+ self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
640
+ else:
641
+ self.class_embedding = None
642
+
643
+ def _set_add_embedding(
644
+ self,
645
+ addition_embed_type: str,
646
+ addition_embed_type_num_heads: int,
647
+ addition_time_embed_dim: Optional[int],
648
+ flip_sin_to_cos: bool,
649
+ freq_shift: float,
650
+ cross_attention_dim: Optional[int],
651
+ encoder_hid_dim: Optional[int],
652
+ projection_class_embeddings_input_dim: Optional[int],
653
+ time_embed_dim: int,
654
+ ):
655
+ if addition_embed_type == "text":
656
+ if encoder_hid_dim is not None:
657
+ text_time_embedding_from_dim = encoder_hid_dim
658
+ else:
659
+ text_time_embedding_from_dim = cross_attention_dim
660
+
661
+ self.add_embedding = TextTimeEmbedding(
662
+ text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
663
+ )
664
+ elif addition_embed_type == "text_image":
665
+ # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
666
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
667
+ # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)`
668
+ self.add_embedding = TextImageTimeEmbedding(
669
+ text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
670
+ )
671
+ elif addition_embed_type == "text_time":
672
+ self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
673
+ self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
674
+ elif addition_embed_type == "image":
675
+ # Kandinsky 2.2
676
+ self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
677
+ elif addition_embed_type == "image_hint":
678
+ # Kandinsky 2.2 ControlNet
679
+ self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
680
+ elif addition_embed_type is not None:
681
+ raise ValueError(
682
+ f"`addition_embed_type`: {addition_embed_type} must be None, 'text', 'text_image', 'text_time', 'image', or 'image_hint'."
683
+ )
684
+
685
+ def _set_pos_net_if_use_gligen(self, attention_type: str, cross_attention_dim: int):
686
+ if attention_type in ["gated", "gated-text-image"]:
687
+ positive_len = 768
688
+ if isinstance(cross_attention_dim, int):
689
+ positive_len = cross_attention_dim
690
+ elif isinstance(cross_attention_dim, (list, tuple)):
691
+ positive_len = cross_attention_dim[0]
692
+
693
+ feature_type = "text-only" if attention_type == "gated" else "text-image"
694
+ self.position_net = GLIGENTextBoundingboxProjection(
695
+ positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type
696
+ )
697
+
698
+ @property
699
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
700
+ r"""
701
+ Returns:
702
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
703
+ indexed by their weight names.
704
+ """
705
+ # set recursively
706
+ processors = {}
707
+
708
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
709
+ if hasattr(module, "get_processor"):
710
+ processors[f"{name}.processor"] = module.get_processor()
711
+
712
+ for sub_name, child in module.named_children():
713
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
714
+
715
+ return processors
716
+
717
+ for name, module in self.named_children():
718
+ fn_recursive_add_processors(name, module, processors)
719
+
720
+ return processors
721
+
722
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
723
+ r"""
724
+ Sets the attention processor to use to compute attention.
725
+
726
+ Parameters:
727
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
728
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
729
+ for **all** `Attention` layers.
730
+
731
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
732
+ processor. This is strongly recommended when setting trainable attention processors.
733
+
734
+ """
735
+ count = len(self.attn_processors.keys())
736
+
737
+ if isinstance(processor, dict) and len(processor) != count:
738
+ raise ValueError(
739
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
740
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
741
+ )
742
+
743
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
744
+ if hasattr(module, "set_processor"):
745
+ if not isinstance(processor, dict):
746
+ module.set_processor(processor)
747
+ else:
748
+ module.set_processor(processor.pop(f"{name}.processor"))
749
+
750
+ for sub_name, child in module.named_children():
751
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
752
+
753
+ for name, module in self.named_children():
754
+ fn_recursive_attn_processor(name, module, processor)
755
+
756
+ def set_default_attn_processor(self):
757
+ """
758
+ Disables custom attention processors and sets the default attention implementation.
759
+ """
760
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
761
+ processor = AttnAddedKVProcessor()
762
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
763
+ processor = AttnProcessor()
764
+ else:
765
+ raise ValueError(
766
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
767
+ )
768
+
769
+ self.set_attn_processor(processor)
770
+
771
+ def set_attention_slice(self, slice_size: Union[str, int, List[int]] = "auto"):
772
+ r"""
773
+ Enable sliced attention computation.
774
+
775
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
776
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
777
+
778
+ Args:
779
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
780
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
781
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
782
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
783
+ must be a multiple of `slice_size`.
784
+ """
785
+ sliceable_head_dims = []
786
+
787
+ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
788
+ if hasattr(module, "set_attention_slice"):
789
+ sliceable_head_dims.append(module.sliceable_head_dim)
790
+
791
+ for child in module.children():
792
+ fn_recursive_retrieve_sliceable_dims(child)
793
+
794
+ # retrieve number of attention layers
795
+ for module in self.children():
796
+ fn_recursive_retrieve_sliceable_dims(module)
797
+
798
+ num_sliceable_layers = len(sliceable_head_dims)
799
+
800
+ if slice_size == "auto":
801
+ # half the attention head size is usually a good trade-off between
802
+ # speed and memory
803
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
804
+ elif slice_size == "max":
805
+ # make smallest slice possible
806
+ slice_size = num_sliceable_layers * [1]
807
+
808
+ slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
809
+
810
+ if len(slice_size) != len(sliceable_head_dims):
811
+ raise ValueError(
812
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
813
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
814
+ )
815
+
816
+ for i in range(len(slice_size)):
817
+ size = slice_size[i]
818
+ dim = sliceable_head_dims[i]
819
+ if size is not None and size > dim:
820
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
821
+
822
+ # Recursively walk through all the children.
823
+ # Any children which exposes the set_attention_slice method
824
+ # gets the message
825
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
826
+ if hasattr(module, "set_attention_slice"):
827
+ module.set_attention_slice(slice_size.pop())
828
+
829
+ for child in module.children():
830
+ fn_recursive_set_attention_slice(child, slice_size)
831
+
832
+ reversed_slice_size = list(reversed(slice_size))
833
+ for module in self.children():
834
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
835
+
836
+ def _set_gradient_checkpointing(self, module, value=False):
837
+ if hasattr(module, "gradient_checkpointing"):
838
+ module.gradient_checkpointing = value
839
+
840
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
841
+ r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
842
+
843
+ The suffixes after the scaling factors represent the stage blocks where they are being applied.
844
+
845
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
846
+ are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
847
+
848
+ Args:
849
+ s1 (`float`):
850
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
851
+ mitigate the "oversmoothing effect" in the enhanced denoising process.
852
+ s2 (`float`):
853
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
854
+ mitigate the "oversmoothing effect" in the enhanced denoising process.
855
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
856
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
857
+ """
858
+ for i, upsample_block in enumerate(self.up_blocks):
859
+ setattr(upsample_block, "s1", s1)
860
+ setattr(upsample_block, "s2", s2)
861
+ setattr(upsample_block, "b1", b1)
862
+ setattr(upsample_block, "b2", b2)
863
+
864
+ def disable_freeu(self):
865
+ """Disables the FreeU mechanism."""
866
+ freeu_keys = {"s1", "s2", "b1", "b2"}
867
+ for i, upsample_block in enumerate(self.up_blocks):
868
+ for k in freeu_keys:
869
+ if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
870
+ setattr(upsample_block, k, None)
871
+
872
+ def fuse_qkv_projections(self):
873
+ """
874
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
875
+ are fused. For cross-attention modules, key and value projection matrices are fused.
876
+
877
+ <Tip warning={true}>
878
+
879
+ This API is 🧪 experimental.
880
+
881
+ </Tip>
882
+ """
883
+ self.original_attn_processors = None
884
+
885
+ for _, attn_processor in self.attn_processors.items():
886
+ if "Added" in str(attn_processor.__class__.__name__):
887
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
888
+
889
+ self.original_attn_processors = self.attn_processors
890
+
891
+ for module in self.modules():
892
+ if isinstance(module, Attention):
893
+ module.fuse_projections(fuse=True)
894
+
895
+ self.set_attn_processor(FusedAttnProcessor2_0())
896
+
897
+ def unfuse_qkv_projections(self):
898
+ """Disables the fused QKV projection if enabled.
899
+
900
+ <Tip warning={true}>
901
+
902
+ This API is 🧪 experimental.
903
+
904
+ </Tip>
905
+
906
+ """
907
+ if self.original_attn_processors is not None:
908
+ self.set_attn_processor(self.original_attn_processors)
909
+
910
+ def get_time_embed(
911
+ self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int]
912
+ ) -> Optional[torch.Tensor]:
913
+ timesteps = timestep
914
+ if not torch.is_tensor(timesteps):
915
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
916
+ # This would be a good case for the `match` statement (Python 3.10+)
917
+ is_mps = sample.device.type == "mps"
918
+ if isinstance(timestep, float):
919
+ dtype = torch.float32 if is_mps else torch.float64
920
+ else:
921
+ dtype = torch.int32 if is_mps else torch.int64
922
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
923
+ elif len(timesteps.shape) == 0:
924
+ timesteps = timesteps[None].to(sample.device)
925
+
926
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
927
+ timesteps = timesteps.expand(sample.shape[0])
928
+
929
+ t_emb = self.time_proj(timesteps)
930
+ # `Timesteps` does not contain any weights and will always return f32 tensors
931
+ # but time_embedding might actually be running in fp16. so we need to cast here.
932
+ # there might be better ways to encapsulate this.
933
+ t_emb = t_emb.to(dtype=sample.dtype)
934
+ return t_emb
935
+
936
+ def get_class_embed(self, sample: torch.Tensor, class_labels: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
937
+ class_emb = None
938
+ if self.class_embedding is not None:
939
+ if class_labels is None:
940
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
941
+
942
+ if self.config.class_embed_type == "timestep":
943
+ class_labels = self.time_proj(class_labels)
944
+
945
+ # `Timesteps` does not contain any weights and will always return f32 tensors
946
+ # there might be better ways to encapsulate this.
947
+ class_labels = class_labels.to(dtype=sample.dtype)
948
+
949
+ class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
950
+ return class_emb
951
+
952
+ def get_aug_embed(
953
+ self, emb: torch.Tensor, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
954
+ ) -> Optional[torch.Tensor]:
955
+ aug_emb = None
956
+ if self.config.addition_embed_type == "text":
957
+ aug_emb = self.add_embedding(encoder_hidden_states)
958
+ elif self.config.addition_embed_type == "text_image":
959
+ # Kandinsky 2.1 - style
960
+ if "image_embeds" not in added_cond_kwargs:
961
+ raise ValueError(
962
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
963
+ )
964
+
965
+ image_embs = added_cond_kwargs.get("image_embeds")
966
+ text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
967
+ aug_emb = self.add_embedding(text_embs, image_embs)
968
+ elif self.config.addition_embed_type == "text_time":
969
+ # SDXL - style
970
+ if "text_embeds" not in added_cond_kwargs:
971
+ raise ValueError(
972
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
973
+ )
974
+ text_embeds = added_cond_kwargs.get("text_embeds")
975
+ if "time_ids" not in added_cond_kwargs:
976
+ raise ValueError(
977
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
978
+ )
979
+ time_ids = added_cond_kwargs.get("time_ids")
980
+ time_embeds = self.add_time_proj(time_ids.flatten())
981
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
982
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
983
+ add_embeds = add_embeds.to(emb.dtype)
984
+ aug_emb = self.add_embedding(add_embeds)
985
+ elif self.config.addition_embed_type == "image":
986
+ # Kandinsky 2.2 - style
987
+ if "image_embeds" not in added_cond_kwargs:
988
+ raise ValueError(
989
+ f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
990
+ )
991
+ image_embs = added_cond_kwargs.get("image_embeds")
992
+ aug_emb = self.add_embedding(image_embs)
993
+ elif self.config.addition_embed_type == "image_hint":
994
+ # Kandinsky 2.2 ControlNet - style
995
+ if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
996
+ raise ValueError(
997
+ f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
998
+ )
999
+ image_embs = added_cond_kwargs.get("image_embeds")
1000
+ hint = added_cond_kwargs.get("hint")
1001
+ aug_emb = self.add_embedding(image_embs, hint)
1002
+ return aug_emb
1003
+
1004
+ def process_encoder_hidden_states(
1005
+ self, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
1006
+ ) -> torch.Tensor:
1007
+ if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
1008
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
1009
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
1010
+ # Kandinsky 2.1 - style
1011
+ if "image_embeds" not in added_cond_kwargs:
1012
+ raise ValueError(
1013
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
1014
+ )
1015
+
1016
+ image_embeds = added_cond_kwargs.get("image_embeds")
1017
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
1018
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
1019
+ # Kandinsky 2.2 - style
1020
+ if "image_embeds" not in added_cond_kwargs:
1021
+ raise ValueError(
1022
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
1023
+ )
1024
+ image_embeds = added_cond_kwargs.get("image_embeds")
1025
+ encoder_hidden_states = self.encoder_hid_proj(image_embeds)
1026
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj":
1027
+ if "image_embeds" not in added_cond_kwargs:
1028
+ raise ValueError(
1029
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
1030
+ )
1031
+
1032
+ if hasattr(self, "text_encoder_hid_proj") and self.text_encoder_hid_proj is not None:
1033
+ encoder_hidden_states = self.text_encoder_hid_proj(encoder_hidden_states)
1034
+
1035
+ image_embeds = added_cond_kwargs.get("image_embeds")
1036
+ image_embeds = self.encoder_hid_proj(image_embeds)
1037
+ encoder_hidden_states = (encoder_hidden_states, image_embeds)
1038
+ return encoder_hidden_states
1039
+
1040
+ def forward(
1041
+ self,
1042
+ sample: torch.Tensor,
1043
+ timestep: Union[torch.Tensor, float, int],
1044
+ encoder_hidden_states: torch.Tensor,
1045
+ encoder_lora_states: torch.Tensor,
1046
+ class_labels: Optional[torch.Tensor] = None,
1047
+ timestep_cond: Optional[torch.Tensor] = None,
1048
+ attention_mask: Optional[torch.Tensor] = None,
1049
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1050
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
1051
+ down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
1052
+ mid_block_additional_residual: Optional[torch.Tensor] = None,
1053
+ down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
1054
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1055
+ return_dict: bool = True,
1056
+ ) -> Union[UNet2DConditionOutput, Tuple]:
1057
+ r"""
1058
+ The [`UNet2DConditionModel`] forward method.
1059
+
1060
+ Args:
1061
+ sample (`torch.Tensor`):
1062
+ The noisy input tensor with the following shape `(batch, channel, height, width)`.
1063
+ timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input.
1064
+ encoder_hidden_states (`torch.Tensor`):
1065
+ The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
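+ encoder_lora_states (`torch.Tensor`):
+ The extra conditioning tokens (e.g. the learned emotion prompts from `models/visual_prompts.py`) that are
+ forwarded alongside `encoder_hidden_states` to the cross-attention down, mid, and up blocks.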
1066
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
1067
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
1068
+ timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
1069
+ Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
1070
+ through the `self.time_embedding` layer to obtain the timestep embeddings.
1071
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
1072
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
1073
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
1074
+ negative values to the attention scores corresponding to "discard" tokens.
1075
+ cross_attention_kwargs (`dict`, *optional*):
1076
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1077
+ `self.processor` in
1078
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1079
+ added_cond_kwargs: (`dict`, *optional*):
1080
+ A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
1081
+ are passed along to the UNet blocks.
1082
+ down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
1083
+ A tuple of tensors that if specified are added to the residuals of down unet blocks.
1084
+ mid_block_additional_residual: (`torch.Tensor`, *optional*):
1085
+ A tensor that if specified is added to the residual of the middle unet block.
1086
+ down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
1087
+ additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
1088
+ encoder_attention_mask (`torch.Tensor`):
1089
+ A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
1090
+ `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
1091
+ which adds large negative values to the attention scores corresponding to "discard" tokens.
1092
+ return_dict (`bool`, *optional*, defaults to `True`):
1093
+ Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
1094
+ tuple.
1095
+
1096
+ Returns:
1097
+ [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
1098
+ If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned,
1099
+ otherwise a `tuple` is returned where the first element is the sample tensor.
1100
+ """
1101
+ # By default samples have to be AT least a multiple of the overall upsampling factor.
1102
+ # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
1103
+ # However, the upsampling interpolation output size can be forced to fit any upsampling size
1104
+ # on the fly if necessary.
1105
+ default_overall_up_factor = 2**self.num_upsamplers
1106
+
1107
+ # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
1108
+ forward_upsample_size = False
1109
+ upsample_size = None
1110
+
1111
+ for dim in sample.shape[-2:]:
1112
+ if dim % default_overall_up_factor != 0:
1113
+ # Forward upsample size to force interpolation output size.
1114
+ forward_upsample_size = True
1115
+ break
1116
+
1117
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
1118
+ # expects mask of shape:
1119
+ # [batch, key_tokens]
1120
+ # adds singleton query_tokens dimension:
1121
+ # [batch, 1, key_tokens]
1122
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
1123
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
1124
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
1125
+ if attention_mask is not None:
1126
+ # assume that mask is expressed as:
1127
+ # (1 = keep, 0 = discard)
1128
+ # convert mask into a bias that can be added to attention scores:
1129
+ # (keep = +0, discard = -10000.0)
1130
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
1131
+ attention_mask = attention_mask.unsqueeze(1)
1132
+
1133
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
1134
+ if encoder_attention_mask is not None:
1135
+ encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
1136
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
1137
+
1138
+ # 0. center input if necessary
1139
+ if self.config.center_input_sample:
1140
+ sample = 2 * sample - 1.0
1141
+
1142
+ # 1. time
1143
+ t_emb = self.get_time_embed(sample=sample, timestep=timestep)
1144
+ emb = self.time_embedding(t_emb, timestep_cond)
1145
+
1146
+ class_emb = self.get_class_embed(sample=sample, class_labels=class_labels)
1147
+ if class_emb is not None:
1148
+ if self.config.class_embeddings_concat:
1149
+ emb = torch.cat([emb, class_emb], dim=-1)
1150
+ else:
1151
+ emb = emb + class_emb
1152
+
1153
+ aug_emb = self.get_aug_embed(
1154
+ emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
1155
+ )
1156
+ if self.config.addition_embed_type == "image_hint":
1157
+ aug_emb, hint = aug_emb
1158
+ sample = torch.cat([sample, hint], dim=1)
1159
+
1160
+ emb = emb + aug_emb if aug_emb is not None else emb
1161
+
1162
+ if self.time_embed_act is not None:
1163
+ emb = self.time_embed_act(emb)
1164
+
1165
+ encoder_hidden_states = self.process_encoder_hidden_states(
1166
+ encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
1167
+ )
1168
+
1169
+ # 2. pre-process
1170
+ sample = self.conv_in(sample)
1171
+
1172
+ # 2.5 GLIGEN position net
1173
+ if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
1174
+ cross_attention_kwargs = cross_attention_kwargs.copy()
1175
+ gligen_args = cross_attention_kwargs.pop("gligen")
1176
+ cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
1177
+
1178
+ # 3. down
1179
+ # we're popping the `scale` instead of getting it because otherwise `scale` will be propagated
1180
+ # to the internal blocks and will raise deprecation warnings. this will be confusing for our users.
1181
+ if cross_attention_kwargs is not None:
1182
+ cross_attention_kwargs = cross_attention_kwargs.copy()
1183
+ lora_scale = cross_attention_kwargs.pop("scale", 1.0)
1184
+ else:
1185
+ lora_scale = 1.0
1186
+
1187
+ if USE_PEFT_BACKEND:
1188
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
1189
+ scale_lora_layers(self, lora_scale)
1190
+
1191
+ is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
1192
+ # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
1193
+ is_adapter = down_intrablock_additional_residuals is not None
1194
+ # maintain backward compatibility for legacy usage, where
1195
+ # T2I-Adapter and ControlNet both use down_block_additional_residuals arg
1196
+ # but can only use one or the other
1197
+ if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
1198
+ deprecate(
1199
+ "T2I should not use down_block_additional_residuals",
1200
+ "1.3.0",
1201
+ "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
1202
+ and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
1203
+ for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. ",
1204
+ standard_warn=False,
1205
+ )
1206
+ down_intrablock_additional_residuals = down_block_additional_residuals
1207
+ is_adapter = True
1208
+
1209
+ down_block_res_samples = (sample,)
1210
+ for downsample_block in self.down_blocks:
1211
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
1212
+ # For t2i-adapter CrossAttnDownBlock2D
1213
+ additional_residuals = {}
1214
+ if is_adapter and len(down_intrablock_additional_residuals) > 0:
1215
+ additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
1216
+
1217
+ sample, res_samples = downsample_block(
1218
+ hidden_states=sample,
1219
+ temb=emb,
1220
+ encoder_hidden_states=encoder_hidden_states,
1221
+ encoder_lora_states=encoder_lora_states,
1222
+ attention_mask=attention_mask,
1223
+ cross_attention_kwargs=cross_attention_kwargs,
1224
+ encoder_attention_mask=encoder_attention_mask,
1225
+ **additional_residuals,
1226
+ )
1227
+ else:
1228
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
1229
+ if is_adapter and len(down_intrablock_additional_residuals) > 0:
1230
+ sample += down_intrablock_additional_residuals.pop(0)
1231
+
1232
+ down_block_res_samples += res_samples
1233
+
1234
+ if is_controlnet:
1235
+ new_down_block_res_samples = ()
1236
+
1237
+ for down_block_res_sample, down_block_additional_residual in zip(
1238
+ down_block_res_samples, down_block_additional_residuals
1239
+ ):
1240
+ down_block_res_sample = down_block_res_sample + down_block_additional_residual
1241
+ new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
1242
+
1243
+ down_block_res_samples = new_down_block_res_samples
1244
+
1245
+ # 4. mid
1246
+ if self.mid_block is not None:
1247
+ if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
1248
+ sample = self.mid_block(
1249
+ sample,
1250
+ emb,
1251
+ encoder_hidden_states=encoder_hidden_states,
1252
+ encoder_lora_states=encoder_lora_states,
1253
+ attention_mask=attention_mask,
1254
+ cross_attention_kwargs=cross_attention_kwargs,
1255
+ encoder_attention_mask=encoder_attention_mask,
1256
+ )
1257
+ else:
1258
+ sample = self.mid_block(sample, emb)
1259
+
1260
+ # To support T2I-Adapter-XL
1261
+ if (
1262
+ is_adapter
1263
+ and len(down_intrablock_additional_residuals) > 0
1264
+ and sample.shape == down_intrablock_additional_residuals[0].shape
1265
+ ):
1266
+ sample += down_intrablock_additional_residuals.pop(0)
1267
+
1268
+ if is_controlnet:
1269
+ sample = sample + mid_block_additional_residual
1270
+
1271
+ # 5. up
1272
+ for i, upsample_block in enumerate(self.up_blocks):
1273
+ is_final_block = i == len(self.up_blocks) - 1
1274
+
1275
+ res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
1276
+ down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
1277
+
1278
+ # if we have not reached the final block and need to forward the
1279
+ # upsample size, we do it here
1280
+ if not is_final_block and forward_upsample_size:
1281
+ upsample_size = down_block_res_samples[-1].shape[2:]
1282
+
1283
+ if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
1284
+ sample = upsample_block(
1285
+ hidden_states=sample,
1286
+ temb=emb,
1287
+ res_hidden_states_tuple=res_samples,
1288
+ encoder_hidden_states=encoder_hidden_states,
1289
+ encoder_lora_states=encoder_lora_states,
1290
+ cross_attention_kwargs=cross_attention_kwargs,
1291
+ upsample_size=upsample_size,
1292
+ attention_mask=attention_mask,
1293
+ encoder_attention_mask=encoder_attention_mask,
1294
+ )
1295
+ else:
1296
+ sample = upsample_block(
1297
+ hidden_states=sample,
1298
+ temb=emb,
1299
+ res_hidden_states_tuple=res_samples,
1300
+ upsample_size=upsample_size,
1301
+ )
1302
+
1303
+ # 6. post-process
1304
+ if self.conv_norm_out:
1305
+ sample = self.conv_norm_out(sample)
1306
+ sample = self.conv_act(sample)
1307
+ sample = self.conv_out(sample)
1308
+
1309
+ if USE_PEFT_BACKEND:
1310
+ # remove `lora_scale` from each PEFT layer
1311
+ unscale_lora_layers(self, lora_scale)
1312
+
1313
+ if not return_dict:
1314
+ return (sample,)
1315
+
1316
+ return UNet2DConditionOutput(sample=sample)
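Not part of the upload: a minimal sketch of how the modified forward pass above might be called. The variable `unet`, the tensor shapes, and the way `encoder_lora_states` is produced are illustrative assumptions only, not the repository's training code.

import torch

# Illustrative only: `unet` is assumed to be an instance of the modified UNet2DConditionModel
# above, loaded with SDXL weights; shapes correspond to a 1024x1024 SDXL setup.
sample = torch.randn(2, 4, 128, 128)                # noisy latents
timestep = torch.tensor([500, 500])
encoder_hidden_states = torch.randn(2, 77, 2048)    # usual SDXL text conditioning
encoder_lora_states = torch.randn(2, 16, 2048)      # assumed: emotion prompt tokens (see models/visual_prompts.py)
added_cond_kwargs = {"text_embeds": torch.randn(2, 1280), "time_ids": torch.randn(2, 6)}

noise_pred = unet(
    sample,
    timestep,
    encoder_hidden_states=encoder_hidden_states,
    encoder_lora_states=encoder_lora_states,
    added_cond_kwargs=added_cond_kwargs,
).sample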
models/visual_prompts.py ADDED
@@ -0,0 +1,166 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import os
4
+ import numpy as np
5
+ from diffusers.models.attention_processor import Attention
6
+
7
+ class VisualTokenSelfAttn(torch.nn.Module):
8
+ def __init__(self, in_dim=2792, out_dim=768, num_heads=8):
9
+ super().__init__()
10
+
11
+ self.meta_token_trans = nn.Sequential(
12
+ nn.Linear(in_dim, out_dim * 4),
13
+ nn.LayerNorm(out_dim * 4),
14
+ nn.GELU(),
15
+ nn.Linear(out_dim * 4, out_dim),
16
+ nn.LayerNorm(out_dim)
17
+ )
18
+
19
+ self.norm1 = nn.LayerNorm(out_dim, eps=1e-6) # important to avoid attention collapsing
20
+ self.attn = Attention(query_dim=out_dim, heads=num_heads)
21
+ self.norm2 = nn.LayerNorm(out_dim, eps=1e-6)
22
+ self.mlp = nn.Sequential(
23
+ nn.Linear(out_dim, out_dim * 4),
24
+ nn.GELU(),
25
+ nn.Linear(out_dim * 4, out_dim)
26
+ )
27
+
28
+ def forward(self, x):
29
+ x = self.meta_token_trans(x)
30
+ x = x + self.attn(self.norm1(x))
31
+ x = x + self.mlp(self.norm2(x))
32
+ return x
33
+
34
+
35
+ class EmotionEmbedding(nn.Module):
36
+ def __init__(self, emotions, prompts_dir, feature_names, output_dim, prompt_len=16):
37
+ super().__init__()
38
+
39
+ input_dim = self.get_input_dim(feature_names=feature_names)
40
+ self.self_attn = VisualTokenSelfAttn(in_dim=input_dim, out_dim=output_dim)
41
+
42
+ self.emotions = emotions
43
+ self.emotion2idx = {emotion: idx for idx, emotion in enumerate(emotions)}
44
+ self.emotion_params = nn.ParameterList()
45
+
46
+ self.emotion_init_features = self.get_features(emotions, prompts_dir, feature_names, prompt_len)
47
+
48
+ for emotion in self.emotions:
49
+ init_params = self.emotion_init_features[emotion]
50
+ # init_params = torch.from_numpy(init_params).float()
51
+ param = nn.Parameter(init_params)
52
+ self.emotion_params.append(param)
53
+
54
+ def get_features(self, emotions, prompts_dir, feature_names, prompt_len):
55
+ emotion_init_features = {}
56
+ for emotion in emotions:
57
+ emotion_features = []
58
+ for feature_name in feature_names:
59
+ features = np.load(os.path.join(prompts_dir, f'{emotion}_{feature_name}.npy'), allow_pickle=True)
60
+ emotion_features.append(features)
61
+ emotion_features = np.concatenate(emotion_features, axis=1)
62
+
63
+ from sklearn.cluster import KMeans
64
+ kmeans = KMeans(n_clusters=prompt_len, random_state=42)
65
+ kmeans.fit_predict(emotion_features)
66
+ token = torch.tensor(kmeans.cluster_centers_, dtype=torch.float32).unsqueeze(0)  # cast to float32 so the parameters match the model dtype
67
+ # print(token.shape)
68
+ emotion_init_features[emotion] = token
69
+ return emotion_init_features
70
+
71
+ def get_input_dim(self, feature_names):
72
+ if feature_names == ["clip"]:
73
+ in_dim = 768
74
+ elif feature_names == ["vgg"]:
75
+ in_dim = 1000
76
+ elif feature_names == ["dinov2"]:
77
+ in_dim = 1024
78
+ elif feature_names == ["clip", "vgg"]:
79
+ in_dim = 1768
80
+ elif feature_names == ["clip", "dinov2"]:
81
+ in_dim = 1768
82
+ elif feature_names == ["vgg", "dinov2"]:
83
+ in_dim = 2024
84
+ elif feature_names == ["clip", "vgg", "dinov2"]:
85
+ in_dim = 2792
86
+ else:
87
+ raise ValueError("Invalid feature names")
88
+ return in_dim
89
+
90
+ def params_to_prompts(self):
91
+ self.emotion_prompts = {}
92
+ for emotion in self.emotions:
93
+ prompt = self.self_attn(self.emotion_params[self.emotion2idx[emotion]])
94
+ prompt = prompt.squeeze(0)
95
+ self.emotion_prompts[emotion] = prompt
96
+
97
+ def forward(self, emotion):
98
+ if isinstance(emotion, str):
99
+ emotions = [emotion]
100
+ else:
101
+ emotions = emotion
102
+
103
+ self.params_to_prompts()
104
+ selected_prompts = [self.emotion_prompts[emotion] for emotion in emotions]
105
+ prompts = torch.stack(selected_prompts, dim=0)
106
+ del self.emotion_prompts
107
+
108
+ return prompts
109
+
110
+ class EmotionEmbedding2(nn.Module):
111
+ def __init__(self, emotions, input_dim, output_dim):
112
+ super().__init__()
113
+ self.self_attn = VisualTokenSelfAttn(in_dim=input_dim, out_dim=output_dim)
114
+ self.emotions = emotions
115
+ self.emotion2idx = {emotion: idx for idx, emotion in enumerate(emotions)}
116
+ self.emotion_params = nn.Embedding(len(emotions), input_dim)
117
+
118
+ def forward(self, emotion):
119
+ if isinstance(emotion, str):
120
+ emotions = [emotion]
121
+ else:
122
+ emotions = emotion
123
+
124
+ emotions = [self.emotion2idx[emotion] for emotion in emotions]
125
+ emotions = torch.tensor(emotions, device=self.emotion_params.weight.device)
126
+ prompts = self.emotion_params(emotions).unsqueeze(1)
127
+ prompts = self.self_attn(prompts)
128
+ return prompts
129
+
130
+ if __name__ == "__main__":
131
+ # emotions = ["amusement", "anger", "awe", "contentment",
132
+ # "disgust", "excitement", "fear", "sadness"]
133
+ # feature_names = ["clip", "vgg", "dinov2"]
134
+ # prompts_dir = "features/origin"
135
+ # model = EmotionEmbedding(emotions, prompts_dir, feature_names, output_dim=2048, prompt_len=16).to("cuda")
136
+ # optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
137
+ # output = model('awe')
138
+ # target = torch.ones_like(output)
139
+ # loss = ((output - target) ** 2).mean()
140
+ # print(output)
141
+
142
+ emotions = ["amusement", "anger", "awe", "contentment",
143
+ "disgust", "excitement", "fear", "sadness"]
144
+ prompts_dir = "features/origin"
145
+ model = EmotionEmbedding2(emotions, input_dim=2048, output_dim=2048).to("cuda")
146
+ optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
147
+ output = model('awe')
148
+ target = torch.ones_like(output)
149
+ loss = ((output - target) ** 2).mean()
150
+ print(output)
151
+
152
+ # backward pass
153
+ loss.backward()
154
+
155
+ # print the gradients to check which parameters received them
156
+ for name, param in model.named_parameters():
157
+ if param.grad is not None:
158
+ print(f"{name} has gradient ✅, grad mean: {param.grad.mean().item()}")
159
+ if name == "emotion_params.weight":
160
+ print(param.grad)
161
+ else:
162
+ print(f"{name} has NO gradient ❌")
163
+
164
+ # update the parameters
165
+ optimizer.step()
166
+ print(output)
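Not part of the upload: a minimal sketch of how EmotionEmbedding could supply the `encoder_lora_states` consumed by the UNet forward pass above. The feature directory, feature names, and dimensions are assumptions taken from the commented-out example in this file.

import torch
from models.visual_prompts import EmotionEmbedding

emotions = ["amusement", "anger", "awe", "contentment",
            "disgust", "excitement", "fear", "sadness"]
# assumed: precomputed {emotion}_{feature}.npy files live under features/origin,
# and output_dim matches the UNet cross-attention dimension (2048 for SDXL)
embedder = EmotionEmbedding(emotions, prompts_dir="features/origin",
                            feature_names=["clip", "vgg", "dinov2"],
                            output_dim=2048, prompt_len=16).to("cuda")

encoder_lora_states = embedder(["awe", "fear"])   # shape: (batch, prompt_len, 2048)
# these tokens would then be passed to the UNet forward as `encoder_lora_states`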