Upload folder using huggingface_hub
- v0.36.0/README.md +0 -0
- v0.36.0/README_community_scripts.md +439 -0
- v0.36.0/adaptive_mask_inpainting.py +1469 -0
- v0.36.0/bit_diffusion.py +264 -0
- v0.36.0/checkpoint_merger.py +288 -0
- v0.36.0/clip_guided_images_mixing_stable_diffusion.py +445 -0
- v0.36.0/clip_guided_stable_diffusion.py +337 -0
- v0.36.0/clip_guided_stable_diffusion_img2img.py +490 -0
- v0.36.0/cogvideox_ddim_inversion.py +645 -0
- v0.36.0/composable_stable_diffusion.py +536 -0
- v0.36.0/ddim_noise_comparative_analysis.py +190 -0
- v0.36.0/dps_pipeline.py +466 -0
- v0.36.0/edict_pipeline.py +264 -0
- v0.36.0/fresco_v2v.py +0 -0
- v0.36.0/gluegen.py +816 -0
- v0.36.0/hd_painter.py +1001 -0
- v0.36.0/iadb.py +149 -0
- v0.36.0/imagic_stable_diffusion.py +470 -0
- v0.36.0/img2img_inpainting.py +437 -0
- v0.36.0/instaflow_one_step.py +693 -0
- v0.36.0/interpolate_stable_diffusion.py +498 -0
- v0.36.0/ip_adapter_face_id.py +1129 -0
- v0.36.0/kohya_hires_fix.py +468 -0
- v0.36.0/latent_consistency_img2img.py +821 -0
- v0.36.0/latent_consistency_interpolate.py +999 -0
- v0.36.0/latent_consistency_txt2img.py +729 -0
- v0.36.0/llm_grounded_diffusion.py +1567 -0
- v0.36.0/lpw_stable_diffusion.py +1431 -0
- v0.36.0/lpw_stable_diffusion_onnx.py +1148 -0
- v0.36.0/lpw_stable_diffusion_xl.py +0 -0
- v0.36.0/magic_mix.py +152 -0
- v0.36.0/marigold_depth_estimation.py +673 -0
- v0.36.0/masked_stable_diffusion_img2img.py +262 -0
- v0.36.0/masked_stable_diffusion_xl_img2img.py +682 -0
- v0.36.0/matryoshka.py +0 -0
- v0.36.0/mixture_canvas.py +501 -0
- v0.36.0/mixture_tiling.py +405 -0
- v0.36.0/mixture_tiling_sdxl.py +1219 -0
- v0.36.0/mod_controlnet_tile_sr_sdxl.py +1845 -0
- v0.36.0/multilingual_stable_diffusion.py +410 -0
- v0.36.0/one_step_unet.py +24 -0
- v0.36.0/pipeline_animatediff_controlnet.py +1129 -0
- v0.36.0/pipeline_animatediff_img2video.py +984 -0
- v0.36.0/pipeline_animatediff_ipex.py +1002 -0
- v0.36.0/pipeline_controlnet_xl_kolors.py +1338 -0
- v0.36.0/pipeline_controlnet_xl_kolors_img2img.py +1540 -0
- v0.36.0/pipeline_controlnet_xl_kolors_inpaint.py +1854 -0
- v0.36.0/pipeline_demofusion_sdxl.py +1382 -0
- v0.36.0/pipeline_fabric.py +755 -0
- v0.36.0/pipeline_faithdiff_stable_diffusion_xl.py +0 -0
v0.36.0/README.md
ADDED
(The diff for this file is too large to render.)
v0.36.0/README_community_scripts.md
ADDED
# Community Scripts

**Community scripts** consist of inference examples using Diffusers pipelines that have been added by the community.
Please have a look at the following table to get an overview of all community examples. Click on the **Code Example** to get a copy-and-paste code example that you can try out.
If a community script doesn't work as expected, please open an issue and ping the author on it.

| Example | Description | Code Example | Colab | Author |
|:---|:---|:---|:---|---:|
| Using IP-Adapter with Negative Noise | Using negative noise with IP-Adapter to better control the generation (see the [original post](https://github.com/huggingface/diffusers/discussions/7167) on the forum for more details) | [IP-Adapter Negative Noise](#ip-adapter-negative-noise) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/ip_adapter_negative_noise.ipynb) | [Álvaro Somoza](https://github.com/asomoza) |
| Asymmetric Tiling | Configure seamless image tiling independently for the X and Y axes | [Asymmetric Tiling](#asymmetric-tiling) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/asymetric_tiling.ipynb) | [alexisrolland](https://github.com/alexisrolland) |
| Prompt Scheduling Callback | Allows changing prompts during a generation | [Prompt Scheduling Callback](#prompt-scheduling-callback) | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/prompt_scheduling_callback.ipynb) | [hlky](https://github.com/hlky) |


## Example usages

### IP Adapter Negative Noise

Diffusers pipelines are fully integrated with IP-Adapter, which allows you to prompt the diffusion model with an image. However, it does not support negative image prompts (there is no `negative_ip_adapter_image` argument) the same way it supports negative text prompts. When you pass an `ip_adapter_image`, it will create a zero-filled tensor as a negative image. This script shows you how to create negative noise from `ip_adapter_image` and use it to significantly improve the generation quality while preserving the composition of images.

[cubiq](https://github.com/cubiq) initially developed this feature in his [repository](https://github.com/cubiq/ComfyUI_IPAdapter_plus). The community script was contributed by [asomoza](https://github.com/asomoza). You can find more details about this experimentation in [this discussion](https://github.com/huggingface/diffusers/discussions/7167).

IP-Adapter without negative noise
|source|result|
|---|---|
|||

IP-Adapter with negative noise
|source|result|
|---|---|
|||

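The script below re-implements the pipeline's image-embedding preparation (`encode_image` and `prepare_ip_adapter_image_embeds`) so that a user-provided negative image can replace the default zero-filled tensor, and then passes the resulting embeddings to the pipeline via `ip_adapter_image_embeds`: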
```python
import torch

from diffusers import AutoencoderKL, DPMSolverMultistepScheduler, StableDiffusionXLPipeline
from diffusers.models import ImageProjection
from diffusers.utils import load_image


def encode_image(
    image_encoder,
    feature_extractor,
    image,
    device,
    num_images_per_prompt,
    output_hidden_states=None,
    negative_image=None,
):
    dtype = next(image_encoder.parameters()).dtype

    if not isinstance(image, torch.Tensor):
        image = feature_extractor(image, return_tensors="pt").pixel_values

    image = image.to(device=device, dtype=dtype)
    if output_hidden_states:
        image_enc_hidden_states = image_encoder(image, output_hidden_states=True).hidden_states[-2]
        image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)

        if negative_image is None:
            uncond_image_enc_hidden_states = image_encoder(
                torch.zeros_like(image), output_hidden_states=True
            ).hidden_states[-2]
        else:
            if not isinstance(negative_image, torch.Tensor):
                negative_image = feature_extractor(negative_image, return_tensors="pt").pixel_values
            negative_image = negative_image.to(device=device, dtype=dtype)
            uncond_image_enc_hidden_states = image_encoder(negative_image, output_hidden_states=True).hidden_states[-2]

        uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        return image_enc_hidden_states, uncond_image_enc_hidden_states
    else:
        image_embeds = image_encoder(image).image_embeds
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        uncond_image_embeds = torch.zeros_like(image_embeds)

        return image_embeds, uncond_image_embeds


@torch.no_grad()
def prepare_ip_adapter_image_embeds(
    unet,
    image_encoder,
    feature_extractor,
    ip_adapter_image,
    do_classifier_free_guidance,
    device,
    num_images_per_prompt,
    ip_adapter_negative_image=None,
):
    if not isinstance(ip_adapter_image, list):
        ip_adapter_image = [ip_adapter_image]

    if len(ip_adapter_image) != len(unet.encoder_hid_proj.image_projection_layers):
        raise ValueError(
            f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
        )

    image_embeds = []
    for single_ip_adapter_image, image_proj_layer in zip(
        ip_adapter_image, unet.encoder_hid_proj.image_projection_layers
    ):
        output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
        single_image_embeds, single_negative_image_embeds = encode_image(
            image_encoder,
            feature_extractor,
            single_ip_adapter_image,
            device,
            1,
            output_hidden_state,
            negative_image=ip_adapter_negative_image,
        )
        single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
        single_negative_image_embeds = torch.stack([single_negative_image_embeds] * num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
            single_image_embeds = single_image_embeds.to(device)

        image_embeds.append(single_image_embeds)

    return image_embeds


vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix",
    torch_dtype=torch.float16,
).to("cuda")

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "RunDiffusion/Juggernaut-XL-v9",
    torch_dtype=torch.float16,
    vae=vae,
    variant="fp16",
).to("cuda")

pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline.scheduler.config.use_karras_sigmas = True

pipeline.load_ip_adapter(
    "h94/IP-Adapter",
    subfolder="sdxl_models",
    weight_name="ip-adapter-plus_sdxl_vit-h.safetensors",
    image_encoder_folder="models/image_encoder",
)
pipeline.set_ip_adapter_scale(0.7)

ip_image = load_image("source.png")
negative_ip_image = load_image("noise.png")

image_embeds = prepare_ip_adapter_image_embeds(
    unet=pipeline.unet,
    image_encoder=pipeline.image_encoder,
    feature_extractor=pipeline.feature_extractor,
    ip_adapter_image=[[ip_image]],
    do_classifier_free_guidance=True,
    device="cuda",
    num_images_per_prompt=1,
    ip_adapter_negative_image=negative_ip_image,
)


prompt = "cinematic photo of a cyborg in the city, 4k, high quality, intricate, highly detailed"
negative_prompt = "blurry, smooth, plastic"

image = pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    ip_adapter_image_embeds=image_embeds,
    guidance_scale=6.0,
    num_inference_steps=25,
    generator=torch.Generator(device="cpu").manual_seed(1556265306),
).images[0]

image.save("result.png")
```

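The script above expects a `noise.png` on disk, typically an image of pure random noise. A minimal sketch (not part of the original script) for creating such a file with NumPy and Pillow; the 512x512 size is an arbitrary assumption, since the CLIP feature extractor resizes the input anyway:

```python
import numpy as np
from PIL import Image

# Hypothetical helper, not part of the original script: save a random-noise image
# to use as `ip_adapter_negative_image`.
noise = (np.random.rand(512, 512, 3) * 255).astype("uint8")
Image.fromarray(noise).save("noise.png")
```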
### Asymmetric Tiling
Stable Diffusion is not trained to generate seamless textures. However, you can use this simple script to add tiling to your generation. This script is contributed by [alexisrolland](https://github.com/alexisrolland). See more details in [this issue](https://github.com/huggingface/diffusers/issues/556).


|Generated|Tiled|
|---|---|
|||


```py
import torch
from typing import Optional
from diffusers import StableDiffusionPipeline
from diffusers.models.lora import LoRACompatibleConv

def seamless_tiling(pipeline, x_axis, y_axis):
    def asymmetric_conv2d_convforward(self, input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None):
        self.paddingX = (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0)
        self.paddingY = (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3])
        working = torch.nn.functional.pad(input, self.paddingX, mode=x_mode)
        working = torch.nn.functional.pad(working, self.paddingY, mode=y_mode)
        return torch.nn.functional.conv2d(working, weight, bias, self.stride, torch.nn.modules.utils._pair(0), self.dilation, self.groups)
    x_mode = 'circular' if x_axis else 'constant'
    y_mode = 'circular' if y_axis else 'constant'
    targets = [pipeline.vae, pipeline.text_encoder, pipeline.unet]
    convolution_layers = []
    for target in targets:
        for module in target.modules():
            if isinstance(module, torch.nn.Conv2d):
                convolution_layers.append(module)
    for layer in convolution_layers:
        if isinstance(layer, LoRACompatibleConv) and layer.lora_layer is None:
            layer.lora_layer = lambda *x: 0
        layer._conv_forward = asymmetric_conv2d_convforward.__get__(layer, torch.nn.Conv2d)
    return pipeline

pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True)
pipeline.enable_model_cpu_offload()
prompt = ["texture of a red brick wall"]
seed = 123456
generator = torch.Generator(device='cuda').manual_seed(seed)

pipeline = seamless_tiling(pipeline=pipeline, x_axis=True, y_axis=True)
image = pipeline(
    prompt=prompt,
    width=512,
    height=512,
    num_inference_steps=20,
    guidance_scale=7,
    num_images_per_prompt=1,
    generator=generator
).images[0]
seamless_tiling(pipeline=pipeline, x_axis=False, y_axis=False)

torch.cuda.empty_cache()
image.save('image.png')
```

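To check that the texture really tiles (as in the Generated/Tiled comparison above), a quick sanity test is to paste the generated image in a 2x2 grid and look for visible seams. A minimal sketch, not part of the original script:

```python
from PIL import Image

# Repeat the generated texture in a 2x2 grid; a seamless texture shows no visible seams.
tiled = Image.new("RGB", (image.width * 2, image.height * 2))
for dx in (0, image.width):
    for dy in (0, image.height):
        tiled.paste(image, (dx, dy))
tiled.save("tiled.png")
```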
### Prompt Scheduling Callback

Prompt scheduling callback allows changing prompts during a generation, like [prompt editing in A1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#prompt-editing).

```python
from diffusers import StableDiffusionPipeline
from diffusers.callbacks import PipelineCallback, MultiPipelineCallbacks
from diffusers.configuration_utils import register_to_config
import torch
from typing import Any, Dict, Tuple, Union


class SDPromptSchedulingCallback(PipelineCallback):
    @register_to_config
    def __init__(
        self,
        encoded_prompt: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
        cutoff_step_ratio=None,
        cutoff_step_index=None,
    ):
        super().__init__(
            cutoff_step_ratio=cutoff_step_ratio, cutoff_step_index=cutoff_step_index
        )

    tensor_inputs = ["prompt_embeds"]

    def callback_fn(
        self, pipeline, step_index, timestep, callback_kwargs
    ) -> Dict[str, Any]:
        cutoff_step_ratio = self.config.cutoff_step_ratio
        cutoff_step_index = self.config.cutoff_step_index
        if isinstance(self.config.encoded_prompt, tuple):
            prompt_embeds, negative_prompt_embeds = self.config.encoded_prompt
        else:
            prompt_embeds = self.config.encoded_prompt

        # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio
        cutoff_step = (
            cutoff_step_index
            if cutoff_step_index is not None
            else int(pipeline.num_timesteps * cutoff_step_ratio)
        )

        if step_index == cutoff_step:
            if pipeline.do_classifier_free_guidance:
                prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            callback_kwargs[self.tensor_inputs[0]] = prompt_embeds
        return callback_kwargs


pipeline: StableDiffusionPipeline = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")
pipeline.safety_checker = None
pipeline.requires_safety_checker = False

callback = MultiPipelineCallbacks(
    [
        SDPromptSchedulingCallback(
            encoded_prompt=pipeline.encode_prompt(
                prompt=f"prompt {index}",
                negative_prompt=f"negative prompt {index}",
                device=pipeline._execution_device,
                num_images_per_prompt=1,
                # pipeline.do_classifier_free_guidance can't be accessed until after the pipeline has run
                do_classifier_free_guidance=True,
            ),
            cutoff_step_index=index,
        ) for index in range(1, 20)
    ]
)

image = pipeline(
    prompt="prompt",
    negative_prompt="negative prompt",
    callback_on_step_end=callback,
    callback_on_step_end_tensor_inputs=["prompt_embeds"],
).images[0]
torch.cuda.empty_cache()
image.save('image.png')
```

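The same callback pattern works for SDXL. Because SDXL also conditions on pooled text embeddings (`add_text_embeds`) and `add_time_ids`, those tensors are scheduled alongside `prompt_embeds`: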
```python
from diffusers import StableDiffusionXLPipeline
from diffusers.callbacks import PipelineCallback, MultiPipelineCallbacks
from diffusers.configuration_utils import register_to_config
import torch
from typing import Any, Dict, Tuple, Union


class SDXLPromptSchedulingCallback(PipelineCallback):
    @register_to_config
    def __init__(
        self,
        encoded_prompt: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
        add_text_embeds: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
        add_time_ids: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
        cutoff_step_ratio=None,
        cutoff_step_index=None,
    ):
        super().__init__(
            cutoff_step_ratio=cutoff_step_ratio, cutoff_step_index=cutoff_step_index
        )

    tensor_inputs = ["prompt_embeds", "add_text_embeds", "add_time_ids"]

    def callback_fn(
        self, pipeline, step_index, timestep, callback_kwargs
    ) -> Dict[str, Any]:
        cutoff_step_ratio = self.config.cutoff_step_ratio
        cutoff_step_index = self.config.cutoff_step_index
        if isinstance(self.config.encoded_prompt, tuple):
            prompt_embeds, negative_prompt_embeds = self.config.encoded_prompt
        else:
            prompt_embeds = self.config.encoded_prompt
        if isinstance(self.config.add_text_embeds, tuple):
            add_text_embeds, negative_add_text_embeds = self.config.add_text_embeds
        else:
            add_text_embeds = self.config.add_text_embeds
        if isinstance(self.config.add_time_ids, tuple):
            add_time_ids, negative_add_time_ids = self.config.add_time_ids
        else:
            add_time_ids = self.config.add_time_ids

        # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio
        cutoff_step = (
            cutoff_step_index
            if cutoff_step_index is not None
            else int(pipeline.num_timesteps * cutoff_step_ratio)
        )

        if step_index == cutoff_step:
            if pipeline.do_classifier_free_guidance:
                prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
                add_text_embeds = torch.cat([negative_add_text_embeds, add_text_embeds])
                add_time_ids = torch.cat([negative_add_time_ids, add_time_ids])
            callback_kwargs[self.tensor_inputs[0]] = prompt_embeds
            callback_kwargs[self.tensor_inputs[1]] = add_text_embeds
            callback_kwargs[self.tensor_inputs[2]] = add_time_ids
        return callback_kwargs


pipeline: StableDiffusionXLPipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")

callbacks = []
for index in range(1, 20):
    (
        prompt_embeds,
        negative_prompt_embeds,
        pooled_prompt_embeds,
        negative_pooled_prompt_embeds,
    ) = pipeline.encode_prompt(
        prompt=f"prompt {index}",
        negative_prompt=f"negative prompt {index}",
        device=pipeline._execution_device,
        num_images_per_prompt=1,
        # pipeline.do_classifier_free_guidance can't be accessed until after the pipeline has run
        do_classifier_free_guidance=True,
    )
    text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
    add_time_ids = pipeline._get_add_time_ids(
        (1024, 1024),
        (0, 0),
        (1024, 1024),
        dtype=prompt_embeds.dtype,
        text_encoder_projection_dim=text_encoder_projection_dim,
    )
    negative_add_time_ids = pipeline._get_add_time_ids(
        (1024, 1024),
        (0, 0),
        (1024, 1024),
        dtype=prompt_embeds.dtype,
        text_encoder_projection_dim=text_encoder_projection_dim,
    )
    callbacks.append(
        SDXLPromptSchedulingCallback(
            encoded_prompt=(prompt_embeds, negative_prompt_embeds),
            add_text_embeds=(pooled_prompt_embeds, negative_pooled_prompt_embeds),
            add_time_ids=(add_time_ids, negative_add_time_ids),
            cutoff_step_index=index,
        )
    )


callback = MultiPipelineCallbacks(callbacks)

image = pipeline(
    prompt="prompt",
    negative_prompt="negative prompt",
    callback_on_step_end=callback,
    callback_on_step_end_tensor_inputs=[
        "prompt_embeds",
        "add_text_embeds",
        "add_time_ids",
    ],
).images[0]
```

v0.36.0/adaptive_mask_inpainting.py
ADDED
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/

import inspect
import os
import shutil
from glob import glob
from typing import Any, Callable, Dict, List, Optional, Union

import cv2
import numpy as np
import PIL.Image
import requests
import torch
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor
from detectron2.projects import point_rend
from detectron2.structures.instances import Instances
from detectron2.utils.visualizer import ColorMode, Visualizer
from packaging import version
from tqdm import tqdm
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AsymmetricAutoencoderKL, AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    deprecate,
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


AMI_INSTALL_MESSAGE = """

Example Demo of Adaptive Mask Inpainting

Beyond the Contact: Discovering Comprehensive Affordance for 3D Objects from Pre-trained 2D Diffusion Models
Kim et al.
ECCV-2024 (Oral)


Please prepare the environment via

```
conda create --name ami python=3.9 -y
conda activate ami

conda install pytorch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1 cudatoolkit=11.3 -c pytorch -c conda-forge -y
python -m pip install detectron2==0.6 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu113/torch1.10/index.html
pip install easydict
pip install diffusers==0.20.2 accelerate safetensors transformers
pip install setuptools==59.5.0
pip install opencv-python
pip install numpy==1.24.1
```


Put the code inside the root of the diffusers library (e.g., as '/home/username/diffusers/adaptive_mask_inpainting_example.py') and run the Python code.




"""
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> # !pip install transformers accelerate
        >>> from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, DDIMScheduler
        >>> from diffusers.utils import load_image
        >>> import numpy as np
        >>> import torch

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"
        ... )
        >>> init_image = init_image.resize((512, 512))

        >>> generator = torch.Generator(device="cpu").manual_seed(1)

        >>> mask_image = load_image(
        ...     "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"
        ... )
        >>> mask_image = mask_image.resize((512, 512))


        >>> def make_inpaint_condition(image, image_mask):
        ...     image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
        ...     image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0

        ...     assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size"
        ...     image[image_mask > 0.5] = -1.0  # set as masked pixel
        ...     image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)
        ...     image = torch.from_numpy(image)
        ...     return image


        >>> control_image = make_inpaint_condition(init_image, mask_image)

        >>> controlnet = ControlNetModel.from_pretrained(
        ...     "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
        ... )
        >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
        ...     "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
        ... )

        >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        >>> pipe.enable_model_cpu_offload()

        >>> # generate image
        >>> image = pipe(
        ...     "a handsome man with ray-ban sunglasses",
        ...     num_inference_steps=20,
        ...     generator=generator,
        ...     eta=1.0,
        ...     image=init_image,
        ...     mask_image=mask_image,
        ...     control_image=control_image,
        ... ).images[0]
        ```
"""


def download_file(url, output_file, exist_ok: bool):
    if exist_ok and os.path.exists(output_file):
        return

    response = requests.get(url, stream=True)

    with open(output_file, "wb") as file:
        for chunk in tqdm(response.iter_content(chunk_size=8192), desc=f"Downloading '{output_file}'..."):
            if chunk:
                file.write(chunk)


def generate_video_from_imgs(images_save_directory, fps=15.0, delete_dir=True):
    # delete videos if exists
    if os.path.exists(f"{images_save_directory}.mp4"):
        os.remove(f"{images_save_directory}.mp4")
    if os.path.exists(f"{images_save_directory}_before_process.mp4"):
        os.remove(f"{images_save_directory}_before_process.mp4")

    # assume there are "enumerated" images under "images_save_directory"
    assert os.path.isdir(images_save_directory)
    ImgPaths = sorted(glob(f"{images_save_directory}/*"))

    if len(ImgPaths) == 0:
        print("\tSkipping, since there must be at least one image to create mp4\n")
    else:
        # mp4 configuration
        video_path = images_save_directory + "_before_process.mp4"

        # Get height and width config
        images = sorted([ImgPath.split("/")[-1] for ImgPath in ImgPaths if ImgPath.endswith(".png")])
        frame = cv2.imread(os.path.join(images_save_directory, images[0]))
        height, width, channels = frame.shape

        # create mp4 video writer
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        video = cv2.VideoWriter(video_path, fourcc, fps, (width, height))
        for image in images:
            video.write(cv2.imread(os.path.join(images_save_directory, image)))
        cv2.destroyAllWindows()
        video.release()

        # generated video is not compatible with HTML5. Post-process and change codec of video, so that it is applicable to HTML.
        os.system(
            f'ffmpeg -i "{images_save_directory}_before_process.mp4" -vcodec libx264 -f mp4 "{images_save_directory}.mp4" '
        )

    # remove group of images, and remove video before post-process.
    if delete_dir and os.path.exists(images_save_directory):
        shutil.rmtree(images_save_directory)
    # remove 'before-process' video
    if os.path.exists(f"{images_save_directory}_before_process.mp4"):
        os.remove(f"{images_save_directory}_before_process.mp4")


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.prepare_mask_and_masked_image
def prepare_mask_and_masked_image(image, mask, height, width, return_image=False):
    """
    Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
    converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
    ``image`` and ``1`` for the ``mask``.

    The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
    binarized (``mask > 0.5``) and cast to ``torch.float32`` too.

    Args:
        image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
            It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
            ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
        mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
            It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
            ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.


    Raises:
        ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
        should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
        TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
        (or the other way around).

    Returns:
        tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
        dimensions: ``batch x channels x height x width``.
    """

    if image is None:
        raise ValueError("`image` input cannot be undefined.")

    if mask is None:
        raise ValueError("`mask_image` input cannot be undefined.")

    if isinstance(image, torch.Tensor):
        if not isinstance(mask, torch.Tensor):
            raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not")

        # Batch single image
        if image.ndim == 3:
            assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
            image = image.unsqueeze(0)

        # Batch and add channel dim for single mask
        if mask.ndim == 2:
            mask = mask.unsqueeze(0).unsqueeze(0)

        # Batch single mask or add channel dim
        if mask.ndim == 3:
            # Single batched mask, no channel dim or single mask not batched but channel dim
            if mask.shape[0] == 1:
                mask = mask.unsqueeze(0)

            # Batched masks no channel dim
            else:
                mask = mask.unsqueeze(1)

        assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
        assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
        assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"

        # Check image is in [-1, 1]
        if image.min() < -1 or image.max() > 1:
            raise ValueError("Image should be in [-1, 1] range")

        # Check mask is in [0, 1]
        if mask.min() < 0 or mask.max() > 1:
            raise ValueError("Mask should be in [0, 1] range")

        # Binarize mask
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1

        # Image as float32
        image = image.to(dtype=torch.float32)
    elif isinstance(mask, torch.Tensor):
        raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
    else:
        # preprocess image
        if isinstance(image, (PIL.Image.Image, np.ndarray)):
            image = [image]
        if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
            # resize all images w.r.t. the passed height and width
            image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
            image = [np.array(i.convert("RGB"))[None, :] for i in image]
            image = np.concatenate(image, axis=0)
        elif isinstance(image, list) and isinstance(image[0], np.ndarray):
            image = np.concatenate([i[None, :] for i in image], axis=0)

        image = image.transpose(0, 3, 1, 2)
        image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0

        # preprocess mask
        if isinstance(mask, (PIL.Image.Image, np.ndarray)):
            mask = [mask]

        if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
            mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
            mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
            mask = mask.astype(np.float32) / 255.0
        elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
            mask = np.concatenate([m[None, None, :] for m in mask], axis=0)

        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)

    masked_image = image * (mask < 0.5)

    # n.b. ensure backwards compatibility as old function does not return image
    if return_image:
        return mask, masked_image, image

    return mask, masked_image


class AdaptiveMaskInpaintPipeline(
|
| 323 |
+
DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
|
| 324 |
+
):
|
| 325 |
+
r"""
|
| 326 |
+
Pipeline for text-guided image inpainting using Stable Diffusion.
|
| 327 |
+
|
| 328 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 329 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 330 |
+
|
| 331 |
+
The pipeline also inherits the following loading methods:
|
| 332 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 333 |
+
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 334 |
+
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 335 |
+
|
| 336 |
+
Args:
|
| 337 |
+
vae ([`AutoencoderKL`, `AsymmetricAutoencoderKL`]):
|
| 338 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 339 |
+
text_encoder ([`CLIPTextModel`]):
|
| 340 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 341 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 342 |
+
A `CLIPTokenizer` to tokenize text.
|
| 343 |
+
unet ([`UNet2DConditionModel`]):
|
| 344 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 345 |
+
scheduler ([`SchedulerMixin`]):
|
| 346 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 347 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 348 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 349 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 350 |
+
Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
|
| 351 |
+
about a model's potential harms.
|
| 352 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 353 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 354 |
+
"""
|
| 355 |
+
|
| 356 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 357 |
+
|
| 358 |
+
def __init__(
|
| 359 |
+
self,
|
| 360 |
+
vae: Union[AutoencoderKL, AsymmetricAutoencoderKL],
|
| 361 |
+
text_encoder: CLIPTextModel,
|
| 362 |
+
tokenizer: CLIPTokenizer,
|
| 363 |
+
unet: UNet2DConditionModel,
|
| 364 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 365 |
+
# safety_checker: StableDiffusionSafetyChecker,
|
| 366 |
+
safety_checker,
|
| 367 |
+
feature_extractor: CLIPImageProcessor,
|
| 368 |
+
requires_safety_checker: bool = True,
|
| 369 |
+
):
|
| 370 |
+
super().__init__()
|
| 371 |
+
|
| 372 |
+
self.register_adaptive_mask_model()
|
| 373 |
+
self.register_adaptive_mask_settings()
|
| 374 |
+
|
| 375 |
+
if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
|
| 376 |
+
deprecation_message = (
|
| 377 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 378 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 379 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 380 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 381 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 382 |
+
" file"
|
| 383 |
+
)
|
| 384 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 385 |
+
new_config = dict(scheduler.config)
|
| 386 |
+
new_config["steps_offset"] = 1
|
| 387 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 388 |
+
|
| 389 |
+
if scheduler is not None and getattr(scheduler.config, "skip_prk_steps", True) is False:
|
| 390 |
+
deprecation_message = (
|
| 391 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration"
|
| 392 |
+
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
|
| 393 |
+
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
|
| 394 |
+
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
|
| 395 |
+
" Hub, it would be very nice if you could open a Pull request for the"
|
| 396 |
+
" `scheduler/scheduler_config.json` file"
|
| 397 |
+
)
|
| 398 |
+
deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
|
| 399 |
+
new_config = dict(scheduler.config)
|
| 400 |
+
new_config["skip_prk_steps"] = True
|
| 401 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 402 |
+
|
| 403 |
+
if safety_checker is None and requires_safety_checker:
|
| 404 |
+
logger.warning(
|
| 405 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 406 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 407 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 408 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 409 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 410 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 411 |
+
)
|
| 412 |
+
|
| 413 |
+
if safety_checker is not None and feature_extractor is None:
|
| 414 |
+
raise ValueError(
|
| 415 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 416 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 417 |
+
)
|
| 418 |
+
|
| 419 |
+
is_unet_version_less_0_9_0 = (
|
| 420 |
+
unet is not None
|
| 421 |
+
and hasattr(unet.config, "_diffusers_version")
|
| 422 |
+
and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
|
| 423 |
+
)
|
| 424 |
+
is_unet_sample_size_less_64 = (
|
| 425 |
+
unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 426 |
+
)
|
| 427 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 428 |
+
deprecation_message = (
|
| 429 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 430 |
+
" 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
|
| 431 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 432 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 433 |
+
" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 434 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 435 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 436 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 437 |
+
" the `unet/config.json` file"
|
| 438 |
+
)
|
| 439 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 440 |
+
new_config = dict(unet.config)
|
| 441 |
+
new_config["sample_size"] = 64
|
| 442 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 443 |
+
|
| 444 |
+
# Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4
|
| 445 |
+
if unet is not None and unet.config.in_channels != 9:
|
| 446 |
+
logger.info(f"You have loaded a UNet with {unet.config.in_channels} input channels which.")
|
| 447 |
+
|
| 448 |
+
self.register_modules(
|
| 449 |
+
vae=vae,
|
| 450 |
+
text_encoder=text_encoder,
|
| 451 |
+
tokenizer=tokenizer,
|
| 452 |
+
unet=unet,
|
| 453 |
+
scheduler=scheduler,
|
| 454 |
+
safety_checker=safety_checker,
|
| 455 |
+
feature_extractor=feature_extractor,
|
| 456 |
+
)
|
| 457 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
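# For reference (note added for clarity, not in the original pipeline): the Stable Diffusion v1 VAE config
# has 4 entries in `block_out_channels`, so vae_scale_factor = 2 ** 3 = 8 and a 512x512 image maps to a
# 64x64 latent grid.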
|
| 458 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 459 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 460 |
+
|
| 461 |
+
""" Preparation for Adaptive Mask inpainting """
|
| 462 |
+
|
| 463 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
|
| 464 |
+
def enable_model_cpu_offload(self, gpu_id=0):
|
| 465 |
+
r"""
|
| 466 |
+
Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
|
| 467 |
+
time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
|
| 468 |
+
Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
|
| 469 |
+
iterative execution of the `unet`.
|
| 470 |
+
"""
|
| 471 |
+
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
|
| 472 |
+
from accelerate import cpu_offload_with_hook
|
| 473 |
+
else:
|
| 474 |
+
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
|
| 475 |
+
|
| 476 |
+
device = torch.device(f"cuda:{gpu_id}")
|
| 477 |
+
|
| 478 |
+
if self.device.type != "cpu":
|
| 479 |
+
self.to("cpu", silence_dtype_warnings=True)
|
| 480 |
+
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
|
| 481 |
+
|
| 482 |
+
hook = None
|
| 483 |
+
for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
|
| 484 |
+
_, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
|
| 485 |
+
|
| 486 |
+
if self.safety_checker is not None:
|
| 487 |
+
_, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
|
| 488 |
+
|
| 489 |
+
# We'll offload the last model manually.
|
| 490 |
+
self.final_offload_hook = hook
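# Hypothetical usage sketch (the pipeline name follows the `__call__` docstring example later in this file;
# everything else is assumed):
#   pipe = AdaptiveMaskInpaintPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-inpainting")
#   pipe.enable_model_cpu_offload()  # text_encoder, unet and vae are moved onto the GPU one at a time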
|
| 491 |
+
|
| 492 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
| 493 |
+
def _encode_prompt(
|
| 494 |
+
self,
|
| 495 |
+
prompt,
|
| 496 |
+
device,
|
| 497 |
+
num_images_per_prompt,
|
| 498 |
+
do_classifier_free_guidance,
|
| 499 |
+
negative_prompt=None,
|
| 500 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 501 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 502 |
+
lora_scale: Optional[float] = None,
|
| 503 |
+
):
|
| 504 |
+
r"""
|
| 505 |
+
Encodes the prompt into text encoder hidden states.
|
| 506 |
+
|
| 507 |
+
Args:
|
| 508 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 509 |
+
prompt to be encoded
|
| 510 |
+
device: (`torch.device`):
|
| 511 |
+
torch device
|
| 512 |
+
num_images_per_prompt (`int`):
|
| 513 |
+
number of images that should be generated per prompt
|
| 514 |
+
do_classifier_free_guidance (`bool`):
|
| 515 |
+
whether to use classifier free guidance or not
|
| 516 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 517 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 518 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 519 |
+
less than `1`).
|
| 520 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 521 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 522 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 523 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 524 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 525 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 526 |
+
argument.
|
| 527 |
+
lora_scale (`float`, *optional*):
|
| 528 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 529 |
+
"""
|
| 530 |
+
# set lora scale so that monkey patched LoRA
|
| 531 |
+
# function of text encoder can correctly access it
|
| 532 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 533 |
+
self._lora_scale = lora_scale
|
| 534 |
+
|
| 535 |
+
if prompt is not None and isinstance(prompt, str):
|
| 536 |
+
batch_size = 1
|
| 537 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 538 |
+
batch_size = len(prompt)
|
| 539 |
+
else:
|
| 540 |
+
batch_size = prompt_embeds.shape[0]
|
| 541 |
+
|
| 542 |
+
if prompt_embeds is None:
|
| 543 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 544 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 545 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 546 |
+
|
| 547 |
+
text_inputs = self.tokenizer(
|
| 548 |
+
prompt,
|
| 549 |
+
padding="max_length",
|
| 550 |
+
max_length=self.tokenizer.model_max_length,
|
| 551 |
+
truncation=True,
|
| 552 |
+
return_tensors="pt",
|
| 553 |
+
)
|
| 554 |
+
text_input_ids = text_inputs.input_ids
|
| 555 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 556 |
+
|
| 557 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 558 |
+
text_input_ids, untruncated_ids
|
| 559 |
+
):
|
| 560 |
+
removed_text = self.tokenizer.batch_decode(
|
| 561 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 562 |
+
)
|
| 563 |
+
logger.warning(
|
| 564 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 565 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 566 |
+
)
|
| 567 |
+
|
| 568 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 569 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 570 |
+
else:
|
| 571 |
+
attention_mask = None
|
| 572 |
+
|
| 573 |
+
prompt_embeds = self.text_encoder(
|
| 574 |
+
text_input_ids.to(device),
|
| 575 |
+
attention_mask=attention_mask,
|
| 576 |
+
)
|
| 577 |
+
prompt_embeds = prompt_embeds[0]
|
| 578 |
+
|
| 579 |
+
if self.text_encoder is not None:
|
| 580 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 581 |
+
elif self.unet is not None:
|
| 582 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 583 |
+
else:
|
| 584 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 585 |
+
|
| 586 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 587 |
+
|
| 588 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 589 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 590 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 591 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 592 |
+
|
| 593 |
+
# get unconditional embeddings for classifier free guidance
|
| 594 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 595 |
+
uncond_tokens: List[str]
|
| 596 |
+
if negative_prompt is None:
|
| 597 |
+
uncond_tokens = [""] * batch_size
|
| 598 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 599 |
+
raise TypeError(
|
| 600 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 601 |
+
f" {type(prompt)}."
|
| 602 |
+
)
|
| 603 |
+
elif isinstance(negative_prompt, str):
|
| 604 |
+
uncond_tokens = [negative_prompt]
|
| 605 |
+
elif batch_size != len(negative_prompt):
|
| 606 |
+
raise ValueError(
|
| 607 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 608 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 609 |
+
" the batch size of `prompt`."
|
| 610 |
+
)
|
| 611 |
+
else:
|
| 612 |
+
uncond_tokens = negative_prompt
|
| 613 |
+
|
| 614 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 615 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 616 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 617 |
+
|
| 618 |
+
max_length = prompt_embeds.shape[1]
|
| 619 |
+
uncond_input = self.tokenizer(
|
| 620 |
+
uncond_tokens,
|
| 621 |
+
padding="max_length",
|
| 622 |
+
max_length=max_length,
|
| 623 |
+
truncation=True,
|
| 624 |
+
return_tensors="pt",
|
| 625 |
+
)
|
| 626 |
+
|
| 627 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 628 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 629 |
+
else:
|
| 630 |
+
attention_mask = None
|
| 631 |
+
|
| 632 |
+
negative_prompt_embeds = self.text_encoder(
|
| 633 |
+
uncond_input.input_ids.to(device),
|
| 634 |
+
attention_mask=attention_mask,
|
| 635 |
+
)
|
| 636 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 637 |
+
|
| 638 |
+
if do_classifier_free_guidance:
|
| 639 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 640 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 641 |
+
|
| 642 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 643 |
+
|
| 644 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 645 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 646 |
+
|
| 647 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 648 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 649 |
+
# to avoid doing two forward passes
|
| 650 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 651 |
+
|
| 652 |
+
return prompt_embeds
|
| 653 |
+
|
| 654 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
|
| 655 |
+
def run_safety_checker(self, image, device, dtype):
|
| 656 |
+
if self.safety_checker is None:
|
| 657 |
+
has_nsfw_concept = None
|
| 658 |
+
else:
|
| 659 |
+
if torch.is_tensor(image):
|
| 660 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 661 |
+
else:
|
| 662 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 663 |
+
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
| 664 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 665 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 666 |
+
)
|
| 667 |
+
return image, has_nsfw_concept
|
| 668 |
+
|
| 669 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 670 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 671 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 672 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 673 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 674 |
+
# and should be between [0, 1]
|
| 675 |
+
|
| 676 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 677 |
+
extra_step_kwargs = {}
|
| 678 |
+
if accepts_eta:
|
| 679 |
+
extra_step_kwargs["eta"] = eta
|
| 680 |
+
|
| 681 |
+
# check if the scheduler accepts generator
|
| 682 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 683 |
+
if accepts_generator:
|
| 684 |
+
extra_step_kwargs["generator"] = generator
|
| 685 |
+
return extra_step_kwargs
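# Illustrative note (based on the scheduler signatures, not part of the original file): for DDIMScheduler,
# whose step() accepts both `eta` and `generator`, this returns {"eta": eta, "generator": generator};
# for a scheduler that accepts neither argument it returns an empty dict.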
|
| 686 |
+
|
| 687 |
+
def check_inputs(
|
| 688 |
+
self,
|
| 689 |
+
prompt,
|
| 690 |
+
height,
|
| 691 |
+
width,
|
| 692 |
+
strength,
|
| 693 |
+
callback_steps,
|
| 694 |
+
negative_prompt=None,
|
| 695 |
+
prompt_embeds=None,
|
| 696 |
+
negative_prompt_embeds=None,
|
| 697 |
+
):
|
| 698 |
+
if strength < 0 or strength > 1:
|
| 699 |
+
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
|
| 700 |
+
|
| 701 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 702 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 703 |
+
|
| 704 |
+
if (callback_steps is None) or (
|
| 705 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 706 |
+
):
|
| 707 |
+
raise ValueError(
|
| 708 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 709 |
+
f" {type(callback_steps)}."
|
| 710 |
+
)
|
| 711 |
+
|
| 712 |
+
if prompt is not None and prompt_embeds is not None:
|
| 713 |
+
raise ValueError(
|
| 714 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 715 |
+
" only forward one of the two."
|
| 716 |
+
)
|
| 717 |
+
elif prompt is None and prompt_embeds is None:
|
| 718 |
+
raise ValueError(
|
| 719 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 720 |
+
)
|
| 721 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 722 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 723 |
+
|
| 724 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 725 |
+
raise ValueError(
|
| 726 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 727 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 728 |
+
)
|
| 729 |
+
|
| 730 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 731 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 732 |
+
raise ValueError(
|
| 733 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 734 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 735 |
+
f" {negative_prompt_embeds.shape}."
|
| 736 |
+
)
|
| 737 |
+
|
| 738 |
+
def prepare_latents(
|
| 739 |
+
self,
|
| 740 |
+
batch_size,
|
| 741 |
+
num_channels_latents,
|
| 742 |
+
height,
|
| 743 |
+
width,
|
| 744 |
+
dtype,
|
| 745 |
+
device,
|
| 746 |
+
generator,
|
| 747 |
+
latents=None,
|
| 748 |
+
image=None,
|
| 749 |
+
timestep=None,
|
| 750 |
+
is_strength_max=True,
|
| 751 |
+
return_noise=False,
|
| 752 |
+
return_image_latents=False,
|
| 753 |
+
):
|
| 754 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 755 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 756 |
+
raise ValueError(
|
| 757 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 758 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 759 |
+
)
|
| 760 |
+
|
| 761 |
+
if (image is None or timestep is None) and not is_strength_max:
|
| 762 |
+
raise ValueError(
|
| 763 |
+
"Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
|
| 764 |
+
"However, either the image or the noise timestep has not been provided."
|
| 765 |
+
)
|
| 766 |
+
|
| 767 |
+
if return_image_latents or (latents is None and not is_strength_max):
|
| 768 |
+
image = image.to(device=device, dtype=dtype)
|
| 769 |
+
image_latents = self._encode_vae_image(image=image, generator=generator)
|
| 770 |
+
|
| 771 |
+
if latents is None:
|
| 772 |
+
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 773 |
+
# if strength is 1. then initialise the latents to noise, else initialise them to image + noise
|
| 774 |
+
latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
|
| 775 |
+
# if pure noise then scale the initial latents by the Scheduler's init sigma
|
| 776 |
+
latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
|
| 777 |
+
else:
|
| 778 |
+
noise = latents.to(device)
|
| 779 |
+
latents = noise * self.scheduler.init_noise_sigma
|
| 780 |
+
|
| 781 |
+
outputs = (latents,)
|
| 782 |
+
|
| 783 |
+
if return_noise:
|
| 784 |
+
outputs += (noise,)
|
| 785 |
+
|
| 786 |
+
if return_image_latents:
|
| 787 |
+
outputs += (image_latents,)
|
| 788 |
+
|
| 789 |
+
return outputs
|
| 790 |
+
|
| 791 |
+
def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
|
| 792 |
+
if isinstance(generator, list):
|
| 793 |
+
image_latents = [
|
| 794 |
+
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
|
| 795 |
+
for i in range(image.shape[0])
|
| 796 |
+
]
|
| 797 |
+
image_latents = torch.cat(image_latents, dim=0)
|
| 798 |
+
else:
|
| 799 |
+
image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
|
| 800 |
+
|
| 801 |
+
image_latents = self.vae.config.scaling_factor * image_latents
|
| 802 |
+
|
| 803 |
+
return image_latents
|
| 804 |
+
|
| 805 |
+
def prepare_mask_latents(
|
| 806 |
+
self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
|
| 807 |
+
):
|
| 808 |
+
# resize the mask to latents shape as we concatenate the mask to the latents
|
| 809 |
+
# we do that before converting to dtype to avoid breaking in case we're using cpu_offload
|
| 810 |
+
# and half precision
|
| 811 |
+
mask = torch.nn.functional.interpolate(
|
| 812 |
+
mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 813 |
+
)
|
| 814 |
+
mask = mask.to(device=device, dtype=dtype)
|
| 815 |
+
|
| 816 |
+
masked_image = masked_image.to(device=device, dtype=dtype)
|
| 817 |
+
masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
|
| 818 |
+
|
| 819 |
+
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
|
| 820 |
+
if mask.shape[0] < batch_size:
|
| 821 |
+
if not batch_size % mask.shape[0] == 0:
|
| 822 |
+
raise ValueError(
|
| 823 |
+
"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
|
| 824 |
+
f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
|
| 825 |
+
" of masks that you pass is divisible by the total requested batch size."
|
| 826 |
+
)
|
| 827 |
+
mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
|
| 828 |
+
if masked_image_latents.shape[0] < batch_size:
|
| 829 |
+
if not batch_size % masked_image_latents.shape[0] == 0:
|
| 830 |
+
raise ValueError(
|
| 831 |
+
"The passed images and the required batch size don't match. Images are supposed to be duplicated"
|
| 832 |
+
f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
|
| 833 |
+
" Make sure the number of images that you pass is divisible by the total requested batch size."
|
| 834 |
+
)
|
| 835 |
+
masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
|
| 836 |
+
|
| 837 |
+
mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
|
| 838 |
+
masked_image_latents = (
|
| 839 |
+
torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
|
| 840 |
+
)
|
| 841 |
+
|
| 842 |
+
# aligning device to prevent device errors when concating it with the latent model input
|
| 843 |
+
masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
|
| 844 |
+
return mask, masked_image_latents
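# Shape sketch (assuming a 512x512 input, vae_scale_factor=8 and classifier-free guidance enabled):
# `mask` comes out as (2 * batch_size, 1, 64, 64) and `masked_image_latents` as (2 * batch_size, 4, 64, 64),
# which together provide the 1 + 4 extra input channels expected by the 9-channel inpainting UNet.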
|
| 845 |
+
|
| 846 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
|
| 847 |
+
def get_timesteps(self, num_inference_steps, strength, device):
|
| 848 |
+
# get the original timestep using init_timestep
|
| 849 |
+
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
| 850 |
+
|
| 851 |
+
t_start = max(num_inference_steps - init_timestep, 0)
|
| 852 |
+
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
|
| 853 |
+
|
| 854 |
+
return timesteps, num_inference_steps - t_start
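# Worked example (illustrative only): with num_inference_steps=50, strength=0.6 and a first-order scheduler,
# init_timestep = min(int(50 * 0.6), 50) = 30 and t_start = 20, so this returns the final 30 scheduler
# timesteps together with the adjusted step count of 30.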
|
| 855 |
+
|
| 856 |
+
@torch.no_grad()
|
| 857 |
+
def __call__(
|
| 858 |
+
self,
|
| 859 |
+
prompt: Union[str, List[str]] = None,
|
| 860 |
+
image: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
| 861 |
+
default_mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
|
| 862 |
+
height: Optional[int] = None,
|
| 863 |
+
width: Optional[int] = None,
|
| 864 |
+
strength: float = 1.0,
|
| 865 |
+
num_inference_steps: int = 50,
|
| 866 |
+
guidance_scale: float = 7.5,
|
| 867 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 868 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 869 |
+
eta: float = 0.0,
|
| 870 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 871 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 872 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 873 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 874 |
+
output_type: Optional[str] = "pil",
|
| 875 |
+
return_dict: bool = True,
|
| 876 |
+
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
| 877 |
+
callback_steps: int = 1,
|
| 878 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 879 |
+
use_adaptive_mask: bool = True,
|
| 880 |
+
enforce_full_mask_ratio: float = 0.5,
|
| 881 |
+
human_detection_thres: float = 0.008,
|
| 882 |
+
visualization_save_dir: str = None,
|
| 883 |
+
):
|
| 884 |
+
r"""
|
| 885 |
+
The call function to the pipeline for generation.
|
| 886 |
+
|
| 887 |
+
Args:
|
| 888 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 889 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 890 |
+
image (`PIL.Image.Image`):
|
| 891 |
+
`Image` or tensor representing an image batch to be inpainted (which parts of the image to be masked
|
| 892 |
+
out with `default_mask_image` and repainted according to `prompt`).
|
| 893 |
+
default_mask_image (`PIL.Image.Image`):
|
| 894 |
+
`Image` or tensor representing an image batch to mask `image`. White pixels in the mask are repainted
|
| 895 |
+
while black pixels are preserved. If `default_mask_image` is a PIL image, it is converted to a single channel
|
| 896 |
+
(luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the
|
| 897 |
+
expected shape would be `(B, H, W, 1)`.
|
| 898 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 899 |
+
The height in pixels of the generated image.
|
| 900 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 901 |
+
The width in pixels of the generated image.
|
| 902 |
+
strength (`float`, *optional*, defaults to 1.0):
|
| 903 |
+
Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
|
| 904 |
+
starting point and more noise is added the higher the `strength`. The number of denoising steps depends
|
| 905 |
+
on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
|
| 906 |
+
process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
|
| 907 |
+
essentially ignores `image`.
|
| 908 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 909 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 910 |
+
expense of slower inference. This parameter is modulated by `strength`.
|
| 911 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 912 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 913 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 914 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 915 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 916 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 917 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 918 |
+
The number of images to generate per prompt.
|
| 919 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 920 |
+
Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies
|
| 921 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 922 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 923 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 924 |
+
generation deterministic.
|
| 925 |
+
latents (`torch.FloatTensor`, *optional*):
|
| 926 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 927 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 928 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 929 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 930 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 931 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 932 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 933 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 934 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 935 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 936 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 937 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 938 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 939 |
+
plain tuple.
|
| 940 |
+
callback (`Callable`, *optional*):
|
| 941 |
+
A function called every `callback_steps` steps during inference. The function is called with the
|
| 942 |
+
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 943 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 944 |
+
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
| 945 |
+
every step.
|
| 946 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 947 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 948 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 949 |
+
|
| 950 |
+
Examples:
|
| 951 |
+
|
| 952 |
+
```py
>>> import PIL
>>> import requests
>>> import torch
>>> from io import BytesIO

>>> from diffusers import AdaptiveMaskInpaintPipeline


>>> def download_image(url):
...     response = requests.get(url)
...     return PIL.Image.open(BytesIO(response.content)).convert("RGB")


>>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
>>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

>>> init_image = download_image(img_url).resize((512, 512))
>>> default_mask_image = download_image(mask_url).resize((512, 512))

>>> pipe = AdaptiveMaskInpaintPipeline.from_pretrained(
...     "stable-diffusion-v1-5/stable-diffusion-inpainting", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")

>>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
>>> image = pipe(prompt=prompt, image=init_image, default_mask_image=default_mask_image).images[0]
```
|
| 980 |
+
|
| 981 |
+
Returns:
|
| 982 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 983 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 984 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 985 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 986 |
+
"not-safe-for-work" (nsfw) content.
|
| 987 |
+
"""
|
| 988 |
+
# 0. Default height and width to unet
|
| 989 |
+
width, height = image.size
|
| 990 |
+
# height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 991 |
+
# width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 992 |
+
|
| 993 |
+
# 1. Check inputs
|
| 994 |
+
self.check_inputs(
|
| 995 |
+
prompt,
|
| 996 |
+
height,
|
| 997 |
+
width,
|
| 998 |
+
strength,
|
| 999 |
+
callback_steps,
|
| 1000 |
+
negative_prompt,
|
| 1001 |
+
prompt_embeds,
|
| 1002 |
+
negative_prompt_embeds,
|
| 1003 |
+
)
|
| 1004 |
+
|
| 1005 |
+
# 2. Define call parameters
|
| 1006 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1007 |
+
batch_size = 1
|
| 1008 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1009 |
+
batch_size = len(prompt)
|
| 1010 |
+
else:
|
| 1011 |
+
batch_size = prompt_embeds.shape[0]
|
| 1012 |
+
|
| 1013 |
+
device = self._execution_device
|
| 1014 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 1015 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 1016 |
+
# corresponds to doing no classifier free guidance.
|
| 1017 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 1018 |
+
|
| 1019 |
+
# 3. Encode input prompt
|
| 1020 |
+
text_encoder_lora_scale = (
|
| 1021 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 1022 |
+
)
|
| 1023 |
+
prompt_embeds = self._encode_prompt(
|
| 1024 |
+
prompt,
|
| 1025 |
+
device,
|
| 1026 |
+
num_images_per_prompt,
|
| 1027 |
+
do_classifier_free_guidance,
|
| 1028 |
+
negative_prompt,
|
| 1029 |
+
prompt_embeds=prompt_embeds,
|
| 1030 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1031 |
+
lora_scale=text_encoder_lora_scale,
|
| 1032 |
+
)
|
| 1033 |
+
|
| 1034 |
+
# 4. set timesteps
|
| 1035 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 1036 |
+
timesteps, num_inference_steps = self.get_timesteps(
|
| 1037 |
+
num_inference_steps=num_inference_steps, strength=strength, device=device
|
| 1038 |
+
)
|
| 1039 |
+
# check that number of inference steps is not < 1 - as this doesn't make sense
|
| 1040 |
+
if num_inference_steps < 1:
|
| 1041 |
+
raise ValueError(
|
| 1042 |
+
f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
|
| 1043 |
+
f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
|
| 1044 |
+
)
|
| 1045 |
+
# at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
|
| 1046 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 1047 |
+
# create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
|
| 1048 |
+
is_strength_max = strength == 1.0
|
| 1049 |
+
|
| 1050 |
+
# 5. Preprocess mask and image (will be used later, once again)
|
| 1051 |
+
mask, masked_image, init_image = prepare_mask_and_masked_image(
|
| 1052 |
+
image, default_mask_image, height, width, return_image=True
|
| 1053 |
+
)
|
| 1054 |
+
default_mask_image_np = np.array(default_mask_image).astype(np.uint8) / 255
|
| 1055 |
+
mask_condition = mask.clone()
|
| 1056 |
+
|
| 1057 |
+
# 6. Prepare latent variables
|
| 1058 |
+
num_channels_latents = self.vae.config.latent_channels
|
| 1059 |
+
num_channels_unet = self.unet.config.in_channels
|
| 1060 |
+
return_image_latents = num_channels_unet == 4
|
| 1061 |
+
|
| 1062 |
+
latents_outputs = self.prepare_latents(
|
| 1063 |
+
batch_size * num_images_per_prompt,
|
| 1064 |
+
num_channels_latents,
|
| 1065 |
+
height,
|
| 1066 |
+
width,
|
| 1067 |
+
prompt_embeds.dtype,
|
| 1068 |
+
device,
|
| 1069 |
+
generator,
|
| 1070 |
+
latents,
|
| 1071 |
+
image=init_image,
|
| 1072 |
+
timestep=latent_timestep,
|
| 1073 |
+
is_strength_max=is_strength_max,
|
| 1074 |
+
return_noise=True,
|
| 1075 |
+
return_image_latents=return_image_latents,
|
| 1076 |
+
)
|
| 1077 |
+
|
| 1078 |
+
if return_image_latents:
|
| 1079 |
+
latents, noise, image_latents = latents_outputs
|
| 1080 |
+
else:
|
| 1081 |
+
latents, noise = latents_outputs
|
| 1082 |
+
|
| 1083 |
+
# 7. Prepare mask latent variables
|
| 1084 |
+
mask, masked_image_latents = self.prepare_mask_latents(
|
| 1085 |
+
mask,
|
| 1086 |
+
masked_image,
|
| 1087 |
+
batch_size * num_images_per_prompt,
|
| 1088 |
+
height,
|
| 1089 |
+
width,
|
| 1090 |
+
prompt_embeds.dtype,
|
| 1091 |
+
device,
|
| 1092 |
+
generator,
|
| 1093 |
+
do_classifier_free_guidance,
|
| 1094 |
+
)
|
| 1095 |
+
|
| 1096 |
+
# 8. Check that sizes of mask, masked image and latents match
|
| 1097 |
+
if num_channels_unet == 9:
|
| 1098 |
+
# default case for stable-diffusion-v1-5/stable-diffusion-inpainting
|
| 1099 |
+
num_channels_mask = mask.shape[1]
|
| 1100 |
+
num_channels_masked_image = masked_image_latents.shape[1]
|
| 1101 |
+
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
|
| 1102 |
+
raise ValueError(
|
| 1103 |
+
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
|
| 1104 |
+
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
|
| 1105 |
+
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
|
| 1106 |
+
f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of"
|
| 1107 |
+
" `pipeline.unet` or your `default_mask_image` or `image` input."
|
| 1108 |
+
)
|
| 1109 |
+
elif num_channels_unet != 4:
|
| 1110 |
+
raise ValueError(
|
| 1111 |
+
f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
|
| 1112 |
+
)
|
| 1113 |
+
|
| 1114 |
+
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1115 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1116 |
+
|
| 1117 |
+
# 10. Denoising loop
|
| 1118 |
+
mask_image_np = default_mask_image_np
|
| 1119 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 1120 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1121 |
+
for i, t in enumerate(timesteps):
|
| 1122 |
+
# expand the latents if we are doing classifier free guidance
|
| 1123 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 1124 |
+
|
| 1125 |
+
# concat latents, mask, masked_image_latents in the channel dimension
|
| 1126 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1127 |
+
|
| 1128 |
+
if num_channels_unet == 9:
|
| 1129 |
+
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
|
| 1130 |
+
else:
|
| 1131 |
+
raise NotImplementedError
|
| 1132 |
+
|
| 1133 |
+
# predict the noise residual
|
| 1134 |
+
noise_pred = self.unet(
|
| 1135 |
+
latent_model_input,
|
| 1136 |
+
t,
|
| 1137 |
+
encoder_hidden_states=prompt_embeds,
|
| 1138 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1139 |
+
return_dict=False,
|
| 1140 |
+
)[0]
|
| 1141 |
+
|
| 1142 |
+
# perform guidance
|
| 1143 |
+
if do_classifier_free_guidance:
|
| 1144 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1145 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1146 |
+
|
| 1147 |
+
# compute the previous noisy sample x_t -> x_t-1 & predicted original sample x_0
|
| 1148 |
+
outputs = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=True)
|
| 1149 |
+
latents = outputs["prev_sample"] # x_t-1
|
| 1150 |
+
pred_orig_latents = outputs["pred_original_sample"] # x_0
|
| 1151 |
+
|
| 1152 |
+
# run segmentation
|
| 1153 |
+
if use_adaptive_mask:
|
| 1154 |
+
if enforce_full_mask_ratio > 0.0:
|
| 1155 |
+
use_default_mask = t < self.scheduler.config.num_train_timesteps * enforce_full_mask_ratio
|
| 1156 |
+
elif enforce_full_mask_ratio == 0.0:
|
| 1157 |
+
use_default_mask = False
|
| 1158 |
+
else:
|
| 1159 |
+
raise NotImplementedError
|
| 1160 |
+
|
| 1161 |
+
pred_orig_image = self.decode_to_npuint8_image(pred_orig_latents)
|
| 1162 |
+
dilate_num = self.adaptive_mask_settings.dilate_scheduler(i)
|
| 1163 |
+
do_adapt_mask = self.adaptive_mask_settings.provoke_scheduler(i)
|
| 1164 |
+
if do_adapt_mask:
|
| 1165 |
+
mask, masked_image_latents, mask_image_np, vis_np = self.adapt_mask(
|
| 1166 |
+
init_image,
|
| 1167 |
+
pred_orig_image,
|
| 1168 |
+
default_mask_image_np,
|
| 1169 |
+
dilate_num=dilate_num,
|
| 1170 |
+
use_default_mask=use_default_mask,
|
| 1171 |
+
height=height,
|
| 1172 |
+
width=width,
|
| 1173 |
+
batch_size=batch_size,
|
| 1174 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1175 |
+
prompt_embeds=prompt_embeds,
|
| 1176 |
+
device=device,
|
| 1177 |
+
generator=generator,
|
| 1178 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 1179 |
+
i=i,
|
| 1180 |
+
human_detection_thres=human_detection_thres,
|
| 1181 |
+
mask_image_np=mask_image_np,
|
| 1182 |
+
)
|
| 1183 |
+
|
| 1184 |
+
if self.adaptive_mask_model.use_visualizer:
|
| 1185 |
+
import matplotlib.pyplot as plt
|
| 1186 |
+
|
| 1187 |
+
# mask_image_new_colormap = np.clip(0.6 + (1.0 - mask_image_np), a_min=0.0, a_max=1.0) * 255
|
| 1188 |
+
|
| 1189 |
+
os.makedirs(visualization_save_dir, exist_ok=True)
|
| 1190 |
+
|
| 1191 |
+
# Image.fromarray(mask_image_new_colormap).convert("L").save(f"{visualization_save_dir}/masks/{i:05}.png")
|
| 1192 |
+
plt.axis("off")
|
| 1193 |
+
plt.subplot(1, 2, 1)
|
| 1194 |
+
plt.imshow(mask_image_np)
|
| 1195 |
+
plt.subplot(1, 2, 2)
|
| 1196 |
+
plt.imshow(pred_orig_image)
|
| 1197 |
+
plt.savefig(f"{visualization_save_dir}/{i:05}.png", bbox_inches="tight")
|
| 1198 |
+
plt.close("all")
|
| 1199 |
+
|
| 1200 |
+
if num_channels_unet == 4:
|
| 1201 |
+
init_latents_proper = image_latents[:1]
|
| 1202 |
+
init_mask = mask[:1]
|
| 1203 |
+
|
| 1204 |
+
if i < len(timesteps) - 1:
|
| 1205 |
+
noise_timestep = timesteps[i + 1]
|
| 1206 |
+
init_latents_proper = self.scheduler.add_noise(
|
| 1207 |
+
init_latents_proper, noise, torch.tensor([noise_timestep])
|
| 1208 |
+
)
|
| 1209 |
+
|
| 1210 |
+
latents = (1 - init_mask) * init_latents_proper + init_mask * latents
|
| 1211 |
+
|
| 1212 |
+
# call the callback, if provided
|
| 1213 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1214 |
+
progress_bar.update()
|
| 1215 |
+
if callback is not None and i % callback_steps == 0:
|
| 1216 |
+
callback(i, t, latents)
|
| 1217 |
+
|
| 1218 |
+
if not output_type == "latent":
|
| 1219 |
+
condition_kwargs = {}
|
| 1220 |
+
if isinstance(self.vae, AsymmetricAutoencoderKL):
|
| 1221 |
+
init_image = init_image.to(device=device, dtype=masked_image_latents.dtype)
|
| 1222 |
+
init_image_condition = init_image.clone()
|
| 1223 |
+
init_image = self._encode_vae_image(init_image, generator=generator)
|
| 1224 |
+
mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype)
|
| 1225 |
+
condition_kwargs = {"image": init_image_condition, "mask": mask_condition}
|
| 1226 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, **condition_kwargs)[0]
|
| 1227 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 1228 |
+
else:
|
| 1229 |
+
image = latents
|
| 1230 |
+
has_nsfw_concept = None
|
| 1231 |
+
|
| 1232 |
+
if has_nsfw_concept is None:
|
| 1233 |
+
do_denormalize = [True] * image.shape[0]
|
| 1234 |
+
else:
|
| 1235 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 1236 |
+
|
| 1237 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 1238 |
+
|
| 1239 |
+
# Offload last model to CPU
|
| 1240 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 1241 |
+
self.final_offload_hook.offload()
|
| 1242 |
+
|
| 1243 |
+
if self.adaptive_mask_model.use_visualizer:
|
| 1244 |
+
generate_video_from_imgs(images_save_directory=visualization_save_dir, fps=10, delete_dir=True)
|
| 1245 |
+
|
| 1246 |
+
if not return_dict:
|
| 1247 |
+
return (image, has_nsfw_concept)
|
| 1248 |
+
|
| 1249 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
| 1250 |
+
|
| 1251 |
+
def decode_to_npuint8_image(self, latents):
|
| 1252 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, **{})[
|
| 1253 |
+
0
|
| 1254 |
+
] # torch, float32, -1.~1.
|
| 1255 |
+
image = self.image_processor.postprocess(image, output_type="pt", do_denormalize=[True] * image.shape[0])
|
| 1256 |
+
image = (image.squeeze().permute(1, 2, 0).detach().cpu().numpy() * 255).astype(np.uint8) # np, uint8, 0~255
|
| 1257 |
+
return image
|
| 1258 |
+
|
| 1259 |
+
def register_adaptive_mask_settings(self):
|
| 1260 |
+
from easydict import EasyDict
|
| 1261 |
+
|
| 1262 |
+
num_steps = 50
|
| 1263 |
+
|
| 1264 |
+
step_num = int(num_steps * 0.1)
|
| 1265 |
+
final_step_num = num_steps - step_num * 7
|
| 1266 |
+
# adaptive mask settings
|
| 1267 |
+
self.adaptive_mask_settings = EasyDict(
|
| 1268 |
+
dilate_scheduler=MaskDilateScheduler(
|
| 1269 |
+
max_dilate_num=20,
|
| 1270 |
+
num_inference_steps=num_steps,
|
| 1271 |
+
schedule=[20] * step_num
|
| 1272 |
+
+ [10] * step_num
|
| 1273 |
+
+ [5] * step_num
|
| 1274 |
+
+ [4] * step_num
|
| 1275 |
+
+ [3] * step_num
|
| 1276 |
+
+ [2] * step_num
|
| 1277 |
+
+ [1] * step_num
|
| 1278 |
+
+ [0] * final_step_num,
|
| 1279 |
+
),
|
| 1280 |
+
dilate_kernel=np.ones((3, 3), dtype=np.uint8),
|
| 1281 |
+
provoke_scheduler=ProvokeScheduler(
|
| 1282 |
+
num_inference_steps=num_steps,
|
| 1283 |
+
schedule=list(range(2, 10 + 1, 2)) + list(range(12, 40 + 1, 2)) + [45],
|
| 1284 |
+
is_zero_indexing=False,
|
| 1285 |
+
),
|
| 1286 |
+
)
|
| 1287 |
+
|
| 1288 |
+
def register_adaptive_mask_model(self):
|
| 1289 |
+
# declare segmentation model used for mask adaptation
|
| 1290 |
+
use_visualizer = True
|
| 1291 |
+
# assert not use_visualizer, \
|
| 1292 |
+
# """
|
| 1293 |
+
# If you plan to 'use_visualizer', USE WITH CAUTION.
|
| 1294 |
+
# It creates a directory of images and masks, which is used for merging into a video.
|
| 1295 |
+
# The procedure involves deleting the directory of images, which means that
|
| 1296 |
+
# if you set the directory wrong you can have other important files blown away.
|
| 1297 |
+
# """
|
| 1298 |
+
|
| 1299 |
+
self.adaptive_mask_model = PointRendPredictor(
|
| 1300 |
+
# pointrend_thres=0.2,
|
| 1301 |
+
pointrend_thres=0.9,
|
| 1302 |
+
device="cuda" if torch.cuda.is_available() else "cpu",
|
| 1303 |
+
use_visualizer=use_visualizer,
|
| 1304 |
+
config_pth="pointrend_rcnn_R_50_FPN_3x_coco.yaml",
|
| 1305 |
+
weights_pth="model_final_edd263.pkl",
|
| 1306 |
+
)
|
| 1307 |
+
|
| 1308 |
+
def adapt_mask(self, init_image, pred_orig_image, default_mask_image, dilate_num, use_default_mask, **kwargs):
|
| 1309 |
+
## predict mask to use for adaptation
|
| 1310 |
+
adapt_output = self.adaptive_mask_model(pred_orig_image) # vis can be None if 'use_visualizer' is False
|
| 1311 |
+
mask = adapt_output["mask"]
|
| 1312 |
+
vis = adapt_output["vis"]
|
| 1313 |
+
|
| 1314 |
+
## if mask is empty or too small, use default_mask_image. else, use dilate and intersect with default_mask_image
|
| 1315 |
+
if use_default_mask or mask.sum() < 512 * 512 * kwargs["human_detection_thres"]: # 0.005
|
| 1316 |
+
# set mask as default mask
|
| 1317 |
+
mask = default_mask_image # HxW
|
| 1318 |
+
|
| 1319 |
+
else:
|
| 1320 |
+
## timestep-adaptive mask
|
| 1321 |
+
mask = cv2.dilate(
|
| 1322 |
+
mask, self.adaptive_mask_settings.dilate_kernel, iterations=dilate_num
|
| 1323 |
+
) # dilate_kernel: np.ones((3,3), np.uint8)
|
| 1324 |
+
mask = np.logical_and(mask, default_mask_image) # HxW
|
| 1325 |
+
|
| 1326 |
+
## prepare mask as pt tensor format
|
| 1327 |
+
mask = torch.tensor(mask, dtype=torch.float32).to(kwargs["device"])[None, None] # 1 x 1 x H x W
|
| 1328 |
+
mask, masked_image = prepare_mask_and_masked_image(
|
| 1329 |
+
init_image.to(kwargs["device"]), mask, kwargs["height"], kwargs["width"], return_image=False
|
| 1330 |
+
)
|
| 1331 |
+
|
| 1332 |
+
mask_image_np = mask.clone().squeeze().detach().cpu().numpy()
|
| 1333 |
+
|
| 1334 |
+
mask, masked_image_latents = self.prepare_mask_latents(
|
| 1335 |
+
mask,
|
| 1336 |
+
masked_image,
|
| 1337 |
+
kwargs["batch_size"] * kwargs["num_images_per_prompt"],
|
| 1338 |
+
kwargs["height"],
|
| 1339 |
+
kwargs["width"],
|
| 1340 |
+
kwargs["prompt_embeds"].dtype,
|
| 1341 |
+
kwargs["device"],
|
| 1342 |
+
kwargs["generator"],
|
| 1343 |
+
kwargs["do_classifier_free_guidance"],
|
| 1344 |
+
)
|
| 1345 |
+
|
| 1346 |
+
return mask, masked_image_latents, mask_image_np, vis
|
| 1347 |
+
|
| 1348 |
+
|
| 1349 |
+
def seg2bbox(seg_mask: np.ndarray):
|
| 1350 |
+
nonzero_i, nonzero_j = seg_mask.nonzero()
|
| 1351 |
+
min_i, max_i = nonzero_i.min(), nonzero_i.max()
|
| 1352 |
+
min_j, max_j = nonzero_j.min(), nonzero_j.max()
|
| 1353 |
+
|
| 1354 |
+
return np.array([min_j, min_i, max_j + 1, max_i + 1])
|
| 1355 |
+
|
| 1356 |
+
|
| 1357 |
+
def merge_bbox(bboxes: list):
|
| 1358 |
+
assert len(bboxes) > 0
|
| 1359 |
+
|
| 1360 |
+
all_bboxes = np.stack(bboxes, axis=0) # shape: N_bbox X 4
|
| 1361 |
+
merged_bbox = np.zeros_like(all_bboxes[0]) # shape: 4,
|
| 1362 |
+
|
| 1363 |
+
merged_bbox[0] = all_bboxes[:, 0].min()
|
| 1364 |
+
merged_bbox[1] = all_bboxes[:, 1].min()
|
| 1365 |
+
merged_bbox[2] = all_bboxes[:, 2].max()
|
| 1366 |
+
merged_bbox[3] = all_bboxes[:, 3].max()
|
| 1367 |
+
|
| 1368 |
+
return merged_bbox
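# Minimal usage sketch (illustrative numbers): for two instance masks whose seg2bbox outputs are
# [10, 20, 50, 60] and [40, 30, 90, 80] (x_min, y_min, x_max, y_max), merge_bbox returns
# [10, 20, 90, 80], the tightest box covering both instances.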
|
| 1369 |
+
|
| 1370 |
+
|
| 1371 |
+
class PointRendPredictor:
|
| 1372 |
+
def __init__(
|
| 1373 |
+
self,
|
| 1374 |
+
cat_id_to_focus=0,
|
| 1375 |
+
pointrend_thres=0.9,
|
| 1376 |
+
device="cuda",
|
| 1377 |
+
use_visualizer=False,
|
| 1378 |
+
merge_mode="merge",
|
| 1379 |
+
config_pth=None,
|
| 1380 |
+
weights_pth=None,
|
| 1381 |
+
):
|
| 1382 |
+
super().__init__()
|
| 1383 |
+
|
| 1384 |
+
# category id to focus (default: 0, which is human)
|
| 1385 |
+
self.cat_id_to_focus = cat_id_to_focus
|
| 1386 |
+
|
| 1387 |
+
# setup coco metadata
|
| 1388 |
+
self.coco_metadata = MetadataCatalog.get("coco_2017_val")
|
| 1389 |
+
self.cfg = get_cfg()
|
| 1390 |
+
|
| 1391 |
+
# get segmentation model config
|
| 1392 |
+
point_rend.add_pointrend_config(self.cfg) # --> Add PointRend-specific config
|
| 1393 |
+
self.cfg.merge_from_file(config_pth)
|
| 1394 |
+
self.cfg.MODEL.WEIGHTS = weights_pth
|
| 1395 |
+
self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = pointrend_thres
|
| 1396 |
+
self.cfg.MODEL.DEVICE = device
|
| 1397 |
+
|
| 1398 |
+
# get segmentation model
|
| 1399 |
+
self.pointrend_seg_model = DefaultPredictor(self.cfg)
|
| 1400 |
+
|
| 1401 |
+
# settings for visualizer
|
| 1402 |
+
self.use_visualizer = use_visualizer
|
| 1403 |
+
|
| 1404 |
+
# mask-merge mode
|
| 1405 |
+
assert merge_mode in ["merge", "max-confidence"], f"'merge_mode': {merge_mode} not implemented."
|
| 1406 |
+
self.merge_mode = merge_mode
|
| 1407 |
+
|
| 1408 |
+
def merge_mask(self, masks, scores=None):
|
| 1409 |
+
if self.merge_mode == "merge":
|
| 1410 |
+
mask = np.any(masks, axis=0)
|
| 1411 |
+
elif self.merge_mode == "max-confidence":
|
| 1412 |
+
mask = masks[np.argmax(scores)]
|
| 1413 |
+
return mask
|
| 1414 |
+
|
| 1415 |
+
def vis_seg_on_img(self, image, mask):
|
| 1416 |
+
if type(mask) == np.ndarray:
|
| 1417 |
+
mask = torch.tensor(mask)
|
| 1418 |
+
v = Visualizer(image, self.coco_metadata, scale=0.5, instance_mode=ColorMode.IMAGE_BW)
|
| 1419 |
+
instances = Instances(image_size=image.shape[:2], pred_masks=mask if len(mask.shape) == 3 else mask[None])
|
| 1420 |
+
vis = v.draw_instance_predictions(instances.to("cpu")).get_image()
|
| 1421 |
+
return vis
|
| 1422 |
+
|
| 1423 |
+
def __call__(self, image):
|
| 1424 |
+
# run segmentation
|
| 1425 |
+
outputs = self.pointrend_seg_model(image)
|
| 1426 |
+
instances = outputs["instances"]
|
| 1427 |
+
|
| 1428 |
+
# merge instances for the category-id to focus
|
| 1429 |
+
is_class = instances.pred_classes == self.cat_id_to_focus
|
| 1430 |
+
masks = instances.pred_masks[is_class]
|
| 1431 |
+
masks = masks.detach().cpu().numpy() # [N, img_size, img_size]
|
| 1432 |
+
mask = self.merge_mask(masks, scores=instances.scores[is_class])
|
| 1433 |
+
|
| 1434 |
+
return {
|
| 1435 |
+
"asset_mask": None,
|
| 1436 |
+
"mask": mask.astype(np.uint8),
|
| 1437 |
+
"vis": self.vis_seg_on_img(image, mask) if self.use_visualizer else None,
|
| 1438 |
+
}
|
| 1439 |
+
|
| 1440 |
+
|
| 1441 |
+
class MaskDilateScheduler:
|
| 1442 |
+
def __init__(self, max_dilate_num=15, num_inference_steps=50, schedule=None):
|
| 1443 |
+
super().__init__()
|
| 1444 |
+
self.max_dilate_num = max_dilate_num
|
| 1445 |
+
self.schedule = [num_inference_steps - i for i in range(num_inference_steps)] if schedule is None else schedule
|
| 1446 |
+
assert len(self.schedule) == num_inference_steps
|
| 1447 |
+
|
| 1448 |
+
def __call__(self, i):
|
| 1449 |
+
return min(self.max_dilate_num, self.schedule[i])
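# Illustrative behaviour with the schedule built in register_adaptive_mask_settings (50 steps,
# max_dilate_num=20): step 0 returns 20 (strong dilation early on), mid-sampling steps return 5 down to 2,
# and the final 15 steps return 0, so the segmentation-derived mask tightens as denoising proceeds.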
|
| 1450 |
+
|
| 1451 |
+
|
| 1452 |
+
class ProvokeScheduler:
|
| 1453 |
+
def __init__(self, num_inference_steps=50, schedule=None, is_zero_indexing=False):
|
| 1454 |
+
super().__init__()
|
| 1455 |
+
if len(schedule) > 0:
|
| 1456 |
+
if is_zero_indexing:
|
| 1457 |
+
assert max(schedule) <= num_inference_steps - 1
|
| 1458 |
+
else:
|
| 1459 |
+
assert max(schedule) <= num_inference_steps
|
| 1460 |
+
|
| 1461 |
+
# register as self
|
| 1462 |
+
self.is_zero_indexing = is_zero_indexing
|
| 1463 |
+
self.schedule = schedule
|
| 1464 |
+
|
| 1465 |
+
def __call__(self, i):
|
| 1466 |
+
if self.is_zero_indexing:
|
| 1467 |
+
return i in self.schedule
|
| 1468 |
+
else:
|
| 1469 |
+
return i + 1 in self.schedule
|
v0.36.0/bit_diffusion.py
ADDED
|
@@ -0,0 +1,264 @@
| 1 |
+
from typing import Optional, Tuple, Union
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from einops import rearrange, reduce
|
| 5 |
+
|
| 6 |
+
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
|
| 7 |
+
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
|
| 8 |
+
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
BITS = 8
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# convert to bit representations and back taken from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
|
| 15 |
+
def decimal_to_bits(x, bits=BITS):
|
| 16 |
+
"""expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
|
| 17 |
+
device = x.device
|
| 18 |
+
|
| 19 |
+
x = (x * 255).int().clamp(0, 255)
|
| 20 |
+
|
| 21 |
+
mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
|
| 22 |
+
mask = rearrange(mask, "d -> d 1 1")
|
| 23 |
+
x = rearrange(x, "b c h w -> b c 1 h w")
|
| 24 |
+
|
| 25 |
+
bits = ((x & mask) != 0).float()
|
| 26 |
+
bits = rearrange(bits, "b c d h w -> b (c d) h w")
|
| 27 |
+
bits = bits * 2 - 1
|
| 28 |
+
return bits
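# Encoding example (for illustration): a pixel value of 0.5 becomes int(0.5 * 255) = 127 = 0b01111111,
# so its 8 bit-planes are [-1, 1, 1, 1, 1, 1, 1, 1]; an RGB batch of shape (B, 3, H, W) therefore becomes
# a (B, 24, H, W) tensor with entries in {-1, 1}.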
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def bits_to_decimal(x, bits=BITS):
|
| 32 |
+
"""expects bits from -1 to 1, outputs image tensor from 0 to 1"""
|
| 33 |
+
device = x.device
|
| 34 |
+
|
| 35 |
+
x = (x > 0).int()
|
| 36 |
+
mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
|
| 37 |
+
|
| 38 |
+
mask = rearrange(mask, "d -> d 1 1")
|
| 39 |
+
x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
|
| 40 |
+
dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
|
| 41 |
+
return (dec / 255).clamp(0.0, 1.0)
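# Round-trip sketch (uses only the two helpers above; exact up to the 8-bit quantisation of the input):
#   x = torch.rand(1, 3, 8, 8)
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int() / 255)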
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# modified scheduler step functions for clamping the predicted x_0 between -bit_scale and +bit_scale
|
| 45 |
+
def ddim_bit_scheduler_step(
|
| 46 |
+
self,
|
| 47 |
+
model_output: torch.Tensor,
|
| 48 |
+
timestep: int,
|
| 49 |
+
sample: torch.Tensor,
|
| 50 |
+
eta: float = 0.0,
|
| 51 |
+
use_clipped_model_output: bool = True,
|
| 52 |
+
generator=None,
|
| 53 |
+
return_dict: bool = True,
|
| 54 |
+
) -> Union[DDIMSchedulerOutput, Tuple]:
|
| 55 |
+
"""
|
| 56 |
+
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
|
| 57 |
+
process from the learned model outputs (most often the predicted noise).
|
| 58 |
+
Args:
|
| 59 |
+
model_output (`torch.Tensor`): direct output from learned diffusion model.
|
| 60 |
+
timestep (`int`): current discrete timestep in the diffusion chain.
|
| 61 |
+
sample (`torch.Tensor`):
|
| 62 |
+
current instance of sample being created by diffusion process.
|
| 63 |
+
eta (`float`): weight of noise for added noise in diffusion step.
|
| 64 |
+
use_clipped_model_output (`bool`): TODO
|
| 65 |
+
generator: random number generator.
|
| 66 |
+
return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class
|
| 67 |
+
Returns:
|
| 68 |
+
[`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:
|
| 69 |
+
[`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
|
| 70 |
+
returning a tuple, the first element is the sample tensor.
|
| 71 |
+
"""
|
| 72 |
+
if self.num_inference_steps is None:
|
| 73 |
+
raise ValueError(
|
| 74 |
+
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
# See formulas (12) and (16) of DDIM paper https://huggingface.co/papers/2010.02502
|
| 78 |
+
# Ideally, read DDIM paper in-detail understanding
|
| 79 |
+
|
| 80 |
+
# Notation (<variable name> -> <name in paper>
|
| 81 |
+
# - pred_noise_t -> e_theta(x_t, t)
|
| 82 |
+
# - pred_original_sample -> f_theta(x_t, t) or x_0
|
| 83 |
+
# - std_dev_t -> sigma_t
|
| 84 |
+
# - eta -> η
|
| 85 |
+
# - pred_sample_direction -> "direction pointing to x_t"
|
| 86 |
+
# - pred_prev_sample -> "x_t-1"
|
| 87 |
+
|
| 88 |
+
# 1. get previous step value (=t-1)
|
| 89 |
+
prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
|
| 90 |
+
|
| 91 |
+
# 2. compute alphas, betas
|
| 92 |
+
alpha_prod_t = self.alphas_cumprod[timestep]
|
| 93 |
+
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
|
| 94 |
+
|
| 95 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 96 |
+
|
| 97 |
+
# 3. compute predicted original sample from predicted noise also called
|
| 98 |
+
# "predicted x_0" of formula (12) from https://huggingface.co/papers/2010.02502
|
| 99 |
+
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
|
| 100 |
+
|
| 101 |
+
# 4. Clip "predicted x_0"
|
| 102 |
+
scale = self.bit_scale
|
| 103 |
+
if self.config.clip_sample:
|
| 104 |
+
pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
|
| 105 |
+
|
| 106 |
+
# 5. compute variance: "sigma_t(η)" -> see formula (16)
|
| 107 |
+
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
|
| 108 |
+
variance = self._get_variance(timestep, prev_timestep)
|
| 109 |
+
std_dev_t = eta * variance ** (0.5)
|
| 110 |
+
|
| 111 |
+
if use_clipped_model_output:
|
| 112 |
+
# the model_output is always re-derived from the clipped x_0 in Glide
|
| 113 |
+
model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
|
| 114 |
+
|
| 115 |
+
# 6. compute "direction pointing to x_t" of formula (12) from https://huggingface.co/papers/2010.02502
|
| 116 |
+
pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output
|
| 117 |
+
|
| 118 |
+
# 7. compute x_t without "random noise" of formula (12) from https://huggingface.co/papers/2010.02502
|
| 119 |
+
prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
|
| 120 |
+
|
| 121 |
+
if eta > 0:
|
| 122 |
+
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
|
| 123 |
+
device = model_output.device if torch.is_tensor(model_output) else "cpu"
|
| 124 |
+
noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
|
| 125 |
+
variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise
|
| 126 |
+
|
| 127 |
+
prev_sample = prev_sample + variance
|
| 128 |
+
|
| 129 |
+
if not return_dict:
|
| 130 |
+
return (prev_sample,)
|
| 131 |
+
|
| 132 |
+
return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def ddpm_bit_scheduler_step(
|
| 136 |
+
self,
|
| 137 |
+
model_output: torch.Tensor,
|
| 138 |
+
timestep: int,
|
| 139 |
+
sample: torch.Tensor,
|
| 140 |
+
prediction_type="epsilon",
|
| 141 |
+
generator=None,
|
| 142 |
+
return_dict: bool = True,
|
| 143 |
+
) -> Union[DDPMSchedulerOutput, Tuple]:
|
| 144 |
+
"""
|
| 145 |
+
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
|
| 146 |
+
process from the learned model outputs (most often the predicted noise).
|
| 147 |
+
Args:
|
| 148 |
+
model_output (`torch.Tensor`): direct output from learned diffusion model.
|
| 149 |
+
timestep (`int`): current discrete timestep in the diffusion chain.
|
| 150 |
+
sample (`torch.Tensor`):
|
| 151 |
+
current instance of sample being created by diffusion process.
|
| 152 |
+
prediction_type (`str`, default `epsilon`):
|
| 153 |
+
indicates whether the model predicts the noise (epsilon), or the samples (`sample`).
|
| 154 |
+
generator: random number generator.
|
| 155 |
+
return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class
|
| 156 |
+
Returns:
|
| 157 |
+
[`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`:
|
| 158 |
+
[`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
|
| 159 |
+
returning a tuple, the first element is the sample tensor.
|
| 160 |
+
"""
|
| 161 |
+
t = timestep
|
| 162 |
+
|
| 163 |
+
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
|
| 164 |
+
model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
|
| 165 |
+
else:
|
| 166 |
+
predicted_variance = None
|
| 167 |
+
|
| 168 |
+
# 1. compute alphas, betas
|
| 169 |
+
alpha_prod_t = self.alphas_cumprod[t]
|
| 170 |
+
alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
|
| 171 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 172 |
+
beta_prod_t_prev = 1 - alpha_prod_t_prev
|
| 173 |
+
|
| 174 |
+
# 2. compute predicted original sample from predicted noise also called
|
| 175 |
+
# "predicted x_0" of formula (15) from https://huggingface.co/papers/2006.11239
|
| 176 |
+
if prediction_type == "epsilon":
|
| 177 |
+
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
|
| 178 |
+
elif prediction_type == "sample":
|
| 179 |
+
pred_original_sample = model_output
|
| 180 |
+
else:
|
| 181 |
+
raise ValueError(f"Unsupported prediction_type {prediction_type}.")
|
| 182 |
+
|
| 183 |
+
# 3. Clip "predicted x_0"
|
| 184 |
+
scale = self.bit_scale
|
| 185 |
+
if self.config.clip_sample:
|
| 186 |
+
pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
|
| 187 |
+
|
| 188 |
+
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
|
| 189 |
+
# See formula (7) from https://huggingface.co/papers/2006.11239
|
| 190 |
+
pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t
|
| 191 |
+
current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t
|
| 192 |
+
|
| 193 |
+
# 5. Compute predicted previous sample µ_t
|
| 194 |
+
# See formula (7) from https://huggingface.co/papers/2006.11239
|
| 195 |
+
pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
|
| 196 |
+
|
| 197 |
+
# 6. Add noise
|
| 198 |
+
variance = 0
|
| 199 |
+
if t > 0:
|
| 200 |
+
noise = torch.randn(
|
| 201 |
+
model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
|
| 202 |
+
).to(model_output.device)
|
| 203 |
+
variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
|
| 204 |
+
|
| 205 |
+
pred_prev_sample = pred_prev_sample + variance
|
| 206 |
+
|
| 207 |
+
if not return_dict:
|
| 208 |
+
return (pred_prev_sample,)
|
| 209 |
+
|
| 210 |
+
return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
class BitDiffusion(DiffusionPipeline):
|
| 214 |
+
def __init__(
|
| 215 |
+
self,
|
| 216 |
+
unet: UNet2DConditionModel,
|
| 217 |
+
scheduler: Union[DDIMScheduler, DDPMScheduler],
|
| 218 |
+
bit_scale: Optional[float] = 1.0,
|
| 219 |
+
):
|
| 220 |
+
super().__init__()
|
| 221 |
+
self.bit_scale = bit_scale
|
| 222 |
+
self.scheduler.step = (
|
| 223 |
+
ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
|
| 224 |
+
)
|
| 225 |
+
|
| 226 |
+
self.register_modules(unet=unet, scheduler=scheduler)
|
| 227 |
+
|
| 228 |
+
@torch.no_grad()
|
| 229 |
+
def __call__(
|
| 230 |
+
self,
|
| 231 |
+
height: Optional[int] = 256,
|
| 232 |
+
width: Optional[int] = 256,
|
| 233 |
+
num_inference_steps: Optional[int] = 50,
|
| 234 |
+
generator: Optional[torch.Generator] = None,
|
| 235 |
+
batch_size: Optional[int] = 1,
|
| 236 |
+
output_type: Optional[str] = "pil",
|
| 237 |
+
return_dict: bool = True,
|
| 238 |
+
**kwargs,
|
| 239 |
+
) -> Union[Tuple, ImagePipelineOutput]:
|
| 240 |
+
latents = torch.randn(
|
| 241 |
+
(batch_size, self.unet.config.in_channels, height, width),
|
| 242 |
+
generator=generator,
|
| 243 |
+
)
|
| 244 |
+
latents = decimal_to_bits(latents) * self.bit_scale
|
| 245 |
+
latents = latents.to(self.device)
|
| 246 |
+
|
| 247 |
+
self.scheduler.set_timesteps(num_inference_steps)
|
| 248 |
+
|
| 249 |
+
for t in self.progress_bar(self.scheduler.timesteps):
|
| 250 |
+
# predict the noise residual
|
| 251 |
+
noise_pred = self.unet(latents, t).sample
|
| 252 |
+
|
| 253 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 254 |
+
latents = self.scheduler.step(noise_pred, t, latents).prev_sample
|
| 255 |
+
|
| 256 |
+
image = bits_to_decimal(latents)
|
| 257 |
+
|
| 258 |
+
if output_type == "pil":
|
| 259 |
+
image = self.numpy_to_pil(image)
|
| 260 |
+
|
| 261 |
+
if not return_dict:
|
| 262 |
+
return (image,)
|
| 263 |
+
|
| 264 |
+
return ImagePipelineOutput(images=image)
|
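A quick, illustrative round-trip check of the bit-conversion helpers defined above (not part of the uploaded file; the import path is an assumption):

import torch

from bit_diffusion import bits_to_decimal, decimal_to_bits  # assumes bit_diffusion.py is on the Python path

x = torch.rand(1, 3, 16, 16)  # fake image batch in [0, 1]
bits = decimal_to_bits(x)  # shape (1, 3 * 8, 16, 16), values in {-1, 1}
x_rec = bits_to_decimal(bits)  # back to [0, 1], quantized to 1/255 steps
assert torch.allclose(x_rec, (x * 255).int().float() / 255)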
v0.36.0/checkpoint_merger.py
ADDED
@@ -0,0 +1,288 @@
import glob
import os
from typing import Dict, List, Union

import safetensors.torch
import torch
from huggingface_hub import snapshot_download
from huggingface_hub.utils import validate_hf_hub_args

from diffusers import DiffusionPipeline, __version__
from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from diffusers.utils import CONFIG_NAME, ONNX_WEIGHTS_NAME, WEIGHTS_NAME


class CheckpointMergerPipeline(DiffusionPipeline):
    """
    A class that supports merging diffusion models based on the discussion here:
    https://github.com/huggingface/diffusers/issues/877

    Example usage:-

    pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="checkpoint_merger.py")

    merged_pipe = pipe.merge(["CompVis/stable-diffusion-v1-4","prompthero/openjourney"], interp = 'inv_sigmoid', alpha = 0.8, force = True)

    merged_pipe.to('cuda')

    prompt = "An astronaut riding a unicycle on Mars"

    results = merged_pipe(prompt)

    ## For more details, see the docstring for the merge method.

    """

    def __init__(self):
        self.register_to_config()
        super().__init__()

    def _compare_model_configs(self, dict0, dict1):
        if dict0 == dict1:
            return True
        else:
            config0, meta_keys0 = self._remove_meta_keys(dict0)
            config1, meta_keys1 = self._remove_meta_keys(dict1)
            if config0 == config1:
                print(f"Warning !: Mismatch in keys {meta_keys0} and {meta_keys1}.")
                return True
        return False

    def _remove_meta_keys(self, config_dict: Dict):
        meta_keys = []
        temp_dict = config_dict.copy()
        for key in config_dict.keys():
            if key.startswith("_"):
                temp_dict.pop(key)
                meta_keys.append(key)
        return (temp_dict, meta_keys)

    @torch.no_grad()
    @validate_hf_hub_args
    def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]], **kwargs):
        """
        Returns a new pipeline object of the class 'DiffusionPipeline' with the merged checkpoints(weights) of the models passed
        in the argument 'pretrained_model_name_or_path_list' as a list.

        Parameters:
        -----------
            pretrained_model_name_or_path_list : A list of valid pretrained model names in the HuggingFace hub or paths to locally stored models in the HuggingFace format.

            **kwargs:
                Supports all the default DiffusionPipeline.get_config_dict kwargs viz..

                cache_dir, force_download, proxies, local_files_only, token, revision, torch_dtype, device_map.

                alpha - The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
                    would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2

                interp - The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_diff" and None.
                    Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_diff" is supported.

                force - Whether to ignore mismatch in model_config.json for the current models. Defaults to False.

                variant - which variant of a pretrained model to load, e.g. "fp16" (None)

        """
        # Default kwargs from DiffusionPipeline
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        token = kwargs.pop("token", None)
        variant = kwargs.pop("variant", None)
        revision = kwargs.pop("revision", None)
        torch_dtype = kwargs.pop("torch_dtype", torch.float32)
        device_map = kwargs.pop("device_map", None)

        if not isinstance(torch_dtype, torch.dtype):
            torch_dtype = torch.float32
            print(f"Passed `torch_dtype` {torch_dtype} is not a `torch.dtype`. Defaulting to `torch.float32`.")

        alpha = kwargs.pop("alpha", 0.5)
        interp = kwargs.pop("interp", None)

        print("Received list", pretrained_model_name_or_path_list)
        print(f"Combining with alpha={alpha}, interpolation mode={interp}")

        checkpoint_count = len(pretrained_model_name_or_path_list)
        # Ignore result from model_index_json comparison of the two checkpoints
        force = kwargs.pop("force", False)

        # If less than 2 checkpoints, nothing to merge. If more than 3, not supported for now.
        if checkpoint_count > 3 or checkpoint_count < 2:
            raise ValueError(
                "Received incorrect number of checkpoints to merge. Ensure that either 2 or 3 checkpoints are being"
                " passed."
            )

        print("Received the right number of checkpoints")
        # chkpt0, chkpt1 = pretrained_model_name_or_path_list[0:2]
        # chkpt2 = pretrained_model_name_or_path_list[2] if checkpoint_count == 3 else None

        # Validate that the checkpoints can be merged
        # Step 1: Load the model config and compare the checkpoints. We'll compare the model_index.json first while ignoring the keys starting with '_'
        config_dicts = []
        for pretrained_model_name_or_path in pretrained_model_name_or_path_list:
            config_dict = DiffusionPipeline.load_config(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                token=token,
                revision=revision,
            )
            config_dicts.append(config_dict)

        comparison_result = True
        for idx in range(1, len(config_dicts)):
            comparison_result &= self._compare_model_configs(config_dicts[idx - 1], config_dicts[idx])
            if not force and comparison_result is False:
                raise ValueError("Incompatible checkpoints. Please check model_index.json for the models.")
        print("Compatible model_index.json files found")
        # Step 2: Basic Validation has succeeded. Let's download the models and save them into our local files.
        cached_folders = []
        for pretrained_model_name_or_path, config_dict in zip(pretrained_model_name_or_path_list, config_dicts):
            folder_names = [k for k in config_dict.keys() if not k.startswith("_")]
            allow_patterns = [os.path.join(k, "*") for k in folder_names]
            allow_patterns += [
                WEIGHTS_NAME,
                SCHEDULER_CONFIG_NAME,
                CONFIG_NAME,
                ONNX_WEIGHTS_NAME,
                DiffusionPipeline.config_name,
            ]
            requested_pipeline_class = config_dict.get("_class_name")
            user_agent = {"diffusers": __version__, "pipeline_class": requested_pipeline_class}

            cached_folder = (
                pretrained_model_name_or_path
                if os.path.isdir(pretrained_model_name_or_path)
                else snapshot_download(
                    pretrained_model_name_or_path,
                    cache_dir=cache_dir,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    allow_patterns=allow_patterns,
                    user_agent=user_agent,
                )
            )
            print("Cached Folder", cached_folder)
            cached_folders.append(cached_folder)

        # Step 3:-
        # Load the first checkpoint as a diffusion pipeline and modify its module state_dict in place
        final_pipe = DiffusionPipeline.from_pretrained(
            cached_folders[0],
            torch_dtype=torch_dtype,
            device_map=device_map,
            variant=variant,
        )
        final_pipe.to(self.device)

        checkpoint_path_2 = None
        if len(cached_folders) > 2:
            checkpoint_path_2 = os.path.join(cached_folders[2])

        if interp == "sigmoid":
            theta_func = CheckpointMergerPipeline.sigmoid
        elif interp == "inv_sigmoid":
            theta_func = CheckpointMergerPipeline.inv_sigmoid
        elif interp == "add_diff":
            theta_func = CheckpointMergerPipeline.add_difference
        else:
            theta_func = CheckpointMergerPipeline.weighted_sum

        # Find each module's state dict.
        for attr in final_pipe.config.keys():
            if not attr.startswith("_"):
                checkpoint_path_1 = os.path.join(cached_folders[1], attr)
                if os.path.exists(checkpoint_path_1):
                    files = [
                        *glob.glob(os.path.join(checkpoint_path_1, "*.safetensors")),
                        *glob.glob(os.path.join(checkpoint_path_1, "*.bin")),
                    ]
                    checkpoint_path_1 = files[0] if len(files) > 0 else None
                if len(cached_folders) < 3:
                    checkpoint_path_2 = None
                else:
                    checkpoint_path_2 = os.path.join(cached_folders[2], attr)
                    if os.path.exists(checkpoint_path_2):
                        files = [
                            *glob.glob(os.path.join(checkpoint_path_2, "*.safetensors")),
                            *glob.glob(os.path.join(checkpoint_path_2, "*.bin")),
                        ]
                        checkpoint_path_2 = files[0] if len(files) > 0 else None
                # For an attr if both checkpoint_path_1 and 2 are None, ignore.
                # If at least one is present, deal with it according to interp method, of course only if the state_dict keys match.
                if checkpoint_path_1 is None and checkpoint_path_2 is None:
                    print(f"Skipping {attr}: not present in 2nd or 3d model")
                    continue
                try:
                    module = getattr(final_pipe, attr)
                    if isinstance(module, bool):  # ignore requires_safety_checker boolean
                        continue
                    theta_0 = getattr(module, "state_dict")
                    theta_0 = theta_0()

                    update_theta_0 = getattr(module, "load_state_dict")
                    theta_1 = (
                        safetensors.torch.load_file(checkpoint_path_1)
                        if (checkpoint_path_1.endswith(".safetensors"))
                        else torch.load(checkpoint_path_1, map_location="cpu")
                    )
                    theta_2 = None
                    if checkpoint_path_2:
                        theta_2 = (
                            safetensors.torch.load_file(checkpoint_path_2)
                            if (checkpoint_path_2.endswith(".safetensors"))
                            else torch.load(checkpoint_path_2, map_location="cpu")
                        )

                    if not theta_0.keys() == theta_1.keys():
                        print(f"Skipping {attr}: key mismatch")
                        continue
                    if theta_2 and not theta_1.keys() == theta_2.keys():
                        print(f"Skipping {attr}:y mismatch")
                except Exception as e:
                    print(f"Skipping {attr} do to an unexpected error: {str(e)}")
                    continue
                print(f"MERGING {attr}")

                for key in theta_0.keys():
                    if theta_2:
                        theta_0[key] = theta_func(theta_0[key], theta_1[key], theta_2[key], alpha)
                    else:
                        theta_0[key] = theta_func(theta_0[key], theta_1[key], None, alpha)

                del theta_1
                del theta_2
                update_theta_0(theta_0)

                del theta_0
        return final_pipe

    @staticmethod
    def weighted_sum(theta0, theta1, theta2, alpha):
        return ((1 - alpha) * theta0) + (alpha * theta1)

    # Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
    @staticmethod
    def sigmoid(theta0, theta1, theta2, alpha):
        alpha = alpha * alpha * (3 - (2 * alpha))
        return theta0 + ((theta1 - theta0) * alpha)

    # Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
    @staticmethod
    def inv_sigmoid(theta0, theta1, theta2, alpha):
        import math

        alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0)
        return theta0 + ((theta1 - theta0) * alpha)

    @staticmethod
    def add_difference(theta0, theta1, theta2, alpha):
        return theta0 + (theta1 - theta2) * (1.0 - alpha)
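The interpolation helpers at the end of the class act on plain tensors, so their effect can be sanity-checked in isolation (illustrative only; the import path is an assumption, not part of the uploaded file):

import torch

from checkpoint_merger import CheckpointMergerPipeline  # assumes checkpoint_merger.py is on the Python path

a, b, c = torch.tensor(1.0), torch.tensor(3.0), torch.tensor(2.0)
print(CheckpointMergerPipeline.weighted_sum(a, b, None, 0.5))  # tensor(2.) -> 50/50 blend of a and b
print(CheckpointMergerPipeline.add_difference(a, b, c, 0.0))  # tensor(2.) -> a + (b - c) * (1 - alpha)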
v0.36.0/clip_guided_images_mixing_stable_diffusion.py
ADDED
@@ -0,0 +1,445 @@
# -*- coding: utf-8 -*-
import inspect
from typing import Optional, Union

import numpy as np
import PIL.Image
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.torch_utils import randn_tensor


def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPImageProcessor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://huggingface.co/papers/2010.02502
            pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.Tensor, PIL.Image.Image],
        content_image: Union[torch.Tensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                progress_bar.update()
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
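The slerp helper above is what blends the content and style latents, prompt embeddings, and CLIP image embeddings; a minimal sketch of its behaviour on dummy tensors (illustrative only; the import path is an assumption, not part of the uploaded file):

import torch

from clip_guided_images_mixing_stable_diffusion import slerp  # assumes the file is on the Python path

content = torch.randn(1, 4, 64, 64)
style = torch.randn(1, 4, 64, 64)
mixed = slerp(0.8, content, style)  # t=0.8 leans strongly toward the style tensor
print(mixed.shape)  # torch.Size([1, 4, 64, 64])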
v0.36.0/clip_guided_stable_diffusion.py
ADDED
@@ -0,0 +1,337 @@
| 1 |
+
import inspect
|
| 2 |
+
from typing import List, Optional, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from torch import nn
|
| 6 |
+
from torch.nn import functional as F
|
| 7 |
+
from torchvision import transforms
|
| 8 |
+
from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer
|
| 9 |
+
|
| 10 |
+
from diffusers import (
|
| 11 |
+
AutoencoderKL,
|
| 12 |
+
DDIMScheduler,
|
| 13 |
+
DPMSolverMultistepScheduler,
|
| 14 |
+
LMSDiscreteScheduler,
|
| 15 |
+
PNDMScheduler,
|
| 16 |
+
UNet2DConditionModel,
|
| 17 |
+
)
|
| 18 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 19 |
+
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class MakeCutouts(nn.Module):
|
| 23 |
+
def __init__(self, cut_size, cut_power=1.0):
|
| 24 |
+
super().__init__()
|
| 25 |
+
|
| 26 |
+
self.cut_size = cut_size
|
| 27 |
+
self.cut_power = cut_power
|
| 28 |
+
|
| 29 |
+
def forward(self, pixel_values, num_cutouts):
|
| 30 |
+
sideY, sideX = pixel_values.shape[2:4]
|
| 31 |
+
max_size = min(sideX, sideY)
|
| 32 |
+
min_size = min(sideX, sideY, self.cut_size)
|
| 33 |
+
cutouts = []
|
| 34 |
+
for _ in range(num_cutouts):
|
| 35 |
+
size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
|
| 36 |
+
offsetx = torch.randint(0, sideX - size + 1, ())
|
| 37 |
+
offsety = torch.randint(0, sideY - size + 1, ())
|
| 38 |
+
cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
|
| 39 |
+
cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
|
| 40 |
+
return torch.cat(cutouts)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def spherical_dist_loss(x, y):
|
| 44 |
+
x = F.normalize(x, dim=-1)
|
| 45 |
+
y = F.normalize(y, dim=-1)
|
| 46 |
+
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
    """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
    - https://github.com/Jack000/glid-3-xl
    - https://github.dev/crowsonkb/k-diffusion
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        self.cut_out_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.make_cutouts = MakeCutouts(self.cut_out_size)

        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        text_embeddings_clip,
        clip_guidance_scale,
        num_cutouts,
        use_cutouts=True,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://huggingface.co/papers/2010.02502
            pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        sample = 1 / self.vae.config.scaling_factor * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        if use_cutouts:
            image = self.make_cutouts(image, num_cutouts)
        else:
            image = transforms.Resize(self.cut_out_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        if use_cutouts:
            dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
            dists = dists.view([num_cutouts, sample.shape[0], -1])
            loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
        else:
            loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        clip_prompt: Optional[Union[str, List[str]]] = None,
        num_cutouts: Optional[int] = 4,
        use_cutouts: Optional[bool] = True,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        # get prompt text embeddings
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)

        if clip_guidance_scale > 0:
            if clip_prompt is not None:
                clip_text_input = self.tokenizer(
                    clip_prompt,
                    padding="max_length",
                    max_length=self.tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                ).input_ids.to(self.device)
            else:
                clip_text_input = text_input.input_ids.to(self.device)
            text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
            text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
            # duplicate text embeddings clip for each generation per prompt
            text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform classifier free guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # perform clip guidance
            if clip_guidance_scale > 0:
                text_embeddings_for_guidance = (
                    text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                )
                noise_pred, latents = self.cond_fn(
                    latents,
                    t,
                    i,
                    text_embeddings_for_guidance,
                    noise_pred,
                    text_embeddings_clip,
                    clip_guidance_scale,
                    num_cutouts,
                    use_cutouts,
                )

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # scale and decode the image latents with vae
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
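For reference, a minimal sketch of how a community pipeline like the one above is typically loaded and called. The checkpoint ids and arguments here are illustrative (they mirror the img2img example docstring below) rather than taken from this diff, and `custom_pipeline="clip_guided_stable_diffusion"` is assumed to resolve to the file above.

```py
import torch
from diffusers import DiffusionPipeline
from transformers import CLIPImageProcessor, CLIPModel

# Illustrative checkpoints; any CLIP model plus a Stable Diffusion v1.x checkpoint follows the same pattern.
clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16)
feature_extractor = CLIPImageProcessor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_stable_diffusion",  # assumed to point at the pipeline file above
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    prompt="fantasy forest landscape, golden hour, highly detailed",
    num_inference_steps=50,
    guidance_scale=7.5,
    clip_guidance_scale=100,
    num_cutouts=4,
    use_cutouts=False,
).images[0]
image.save("clip_guided.png")
```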
v0.36.0/clip_guided_stable_diffusion_img2img.py
ADDED
|
@@ -0,0 +1,490 @@
import inspect
from typing import List, Optional, Union

import numpy as np
import PIL.Image
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import PIL_INTERPOLATION, deprecate
from diffusers.utils.torch_utils import randn_tensor


EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        from io import BytesIO

        import requests
        import torch
        from diffusers import DiffusionPipeline
        from PIL import Image
        from transformers import CLIPImageProcessor, CLIPModel

        feature_extractor = CLIPImageProcessor.from_pretrained(
            "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
        )
        clip_model = CLIPModel.from_pretrained(
            "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
        )


        guided_pipeline = DiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            # custom_pipeline="clip_guided_stable_diffusion",
            custom_pipeline="/home/njindal/diffusers/examples/community/clip_guided_stable_diffusion.py",
            clip_model=clip_model,
            feature_extractor=feature_extractor,
            torch_dtype=torch.float16,
        )
        guided_pipeline.enable_attention_slicing()
        guided_pipeline = guided_pipeline.to("cuda")

        prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece"

        url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"

        response = requests.get(url)
        init_image = Image.open(BytesIO(response.content)).convert("RGB")

        image = guided_pipeline(
            prompt=prompt,
            num_inference_steps=30,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            clip_guidance_scale=100,
            num_cutouts=4,
            use_cutouts=False,
        ).images[0]
        display(image)
        ```
"""


def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


class MakeCutouts(nn.Module):
    def __init__(self, cut_size, cut_power=1.0):
        super().__init__()

        self.cut_size = cut_size
        self.cut_power = cut_power

    def forward(self, pixel_values, num_cutouts):
        sideY, sideX = pixel_values.shape[2:4]
        max_size = min(sideX, sideY)
        min_size = min(sideX, sideY, self.cut_size)
        cutouts = []
        for _ in range(num_cutouts):
            size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
            offsetx = torch.randint(0, sideX - size + 1, ())
            offsety = torch.randint(0, sideY - size + 1, ())
            cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
            cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
        return torch.cat(cutouts)


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
    """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
    - https://github.com/Jack000/glid-3-xl
    - https://github.dev/crowsonkb/k-diffusion
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        self.cut_out_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.make_cutouts = MakeCutouts(self.cut_out_size)

        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        init_latents = self.vae.config.scaling_factor * init_latents

        if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
            # expand init_latents for batch_size
            deprecation_message = (
                f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
                " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
                " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
                " your script to pass as many initial images as text prompts to suppress this warning."
            )
            deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
            additional_image_per_prompt = batch_size // init_latents.shape[0]
            init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
        elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
            raise ValueError(
                f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
            )
        else:
            init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        text_embeddings_clip,
        clip_guidance_scale,
        num_cutouts,
        use_cutouts=True,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://huggingface.co/papers/2010.02502
            pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        sample = 1 / self.vae.config.scaling_factor * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        if use_cutouts:
            image = self.make_cutouts(image, num_cutouts)
        else:
            image = transforms.Resize(self.cut_out_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        if use_cutouts:
            dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
            dists = dists.view([num_cutouts, sample.shape[0], -1])
            loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
        else:
            loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        clip_prompt: Optional[Union[str, List[str]]] = None,
        num_cutouts: Optional[int] = 4,
        use_cutouts: Optional[bool] = True,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        # get prompt text embeddings
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)

        # Preprocess image
        image = preprocess(image, width, height)
        if latents is None:
            latents = self.prepare_latents(
                image,
                latent_timestep,
                batch_size,
                num_images_per_prompt,
                text_embeddings.dtype,
                self.device,
                generator,
            )

        if clip_guidance_scale > 0:
            if clip_prompt is not None:
                clip_text_input = self.tokenizer(
                    clip_prompt,
                    padding="max_length",
                    max_length=self.tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                ).input_ids.to(self.device)
            else:
                clip_text_input = text_input.input_ids.to(self.device)
            text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
            text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
            # duplicate text embeddings clip for each generation per prompt
            text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        text_embeddings_clip,
                        clip_guidance_scale,
                        num_cutouts,
                        use_cutouts,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # scale and decode the image latents with vae
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
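A quick worked example of the `strength`/timestep bookkeeping done by `get_timesteps` in the img2img pipeline above (plain arithmetic on the values shown there, no additional APIs):

```py
# num_inference_steps = 50, strength = 0.75
init_timestep = min(int(50 * 0.75), 50)  # 37
t_start = max(50 - init_timestep, 0)     # 13
# The img2img run therefore starts from scheduler.timesteps[13:] and performs 37 denoising
# steps: a higher strength adds more noise to the encoded init image and re-imagines more of it.
```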
v0.36.0/cogvideox_ddim_inversion.py
ADDED
|
@@ -0,0 +1,645 @@
"""
This script performs DDIM inversion for video frames using a pre-trained model and generates
a video reconstruction based on a provided prompt. It utilizes the CogVideoX pipeline to
process video frames, apply the DDIM inverse scheduler, and produce an output video.

**Please notice that this script is based on the CogVideoX 5B model, and would not generate
a good result for 2B variants.**

Usage:
    python cogvideox_ddim_inversion.py
        --model_path /path/to/model
        --prompt "a prompt"
        --video_path /path/to/video.mp4
        --output_path /path/to/output

For more details about the cli arguments, please run `python cogvideox_ddim_inversion.py --help`.

Author:
    LittleNyima <littlenyima[at]163[dot]com>
"""

import argparse
import math
import os
from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union, cast

import torch
import torch.nn.functional as F
import torchvision.transforms as T
from transformers import T5EncoderModel, T5Tokenizer

from diffusers.models.attention_processor import Attention, CogVideoXAttnProcessor2_0
from diffusers.models.autoencoders import AutoencoderKLCogVideoX
from diffusers.models.embeddings import apply_rotary_emb
from diffusers.models.transformers.cogvideox_transformer_3d import CogVideoXBlock, CogVideoXTransformer3DModel
from diffusers.pipelines.cogvideo.pipeline_cogvideox import CogVideoXPipeline, retrieve_timesteps
from diffusers.schedulers import CogVideoXDDIMScheduler, DDIMInverseScheduler
from diffusers.utils import export_to_video


# Must import after torch because this can sometimes lead to a nasty segmentation fault, or stack smashing error.
# Very few bug reports but it happens. Look in decord Github issues for more relevant information.
import decord  # isort: skip


class DDIMInversionArguments(TypedDict):
    model_path: str
    prompt: str
    video_path: str
    output_path: str
    guidance_scale: float
    num_inference_steps: int
    skip_frames_start: int
    skip_frames_end: int
    frame_sample_step: Optional[int]
    max_num_frames: int
    width: int
    height: int
    fps: int
    dtype: torch.dtype
    seed: int
    device: torch.device


def get_args() -> DDIMInversionArguments:
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", type=str, required=True, help="Path of the pretrained model")
    parser.add_argument("--prompt", type=str, required=True, help="Prompt for the direct sample procedure")
    parser.add_argument("--video_path", type=str, required=True, help="Path of the video for inversion")
    parser.add_argument("--output_path", type=str, default="output", help="Path of the output videos")
    parser.add_argument("--guidance_scale", type=float, default=6.0, help="Classifier-free guidance scale")
    parser.add_argument("--num_inference_steps", type=int, default=50, help="Number of inference steps")
    parser.add_argument("--skip_frames_start", type=int, default=0, help="Number of skipped frames from the start")
    parser.add_argument("--skip_frames_end", type=int, default=0, help="Number of skipped frames from the end")
    parser.add_argument("--frame_sample_step", type=int, default=None, help="Temporal stride of the sampled frames")
    parser.add_argument("--max_num_frames", type=int, default=81, help="Max number of sampled frames")
    parser.add_argument("--width", type=int, default=720, help="Resized width of the video frames")
    parser.add_argument("--height", type=int, default=480, help="Resized height of the video frames")
    parser.add_argument("--fps", type=int, default=8, help="Frame rate of the output videos")
    parser.add_argument("--dtype", type=str, default="bf16", choices=["bf16", "fp16"], help="Dtype of the model")
    parser.add_argument("--seed", type=int, default=42, help="Seed for the random number generator")
    parser.add_argument("--device", type=str, default="cuda", choices=["cuda", "cpu"], help="Device for inference")

    args = parser.parse_args()
    args.dtype = torch.bfloat16 if args.dtype == "bf16" else torch.float16
    args.device = torch.device(args.device)

    return DDIMInversionArguments(**vars(args))


class CogVideoXAttnProcessor2_0ForDDIMInversion(CogVideoXAttnProcessor2_0):
    def __init__(self):
        super().__init__()

    def calculate_attention(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        attn: Attention,
        batch_size: int,
        image_seq_length: int,
        text_seq_length: int,
        attention_mask: Optional[torch.Tensor],
        image_rotary_emb: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""
        Core attention computation with inversion-guided RoPE integration.

        Args:
            query (`torch.Tensor`): `[batch_size, seq_len, dim]` query tensor
            key (`torch.Tensor`): `[batch_size, seq_len, dim]` key tensor
            value (`torch.Tensor`): `[batch_size, seq_len, dim]` value tensor
            attn (`Attention`): Parent attention module with projection layers
            batch_size (`int`): Effective batch size (after chunk splitting)
            image_seq_length (`int`): Length of image feature sequence
            text_seq_length (`int`): Length of text feature sequence
            attention_mask (`Optional[torch.Tensor]`): Attention mask tensor
            image_rotary_emb (`Optional[torch.Tensor]`): Rotary embeddings for image positions

        Returns:
            `Tuple[torch.Tensor, torch.Tensor]`:
                (1) hidden_states: [batch_size, image_seq_length, dim] processed image features
                (2) encoder_hidden_states: [batch_size, text_seq_length, dim] processed text features
        """
        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        if attn.norm_q is not None:
            query = attn.norm_q(query)
        if attn.norm_k is not None:
            key = attn.norm_k(key)

        # Apply RoPE if needed
        if image_rotary_emb is not None:
            query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb)
            if not attn.is_cross_attention:
                if key.size(2) == query.size(2):  # Attention for reference hidden states
                    key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb)
                else:  # RoPE should be applied to each group of image tokens
                    key[:, :, text_seq_length : text_seq_length + image_seq_length] = apply_rotary_emb(
                        key[:, :, text_seq_length : text_seq_length + image_seq_length], image_rotary_emb
                    )
                    key[:, :, text_seq_length * 2 + image_seq_length :] = apply_rotary_emb(
                        key[:, :, text_seq_length * 2 + image_seq_length :], image_rotary_emb
                    )

        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        encoder_hidden_states, hidden_states = hidden_states.split(
            [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1
        )
        return hidden_states, encoder_hidden_states

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        image_rotary_emb: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        r"""
        Process the dual-path attention for the inversion-guided denoising procedure.

        Args:
            attn (`Attention`): Parent attention module
            hidden_states (`torch.Tensor`): `[batch_size, image_seq_len, dim]` Image tokens
            encoder_hidden_states (`torch.Tensor`): `[batch_size, text_seq_len, dim]` Text tokens
            attention_mask (`Optional[torch.Tensor]`): Optional attention mask
            image_rotary_emb (`Optional[torch.Tensor]`): Rotary embeddings for image tokens

        Returns:
            `Tuple[torch.Tensor, torch.Tensor]`:
                (1) Final hidden states: `[batch_size, image_seq_length, dim]` Resulting image tokens
                (2) Final encoder states: `[batch_size, text_seq_length, dim]` Resulting text tokens
        """
        image_seq_length = hidden_states.size(1)
        text_seq_length = encoder_hidden_states.size(1)

        hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        query = attn.to_q(hidden_states)
        key = attn.to_k(hidden_states)
        value = attn.to_v(hidden_states)

        query, query_reference = query.chunk(2)
        key, key_reference = key.chunk(2)
        value, value_reference = value.chunk(2)
        batch_size = batch_size // 2

        hidden_states, encoder_hidden_states = self.calculate_attention(
            query=query,
            key=torch.cat((key, key_reference), dim=1),
            value=torch.cat((value, value_reference), dim=1),
            attn=attn,
            batch_size=batch_size,
            image_seq_length=image_seq_length,
            text_seq_length=text_seq_length,
            attention_mask=attention_mask,
            image_rotary_emb=image_rotary_emb,
        )
        hidden_states_reference, encoder_hidden_states_reference = self.calculate_attention(
            query=query_reference,
            key=key_reference,
            value=value_reference,
            attn=attn,
            batch_size=batch_size,
            image_seq_length=image_seq_length,
            text_seq_length=text_seq_length,
            attention_mask=attention_mask,
            image_rotary_emb=image_rotary_emb,
        )

        return (
            torch.cat((hidden_states, hidden_states_reference)),
            torch.cat((encoder_hidden_states, encoder_hidden_states_reference)),
        )


class OverrideAttnProcessors:
    r"""
    Context manager for temporarily overriding attention processors in CogVideo transformer blocks.

    Designed for DDIM inversion process, replaces original attention processors with
    `CogVideoXAttnProcessor2_0ForDDIMInversion` and restores them upon exit. Uses Python context manager
    pattern to safely manage processor replacement.

    Typical usage:
    ```python
    with OverrideAttnProcessors(transformer):
        # Perform DDIM inversion operations
    ```

    Args:
        transformer (`CogVideoXTransformer3DModel`):
            The transformer model containing attention blocks to be modified. Should have
            `transformer_blocks` attribute containing `CogVideoXBlock` instances.
    """

    def __init__(self, transformer: CogVideoXTransformer3DModel):
        self.transformer = transformer
        self.original_processors = {}

    def __enter__(self):
        for block in self.transformer.transformer_blocks:
            block = cast(CogVideoXBlock, block)
            self.original_processors[id(block)] = block.attn1.get_processor()
            block.attn1.set_processor(CogVideoXAttnProcessor2_0ForDDIMInversion())

    def __exit__(self, _0, _1, _2):
        for block in self.transformer.transformer_blocks:
            block = cast(CogVideoXBlock, block)
            block.attn1.set_processor(self.original_processors[id(block)])


def get_video_frames(
    video_path: str,
    width: int,
    height: int,
    skip_frames_start: int,
    skip_frames_end: int,
    max_num_frames: int,
    frame_sample_step: Optional[int],
) -> torch.FloatTensor:
    """
    Extract and preprocess video frames from a video file for VAE processing.

    Args:
        video_path (`str`): Path to input video file
        width (`int`): Target frame width for decoding
        height (`int`): Target frame height for decoding
        skip_frames_start (`int`): Number of frames to skip at video start
        skip_frames_end (`int`): Number of frames to skip at video end
        max_num_frames (`int`): Maximum allowed number of output frames
        frame_sample_step (`Optional[int]`):
            Frame sampling step size. If None, automatically calculated as:
            (total_frames - skipped_frames) // max_num_frames

    Returns:
        `torch.FloatTensor`: Preprocessed frames in `[F, C, H, W]` format where:
            - `F`: Number of frames (adjusted to 4k + 1 for VAE compatibility)
            - `C`: Channels (3 for RGB)
            - `H`: Frame height
            - `W`: Frame width
    """
    with decord.bridge.use_torch():
        video_reader = decord.VideoReader(uri=video_path, width=width, height=height)
        video_num_frames = len(video_reader)
        start_frame = min(skip_frames_start, video_num_frames)
        end_frame = max(0, video_num_frames - skip_frames_end)

        if end_frame <= start_frame:
            indices = [start_frame]
        elif end_frame - start_frame <= max_num_frames:
            indices = list(range(start_frame, end_frame))
        else:
            step = frame_sample_step or (end_frame - start_frame) // max_num_frames
            indices = list(range(start_frame, end_frame, step))

        frames = video_reader.get_batch(indices=indices)
        frames = frames[:max_num_frames].float()  # ensure that we don't go over the limit

        # Choose first (4k + 1) frames as this is how many is required by the VAE
        selected_num_frames = frames.size(0)
        remainder = (3 + selected_num_frames) % 4
        if remainder != 0:
            frames = frames[:-remainder]
        assert frames.size(0) % 4 == 1

        # Normalize the frames
        transform = T.Lambda(lambda x: x / 255.0 * 2.0 - 1.0)
        frames = torch.stack(tuple(map(transform, frames)), dim=0)

        return frames.permute(0, 3, 1, 2).contiguous()  # [F, C, H, W]


class CogVideoXDDIMInversionOutput:
    inverse_latents: torch.FloatTensor
    recon_latents: torch.FloatTensor

    def __init__(self, inverse_latents: torch.FloatTensor, recon_latents: torch.FloatTensor):
        self.inverse_latents = inverse_latents
        self.recon_latents = recon_latents


class CogVideoXPipelineForDDIMInversion(CogVideoXPipeline):
    def __init__(
        self,
        tokenizer: T5Tokenizer,
        text_encoder: T5EncoderModel,
        vae: AutoencoderKLCogVideoX,
        transformer: CogVideoXTransformer3DModel,
        scheduler: CogVideoXDDIMScheduler,
    ):
        super().__init__(
            tokenizer=tokenizer,
            text_encoder=text_encoder,
            vae=vae,
            transformer=transformer,
            scheduler=scheduler,
        )
        self.inverse_scheduler = DDIMInverseScheduler(**scheduler.config)

    def encode_video_frames(self, video_frames: torch.FloatTensor) -> torch.FloatTensor:
        """
        Encode video frames into latent space using Variational Autoencoder.

        Args:
            video_frames (`torch.FloatTensor`):
                Input frames tensor in `[F, C, H, W]` format from `get_video_frames()`

        Returns:
            `torch.FloatTensor`: Encoded latents in `[1, F, D, H_latent, W_latent]` format where:
                - `F`: Number of frames (same as input)
                - `D`: Latent channel dimension
                - `H_latent`: Latent space height (H // 2^vae.downscale_factor)
                - `W_latent`: Latent space width (W // 2^vae.downscale_factor)
        """
        vae: AutoencoderKLCogVideoX = self.vae
        video_frames = video_frames.to(device=vae.device, dtype=vae.dtype)
        video_frames = video_frames.unsqueeze(0).permute(0, 2, 1, 3, 4)  # [B, C, F, H, W]
        latent_dist = vae.encode(x=video_frames).latent_dist.sample().transpose(1, 2)
        return latent_dist * vae.config.scaling_factor

    @torch.no_grad()
    def export_latents_to_video(self, latents: torch.FloatTensor, video_path: str, fps: int):
        r"""
        Decode latent vectors into video and export as video file.

        Args:
            latents (`torch.FloatTensor`): Encoded latents in `[B, F, D, H_latent, W_latent]` format from
                `encode_video_frames()`
            video_path (`str`): Output path for video file
            fps (`int`): Target frames per second for output video
        """
        video = self.decode_latents(latents)
        frames = self.video_processor.postprocess_video(video=video, output_type="pil")
        os.makedirs(os.path.dirname(video_path), exist_ok=True)
        export_to_video(video_frames=frames[0], output_video_path=video_path, fps=fps)

    # Modified from CogVideoXPipeline.__call__
    @torch.no_grad()
    def sample(
        self,
        latents: torch.FloatTensor,
        scheduler: Union[DDIMInverseScheduler, CogVideoXDDIMScheduler],
        prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 6,
        use_dynamic_cfg: bool = False,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        reference_latents: torch.FloatTensor = None,
    ) -> torch.FloatTensor:
        r"""
        Execute the core sampling loop for video generation/inversion using CogVideoX.

        Implements the full denoising trajectory recording for both DDIM inversion and
        generation processes. Supports dynamic classifier-free guidance and reference
        latent conditioning.

        Args:
            latents (`torch.FloatTensor`):
                Initial noise tensor of shape `[B, F, C, H, W]`.
|
| 430 |
+
scheduler (`Union[DDIMInverseScheduler, CogVideoXDDIMScheduler]`):
|
| 431 |
+
Scheduling strategy for diffusion process. Use:
|
| 432 |
+
(1) `DDIMInverseScheduler` for inversion
|
| 433 |
+
(2) `CogVideoXDDIMScheduler` for generation
|
| 434 |
+
prompt (`Optional[Union[str, List[str]]]`):
|
| 435 |
+
Text prompt(s) for conditional generation. Defaults to unconditional.
|
| 436 |
+
negative_prompt (`Optional[Union[str, List[str]]]`):
|
| 437 |
+
Negative prompt(s) for guidance. Requires `guidance_scale > 1`.
|
| 438 |
+
num_inference_steps (`int`):
|
| 439 |
+
Number of denoising steps. Affects quality/compute trade-off.
|
| 440 |
+
guidance_scale (`float`):
|
| 441 |
+
Classifier-free guidance weight. 1.0 = no guidance.
|
| 442 |
+
use_dynamic_cfg (`bool`):
|
| 443 |
+
Enable time-varying guidance scale (cosine schedule)
|
| 444 |
+
eta (`float`):
|
| 445 |
+
DDIM variance parameter (0 = deterministic process)
|
| 446 |
+
generator (`Optional[Union[torch.Generator, List[torch.Generator]]]`):
|
| 447 |
+
Random number generator(s) for reproducibility
|
| 448 |
+
attention_kwargs (`Optional[Dict[str, Any]]`):
|
| 449 |
+
Custom parameters for attention modules
|
| 450 |
+
reference_latents (`torch.FloatTensor`):
|
| 451 |
+
Reference latent trajectory for conditional sampling. Shape should match
|
| 452 |
+
`[T, B, F, C, H, W]` where `T` is number of timesteps
|
| 453 |
+
|
| 454 |
+
Returns:
|
| 455 |
+
`torch.FloatTensor`:
|
| 456 |
+
Full denoising trajectory tensor of shape `[T, B, F, C, H, W]`.
|
| 457 |
+
"""
|
| 458 |
+
self._guidance_scale = guidance_scale
|
| 459 |
+
self._attention_kwargs = attention_kwargs
|
| 460 |
+
self._interrupt = False
|
| 461 |
+
|
| 462 |
+
device = self._execution_device
|
| 463 |
+
|
| 464 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 465 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 466 |
+
# corresponds to doing no classifier free guidance.
|
| 467 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 468 |
+
|
| 469 |
+
# 3. Encode input prompt
|
| 470 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 471 |
+
prompt,
|
| 472 |
+
negative_prompt,
|
| 473 |
+
do_classifier_free_guidance,
|
| 474 |
+
device=device,
|
| 475 |
+
)
|
| 476 |
+
if do_classifier_free_guidance:
|
| 477 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 478 |
+
if reference_latents is not None:
|
| 479 |
+
prompt_embeds = torch.cat([prompt_embeds] * 2, dim=0)
|
| 480 |
+
|
| 481 |
+
# 4. Prepare timesteps
|
| 482 |
+
timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps, device)
|
| 483 |
+
self._num_timesteps = len(timesteps)
|
| 484 |
+
|
| 485 |
+
# 5. Prepare latents.
|
| 486 |
+
latents = latents.to(device=device) * scheduler.init_noise_sigma
|
| 487 |
+
|
| 488 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 489 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 490 |
+
if isinstance(scheduler, DDIMInverseScheduler): # Inverse scheduler does not accept extra kwargs
|
| 491 |
+
extra_step_kwargs = {}
|
| 492 |
+
|
| 493 |
+
# 7. Create rotary embeds if required
|
| 494 |
+
image_rotary_emb = (
|
| 495 |
+
self._prepare_rotary_positional_embeddings(
|
| 496 |
+
height=latents.size(3) * self.vae_scale_factor_spatial,
|
| 497 |
+
width=latents.size(4) * self.vae_scale_factor_spatial,
|
| 498 |
+
num_frames=latents.size(1),
|
| 499 |
+
device=device,
|
| 500 |
+
)
|
| 501 |
+
if self.transformer.config.use_rotary_positional_embeddings
|
| 502 |
+
else None
|
| 503 |
+
)
|
| 504 |
+
|
| 505 |
+
# 8. Denoising loop
|
| 506 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * scheduler.order, 0)
|
| 507 |
+
|
| 508 |
+
trajectory = torch.zeros_like(latents).unsqueeze(0).repeat(len(timesteps), 1, 1, 1, 1, 1)
|
| 509 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 510 |
+
for i, t in enumerate(timesteps):
|
| 511 |
+
if self.interrupt:
|
| 512 |
+
continue
|
| 513 |
+
|
| 514 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 515 |
+
if reference_latents is not None:
|
| 516 |
+
reference = reference_latents[i]
|
| 517 |
+
reference = torch.cat([reference] * 2) if do_classifier_free_guidance else reference
|
| 518 |
+
latent_model_input = torch.cat([latent_model_input, reference], dim=0)
|
| 519 |
+
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
|
| 520 |
+
|
| 521 |
+
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
|
| 522 |
+
timestep = t.expand(latent_model_input.shape[0])
|
| 523 |
+
|
| 524 |
+
# predict noise model_output
|
| 525 |
+
noise_pred = self.transformer(
|
| 526 |
+
hidden_states=latent_model_input,
|
| 527 |
+
encoder_hidden_states=prompt_embeds,
|
| 528 |
+
timestep=timestep,
|
| 529 |
+
image_rotary_emb=image_rotary_emb,
|
| 530 |
+
attention_kwargs=attention_kwargs,
|
| 531 |
+
return_dict=False,
|
| 532 |
+
)[0]
|
| 533 |
+
noise_pred = noise_pred.float()
|
| 534 |
+
|
| 535 |
+
if reference_latents is not None: # Recover the original batch size
|
| 536 |
+
noise_pred, _ = noise_pred.chunk(2)
|
| 537 |
+
|
| 538 |
+
# perform guidance
|
| 539 |
+
if use_dynamic_cfg:
|
| 540 |
+
self._guidance_scale = 1 + guidance_scale * (
|
| 541 |
+
(1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
|
| 542 |
+
)
|
| 543 |
+
if do_classifier_free_guidance:
|
| 544 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 545 |
+
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 546 |
+
|
| 547 |
+
# compute the noisy sample x_t-1 -> x_t
|
| 548 |
+
latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 549 |
+
latents = latents.to(prompt_embeds.dtype)
|
| 550 |
+
trajectory[i] = latents
|
| 551 |
+
|
| 552 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0):
|
| 553 |
+
progress_bar.update()
|
| 554 |
+
|
| 555 |
+
# Offload all models
|
| 556 |
+
self.maybe_free_model_hooks()
|
| 557 |
+
|
| 558 |
+
return trajectory
|
| 559 |
+
|
| 560 |
+
@torch.no_grad()
|
| 561 |
+
def __call__(
|
| 562 |
+
self,
|
| 563 |
+
prompt: str,
|
| 564 |
+
video_path: str,
|
| 565 |
+
guidance_scale: float,
|
| 566 |
+
num_inference_steps: int,
|
| 567 |
+
skip_frames_start: int,
|
| 568 |
+
skip_frames_end: int,
|
| 569 |
+
frame_sample_step: Optional[int],
|
| 570 |
+
max_num_frames: int,
|
| 571 |
+
width: int,
|
| 572 |
+
height: int,
|
| 573 |
+
seed: int,
|
| 574 |
+
):
|
| 575 |
+
"""
|
| 576 |
+
Performs DDIM inversion on a video to reconstruct it with a new prompt.
|
| 577 |
+
|
| 578 |
+
Args:
|
| 579 |
+
prompt (`str`): The text prompt to guide the reconstruction.
|
| 580 |
+
video_path (`str`): Path to the input video file.
|
| 581 |
+
guidance_scale (`float`): Scale for classifier-free guidance.
|
| 582 |
+
num_inference_steps (`int`): Number of denoising steps.
|
| 583 |
+
skip_frames_start (`int`): Number of frames to skip from the beginning of the video.
|
| 584 |
+
skip_frames_end (`int`): Number of frames to skip from the end of the video.
|
| 585 |
+
frame_sample_step (`Optional[int]`): Step size for sampling frames. If None, all frames are used.
|
| 586 |
+
max_num_frames (`int`): Maximum number of frames to process.
|
| 587 |
+
width (`int`): Width of the output video frames.
|
| 588 |
+
height (`int`): Height of the output video frames.
|
| 589 |
+
seed (`int`): Random seed for reproducibility.
|
| 590 |
+
|
| 591 |
+
Returns:
|
| 592 |
+
`CogVideoXDDIMInversionOutput`: Contains the inverse latents and reconstructed latents.
|
| 593 |
+
"""
|
| 594 |
+
if not self.transformer.config.use_rotary_positional_embeddings:
|
| 595 |
+
raise NotImplementedError("This script supports CogVideoX 5B model only.")
|
| 596 |
+
video_frames = get_video_frames(
|
| 597 |
+
video_path=video_path,
|
| 598 |
+
width=width,
|
| 599 |
+
height=height,
|
| 600 |
+
skip_frames_start=skip_frames_start,
|
| 601 |
+
skip_frames_end=skip_frames_end,
|
| 602 |
+
max_num_frames=max_num_frames,
|
| 603 |
+
frame_sample_step=frame_sample_step,
|
| 604 |
+
).to(device=self.device)
|
| 605 |
+
video_latents = self.encode_video_frames(video_frames=video_frames)
|
| 606 |
+
inverse_latents = self.sample(
|
| 607 |
+
latents=video_latents,
|
| 608 |
+
scheduler=self.inverse_scheduler,
|
| 609 |
+
prompt="",
|
| 610 |
+
num_inference_steps=num_inference_steps,
|
| 611 |
+
guidance_scale=guidance_scale,
|
| 612 |
+
generator=torch.Generator(device=self.device).manual_seed(seed),
|
| 613 |
+
)
|
| 614 |
+
with OverrideAttnProcessors(transformer=self.transformer):
|
| 615 |
+
recon_latents = self.sample(
|
| 616 |
+
latents=torch.randn_like(video_latents),
|
| 617 |
+
scheduler=self.scheduler,
|
| 618 |
+
prompt=prompt,
|
| 619 |
+
num_inference_steps=num_inference_steps,
|
| 620 |
+
guidance_scale=guidance_scale,
|
| 621 |
+
generator=torch.Generator(device=self.device).manual_seed(seed),
|
| 622 |
+
reference_latents=reversed(inverse_latents),
|
| 623 |
+
)
|
| 624 |
+
return CogVideoXDDIMInversionOutput(
|
| 625 |
+
inverse_latents=inverse_latents,
|
| 626 |
+
recon_latents=recon_latents,
|
| 627 |
+
)
|
| 628 |
+
|
| 629 |
+
|
| 630 |
+
if __name__ == "__main__":
|
| 631 |
+
arguments = get_args()
|
| 632 |
+
pipeline = CogVideoXPipelineForDDIMInversion.from_pretrained(
|
| 633 |
+
arguments.pop("model_path"),
|
| 634 |
+
torch_dtype=arguments.pop("dtype"),
|
| 635 |
+
).to(device=arguments.pop("device"))
|
| 636 |
+
|
| 637 |
+
output_path = arguments.pop("output_path")
|
| 638 |
+
fps = arguments.pop("fps")
|
| 639 |
+
inverse_video_path = os.path.join(output_path, f"{arguments.get('video_path')}_inversion.mp4")
|
| 640 |
+
recon_video_path = os.path.join(output_path, f"{arguments.get('video_path')}_reconstruction.mp4")
|
| 641 |
+
|
| 642 |
+
# Run DDIM inversion
|
| 643 |
+
output = pipeline(**arguments)
|
| 644 |
+
pipeline.export_latents_to_video(output.inverse_latents[-1], inverse_video_path, fps)
|
| 645 |
+
pipeline.export_latents_to_video(output.recon_latents[-1], recon_video_path, fps)
|
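A minimal usage sketch for the DDIM-inversion pipeline above. Everything here is an assumption for illustration: the import path, the checkpoint id `THUDM/CogVideoX-5b`, and the file paths are placeholders, not part of the script.

# Hedged usage sketch (not part of the diff): invert a clip, then re-render it with a new prompt.
# Assumes the class above is importable from a local copy of cogvideox_ddim_inversion.py and
# that a CogVideoX 5B checkpoint (e.g. "THUDM/CogVideoX-5b") is available.
import torch
from cogvideox_ddim_inversion import CogVideoXPipelineForDDIMInversion  # hypothetical local import

pipeline = CogVideoXPipelineForDDIMInversion.from_pretrained(
    "THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16
).to("cuda")

output = pipeline(
    prompt="A kitten running on the grass",  # prompt used for the reconstruction pass
    video_path="input.mp4",                  # placeholder input clip
    guidance_scale=6.0,
    num_inference_steps=50,
    skip_frames_start=0,
    skip_frames_end=0,
    frame_sample_step=None,
    max_num_frames=81,                       # 4k + 1 frames, as required by the VAE
    width=720,
    height=480,
    seed=42,
)
# The last element of each trajectory is the fully inverted / reconstructed latent.
pipeline.export_latents_to_video(output.inverse_latents[-1], "out/inversion.mp4", fps=8)
pipeline.export_latents_to_video(output.recon_latents[-1], "out/reconstruction.mp4", fps=8)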
v0.36.0/composable_stable_diffusion.py
ADDED
@@ -0,0 +1,536 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Callable, List, Optional, Union

import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class ComposableStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPImageProcessor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        is_unet_version_less_0_9_0 = (
            unet is not None
            and hasattr(unet.config, "_diffusers_version")
            and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
        )
        is_unet_sample_size_less_64 = (
            unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        )
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
                " \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list(int)`):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
        """
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
            attention_mask = text_inputs.attention_mask.to(device)
        else:
            attention_mask = None

        text_embeddings = self.text_encoder(
            text_input_ids.to(device),
            attention_mask=attention_mask,
        )
        text_embeddings = text_embeddings[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            uncond_embeddings = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            uncond_embeddings = uncond_embeddings[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        return text_embeddings

    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        else:
            has_nsfw_concept = None
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(self, prompt, height, width, callback_steps):
        if not isinstance(prompt, str) and not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if latents is None:
            if device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
            else:
                latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        weights: Optional[str] = "",
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
            When returning a tuple, the first element is a list with the generated images, and the second element is a
            list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, according to the `safety_checker`.
        """
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(prompt, height, width, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        device = self._execution_device
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        if "|" in prompt:
            prompt = [x.strip() for x in prompt.split("|")]
            print(f"composing {prompt}...")

            if not weights:
                # specify weights for prompts (excluding the unconditional score)
                print("using equal positive weights (conjunction) for all prompts...")
                weights = torch.tensor([guidance_scale] * len(prompt), device=self.device).reshape(-1, 1, 1, 1)
            else:
                # set prompt weight for each
                num_prompts = len(prompt) if isinstance(prompt, list) else 1
                weights = [float(w.strip()) for w in weights.split("|")]
                # guidance scale as the default
                if len(weights) < num_prompts:
                    weights.append(guidance_scale)
                else:
                    weights = weights[:num_prompts]
                assert len(weights) == len(prompt), "weights specified are not equal to the number of prompts"
                weights = torch.tensor(weights, device=self.device).reshape(-1, 1, 1, 1)
        else:
            weights = guidance_scale

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            text_embeddings.dtype,
            device,
            generator,
            latents,
        )

        # composable diffusion
        if isinstance(prompt, list) and batch_size == 1:
            # remove extra unconditional embedding
            # N = one unconditional embed + conditional embeds
            text_embeddings = text_embeddings[len(prompt) - 1 :]

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = []
                for j in range(text_embeddings.shape[0]):
                    noise_pred.append(
                        self.unet(latent_model_input[:1], t, encoder_hidden_states=text_embeddings[j : j + 1]).sample
                    )
                noise_pred = torch.cat(noise_pred, dim=0)

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred[:1], noise_pred[1:]
                    noise_pred = noise_pred_uncond + (weights * (noise_pred_text - noise_pred_uncond)).sum(
                        dim=0, keepdims=True
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        # 8. Post-processing
        image = self.decode_latents(latents)

        # 9. Run safety checker
        image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)

        # 10. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
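A minimal usage sketch for composing several concepts with "|"-separated prompts, assuming the file is resolved as a diffusers community pipeline via `custom_pipeline`; the checkpoint id is only an example.

# Hedged usage sketch (not part of the diff): "|"-separated prompts with per-prompt weights.
# Assumes diffusers resolves this file through custom_pipeline="composable_stable_diffusion"
# and that the example checkpoint is available.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="composable_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    prompt="mystical trees | A magical pond | dark",  # concepts separated by "|"
    weights="7.5 | 7.5 | -7.5",                       # a negative weight steers away from "dark"
    guidance_scale=7.5,
    num_inference_steps=50,
).images[0]
image.save("composed.png")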
v0.36.0/ddim_noise_comparative_analysis.py
ADDED
@@ -0,0 +1,190 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Optional, Tuple, Union

import PIL.Image
import torch
from torchvision import transforms

from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils.torch_utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    r"""
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
            [`DDPMScheduler`], or [`DDIMScheduler`].
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        Args:
            image (`torch.Tensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
                will be used as a starting point, adding more noise to it the larger the `strength`. The number of
                denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
                be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            generator (`torch.Generator`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            eta (`float`, *optional*, defaults to 0.0):
                The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            use_clipped_model_output (`bool`, *optional*, defaults to `None`):
                if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed
                downstream to the scheduler. So use `None` for schedulers which don't support this argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
            True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images.
        """
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
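A minimal usage sketch for the noise comparative analysis pipeline, assuming it is loaded as a community pipeline on top of an unconditional DDPM checkpoint; the model id below is only an example.

# Hedged usage sketch (not part of the diff): compare how much of the input image survives
# at different noising strengths. Assumes custom_pipeline="ddim_noise_comparative_analysis"
# resolves to the file above and that the example checkpoint exists.
from PIL import Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-ema-celebahq-256",
    custom_pipeline="ddim_noise_comparative_analysis",
).to("cuda")

init_image = Image.open("face.png").convert("RGB")  # placeholder input image
for strength in (0.1, 0.5, 0.9):
    # higher strength = more noise added at the start, so less of the original is preserved
    result = pipe(image=init_image, strength=strength, num_inference_steps=50).images[0]
    result.save(f"denoised_strength_{strength}.png")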
v0.36.0/dps_pipeline.py
ADDED
@@ -0,0 +1,466 @@
| 1 |
+
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
from math import pi
|
| 17 |
+
from typing import Callable, List, Optional, Tuple, Union
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from PIL import Image
|
| 22 |
+
|
| 23 |
+
from diffusers import DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DModel
|
| 24 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class DPSPipeline(DiffusionPipeline):
|
| 28 |
+
r"""
|
| 29 |
+
Pipeline for Diffusion Posterior Sampling.
|
| 30 |
+
|
| 31 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 32 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 33 |
+
|
| 34 |
+
Parameters:
|
| 35 |
+
unet ([`UNet2DModel`]):
|
| 36 |
+
A `UNet2DModel` to denoise the encoded image latents.
|
| 37 |
+
scheduler ([`SchedulerMixin`]):
|
| 38 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
|
| 39 |
+
[`DDPMScheduler`], or [`DDIMScheduler`].
|
| 40 |
+
"""
|
| 41 |
+
|
| 42 |
+
model_cpu_offload_seq = "unet"
|
| 43 |
+
|
| 44 |
+
def __init__(self, unet, scheduler):
|
| 45 |
+
super().__init__()
|
| 46 |
+
self.register_modules(unet=unet, scheduler=scheduler)
|
| 47 |
+
|
| 48 |
+
@torch.no_grad()
|
| 49 |
+
def __call__(
|
| 50 |
+
self,
|
| 51 |
+
measurement: torch.Tensor,
|
| 52 |
+
operator: torch.nn.Module,
|
| 53 |
+
loss_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
|
| 54 |
+
batch_size: int = 1,
|
| 55 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 56 |
+
num_inference_steps: int = 1000,
|
| 57 |
+
output_type: Optional[str] = "pil",
|
| 58 |
+
return_dict: bool = True,
|
| 59 |
+
zeta: float = 0.3,
|
| 60 |
+
) -> Union[ImagePipelineOutput, Tuple]:
|
| 61 |
+
r"""
|
| 62 |
+
The call function to the pipeline for generation.
|
| 63 |
+
|
| 64 |
+
Args:
|
| 65 |
+
measurement (`torch.Tensor`, *required*):
|
| 66 |
+
A `torch.Tensor`, the corrupted image.
|
| 67 |
+
operator (`torch.nn.Module`, *required*):
|
| 68 |
+
A `torch.nn.Module`, the operator generating the corrupted image.
|
| 69 |
+
loss_fn (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *required*):
|
| 70 |
+
A `Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, the loss function used
between the measurements; for most cases, RMSE works fine.
|
| 72 |
+
batch_size (`int`, *optional*, defaults to 1):
|
| 73 |
+
The number of images to generate.
|
| 74 |
+
generator (`torch.Generator`, *optional*):
|
| 75 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 76 |
+
generation deterministic.
|
| 77 |
+
num_inference_steps (`int`, *optional*, defaults to 1000):
|
| 78 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 79 |
+
expense of slower inference.
|
| 80 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 81 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 82 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 83 |
+
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
|
| 84 |
+
|
| 85 |
+
Example:
|
| 86 |
+
|
| 87 |
+
```py
>>> from diffusers import DDPMScheduler, UNet2DModel

>>> # load a plain DDPM backbone and scheduler (the DPS pipeline reuses them)
>>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256")
>>> model = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")
>>> pipe = DPSPipeline(model, scheduler)

>>> # run posterior sampling against a corrupted measurement
>>> # (see the operator / RMSELoss definitions in the __main__ example below)
>>> image = pipe(measurement=measurement, operator=operator, loss_fn=RMSELoss, zeta=1.0).images[0]

>>> # save image
>>> image.save("dps_generated_image.png")
```
|
| 99 |
+
|
| 100 |
+
Returns:
|
| 101 |
+
[`~pipelines.ImagePipelineOutput`] or `tuple`:
|
| 102 |
+
If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
|
| 103 |
+
returned where the first element is a list with the generated images
|
| 104 |
+
"""
|
| 105 |
+
# Sample gaussian noise to begin loop
|
| 106 |
+
if isinstance(self.unet.config.sample_size, int):
|
| 107 |
+
image_shape = (
|
| 108 |
+
batch_size,
|
| 109 |
+
self.unet.config.in_channels,
|
| 110 |
+
self.unet.config.sample_size,
|
| 111 |
+
self.unet.config.sample_size,
|
| 112 |
+
)
|
| 113 |
+
else:
|
| 114 |
+
image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
|
| 115 |
+
|
| 116 |
+
if self.device.type == "mps":
|
| 117 |
+
# randn does not work reproducibly on mps
|
| 118 |
+
image = randn_tensor(image_shape, generator=generator)
|
| 119 |
+
image = image.to(self.device)
|
| 120 |
+
else:
|
| 121 |
+
image = randn_tensor(image_shape, generator=generator, device=self.device)
|
| 122 |
+
|
| 123 |
+
# set step values
|
| 124 |
+
self.scheduler.set_timesteps(num_inference_steps)
|
| 125 |
+
|
| 126 |
+
for t in self.progress_bar(self.scheduler.timesteps):
|
| 127 |
+
with torch.enable_grad():
|
| 128 |
+
# 1. predict noise model_output
|
| 129 |
+
image = image.requires_grad_()
|
| 130 |
+
model_output = self.unet(image, t).sample
|
| 131 |
+
|
| 132 |
+
# 2. compute previous image x'_{t-1} and original prediction x0_{t}
|
| 133 |
+
scheduler_out = self.scheduler.step(model_output, t, image, generator=generator)
|
| 134 |
+
image_pred, origi_pred = scheduler_out.prev_sample, scheduler_out.pred_original_sample
|
| 135 |
+
|
| 136 |
+
# 3. compute y'_t = f(x0_{t})
|
| 137 |
+
measurement_pred = operator(origi_pred)
|
| 138 |
+
|
| 139 |
+
# 4. compute loss = d(y, y'_t-1)
|
| 140 |
+
loss = loss_fn(measurement, measurement_pred)
|
| 141 |
+
loss.backward()
|
| 142 |
+
|
| 143 |
+
print("distance: {0:.4f}".format(loss.item()))
|
| 144 |
+
|
| 145 |
+
with torch.no_grad():
|
| 146 |
+
image_pred = image_pred - zeta * image.grad
|
| 147 |
+
image = image_pred.detach()
|
| 148 |
+
|
| 149 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 150 |
+
image = image.cpu().permute(0, 2, 3, 1).numpy()
|
| 151 |
+
if output_type == "pil":
|
| 152 |
+
image = self.numpy_to_pil(image)
|
| 153 |
+
|
| 154 |
+
if not return_dict:
|
| 155 |
+
return (image,)
|
| 156 |
+
|
| 157 |
+
return ImagePipelineOutput(images=image)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
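The loop above is the heart of Diffusion Posterior Sampling: the unguided scheduler step is corrected by the gradient of a measurement-consistency loss with step size `zeta`. A minimal numeric sketch of that correction with random stand-in tensors (illustrative only, not part of the diff):

```python
# x_{t-1} <- x'_{t-1} - zeta * grad_{x_t} || y - f(x0_hat) ||
import torch

zeta = 0.3
x_t = torch.randn(1, 3, 8, 8, requires_grad=True)   # current noisy sample
x_prev = torch.randn(1, 3, 8, 8)                     # scheduler's unguided prev_sample
y = torch.randn(1, 3, 8, 8)                          # corrupted measurement

x0_hat = 0.9 * x_t                                   # stand-in for pred_original_sample (depends on x_t)
loss = torch.sqrt(torch.sum((y - x0_hat) ** 2))      # same RMSE-style distance as RMSELoss below
loss.backward()

x_prev_guided = x_prev - zeta * x_t.grad             # data-consistency correction
```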
if __name__ == "__main__":
|
| 161 |
+
import scipy
|
| 162 |
+
from torch import nn
|
| 163 |
+
from torchvision.utils import save_image
|
| 164 |
+
|
| 165 |
+
# defining the operators f(.) of y = f(x)
|
| 166 |
+
# super-resolution operator
|
| 167 |
+
class SuperResolutionOperator(nn.Module):
|
| 168 |
+
def __init__(self, in_shape, scale_factor):
|
| 169 |
+
super().__init__()
|
| 170 |
+
|
| 171 |
+
# Resizer local class, do not use outside the SR operator class
|
| 172 |
+
class Resizer(nn.Module):
|
| 173 |
+
def __init__(self, in_shape, scale_factor=None, output_shape=None, kernel=None, antialiasing=True):
|
| 174 |
+
super(Resizer, self).__init__()
|
| 175 |
+
|
| 176 |
+
# First standardize values and fill missing arguments (if needed) by deriving scale from output shape or vice versa
|
| 177 |
+
scale_factor, output_shape = self.fix_scale_and_size(in_shape, output_shape, scale_factor)
|
| 178 |
+
|
| 179 |
+
# Choose interpolation method, each method has the matching kernel size
|
| 180 |
+
def cubic(x):
|
| 181 |
+
absx = np.abs(x)
|
| 182 |
+
absx2 = absx**2
|
| 183 |
+
absx3 = absx**3
|
| 184 |
+
return (1.5 * absx3 - 2.5 * absx2 + 1) * (absx <= 1) + (
|
| 185 |
+
-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2
|
| 186 |
+
) * ((1 < absx) & (absx <= 2))
|
| 187 |
+
|
| 188 |
+
def lanczos2(x):
|
| 189 |
+
return (
|
| 190 |
+
(np.sin(pi * x) * np.sin(pi * x / 2) + np.finfo(np.float32).eps)
|
| 191 |
+
/ ((pi**2 * x**2 / 2) + np.finfo(np.float32).eps)
|
| 192 |
+
) * (abs(x) < 2)
|
| 193 |
+
|
| 194 |
+
def box(x):
|
| 195 |
+
return ((-0.5 <= x) & (x < 0.5)) * 1.0
|
| 196 |
+
|
| 197 |
+
def lanczos3(x):
|
| 198 |
+
return (
|
| 199 |
+
(np.sin(pi * x) * np.sin(pi * x / 3) + np.finfo(np.float32).eps)
|
| 200 |
+
/ ((pi**2 * x**2 / 3) + np.finfo(np.float32).eps)
|
| 201 |
+
) * (abs(x) < 3)
|
| 202 |
+
|
| 203 |
+
def linear(x):
|
| 204 |
+
return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
|
| 205 |
+
|
| 206 |
+
method, kernel_width = {
|
| 207 |
+
"cubic": (cubic, 4.0),
|
| 208 |
+
"lanczos2": (lanczos2, 4.0),
|
| 209 |
+
"lanczos3": (lanczos3, 6.0),
|
| 210 |
+
"box": (box, 1.0),
|
| 211 |
+
"linear": (linear, 2.0),
|
| 212 |
+
None: (cubic, 4.0), # set default interpolation method as cubic
|
| 213 |
+
}.get(kernel)
|
| 214 |
+
|
| 215 |
+
# Antialiasing is only used when downscaling
|
| 216 |
+
antialiasing *= np.any(np.array(scale_factor) < 1)
|
| 217 |
+
|
| 218 |
+
# Sort indices of dimensions according to scale of each dimension. since we are going dim by dim this is efficient
|
| 219 |
+
sorted_dims = np.argsort(np.array(scale_factor))
|
| 220 |
+
self.sorted_dims = [int(dim) for dim in sorted_dims if scale_factor[dim] != 1]
|
| 221 |
+
|
| 222 |
+
# Iterate over dimensions to calculate local weights for resizing and resize each time in one direction
|
| 223 |
+
field_of_view_list = []
|
| 224 |
+
weights_list = []
|
| 225 |
+
for dim in self.sorted_dims:
|
| 226 |
+
# for each coordinate (along 1 dim), calculate which coordinates in the input image affect its result and the
|
| 227 |
+
# weights that multiply the values there to get its result.
|
| 228 |
+
weights, field_of_view = self.contributions(
|
| 229 |
+
in_shape[dim], output_shape[dim], scale_factor[dim], method, kernel_width, antialiasing
|
| 230 |
+
)
|
| 231 |
+
|
| 232 |
+
# convert to torch tensor
|
| 233 |
+
weights = torch.tensor(weights.T, dtype=torch.float32)
|
| 234 |
+
|
| 235 |
+
# We add singleton dimensions to the weight matrix so we can multiply it with the big tensor we get for
|
| 236 |
+
# tmp_im[field_of_view.T], (bsxfun style)
|
| 237 |
+
weights_list.append(
|
| 238 |
+
nn.Parameter(
|
| 239 |
+
torch.reshape(weights, list(weights.shape) + (len(scale_factor) - 1) * [1]),
|
| 240 |
+
requires_grad=False,
|
| 241 |
+
)
|
| 242 |
+
)
|
| 243 |
+
field_of_view_list.append(
|
| 244 |
+
nn.Parameter(
|
| 245 |
+
torch.tensor(field_of_view.T.astype(np.int32), dtype=torch.long), requires_grad=False
|
| 246 |
+
)
|
| 247 |
+
)
|
| 248 |
+
|
| 249 |
+
self.field_of_view = nn.ParameterList(field_of_view_list)
|
| 250 |
+
self.weights = nn.ParameterList(weights_list)
|
| 251 |
+
|
| 252 |
+
def forward(self, in_tensor):
|
| 253 |
+
x = in_tensor
|
| 254 |
+
|
| 255 |
+
# Use the affecting position values and the set of weights to calculate the result of resizing along this 1 dim
|
| 256 |
+
for dim, fov, w in zip(self.sorted_dims, self.field_of_view, self.weights):
|
| 257 |
+
# To be able to act on each dim, we swap so that dim 0 is the wanted dim to resize
|
| 258 |
+
x = torch.transpose(x, dim, 0)
|
| 259 |
+
|
| 260 |
+
# This is a bit of a complicated multiplication: x[field_of_view.T] is a tensor of order image_dims+1.
|
| 261 |
+
# for each pixel in the output image it matches the positions that influence it from the input image (along 1 dim
|
| 262 |
+
# only, this is why it only adds 1 dim to the shape). We then multiply, for each pixel, its set of positions with
|
| 263 |
+
# the matching set of weights. we do this by this big tensor element-wise multiplication (MATLAB bsxfun style:
|
| 264 |
+
# matching dims are multiplied element-wise while singletons mean that the matching dim is all multiplied by the
|
| 265 |
+
# same number
|
| 266 |
+
x = torch.sum(x[fov] * w, dim=0)
|
| 267 |
+
|
| 268 |
+
# Finally we swap back the axes to the original order
|
| 269 |
+
x = torch.transpose(x, dim, 0)
|
| 270 |
+
|
| 271 |
+
return x
|
| 272 |
+
|
| 273 |
+
def fix_scale_and_size(self, input_shape, output_shape, scale_factor):
|
| 274 |
+
# First fix the scale-factor (if given) to the standardized form the function expects (a list of scale factors of the
|
| 275 |
+
# same size as the number of input dimensions)
|
| 276 |
+
if scale_factor is not None:
|
| 277 |
+
# By default, if scale-factor is a scalar we assume 2d resizing and duplicate it.
|
| 278 |
+
if np.isscalar(scale_factor) and len(input_shape) > 1:
|
| 279 |
+
scale_factor = [scale_factor, scale_factor]
|
| 280 |
+
|
| 281 |
+
# We extend the size of scale-factor list to the size of the input by assigning 1 to all the unspecified scales
|
| 282 |
+
scale_factor = list(scale_factor)
|
| 283 |
+
scale_factor = [1] * (len(input_shape) - len(scale_factor)) + scale_factor
|
| 284 |
+
|
| 285 |
+
# Fixing output-shape (if given): extending it to the size of the input-shape, by assigning the original input-size
|
| 286 |
+
# to all the unspecified dimensions
|
| 287 |
+
if output_shape is not None:
|
| 288 |
+
output_shape = list(input_shape[len(output_shape) :]) + list(np.uint(np.array(output_shape)))
|
| 289 |
+
|
| 290 |
+
# Dealing with the case of a non-given scale-factor, calculating according to output-shape. Note that this is
|
| 291 |
+
# sub-optimal, because there can be different scales to the same output-shape.
|
| 292 |
+
if scale_factor is None:
|
| 293 |
+
scale_factor = 1.0 * np.array(output_shape) / np.array(input_shape)
|
| 294 |
+
|
| 295 |
+
# Dealing with missing output-shape. calculating according to scale-factor
|
| 296 |
+
if output_shape is None:
|
| 297 |
+
output_shape = np.uint(np.ceil(np.array(input_shape) * np.array(scale_factor)))
|
| 298 |
+
|
| 299 |
+
return scale_factor, output_shape
|
| 300 |
+
|
| 301 |
+
def contributions(self, in_length, out_length, scale, kernel, kernel_width, antialiasing):
|
| 302 |
+
# This function calculates a set of 'filters' and a set of field_of_view that will later on be applied
|
| 303 |
+
# such that each position from the field_of_view will be multiplied with a matching filter from the
|
| 304 |
+
# 'weights' based on the interpolation method and the distance of the sub-pixel location from the pixel centers
|
| 305 |
+
# around it. This is only done for one dimension of the image.
|
| 306 |
+
|
| 307 |
+
# When anti-aliasing is activated (default and only for downscaling) the receptive field is stretched to size of
|
| 308 |
+
# 1/sf. this means filtering is more 'low-pass filter'.
|
| 309 |
+
fixed_kernel = (lambda arg: scale * kernel(scale * arg)) if antialiasing else kernel
|
| 310 |
+
kernel_width *= 1.0 / scale if antialiasing else 1.0
|
| 311 |
+
|
| 312 |
+
# These are the coordinates of the output image
|
| 313 |
+
out_coordinates = np.arange(1, out_length + 1)
|
| 314 |
+
|
| 315 |
+
# since both scale-factor and output size can be provided simultaneously, preserving the center of the image requires shifting
|
| 316 |
+
# the output coordinates. The deviation is because out_length doesn't necessarily equal in_length*scale.
|
| 317 |
+
# to keep the center we need to subtract half of this deviation so that we get equal margins for both sides and center is preserved.
|
| 318 |
+
shifted_out_coordinates = out_coordinates - (out_length - in_length * scale) / 2
|
| 319 |
+
|
| 320 |
+
# These are the matching positions of the output-coordinates on the input image coordinates.
|
| 321 |
+
# Best explained by example: say we have 4 horizontal pixels for HR and we downscale by SF=2 and get 2 pixels:
|
| 322 |
+
# [1,2,3,4] -> [1,2]. Remember each pixel number is the middle of the pixel.
|
| 323 |
+
# The scaling is done between the distances and not pixel numbers (the right boundary of pixel 4 is transformed to
|
| 324 |
+
# the right boundary of pixel 2. pixel 1 in the small image matches the boundary between pixels 1 and 2 in the big
|
| 325 |
+
# one and not to pixel 2. This means the position is not just multiplication of the old pos by scale-factor).
|
| 326 |
+
# So if we measure distance from the left border, middle of pixel 1 is at distance d=0.5, border between 1 and 2 is
|
| 327 |
+
# at d=1, and so on (d = p - 0.5). we calculate (d_new = d_old / sf) which means:
|
| 328 |
+
# (p_new-0.5 = (p_old-0.5) / sf) -> p_new = p_old/sf + 0.5 * (1-1/sf)
|
| 329 |
+
match_coordinates = shifted_out_coordinates / scale + 0.5 * (1 - 1 / scale)
|
| 330 |
+
|
| 331 |
+
# This is the left boundary to start multiplying the filter from, it depends on the size of the filter
|
| 332 |
+
left_boundary = np.floor(match_coordinates - kernel_width / 2)
|
| 333 |
+
|
| 334 |
+
# Kernel width needs to be enlarged because when the coverage has sub-pixel borders, it must 'see' the pixel centers
|
| 335 |
+
# of the pixels it only partially covers. So we add one pixel on each side to consider (weights can zero them out)
|
| 336 |
+
expanded_kernel_width = np.ceil(kernel_width) + 2
|
| 337 |
+
|
| 338 |
+
# Determine a set of field_of_view for each output position; these are the pixels in the input image
|
| 339 |
+
# that the pixel in the output image 'sees'. We get a matrix whose horizontal dim is the output pixels (big) and the
|
| 340 |
+
# vertical dim is the pixels it 'sees' (kernel_size + 2)
|
| 341 |
+
field_of_view = np.squeeze(
|
| 342 |
+
np.int16(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1)
|
| 343 |
+
)
|
| 344 |
+
|
| 345 |
+
# Assign weight to each pixel in the field of view. A matrix whose horizontal dim is the output pixels and the
|
| 346 |
+
# vertical dim is a list of weights matching to the pixel in the field of view (that are specified in
|
| 347 |
+
# 'field_of_view')
|
| 348 |
+
weights = fixed_kernel(1.0 * np.expand_dims(match_coordinates, axis=1) - field_of_view - 1)
|
| 349 |
+
|
| 350 |
+
# Normalize weights to sum to 1, being careful not to divide by 0
|
| 351 |
+
sum_weights = np.sum(weights, axis=1)
|
| 352 |
+
sum_weights[sum_weights == 0] = 1.0
|
| 353 |
+
weights = 1.0 * weights / np.expand_dims(sum_weights, axis=1)
|
| 354 |
+
|
| 355 |
+
# We use this mirror structure as a trick for reflection padding at the boundaries
|
| 356 |
+
mirror = np.uint(np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))))
|
| 357 |
+
field_of_view = mirror[np.mod(field_of_view, mirror.shape[0])]
|
| 358 |
+
|
| 359 |
+
# Get rid of weights and pixel positions that are of zero weight
|
| 360 |
+
non_zero_out_pixels = np.nonzero(np.any(weights, axis=0))
|
| 361 |
+
weights = np.squeeze(weights[:, non_zero_out_pixels])
|
| 362 |
+
field_of_view = np.squeeze(field_of_view[:, non_zero_out_pixels])
|
| 363 |
+
|
| 364 |
+
# Final products are the relative positions and the matching weights, both are output_size X fixed_kernel_size
|
| 365 |
+
return weights, field_of_view
|
| 366 |
+
|
| 367 |
+
self.down_sample = Resizer(in_shape, 1 / scale_factor)
|
| 368 |
+
for param in self.parameters():
|
| 369 |
+
param.requires_grad = False
|
| 370 |
+
|
| 371 |
+
def forward(self, data, **kwargs):
|
| 372 |
+
return self.down_sample(data)
|
| 373 |
+
|
| 374 |
+
# Gaussian blurring operator
|
| 375 |
+
class GaussialBlurOperator(nn.Module):
|
| 376 |
+
def __init__(self, kernel_size, intensity):
|
| 377 |
+
super().__init__()
|
| 378 |
+
|
| 379 |
+
class Blurkernel(nn.Module):
|
| 380 |
+
def __init__(self, blur_type="gaussian", kernel_size=31, std=3.0):
|
| 381 |
+
super().__init__()
|
| 382 |
+
self.blur_type = blur_type
|
| 383 |
+
self.kernel_size = kernel_size
|
| 384 |
+
self.std = std
|
| 385 |
+
self.seq = nn.Sequential(
|
| 386 |
+
nn.ReflectionPad2d(self.kernel_size // 2),
|
| 387 |
+
nn.Conv2d(3, 3, self.kernel_size, stride=1, padding=0, bias=False, groups=3),
|
| 388 |
+
)
|
| 389 |
+
self.weights_init()
|
| 390 |
+
|
| 391 |
+
def forward(self, x):
|
| 392 |
+
return self.seq(x)
|
| 393 |
+
|
| 394 |
+
def weights_init(self):
|
| 395 |
+
if self.blur_type == "gaussian":
|
| 396 |
+
n = np.zeros((self.kernel_size, self.kernel_size))
|
| 397 |
+
n[self.kernel_size // 2, self.kernel_size // 2] = 1
|
| 398 |
+
k = scipy.ndimage.gaussian_filter(n, sigma=self.std)
|
| 399 |
+
k = torch.from_numpy(k)
|
| 400 |
+
self.k = k
|
| 401 |
+
for name, f in self.named_parameters():
|
| 402 |
+
f.data.copy_(k)
|
| 403 |
+
|
| 404 |
+
def update_weights(self, k):
|
| 405 |
+
if not torch.is_tensor(k):
|
| 406 |
+
k = torch.from_numpy(k)
|
| 407 |
+
for name, f in self.named_parameters():
|
| 408 |
+
f.data.copy_(k)
|
| 409 |
+
|
| 410 |
+
def get_kernel(self):
|
| 411 |
+
return self.k
|
| 412 |
+
|
| 413 |
+
self.kernel_size = kernel_size
|
| 414 |
+
self.conv = Blurkernel(blur_type="gaussian", kernel_size=kernel_size, std=intensity)
|
| 415 |
+
self.kernel = self.conv.get_kernel()
|
| 416 |
+
self.conv.update_weights(self.kernel.type(torch.float32))
|
| 417 |
+
|
| 418 |
+
for param in self.parameters():
|
| 419 |
+
param.requires_grad = False
|
| 420 |
+
|
| 421 |
+
def forward(self, data, **kwargs):
|
| 422 |
+
return self.conv(data)
|
| 423 |
+
|
| 424 |
+
def transpose(self, data, **kwargs):
|
| 425 |
+
return data
|
| 426 |
+
|
| 427 |
+
def get_kernel(self):
|
| 428 |
+
return self.kernel.view(1, 1, self.kernel_size, self.kernel_size)
|
| 429 |
+
|
| 430 |
+
# assuming the forward process y = f(x) is polluted by Gaussian noise, use l2 norm
|
| 431 |
+
def RMSELoss(yhat, y):
|
| 432 |
+
return torch.sqrt(torch.sum((yhat - y) ** 2))
|
| 433 |
+
|
| 434 |
+
# set up source image
|
| 435 |
+
src = Image.open("sample.png")
|
| 436 |
+
# read image into [1,3,H,W]
|
| 437 |
+
src = torch.from_numpy(np.array(src, dtype=np.float32)).permute(2, 0, 1)[None]
|
| 438 |
+
# normalize image to [-1,1]
|
| 439 |
+
src = (src / 127.5) - 1.0
|
| 440 |
+
src = src.to("cuda")
|
| 441 |
+
|
| 442 |
+
# set up operator and measurement
|
| 443 |
+
# operator = SuperResolutionOperator(in_shape=src.shape, scale_factor=4).to("cuda")
|
| 444 |
+
operator = GaussialBlurOperator(kernel_size=61, intensity=3.0).to("cuda")
|
| 445 |
+
measurement = operator(src)
|
| 446 |
+
|
| 447 |
+
# set up scheduler
|
| 448 |
+
scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256")
|
| 449 |
+
scheduler.set_timesteps(1000)
|
| 450 |
+
|
| 451 |
+
# set up model
|
| 452 |
+
model = UNet2DModel.from_pretrained("google/ddpm-celebahq-256").to("cuda")
|
| 453 |
+
|
| 454 |
+
save_image((src + 1.0) / 2.0, "dps_src.png")
|
| 455 |
+
save_image((measurement + 1.0) / 2.0, "dps_mea.png")
|
| 456 |
+
|
| 457 |
+
# finally, the pipeline
|
| 458 |
+
dpspipe = DPSPipeline(model, scheduler)
|
| 459 |
+
image = dpspipe(
|
| 460 |
+
measurement=measurement,
|
| 461 |
+
operator=operator,
|
| 462 |
+
loss_fn=RMSELoss,
|
| 463 |
+
zeta=1.0,
|
| 464 |
+
).images[0]
|
| 465 |
+
|
| 466 |
+
image.save("dps_generated_image.png")
|
v0.36.0/edict_pipeline.py
ADDED
|
@@ -0,0 +1,264 @@
| 1 |
+
from typing import Optional
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from PIL import Image
|
| 5 |
+
from tqdm.auto import tqdm
|
| 6 |
+
from transformers import CLIPTextModel, CLIPTokenizer
|
| 7 |
+
|
| 8 |
+
from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, UNet2DConditionModel
|
| 9 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 10 |
+
from diffusers.utils import (
|
| 11 |
+
deprecate,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class EDICTPipeline(DiffusionPipeline):
|
| 16 |
+
def __init__(
|
| 17 |
+
self,
|
| 18 |
+
vae: AutoencoderKL,
|
| 19 |
+
text_encoder: CLIPTextModel,
|
| 20 |
+
tokenizer: CLIPTokenizer,
|
| 21 |
+
unet: UNet2DConditionModel,
|
| 22 |
+
scheduler: DDIMScheduler,
|
| 23 |
+
mixing_coeff: float = 0.93,
|
| 24 |
+
leapfrog_steps: bool = True,
|
| 25 |
+
):
|
| 26 |
+
self.mixing_coeff = mixing_coeff
|
| 27 |
+
self.leapfrog_steps = leapfrog_steps
|
| 28 |
+
|
| 29 |
+
super().__init__()
|
| 30 |
+
self.register_modules(
|
| 31 |
+
vae=vae,
|
| 32 |
+
text_encoder=text_encoder,
|
| 33 |
+
tokenizer=tokenizer,
|
| 34 |
+
unet=unet,
|
| 35 |
+
scheduler=scheduler,
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 39 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 40 |
+
|
| 41 |
+
def _encode_prompt(
|
| 42 |
+
self, prompt: str, negative_prompt: Optional[str] = None, do_classifier_free_guidance: bool = False
|
| 43 |
+
):
|
| 44 |
+
text_inputs = self.tokenizer(
|
| 45 |
+
prompt,
|
| 46 |
+
padding="max_length",
|
| 47 |
+
max_length=self.tokenizer.model_max_length,
|
| 48 |
+
truncation=True,
|
| 49 |
+
return_tensors="pt",
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
prompt_embeds = self.text_encoder(text_inputs.input_ids.to(self.device)).last_hidden_state
|
| 53 |
+
|
| 54 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=self.device)
|
| 55 |
+
|
| 56 |
+
if do_classifier_free_guidance:
|
| 57 |
+
uncond_tokens = "" if negative_prompt is None else negative_prompt
|
| 58 |
+
|
| 59 |
+
uncond_input = self.tokenizer(
|
| 60 |
+
uncond_tokens,
|
| 61 |
+
padding="max_length",
|
| 62 |
+
max_length=self.tokenizer.model_max_length,
|
| 63 |
+
truncation=True,
|
| 64 |
+
return_tensors="pt",
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device)).last_hidden_state
|
| 68 |
+
|
| 69 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 70 |
+
|
| 71 |
+
return prompt_embeds
|
| 72 |
+
|
| 73 |
+
def denoise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor):
|
| 74 |
+
x = self.mixing_coeff * x + (1 - self.mixing_coeff) * y
|
| 75 |
+
y = self.mixing_coeff * y + (1 - self.mixing_coeff) * x
|
| 76 |
+
|
| 77 |
+
return [x, y]
|
| 78 |
+
|
| 79 |
+
def noise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor):
|
| 80 |
+
y = (y - (1 - self.mixing_coeff) * x) / self.mixing_coeff
|
| 81 |
+
x = (x - (1 - self.mixing_coeff) * y) / self.mixing_coeff
|
| 82 |
+
|
| 83 |
+
return [x, y]
|
| 84 |
+
|
| 85 |
+
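A quick way to see why EDICT can invert exactly: `noise_mixing_layer` undoes `denoise_mixing_layer` in closed form. A small self-contained check (sketch, not part of the diff):

```python
import torch

m = 0.93                                    # mixing_coeff
x, y = torch.randn(2, 4), torch.randn(2, 4)

# denoise_mixing_layer
x1 = m * x + (1 - m) * y
y1 = m * y + (1 - m) * x1

# noise_mixing_layer applied to the result recovers the originals
y2 = (y1 - (1 - m) * x1) / m
x2 = (x1 - (1 - m) * y2) / m

assert torch.allclose(x2, x, atol=1e-6) and torch.allclose(y2, y, atol=1e-6)
```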
def _get_alpha_and_beta(self, t: torch.Tensor):
|
| 86 |
+
# self.scheduler.alphas_cumprod always lives on the CPU, so index it with a plain int
|
| 87 |
+
t = int(t)
|
| 88 |
+
|
| 89 |
+
alpha_prod = self.scheduler.alphas_cumprod[t] if t >= 0 else self.scheduler.final_alpha_cumprod
|
| 90 |
+
|
| 91 |
+
return alpha_prod, 1 - alpha_prod
|
| 92 |
+
|
| 93 |
+
def noise_step(
|
| 94 |
+
self,
|
| 95 |
+
base: torch.Tensor,
|
| 96 |
+
model_input: torch.Tensor,
|
| 97 |
+
model_output: torch.Tensor,
|
| 98 |
+
timestep: torch.Tensor,
|
| 99 |
+
):
|
| 100 |
+
prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps
|
| 101 |
+
|
| 102 |
+
alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep)
|
| 103 |
+
alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep)
|
| 104 |
+
|
| 105 |
+
a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
|
| 106 |
+
b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5
|
| 107 |
+
|
| 108 |
+
next_model_input = (base - b_t * model_output) / a_t
|
| 109 |
+
|
| 110 |
+
return model_input, next_model_input.to(base.dtype)
|
| 111 |
+
|
| 112 |
+
def denoise_step(
|
| 113 |
+
self,
|
| 114 |
+
base: torch.Tensor,
|
| 115 |
+
model_input: torch.Tensor,
|
| 116 |
+
model_output: torch.Tensor,
|
| 117 |
+
timestep: torch.Tensor,
|
| 118 |
+
):
|
| 119 |
+
prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps
|
| 120 |
+
|
| 121 |
+
alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep)
|
| 122 |
+
alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep)
|
| 123 |
+
|
| 124 |
+
a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
|
| 125 |
+
b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5
|
| 126 |
+
next_model_input = a_t * base + b_t * model_output
|
| 127 |
+
|
| 128 |
+
return model_input, next_model_input.to(base.dtype)
|
| 129 |
+
|
| 130 |
+
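Similarly, `noise_step` and `denoise_step` are algebraic inverses for a fixed pair `(a_t, b_t)`, which is what lets the pipeline re-noise an image and then denoise it back deterministically. A toy check with placeholder coefficients (sketch only):

```python
import torch

a_t, b_t = 0.98, 0.05                                # placeholder alphas-derived coefficients
base = torch.randn(1, 4, 8, 8)
model_output = torch.randn(1, 4, 8, 8)

denoised = a_t * base + b_t * model_output           # core update of denoise_step
recovered = (denoised - b_t * model_output) / a_t    # core update of noise_step
assert torch.allclose(recovered, base, atol=1e-5)
```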
@torch.no_grad()
|
| 131 |
+
def decode_latents(self, latents: torch.Tensor):
|
| 132 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 133 |
+
image = self.vae.decode(latents).sample
|
| 134 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 135 |
+
return image
|
| 136 |
+
|
| 137 |
+
@torch.no_grad()
|
| 138 |
+
def prepare_latents(
|
| 139 |
+
self,
|
| 140 |
+
image: Image.Image,
|
| 141 |
+
text_embeds: torch.Tensor,
|
| 142 |
+
timesteps: torch.Tensor,
|
| 143 |
+
guidance_scale: float,
|
| 144 |
+
generator: Optional[torch.Generator] = None,
|
| 145 |
+
):
|
| 146 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 147 |
+
|
| 148 |
+
image = image.to(device=self.device, dtype=text_embeds.dtype)
|
| 149 |
+
latent = self.vae.encode(image).latent_dist.sample(generator)
|
| 150 |
+
|
| 151 |
+
latent = self.vae.config.scaling_factor * latent
|
| 152 |
+
|
| 153 |
+
coupled_latents = [latent.clone(), latent.clone()]
|
| 154 |
+
|
| 155 |
+
for i, t in tqdm(enumerate(timesteps), total=len(timesteps)):
|
| 156 |
+
coupled_latents = self.noise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1])
|
| 157 |
+
|
| 158 |
+
# j - model_input index, k - base index
|
| 159 |
+
for j in range(2):
|
| 160 |
+
k = j ^ 1
|
| 161 |
+
|
| 162 |
+
if self.leapfrog_steps:
|
| 163 |
+
if i % 2 == 0:
|
| 164 |
+
k, j = j, k
|
| 165 |
+
|
| 166 |
+
model_input = coupled_latents[j]
|
| 167 |
+
base = coupled_latents[k]
|
| 168 |
+
|
| 169 |
+
latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
|
| 170 |
+
|
| 171 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeds).sample
|
| 172 |
+
|
| 173 |
+
if do_classifier_free_guidance:
|
| 174 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 175 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 176 |
+
|
| 177 |
+
base, model_input = self.noise_step(
|
| 178 |
+
base=base,
|
| 179 |
+
model_input=model_input,
|
| 180 |
+
model_output=noise_pred,
|
| 181 |
+
timestep=t,
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
coupled_latents[k] = model_input
|
| 185 |
+
|
| 186 |
+
return coupled_latents
|
| 187 |
+
|
| 188 |
+
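The index gymnastics in `prepare_latents` alternate which of the two coupled latents is evaluated by the UNet and which one receives the update, swapping roles on even steps when `leapfrog_steps=True`. A tiny sketch of just that alternation pattern (illustrative, not part of the diff):

```python
# Which latent is updated at each inversion step when leapfrog_steps=True.
for i in range(4):            # pretend timestep index
    for j in range(2):
        k = j ^ 1
        if i % 2 == 0:        # leapfrog: swap roles on even steps during inversion
            k, j = j, k
        print(f"step {i}: latent {k} is updated using the noise prediction from latent {j}")
```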
@torch.no_grad()
|
| 189 |
+
def __call__(
|
| 190 |
+
self,
|
| 191 |
+
base_prompt: str,
|
| 192 |
+
target_prompt: str,
|
| 193 |
+
image: Image.Image,
|
| 194 |
+
guidance_scale: float = 3.0,
|
| 195 |
+
num_inference_steps: int = 50,
|
| 196 |
+
strength: float = 0.8,
|
| 197 |
+
negative_prompt: Optional[str] = None,
|
| 198 |
+
generator: Optional[torch.Generator] = None,
|
| 199 |
+
output_type: Optional[str] = "pil",
|
| 200 |
+
):
|
| 201 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 202 |
+
|
| 203 |
+
image = self.image_processor.preprocess(image)
|
| 204 |
+
|
| 205 |
+
base_embeds = self._encode_prompt(base_prompt, negative_prompt, do_classifier_free_guidance)
|
| 206 |
+
target_embeds = self._encode_prompt(target_prompt, negative_prompt, do_classifier_free_guidance)
|
| 207 |
+
|
| 208 |
+
self.scheduler.set_timesteps(num_inference_steps, self.device)
|
| 209 |
+
|
| 210 |
+
t_limit = num_inference_steps - int(num_inference_steps * strength)
|
| 211 |
+
fwd_timesteps = self.scheduler.timesteps[t_limit:]
|
| 212 |
+
bwd_timesteps = fwd_timesteps.flip(0)
|
| 213 |
+
|
| 214 |
+
coupled_latents = self.prepare_latents(image, base_embeds, bwd_timesteps, guidance_scale, generator)
|
| 215 |
+
|
| 216 |
+
for i, t in tqdm(enumerate(fwd_timesteps), total=len(fwd_timesteps)):
|
| 217 |
+
# j - model_input index, k - base index
|
| 218 |
+
for k in range(2):
|
| 219 |
+
j = k ^ 1
|
| 220 |
+
|
| 221 |
+
if self.leapfrog_steps:
|
| 222 |
+
if i % 2 == 1:
|
| 223 |
+
k, j = j, k
|
| 224 |
+
|
| 225 |
+
model_input = coupled_latents[j]
|
| 226 |
+
base = coupled_latents[k]
|
| 227 |
+
|
| 228 |
+
latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
|
| 229 |
+
|
| 230 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=target_embeds).sample
|
| 231 |
+
|
| 232 |
+
if do_classifier_free_guidance:
|
| 233 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 234 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 235 |
+
|
| 236 |
+
base, model_input = self.denoise_step(
|
| 237 |
+
base=base,
|
| 238 |
+
model_input=model_input,
|
| 239 |
+
model_output=noise_pred,
|
| 240 |
+
timestep=t,
|
| 241 |
+
)
|
| 242 |
+
|
| 243 |
+
coupled_latents[k] = model_input
|
| 244 |
+
|
| 245 |
+
coupled_latents = self.denoise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1])
|
| 246 |
+
|
| 247 |
+
# either one is fine
|
| 248 |
+
final_latent = coupled_latents[0]
|
| 249 |
+
|
| 250 |
+
if output_type not in ["latent", "pt", "np", "pil"]:
|
| 251 |
+
deprecation_message = (
|
| 252 |
+
f"the output_type {output_type} is outdated. Please make sure to set it to one of these instead: "
|
| 253 |
+
"`pil`, `np`, `pt`, `latent`"
|
| 254 |
+
)
|
| 255 |
+
deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
|
| 256 |
+
output_type = "np"
|
| 257 |
+
|
| 258 |
+
if output_type == "latent":
|
| 259 |
+
image = final_latent
|
| 260 |
+
else:
|
| 261 |
+
image = self.decode_latents(final_latent)
|
| 262 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 263 |
+
|
| 264 |
+
return image
|
v0.36.0/fresco_v2v.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
v0.36.0/gluegen.py
ADDED
|
@@ -0,0 +1,816 @@
| 1 |
+
import inspect
|
| 2 |
+
from typing import Any, Dict, List, Optional, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor
|
| 7 |
+
|
| 8 |
+
from diffusers import DiffusionPipeline
|
| 9 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 10 |
+
from diffusers.loaders import StableDiffusionLoraLoaderMixin
|
| 11 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 12 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 13 |
+
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
|
| 14 |
+
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
|
| 15 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 16 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 17 |
+
from diffusers.utils import (
|
| 18 |
+
USE_PEFT_BACKEND,
|
| 19 |
+
logging,
|
| 20 |
+
scale_lora_layers,
|
| 21 |
+
unscale_lora_layers,
|
| 22 |
+
)
|
| 23 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class TranslatorBase(nn.Module):
|
| 30 |
+
def __init__(self, num_tok, dim, dim_out, mult=2):
|
| 31 |
+
super().__init__()
|
| 32 |
+
|
| 33 |
+
self.dim_in = dim
|
| 34 |
+
self.dim_out = dim_out
|
| 35 |
+
|
| 36 |
+
self.net_tok = nn.Sequential(
|
| 37 |
+
nn.Linear(num_tok, int(num_tok * mult)),
|
| 38 |
+
nn.LayerNorm(int(num_tok * mult)),
|
| 39 |
+
nn.GELU(),
|
| 40 |
+
nn.Linear(int(num_tok * mult), int(num_tok * mult)),
|
| 41 |
+
nn.LayerNorm(int(num_tok * mult)),
|
| 42 |
+
nn.GELU(),
|
| 43 |
+
nn.Linear(int(num_tok * mult), num_tok),
|
| 44 |
+
nn.LayerNorm(num_tok),
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
self.net_sen = nn.Sequential(
|
| 48 |
+
nn.Linear(dim, int(dim * mult)),
|
| 49 |
+
nn.LayerNorm(int(dim * mult)),
|
| 50 |
+
nn.GELU(),
|
| 51 |
+
nn.Linear(int(dim * mult), int(dim * mult)),
|
| 52 |
+
nn.LayerNorm(int(dim * mult)),
|
| 53 |
+
nn.GELU(),
|
| 54 |
+
nn.Linear(int(dim * mult), dim_out),
|
| 55 |
+
nn.LayerNorm(dim_out),
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
def forward(self, x):
|
| 59 |
+
if self.dim_in == self.dim_out:
|
| 60 |
+
indentity_0 = x
|
| 61 |
+
x = self.net_sen(x)
|
| 62 |
+
x += indentity_0
|
| 63 |
+
x = x.transpose(1, 2)
|
| 64 |
+
|
| 65 |
+
indentity_1 = x
|
| 66 |
+
x = self.net_tok(x)
|
| 67 |
+
x += indentity_1
|
| 68 |
+
x = x.transpose(1, 2)
|
| 69 |
+
else:
|
| 70 |
+
x = self.net_sen(x)
|
| 71 |
+
x = x.transpose(1, 2)
|
| 72 |
+
|
| 73 |
+
x = self.net_tok(x)
|
| 74 |
+
x = x.transpose(1, 2)
|
| 75 |
+
return x
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
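`TranslatorBase` mixes information along both the embedding axis (`net_sen`) and the token axis (`net_tok`), projecting a foreign text encoder's hidden states into the width the UNet expects. A hypothetical shape check, assuming the class above is in scope and using CLIP-like sizes (77 tokens, 1024 -> 768); the numbers are illustrative, not values from the diff:

```python
import torch

adapter = TranslatorBase(num_tok=77, dim=1024, dim_out=768, mult=2)
tokens = torch.randn(2, 77, 1024)   # (batch, tokens, source text-encoder width)
out = adapter(tokens)
print(out.shape)                    # torch.Size([2, 77, 768])
```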
class TranslatorBaseNoLN(nn.Module):
|
| 79 |
+
def __init__(self, num_tok, dim, dim_out, mult=2):
|
| 80 |
+
super().__init__()
|
| 81 |
+
|
| 82 |
+
self.dim_in = dim
|
| 83 |
+
self.dim_out = dim_out
|
| 84 |
+
|
| 85 |
+
self.net_tok = nn.Sequential(
|
| 86 |
+
nn.Linear(num_tok, int(num_tok * mult)),
|
| 87 |
+
nn.GELU(),
|
| 88 |
+
nn.Linear(int(num_tok * mult), int(num_tok * mult)),
|
| 89 |
+
nn.GELU(),
|
| 90 |
+
nn.Linear(int(num_tok * mult), num_tok),
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
self.net_sen = nn.Sequential(
|
| 94 |
+
nn.Linear(dim, int(dim * mult)),
|
| 95 |
+
nn.GELU(),
|
| 96 |
+
nn.Linear(int(dim * mult), int(dim * mult)),
|
| 97 |
+
nn.GELU(),
|
| 98 |
+
nn.Linear(int(dim * mult), dim_out),
|
| 99 |
+
)
|
| 100 |
+
|
| 101 |
+
def forward(self, x):
|
| 102 |
+
if self.dim_in == self.dim_out:
|
| 103 |
+
indentity_0 = x
|
| 104 |
+
x = self.net_sen(x)
|
| 105 |
+
x += indentity_0
|
| 106 |
+
x = x.transpose(1, 2)
|
| 107 |
+
|
| 108 |
+
indentity_1 = x
|
| 109 |
+
x = self.net_tok(x)
|
| 110 |
+
x += indentity_1
|
| 111 |
+
x = x.transpose(1, 2)
|
| 112 |
+
else:
|
| 113 |
+
x = self.net_sen(x)
|
| 114 |
+
x = x.transpose(1, 2)
|
| 115 |
+
|
| 116 |
+
x = self.net_tok(x)
|
| 117 |
+
x = x.transpose(1, 2)
|
| 118 |
+
return x
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class TranslatorNoLN(nn.Module):
|
| 122 |
+
def __init__(self, num_tok, dim, dim_out, mult=2, depth=5):
|
| 123 |
+
super().__init__()
|
| 124 |
+
|
| 125 |
+
self.blocks = nn.ModuleList([TranslatorBase(num_tok, dim, dim, mult=2) for d in range(depth)])
|
| 126 |
+
self.gelu = nn.GELU()
|
| 127 |
+
|
| 128 |
+
self.tail = TranslatorBaseNoLN(num_tok, dim, dim_out, mult=2)
|
| 129 |
+
|
| 130 |
+
def forward(self, x):
|
| 131 |
+
for block in self.blocks:
|
| 132 |
+
x = block(x) + x
|
| 133 |
+
x = self.gelu(x)
|
| 134 |
+
|
| 135 |
+
x = self.tail(x)
|
| 136 |
+
return x
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
| 140 |
+
"""
|
| 141 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
| 142 |
+
Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). See Section 3.4
|
| 143 |
+
"""
|
| 144 |
+
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
|
| 145 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
| 146 |
+
# rescale the results from guidance (fixes overexposure)
|
| 147 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
| 148 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
| 149 |
+
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
| 150 |
+
return noise_cfg
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
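`rescale_noise_cfg` is applied after the usual classifier-free-guidance combination to pull the guided prediction's per-sample standard deviation back toward that of the text-conditioned prediction. A short sketch with random stand-in tensors (assumes the function above is in scope):

```python
import torch

noise_pred_uncond = torch.randn(2, 4, 64, 64)
noise_pred_text = torch.randn(2, 4, 64, 64)
guidance_scale, guidance_rescale = 7.5, 0.7

noise_cfg = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
noise_cfg = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=guidance_rescale)
```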
def retrieve_timesteps(
|
| 154 |
+
scheduler,
|
| 155 |
+
num_inference_steps: Optional[int] = None,
|
| 156 |
+
device: Optional[Union[str, torch.device]] = None,
|
| 157 |
+
timesteps: Optional[List[int]] = None,
|
| 158 |
+
**kwargs,
|
| 159 |
+
):
|
| 160 |
+
"""
|
| 161 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
| 162 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
| 163 |
+
|
| 164 |
+
Args:
|
| 165 |
+
scheduler (`SchedulerMixin`):
|
| 166 |
+
The scheduler to get timesteps from.
|
| 167 |
+
num_inference_steps (`int`):
|
| 168 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used,
|
| 169 |
+
`timesteps` must be `None`.
|
| 170 |
+
device (`str` or `torch.device`, *optional*):
|
| 171 |
+
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
|
| 172 |
+
timesteps (`List[int]`, *optional*):
|
| 173 |
+
Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
|
| 174 |
+
timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
|
| 175 |
+
must be `None`.
|
| 176 |
+
|
| 177 |
+
Returns:
|
| 178 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
| 179 |
+
second element is the number of inference steps.
|
| 180 |
+
"""
|
| 181 |
+
if timesteps is not None:
|
| 182 |
+
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 183 |
+
if not accepts_timesteps:
|
| 184 |
+
raise ValueError(
|
| 185 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 186 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
| 187 |
+
)
|
| 188 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
| 189 |
+
timesteps = scheduler.timesteps
|
| 190 |
+
num_inference_steps = len(timesteps)
|
| 191 |
+
else:
|
| 192 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
| 193 |
+
timesteps = scheduler.timesteps
|
| 194 |
+
return timesteps, num_inference_steps
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
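`retrieve_timesteps` accepts either a step count or an explicit timestep list, so callers can plug in custom spacings without touching the scheduler directly. Illustrative calls (sketch; assumes the helper above is in scope):

```python
from diffusers import DDIMScheduler

scheduler = DDIMScheduler()   # any scheduler whose set_timesteps matches the usual signature

# standard path: derive the schedule from a step count
timesteps, n = retrieve_timesteps(scheduler, num_inference_steps=30, device="cpu")

# custom path: only valid for schedulers whose set_timesteps accepts a `timesteps` argument
# timesteps, n = retrieve_timesteps(scheduler, timesteps=[999, 749, 499, 249, 0], device="cpu")
```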
class GlueGenStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin, StableDiffusionLoraLoaderMixin):
|
| 198 |
+
def __init__(
|
| 199 |
+
self,
|
| 200 |
+
vae: AutoencoderKL,
|
| 201 |
+
text_encoder: AutoModel,
|
| 202 |
+
tokenizer: AutoTokenizer,
|
| 203 |
+
unet: UNet2DConditionModel,
|
| 204 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 205 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 206 |
+
feature_extractor: CLIPImageProcessor,
|
| 207 |
+
language_adapter: TranslatorNoLN = None,
|
| 208 |
+
tensor_norm: torch.Tensor = None,
|
| 209 |
+
requires_safety_checker: bool = True,
|
| 210 |
+
):
|
| 211 |
+
super().__init__()
|
| 212 |
+
|
| 213 |
+
self.register_modules(
|
| 214 |
+
vae=vae,
|
| 215 |
+
text_encoder=text_encoder,
|
| 216 |
+
tokenizer=tokenizer,
|
| 217 |
+
unet=unet,
|
| 218 |
+
scheduler=scheduler,
|
| 219 |
+
safety_checker=safety_checker,
|
| 220 |
+
feature_extractor=feature_extractor,
|
| 221 |
+
language_adapter=language_adapter,
|
| 222 |
+
tensor_norm=tensor_norm,
|
| 223 |
+
)
|
| 224 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 225 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 226 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 227 |
+
|
| 228 |
+
def load_language_adapter(
|
| 229 |
+
self,
|
| 230 |
+
model_path: str,
|
| 231 |
+
num_token: int,
|
| 232 |
+
dim: int,
|
| 233 |
+
dim_out: int,
|
| 234 |
+
tensor_norm: torch.Tensor,
|
| 235 |
+
mult: int = 2,
|
| 236 |
+
depth: int = 5,
|
| 237 |
+
):
|
| 238 |
+
device = self._execution_device
|
| 239 |
+
self.tensor_norm = tensor_norm.to(device)
|
| 240 |
+
self.language_adapter = TranslatorNoLN(num_tok=num_token, dim=dim, dim_out=dim_out, mult=mult, depth=depth).to(
|
| 241 |
+
device
|
| 242 |
+
)
|
| 243 |
+
self.language_adapter.load_state_dict(torch.load(model_path))
|
| 244 |
+
|
| 245 |
+
def _adapt_language(self, prompt_embeds: torch.Tensor):
|
| 246 |
+
prompt_embeds = prompt_embeds / 3
|
| 247 |
+
prompt_embeds = self.language_adapter(prompt_embeds) * (self.tensor_norm / 2)
|
| 248 |
+
return prompt_embeds
|
| 249 |
+
|
| 250 |
+
def encode_prompt(
|
| 251 |
+
self,
|
| 252 |
+
prompt,
|
| 253 |
+
device,
|
| 254 |
+
num_images_per_prompt,
|
| 255 |
+
do_classifier_free_guidance,
|
| 256 |
+
negative_prompt=None,
|
| 257 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 258 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 259 |
+
lora_scale: Optional[float] = None,
|
| 260 |
+
clip_skip: Optional[int] = None,
|
| 261 |
+
):
|
| 262 |
+
r"""
|
| 263 |
+
Encodes the prompt into text encoder hidden states.
|
| 264 |
+
|
| 265 |
+
Args:
|
| 266 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 267 |
+
prompt to be encoded
|
| 268 |
+
device: (`torch.device`):
|
| 269 |
+
torch device
|
| 270 |
+
num_images_per_prompt (`int`):
|
| 271 |
+
number of images that should be generated per prompt
|
| 272 |
+
do_classifier_free_guidance (`bool`):
|
| 273 |
+
whether to use classifier free guidance or not
|
| 274 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 275 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 276 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 277 |
+
less than `1`).
|
| 278 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 279 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 280 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 281 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 282 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 283 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 284 |
+
argument.
|
| 285 |
+
lora_scale (`float`, *optional*):
|
| 286 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 287 |
+
clip_skip (`int`, *optional*):
|
| 288 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 289 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 290 |
+
"""
|
| 291 |
+
# set lora scale so that monkey patched LoRA
|
| 292 |
+
# function of text encoder can correctly access it
|
| 293 |
+
if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
|
| 294 |
+
self._lora_scale = lora_scale
|
| 295 |
+
|
| 296 |
+
# dynamically adjust the LoRA scale
|
| 297 |
+
if not USE_PEFT_BACKEND:
|
| 298 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 299 |
+
else:
|
| 300 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 301 |
+
|
| 302 |
+
if prompt is not None and isinstance(prompt, str):
|
| 303 |
+
batch_size = 1
|
| 304 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 305 |
+
batch_size = len(prompt)
|
| 306 |
+
else:
|
| 307 |
+
batch_size = prompt_embeds.shape[0]
|
| 308 |
+
|
| 309 |
+
if prompt_embeds is None:
|
| 310 |
+
text_inputs = self.tokenizer(
|
| 311 |
+
prompt,
|
| 312 |
+
padding="max_length",
|
| 313 |
+
max_length=self.tokenizer.model_max_length,
|
| 314 |
+
truncation=True,
|
| 315 |
+
return_tensors="pt",
|
| 316 |
+
)
|
| 317 |
+
text_input_ids = text_inputs.input_ids
|
| 318 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 319 |
+
|
| 320 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 321 |
+
text_input_ids, untruncated_ids
|
| 322 |
+
):
|
| 323 |
+
removed_text = self.tokenizer.batch_decode(
|
| 324 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 325 |
+
)
|
| 326 |
+
logger.warning(
|
| 327 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 328 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 329 |
+
)
|
| 330 |
+
|
| 331 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 332 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 333 |
+
elif self.language_adapter is not None:
|
| 334 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 335 |
+
else:
|
| 336 |
+
attention_mask = None
|
| 337 |
+
|
| 338 |
+
if clip_skip is None:
|
| 339 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 340 |
+
prompt_embeds = prompt_embeds[0]
|
| 341 |
+
|
| 342 |
+
else:
|
| 343 |
+
prompt_embeds = self.text_encoder(
|
| 344 |
+
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
|
| 345 |
+
)
|
| 346 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 347 |
+
# all the hidden states from the encoder layers. Then index into
|
| 348 |
+
# the tuple to access the hidden states from the desired layer.
|
| 349 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 350 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 351 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 352 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 353 |
+
# layer.
|
| 354 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 355 |
+
|
| 356 |
+
# Run prompt language adapter
|
| 357 |
+
if self.language_adapter is not None:
|
| 358 |
+
prompt_embeds = self._adapt_language(prompt_embeds)
|
| 359 |
+
|
| 360 |
+
if self.text_encoder is not None:
|
| 361 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 362 |
+
elif self.unet is not None:
|
| 363 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 364 |
+
else:
|
| 365 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 366 |
+
|
| 367 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 368 |
+
|
| 369 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 370 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 371 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 372 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 373 |
+
|
| 374 |
+
# get unconditional embeddings for classifier free guidance
|
| 375 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 376 |
+
uncond_tokens: List[str]
|
| 377 |
+
if negative_prompt is None:
|
| 378 |
+
uncond_tokens = [""] * batch_size
|
| 379 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 380 |
+
raise TypeError(
|
| 381 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 382 |
+
f" {type(prompt)}."
|
| 383 |
+
)
|
| 384 |
+
elif isinstance(negative_prompt, str):
|
| 385 |
+
uncond_tokens = [negative_prompt]
|
| 386 |
+
elif batch_size != len(negative_prompt):
|
| 387 |
+
raise ValueError(
|
| 388 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 389 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 390 |
+
" the batch size of `prompt`."
|
| 391 |
+
)
|
| 392 |
+
else:
|
| 393 |
+
uncond_tokens = negative_prompt
|
| 394 |
+
|
| 395 |
+
max_length = prompt_embeds.shape[1]
|
| 396 |
+
uncond_input = self.tokenizer(
|
| 397 |
+
uncond_tokens,
|
| 398 |
+
padding="max_length",
|
| 399 |
+
max_length=max_length,
|
| 400 |
+
truncation=True,
|
| 401 |
+
return_tensors="pt",
|
| 402 |
+
)
|
| 403 |
+
|
| 404 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 405 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 406 |
+
else:
|
| 407 |
+
attention_mask = None
|
| 408 |
+
|
| 409 |
+
negative_prompt_embeds = self.text_encoder(
|
| 410 |
+
uncond_input.input_ids.to(device),
|
| 411 |
+
attention_mask=attention_mask,
|
| 412 |
+
)
|
| 413 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 414 |
+
# Run negative prompt language adapter
|
| 415 |
+
if self.language_adapter is not None:
|
| 416 |
+
negative_prompt_embeds = self._adapt_language(negative_prompt_embeds)
|
| 417 |
+
|
| 418 |
+
if do_classifier_free_guidance:
|
| 419 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 420 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 421 |
+
|
| 422 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 423 |
+
|
| 424 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 425 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 426 |
+
|
| 427 |
+
if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 428 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 429 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 430 |
+
|
| 431 |
+
return prompt_embeds, negative_prompt_embeds
|
| 432 |
+
|
| 433 |
+
def run_safety_checker(self, image, device, dtype):
|
| 434 |
+
if self.safety_checker is None:
|
| 435 |
+
has_nsfw_concept = None
|
| 436 |
+
else:
|
| 437 |
+
if torch.is_tensor(image):
|
| 438 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 439 |
+
else:
|
| 440 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 441 |
+
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
| 442 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 443 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 444 |
+
)
|
| 445 |
+
return image, has_nsfw_concept
|
| 446 |
+
|
| 447 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 448 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 449 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 450 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 451 |
+
# and should be between [0, 1]
|
| 452 |
+
|
| 453 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 454 |
+
extra_step_kwargs = {}
|
| 455 |
+
if accepts_eta:
|
| 456 |
+
extra_step_kwargs["eta"] = eta
|
| 457 |
+
|
| 458 |
+
# check if the scheduler accepts generator
|
| 459 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 460 |
+
if accepts_generator:
|
| 461 |
+
extra_step_kwargs["generator"] = generator
|
| 462 |
+
return extra_step_kwargs
|
| 463 |
+
|
| 464 |
+
def check_inputs(
|
| 465 |
+
self,
|
| 466 |
+
prompt,
|
| 467 |
+
height,
|
| 468 |
+
width,
|
| 469 |
+
negative_prompt=None,
|
| 470 |
+
prompt_embeds=None,
|
| 471 |
+
negative_prompt_embeds=None,
|
| 472 |
+
):
|
| 473 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 474 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 475 |
+
|
| 476 |
+
if prompt is not None and prompt_embeds is not None:
|
| 477 |
+
raise ValueError(
|
| 478 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 479 |
+
" only forward one of the two."
|
| 480 |
+
)
|
| 481 |
+
elif prompt is None and prompt_embeds is None:
|
| 482 |
+
raise ValueError(
|
| 483 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 484 |
+
)
|
| 485 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 486 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 487 |
+
|
| 488 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 489 |
+
raise ValueError(
|
| 490 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 491 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 492 |
+
)
|
| 493 |
+
|
| 494 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 495 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 496 |
+
raise ValueError(
|
| 497 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 498 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 499 |
+
f" {negative_prompt_embeds.shape}."
|
| 500 |
+
)
|
| 501 |
+
|
| 502 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 503 |
+
shape = (
|
| 504 |
+
batch_size,
|
| 505 |
+
num_channels_latents,
|
| 506 |
+
int(height) // self.vae_scale_factor,
|
| 507 |
+
int(width) // self.vae_scale_factor,
|
| 508 |
+
)
|
| 509 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 510 |
+
raise ValueError(
|
| 511 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 512 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 513 |
+
)
|
| 514 |
+
|
| 515 |
+
if latents is None:
|
| 516 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 517 |
+
else:
|
| 518 |
+
latents = latents.to(device)
|
| 519 |
+
|
| 520 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 521 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 522 |
+
return latents
|
| 523 |
+
|
| 524 |
+
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
|
| 525 |
+
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
|
| 526 |
+
"""
|
| 527 |
+
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
|
| 528 |
+
|
| 529 |
+
Args:
|
| 530 |
+
timesteps (`torch.Tensor`):
|
| 531 |
+
generate embedding vectors at these timesteps
|
| 532 |
+
embedding_dim (`int`, *optional*, defaults to 512):
|
| 533 |
+
dimension of the embeddings to generate
|
| 534 |
+
dtype:
|
| 535 |
+
data type of the generated embeddings
|
| 536 |
+
|
| 537 |
+
Returns:
|
| 538 |
+
`torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
|
| 539 |
+
"""
|
| 540 |
+
assert len(w.shape) == 1
|
| 541 |
+
w = w * 1000.0
|
| 542 |
+
|
| 543 |
+
half_dim = embedding_dim // 2
|
| 544 |
+
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
|
| 545 |
+
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
|
| 546 |
+
emb = w.to(dtype)[:, None] * emb[None, :]
|
| 547 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
| 548 |
+
if embedding_dim % 2 == 1: # zero pad
|
| 549 |
+
emb = torch.nn.functional.pad(emb, (0, 1))
|
| 550 |
+
assert emb.shape == (w.shape[0], embedding_dim)
|
| 551 |
+
return emb
|
| 552 |
+
|
| 553 |
+
@property
|
| 554 |
+
def guidance_scale(self):
|
| 555 |
+
return self._guidance_scale
|
| 556 |
+
|
| 557 |
+
@property
|
| 558 |
+
def guidance_rescale(self):
|
| 559 |
+
return self._guidance_rescale
|
| 560 |
+
|
| 561 |
+
@property
|
| 562 |
+
def clip_skip(self):
|
| 563 |
+
return self._clip_skip
|
| 564 |
+
|
| 565 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 566 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 567 |
+
# corresponds to doing no classifier free guidance.
|
| 568 |
+
@property
|
| 569 |
+
def do_classifier_free_guidance(self):
|
| 570 |
+
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
|
| 571 |
+
|
| 572 |
+
@property
|
| 573 |
+
def cross_attention_kwargs(self):
|
| 574 |
+
return self._cross_attention_kwargs
|
| 575 |
+
|
| 576 |
+
@property
|
| 577 |
+
def num_timesteps(self):
|
| 578 |
+
return self._num_timesteps
|
| 579 |
+
|
| 580 |
+
@property
|
| 581 |
+
def interrupt(self):
|
| 582 |
+
return self._interrupt
|
| 583 |
+
|
| 584 |
+
@torch.no_grad()
|
| 585 |
+
def __call__(
|
| 586 |
+
self,
|
| 587 |
+
prompt: Union[str, List[str]] = None,
|
| 588 |
+
height: Optional[int] = None,
|
| 589 |
+
width: Optional[int] = None,
|
| 590 |
+
num_inference_steps: int = 50,
|
| 591 |
+
timesteps: List[int] = None,
|
| 592 |
+
guidance_scale: float = 7.5,
|
| 593 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 594 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 595 |
+
eta: float = 0.0,
|
| 596 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 597 |
+
latents: Optional[torch.Tensor] = None,
|
| 598 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 599 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 600 |
+
output_type: Optional[str] = "pil",
|
| 601 |
+
return_dict: bool = True,
|
| 602 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 603 |
+
guidance_rescale: float = 0.0,
|
| 604 |
+
clip_skip: Optional[int] = None,
|
| 605 |
+
**kwargs,
|
| 606 |
+
):
|
| 607 |
+
r"""
|
| 608 |
+
The call function to the pipeline for generation.
|
| 609 |
+
|
| 610 |
+
Args:
|
| 611 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 612 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 613 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 614 |
+
The height in pixels of the generated image.
|
| 615 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 616 |
+
The width in pixels of the generated image.
|
| 617 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 618 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 619 |
+
expense of slower inference.
|
| 620 |
+
timesteps (`List[int]`, *optional*):
|
| 621 |
+
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
|
| 622 |
+
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
|
| 623 |
+
passed will be used. Must be in descending order.
|
| 624 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 625 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 626 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 627 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 628 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 629 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 630 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 631 |
+
The number of images to generate per prompt.
|
| 632 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 633 |
+
Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies
|
| 634 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 635 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 636 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 637 |
+
generation deterministic.
|
| 638 |
+
latents (`torch.Tensor`, *optional*):
|
| 639 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 640 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 641 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 642 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 643 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 644 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 645 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 646 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 647 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 648 |
+
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
|
| 649 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 650 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 651 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 652 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 653 |
+
plain tuple.
|
| 654 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 655 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 656 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 657 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 658 |
+
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
|
| 659 |
+
Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when
|
| 660 |
+
using zero terminal SNR.
|
| 661 |
+
clip_skip (`int`, *optional*):
|
| 662 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 663 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 664 |
+
|
| 665 |
+
Examples:
|
| 666 |
+
|
| 667 |
+
Returns:
|
| 668 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 669 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 670 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 671 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 672 |
+
"not-safe-for-work" (nsfw) content.
|
| 673 |
+
"""
|
| 674 |
+
|
| 675 |
+
# 0. Default height and width to unet
|
| 676 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 677 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 678 |
+
# to deal with lora scaling and other possible forward hooks
|
| 679 |
+
|
| 680 |
+
# 1. Check inputs. Raise error if not correct
|
| 681 |
+
self.check_inputs(
|
| 682 |
+
prompt,
|
| 683 |
+
height,
|
| 684 |
+
width,
|
| 685 |
+
negative_prompt,
|
| 686 |
+
prompt_embeds,
|
| 687 |
+
negative_prompt_embeds,
|
| 688 |
+
)
|
| 689 |
+
|
| 690 |
+
self._guidance_scale = guidance_scale
|
| 691 |
+
self._guidance_rescale = guidance_rescale
|
| 692 |
+
self._clip_skip = clip_skip
|
| 693 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 694 |
+
self._interrupt = False
|
| 695 |
+
|
| 696 |
+
# 2. Define call parameters
|
| 697 |
+
if prompt is not None and isinstance(prompt, str):
|
| 698 |
+
batch_size = 1
|
| 699 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 700 |
+
batch_size = len(prompt)
|
| 701 |
+
else:
|
| 702 |
+
batch_size = prompt_embeds.shape[0]
|
| 703 |
+
|
| 704 |
+
device = self._execution_device
|
| 705 |
+
|
| 706 |
+
# 3. Encode input prompt
|
| 707 |
+
lora_scale = (
|
| 708 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 709 |
+
)
|
| 710 |
+
|
| 711 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 712 |
+
prompt,
|
| 713 |
+
device,
|
| 714 |
+
num_images_per_prompt,
|
| 715 |
+
self.do_classifier_free_guidance,
|
| 716 |
+
negative_prompt,
|
| 717 |
+
prompt_embeds=prompt_embeds,
|
| 718 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 719 |
+
lora_scale=lora_scale,
|
| 720 |
+
clip_skip=self.clip_skip,
|
| 721 |
+
)
|
| 722 |
+
|
| 723 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 724 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 725 |
+
# to avoid doing two forward passes
|
| 726 |
+
if self.do_classifier_free_guidance:
|
| 727 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 728 |
+
|
| 729 |
+
# 4. Prepare timesteps
|
| 730 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
| 731 |
+
|
| 732 |
+
# 5. Prepare latent variables
|
| 733 |
+
num_channels_latents = self.unet.config.in_channels
|
| 734 |
+
latents = self.prepare_latents(
|
| 735 |
+
batch_size * num_images_per_prompt,
|
| 736 |
+
num_channels_latents,
|
| 737 |
+
height,
|
| 738 |
+
width,
|
| 739 |
+
prompt_embeds.dtype,
|
| 740 |
+
device,
|
| 741 |
+
generator,
|
| 742 |
+
latents,
|
| 743 |
+
)
|
| 744 |
+
|
| 745 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 746 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 747 |
+
|
| 748 |
+
# 6.2 Optionally get Guidance Scale Embedding
|
| 749 |
+
timestep_cond = None
|
| 750 |
+
if self.unet.config.time_cond_proj_dim is not None:
|
| 751 |
+
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
|
| 752 |
+
timestep_cond = self.get_guidance_scale_embedding(
|
| 753 |
+
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
|
| 754 |
+
).to(device=device, dtype=latents.dtype)
|
| 755 |
+
|
| 756 |
+
# 7. Denoising loop
|
| 757 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 758 |
+
self._num_timesteps = len(timesteps)
|
| 759 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 760 |
+
for i, t in enumerate(timesteps):
|
| 761 |
+
if self.interrupt:
|
| 762 |
+
continue
|
| 763 |
+
|
| 764 |
+
# expand the latents if we are doing classifier free guidance
|
| 765 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 766 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 767 |
+
|
| 768 |
+
# predict the noise residual
|
| 769 |
+
noise_pred = self.unet(
|
| 770 |
+
latent_model_input,
|
| 771 |
+
t,
|
| 772 |
+
encoder_hidden_states=prompt_embeds,
|
| 773 |
+
timestep_cond=timestep_cond,
|
| 774 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 775 |
+
return_dict=False,
|
| 776 |
+
)[0]
|
| 777 |
+
|
| 778 |
+
# perform guidance
|
| 779 |
+
if self.do_classifier_free_guidance:
|
| 780 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 781 |
+
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 782 |
+
|
| 783 |
+
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
|
| 784 |
+
# Based on 3.4. in https://huggingface.co/papers/2305.08891
|
| 785 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
|
| 786 |
+
|
| 787 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 788 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 789 |
+
|
| 790 |
+
# call the callback, if provided
|
| 791 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 792 |
+
progress_bar.update()
|
| 793 |
+
|
| 794 |
+
if not output_type == "latent":
|
| 795 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
|
| 796 |
+
0
|
| 797 |
+
]
|
| 798 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 799 |
+
else:
|
| 800 |
+
image = latents
|
| 801 |
+
has_nsfw_concept = None
|
| 802 |
+
|
| 803 |
+
if has_nsfw_concept is None:
|
| 804 |
+
do_denormalize = [True] * image.shape[0]
|
| 805 |
+
else:
|
| 806 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 807 |
+
|
| 808 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 809 |
+
|
| 810 |
+
# Offload all models
|
| 811 |
+
self.maybe_free_model_hooks()
|
| 812 |
+
|
| 813 |
+
if not return_dict:
|
| 814 |
+
return (image, has_nsfw_concept)
|
| 815 |
+
|
| 816 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
v0.36.0/hd_painter.py
ADDED
|
@@ -0,0 +1,1001 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import numbers
|
| 3 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
from torch import nn
|
| 8 |
+
|
| 9 |
+
from diffusers.image_processor import PipelineImageInput
|
| 10 |
+
from diffusers.models import AsymmetricAutoencoderKL, ImageProjection
|
| 11 |
+
from diffusers.models.attention_processor import Attention, AttnProcessor
|
| 12 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 13 |
+
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import (
|
| 14 |
+
StableDiffusionInpaintPipeline,
|
| 15 |
+
retrieve_timesteps,
|
| 16 |
+
)
|
| 17 |
+
from diffusers.utils import deprecate
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class RASGAttnProcessor:
|
| 21 |
+
def __init__(self, mask, token_idx, scale_factor):
|
| 22 |
+
self.attention_scores = None # Stores the last output of the similarity matrix here. Each layer will get its own RASGAttnProcessor assigned
|
| 23 |
+
self.mask = mask
|
| 24 |
+
self.token_idx = token_idx
|
| 25 |
+
self.scale_factor = scale_factor
|
| 26 |
+
self.mask_resoltuion = mask.shape[-1] * mask.shape[-2] # 64 x 64 if the image is 512x512
|
| 27 |
+
|
| 28 |
+
def __call__(
|
| 29 |
+
self,
|
| 30 |
+
attn: Attention,
|
| 31 |
+
hidden_states: torch.Tensor,
|
| 32 |
+
encoder_hidden_states: Optional[torch.Tensor] = None,
|
| 33 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 34 |
+
temb: Optional[torch.Tensor] = None,
|
| 35 |
+
scale: float = 1.0,
|
| 36 |
+
) -> torch.Tensor:
|
| 37 |
+
# Same as the default AttnProcessor up until the part where similarity matrix gets saved
|
| 38 |
+
downscale_factor = self.mask_resoltuion // hidden_states.shape[1]
|
| 39 |
+
residual = hidden_states
|
| 40 |
+
|
| 41 |
+
if attn.spatial_norm is not None:
|
| 42 |
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
| 43 |
+
|
| 44 |
+
input_ndim = hidden_states.ndim
|
| 45 |
+
|
| 46 |
+
if input_ndim == 4:
|
| 47 |
+
batch_size, channel, height, width = hidden_states.shape
|
| 48 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
| 49 |
+
|
| 50 |
+
batch_size, sequence_length, _ = (
|
| 51 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
| 52 |
+
)
|
| 53 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
| 54 |
+
|
| 55 |
+
if attn.group_norm is not None:
|
| 56 |
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
| 57 |
+
|
| 58 |
+
query = attn.to_q(hidden_states)
|
| 59 |
+
|
| 60 |
+
if encoder_hidden_states is None:
|
| 61 |
+
encoder_hidden_states = hidden_states
|
| 62 |
+
elif attn.norm_cross:
|
| 63 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
| 64 |
+
|
| 65 |
+
key = attn.to_k(encoder_hidden_states)
|
| 66 |
+
value = attn.to_v(encoder_hidden_states)
|
| 67 |
+
|
| 68 |
+
query = attn.head_to_batch_dim(query)
|
| 69 |
+
key = attn.head_to_batch_dim(key)
|
| 70 |
+
value = attn.head_to_batch_dim(value)
|
| 71 |
+
|
| 72 |
+
# Automatically recognize the resolution and save the attention similarity values
|
| 73 |
+
# We need to use the values before the softmax function, hence the rewritten get_attention_scores function.
|
| 74 |
+
if downscale_factor == self.scale_factor**2:
|
| 75 |
+
self.attention_scores = get_attention_scores(attn, query, key, attention_mask)
|
| 76 |
+
attention_probs = self.attention_scores.softmax(dim=-1)
|
| 77 |
+
attention_probs = attention_probs.to(query.dtype)
|
| 78 |
+
else:
|
| 79 |
+
attention_probs = attn.get_attention_scores(query, key, attention_mask) # Original code
|
| 80 |
+
|
| 81 |
+
hidden_states = torch.bmm(attention_probs, value)
|
| 82 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
| 83 |
+
|
| 84 |
+
# linear proj
|
| 85 |
+
hidden_states = attn.to_out[0](hidden_states)
|
| 86 |
+
# dropout
|
| 87 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 88 |
+
|
| 89 |
+
if input_ndim == 4:
|
| 90 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
| 91 |
+
|
| 92 |
+
if attn.residual_connection:
|
| 93 |
+
hidden_states = hidden_states + residual
|
| 94 |
+
|
| 95 |
+
hidden_states = hidden_states / attn.rescale_output_factor
|
| 96 |
+
|
| 97 |
+
return hidden_states
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
class PAIntAAttnProcessor:
|
| 101 |
+
def __init__(self, transformer_block, mask, token_idx, do_classifier_free_guidance, scale_factors):
|
| 102 |
+
self.transformer_block = transformer_block # Stores the parent transformer block.
|
| 103 |
+
self.mask = mask
|
| 104 |
+
self.scale_factors = scale_factors
|
| 105 |
+
self.do_classifier_free_guidance = do_classifier_free_guidance
|
| 106 |
+
self.token_idx = token_idx
|
| 107 |
+
self.shape = mask.shape[2:]
|
| 108 |
+
self.mask_resoltuion = mask.shape[-1] * mask.shape[-2] # 64 x 64
|
| 109 |
+
self.default_processor = AttnProcessor()
|
| 110 |
+
|
| 111 |
+
def __call__(
|
| 112 |
+
self,
|
| 113 |
+
attn: Attention,
|
| 114 |
+
hidden_states: torch.Tensor,
|
| 115 |
+
encoder_hidden_states: Optional[torch.Tensor] = None,
|
| 116 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 117 |
+
temb: Optional[torch.Tensor] = None,
|
| 118 |
+
scale: float = 1.0,
|
| 119 |
+
) -> torch.Tensor:
|
| 120 |
+
# Automatically recognize the resolution of the current attention layer and resize the masks accordingly
|
| 121 |
+
downscale_factor = self.mask_resoltuion // hidden_states.shape[1]
|
| 122 |
+
|
| 123 |
+
mask = None
|
| 124 |
+
for factor in self.scale_factors:
|
| 125 |
+
if downscale_factor == factor**2:
|
| 126 |
+
shape = (self.shape[0] // factor, self.shape[1] // factor)
|
| 127 |
+
mask = F.interpolate(self.mask, shape, mode="bicubic") # B, 1, H, W
|
| 128 |
+
break
|
| 129 |
+
if mask is None:
|
| 130 |
+
return self.default_processor(attn, hidden_states, encoder_hidden_states, attention_mask, temb, scale)
|
| 131 |
+
|
| 132 |
+
# STARTS HERE
|
| 133 |
+
residual = hidden_states
|
| 134 |
+
# Save the input hidden_states for later use
|
| 135 |
+
input_hidden_states = hidden_states
|
| 136 |
+
|
| 137 |
+
# ================================================== #
|
| 138 |
+
# =============== SELF ATTENTION 1 ================= #
|
| 139 |
+
# ================================================== #
|
| 140 |
+
|
| 141 |
+
if attn.spatial_norm is not None:
|
| 142 |
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
| 143 |
+
|
| 144 |
+
input_ndim = hidden_states.ndim
|
| 145 |
+
|
| 146 |
+
if input_ndim == 4:
|
| 147 |
+
batch_size, channel, height, width = hidden_states.shape
|
| 148 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
| 149 |
+
|
| 150 |
+
batch_size, sequence_length, _ = (
|
| 151 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
| 152 |
+
)
|
| 153 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
| 154 |
+
|
| 155 |
+
if attn.group_norm is not None:
|
| 156 |
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
| 157 |
+
|
| 158 |
+
query = attn.to_q(hidden_states)
|
| 159 |
+
|
| 160 |
+
if encoder_hidden_states is None:
|
| 161 |
+
encoder_hidden_states = hidden_states
|
| 162 |
+
elif attn.norm_cross:
|
| 163 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
| 164 |
+
|
| 165 |
+
key = attn.to_k(encoder_hidden_states)
|
| 166 |
+
value = attn.to_v(encoder_hidden_states)
|
| 167 |
+
|
| 168 |
+
query = attn.head_to_batch_dim(query)
|
| 169 |
+
key = attn.head_to_batch_dim(key)
|
| 170 |
+
value = attn.head_to_batch_dim(value)
|
| 171 |
+
|
| 172 |
+
# self_attention_probs = attn.get_attention_scores(query, key, attention_mask) # We can't use post-softmax attention scores in this case
|
| 173 |
+
self_attention_scores = get_attention_scores(
|
| 174 |
+
attn, query, key, attention_mask
|
| 175 |
+
) # The custom function returns pre-softmax probabilities
|
| 176 |
+
self_attention_probs = self_attention_scores.softmax(
|
| 177 |
+
dim=-1
|
| 178 |
+
) # Manually compute the probabilities here, the scores will be reused in the second part of PAIntA
|
| 179 |
+
self_attention_probs = self_attention_probs.to(query.dtype)
|
| 180 |
+
|
| 181 |
+
hidden_states = torch.bmm(self_attention_probs, value)
|
| 182 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
| 183 |
+
|
| 184 |
+
# linear proj
|
| 185 |
+
hidden_states = attn.to_out[0](hidden_states)
|
| 186 |
+
# dropout
|
| 187 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 188 |
+
|
| 189 |
+
# x = x + self.attn1(self.norm1(x))
|
| 190 |
+
|
| 191 |
+
if input_ndim == 4:
|
| 192 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
| 193 |
+
|
| 194 |
+
if attn.residual_connection: # So many residuals everywhere
|
| 195 |
+
hidden_states = hidden_states + residual
|
| 196 |
+
|
| 197 |
+
self_attention_output_hidden_states = hidden_states / attn.rescale_output_factor
|
| 198 |
+
|
| 199 |
+
# ================================================== #
|
| 200 |
+
# ============ BasicTransformerBlock =============== #
|
| 201 |
+
# ================================================== #
|
| 202 |
+
# We use a hack by running the code from the BasicTransformerBlock that is between Self and Cross attentions here
|
| 203 |
+
# The other option would've been modifying the BasicTransformerBlock and adding this functionality here.
|
| 204 |
+
# I assumed that changing the BasicTransformerBlock would have been a bigger deal and decided to use this hack instead.
|
| 205 |
+
|
| 206 |
+
# The SelfAttention block receives the normalized latents from the BasicTransformerBlock,
|
| 207 |
+
# But the residual of the output is the non-normalized version.
|
| 208 |
+
# Therefore we unnormalize the input hidden state here
|
| 209 |
+
unnormalized_input_hidden_states = (
|
| 210 |
+
input_hidden_states + self.transformer_block.norm1.bias
|
| 211 |
+
) * self.transformer_block.norm1.weight
|
| 212 |
+
|
| 213 |
+
# TODO: return if necessary
|
| 214 |
+
# if self.use_ada_layer_norm_zero:
|
| 215 |
+
# attn_output = gate_msa.unsqueeze(1) * attn_output
|
| 216 |
+
# elif self.use_ada_layer_norm_single:
|
| 217 |
+
# attn_output = gate_msa * attn_output
|
| 218 |
+
|
| 219 |
+
transformer_hidden_states = self_attention_output_hidden_states + unnormalized_input_hidden_states
|
| 220 |
+
if transformer_hidden_states.ndim == 4:
|
| 221 |
+
transformer_hidden_states = transformer_hidden_states.squeeze(1)
|
| 222 |
+
|
| 223 |
+
# TODO: return if necessary
|
| 224 |
+
# 2.5 GLIGEN Control
|
| 225 |
+
# if gligen_kwargs is not None:
|
| 226 |
+
# transformer_hidden_states = self.fuser(transformer_hidden_states, gligen_kwargs["objs"])
|
| 227 |
+
# NOTE: we experimented with using GLIGEN and HDPainter together, the results were not that great
|
| 228 |
+
|
| 229 |
+
# 3. Cross-Attention
|
| 230 |
+
if self.transformer_block.use_ada_layer_norm:
|
| 231 |
+
# transformer_norm_hidden_states = self.transformer_block.norm2(transformer_hidden_states, timestep)
|
| 232 |
+
raise NotImplementedError()
|
| 233 |
+
elif self.transformer_block.use_ada_layer_norm_zero or self.transformer_block.use_layer_norm:
|
| 234 |
+
transformer_norm_hidden_states = self.transformer_block.norm2(transformer_hidden_states)
|
| 235 |
+
elif self.transformer_block.use_ada_layer_norm_single:
|
| 236 |
+
# For PixArt norm2 isn't applied here:
|
| 237 |
+
# https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
|
| 238 |
+
transformer_norm_hidden_states = transformer_hidden_states
|
| 239 |
+
elif self.transformer_block.use_ada_layer_norm_continuous:
|
| 240 |
+
# transformer_norm_hidden_states = self.transformer_block.norm2(transformer_hidden_states, added_cond_kwargs["pooled_text_emb"])
|
| 241 |
+
raise NotImplementedError()
|
| 242 |
+
else:
|
| 243 |
+
raise ValueError("Incorrect norm")
|
| 244 |
+
|
| 245 |
+
if self.transformer_block.pos_embed is not None and self.transformer_block.use_ada_layer_norm_single is False:
|
| 246 |
+
transformer_norm_hidden_states = self.transformer_block.pos_embed(transformer_norm_hidden_states)
|
| 247 |
+
|
| 248 |
+
# ================================================== #
|
| 249 |
+
# ================= CROSS ATTENTION ================ #
|
| 250 |
+
# ================================================== #
|
| 251 |
+
|
| 252 |
+
# We do an initial pass of the CrossAttention up to obtaining the similarity matrix here.
|
| 253 |
+
# The similarity matrix is used to obtain scaling coefficients for the attention matrix of the self attention
|
| 254 |
+
# We reuse the previously computed self-attention matrix, and only repeat the steps after the softmax
|
| 255 |
+
|
| 256 |
+
cross_attention_input_hidden_states = (
|
| 257 |
+
transformer_norm_hidden_states # Renaming the variable for the sake of readability
|
| 258 |
+
)
|
| 259 |
+
|
| 260 |
+
# TODO: check if classifier_free_guidance is being used before splitting here
|
| 261 |
+
if self.do_classifier_free_guidance:
|
| 262 |
+
# Our scaling coefficients depend only on the conditional part, so we split the inputs
|
| 263 |
+
(
|
| 264 |
+
_cross_attention_input_hidden_states_unconditional,
|
| 265 |
+
cross_attention_input_hidden_states_conditional,
|
| 266 |
+
) = cross_attention_input_hidden_states.chunk(2)
|
| 267 |
+
|
| 268 |
+
# Same split for the encoder_hidden_states i.e. the tokens
|
| 269 |
+
# Since the SelfAttention processors don't get the encoder states as input, we inject them into the processor in the beginning.
|
| 270 |
+
_encoder_hidden_states_unconditional, encoder_hidden_states_conditional = self.encoder_hidden_states.chunk(
|
| 271 |
+
2
|
| 272 |
+
)
|
| 273 |
+
else:
|
| 274 |
+
cross_attention_input_hidden_states_conditional = cross_attention_input_hidden_states
|
| 275 |
+
encoder_hidden_states_conditional = self.encoder_hidden_states.chunk(2)
|
| 276 |
+
|
| 277 |
+
# Rename the variables for the sake of readability
|
| 278 |
+
# The part below is the beginning of the __call__ function of the following CrossAttention layer
|
| 279 |
+
cross_attention_hidden_states = cross_attention_input_hidden_states_conditional
|
| 280 |
+
cross_attention_encoder_hidden_states = encoder_hidden_states_conditional
|
| 281 |
+
|
| 282 |
+
attn2 = self.transformer_block.attn2
|
| 283 |
+
|
| 284 |
+
if attn2.spatial_norm is not None:
|
| 285 |
+
cross_attention_hidden_states = attn2.spatial_norm(cross_attention_hidden_states, temb)
|
| 286 |
+
|
| 287 |
+
input_ndim = cross_attention_hidden_states.ndim
|
| 288 |
+
|
| 289 |
+
if input_ndim == 4:
|
| 290 |
+
batch_size, channel, height, width = cross_attention_hidden_states.shape
|
| 291 |
+
cross_attention_hidden_states = cross_attention_hidden_states.view(
|
| 292 |
+
batch_size, channel, height * width
|
| 293 |
+
).transpose(1, 2)
|
| 294 |
+
|
| 295 |
+
(
|
| 296 |
+
batch_size,
|
| 297 |
+
sequence_length,
|
| 298 |
+
_,
|
| 299 |
+
) = cross_attention_hidden_states.shape # It is definitely a cross attention, so no need for an if block
|
| 300 |
+
# TODO: change the attention_mask here
|
| 301 |
+
attention_mask = attn2.prepare_attention_mask(
|
| 302 |
+
None, sequence_length, batch_size
|
| 303 |
+
) # I assume the attention mask is the same...
|
| 304 |
+
|
| 305 |
+
if attn2.group_norm is not None:
|
| 306 |
+
cross_attention_hidden_states = attn2.group_norm(cross_attention_hidden_states.transpose(1, 2)).transpose(
|
| 307 |
+
1, 2
|
| 308 |
+
)
|
| 309 |
+
|
| 310 |
+
query2 = attn2.to_q(cross_attention_hidden_states)
|
| 311 |
+
|
| 312 |
+
if attn2.norm_cross:
|
| 313 |
+
cross_attention_encoder_hidden_states = attn2.norm_encoder_hidden_states(
|
| 314 |
+
cross_attention_encoder_hidden_states
|
| 315 |
+
)
|
| 316 |
+
|
| 317 |
+
key2 = attn2.to_k(cross_attention_encoder_hidden_states)
|
| 318 |
+
query2 = attn2.head_to_batch_dim(query2)
|
| 319 |
+
key2 = attn2.head_to_batch_dim(key2)
|
| 320 |
+
|
| 321 |
+
cross_attention_probs = attn2.get_attention_scores(query2, key2, attention_mask)
|
| 322 |
+
|
| 323 |
+
# CrossAttention ends here, the remaining part is not used
|
| 324 |
+
|
| 325 |
+
# ================================================== #
|
| 326 |
+
# ================ SELF ATTENTION 2 ================ #
|
| 327 |
+
# ================================================== #
|
| 328 |
+
# DEJA VU!
|
| 329 |
+
|
| 330 |
+
mask = (mask > 0.5).to(self_attention_output_hidden_states.dtype)
|
| 331 |
+
m = mask.to(self_attention_output_hidden_states.device)
|
| 332 |
+
# m = rearrange(m, 'b c h w -> b (h w) c').contiguous()
|
| 333 |
+
m = m.permute(0, 2, 3, 1).reshape((m.shape[0], -1, m.shape[1])).contiguous() # B HW 1
|
| 334 |
+
m = torch.matmul(m, m.permute(0, 2, 1)) + (1 - m)
|
| 335 |
+
|
| 336 |
+
# # Compute scaling coefficients for the similarity matrix
|
| 337 |
+
# # Select the cross attention values for the correct tokens only!
|
| 338 |
+
# cross_attention_probs = cross_attention_probs.mean(dim = 0)
|
| 339 |
+
# cross_attention_probs = cross_attention_probs[:, self.token_idx].sum(dim=1)
|
| 340 |
+
|
| 341 |
+
# cross_attention_probs = cross_attention_probs.reshape(shape)
|
| 342 |
+
# gaussian_smoothing = GaussianSmoothing(channels=1, kernel_size=3, sigma=0.5, dim=2).to(self_attention_output_hidden_states.device)
|
| 343 |
+
# cross_attention_probs = gaussian_smoothing(cross_attention_probs.unsqueeze(0))[0] # optional smoothing
|
| 344 |
+
# cross_attention_probs = cross_attention_probs.reshape(-1)
|
| 345 |
+
# cross_attention_probs = ((cross_attention_probs - torch.median(cross_attention_probs.ravel())) / torch.max(cross_attention_probs.ravel())).clip(0, 1)
|
| 346 |
+
|
| 347 |
+
# c = (1 - m) * cross_attention_probs.reshape(1, 1, -1) + m # PAIntA scaling coefficients
|
| 348 |
+
|
| 349 |
+
# Compute scaling coefficients for the similarity matrix
|
| 350 |
+
# Select the cross attention values for the correct tokens only!
|
| 351 |
+
|
| 352 |
+
batch_size, dims, channels = cross_attention_probs.shape
|
| 353 |
+
batch_size = batch_size // attn.heads
|
| 354 |
+
cross_attention_probs = cross_attention_probs.reshape((batch_size, attn.heads, dims, channels)) # B, D, HW, T
|
| 355 |
+
|
| 356 |
+
cross_attention_probs = cross_attention_probs.mean(dim=1) # B, HW, T
|
| 357 |
+
cross_attention_probs = cross_attention_probs[..., self.token_idx].sum(dim=-1) # B, HW
|
| 358 |
+
cross_attention_probs = cross_attention_probs.reshape((batch_size,) + shape) # , B, H, W
|
| 359 |
+
|
| 360 |
+
gaussian_smoothing = GaussianSmoothing(channels=1, kernel_size=3, sigma=0.5, dim=2).to(
|
| 361 |
+
self_attention_output_hidden_states.device
|
| 362 |
+
)
|
| 363 |
+
cross_attention_probs = gaussian_smoothing(cross_attention_probs[:, None])[:, 0] # optional smoothing B, H, W
|
| 364 |
+
|
| 365 |
+
# Median normalization
|
| 366 |
+
cross_attention_probs = cross_attention_probs.reshape(batch_size, -1) # B, HW
|
| 367 |
+
cross_attention_probs = (
|
| 368 |
+
cross_attention_probs - cross_attention_probs.median(dim=-1, keepdim=True).values
|
| 369 |
+
) / cross_attention_probs.max(dim=-1, keepdim=True).values
|
| 370 |
+
cross_attention_probs = cross_attention_probs.clip(0, 1)
|
| 371 |
+
|
| 372 |
+
c = (1 - m) * cross_attention_probs.reshape(batch_size, 1, -1) + m
|
| 373 |
+
c = c.repeat_interleave(attn.heads, 0) # BD, HW
|
| 374 |
+
if self.do_classifier_free_guidance:
|
| 375 |
+
c = torch.cat([c, c]) # 2BD, HW
|
| 376 |
+
|
| 377 |
+
# Rescaling the original self-attention matrix
|
| 378 |
+
self_attention_scores_rescaled = self_attention_scores * c
|
| 379 |
+
self_attention_probs_rescaled = self_attention_scores_rescaled.softmax(dim=-1)
|
| 380 |
+
|
| 381 |
+
# Continuing the self attention normally using the new matrix
|
| 382 |
+
hidden_states = torch.bmm(self_attention_probs_rescaled, value)
|
| 383 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
| 384 |
+
|
| 385 |
+
# linear proj
|
| 386 |
+
hidden_states = attn.to_out[0](hidden_states)
|
| 387 |
+
# dropout
|
| 388 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 389 |
+
|
| 390 |
+
if input_ndim == 4:
|
| 391 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
| 392 |
+
|
| 393 |
+
if attn.residual_connection:
|
| 394 |
+
hidden_states = hidden_states + input_hidden_states
|
| 395 |
+
|
| 396 |
+
hidden_states = hidden_states / attn.rescale_output_factor
|
| 397 |
+
|
| 398 |
+
return hidden_states
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
class StableDiffusionHDPainterPipeline(StableDiffusionInpaintPipeline):
|
| 402 |
+
def get_tokenized_prompt(self, prompt):
|
| 403 |
+
out = self.tokenizer(prompt)
|
| 404 |
+
return [self.tokenizer.decode(x) for x in out["input_ids"]]
|
| 405 |
+
|
| 406 |
+
def init_attn_processors(
|
| 407 |
+
self,
|
| 408 |
+
mask,
|
| 409 |
+
token_idx,
|
| 410 |
+
use_painta=True,
|
| 411 |
+
use_rasg=True,
|
| 412 |
+
painta_scale_factors=[2, 4], # 64x64 -> [16x16, 32x32]
|
| 413 |
+
rasg_scale_factor=4, # 64x64 -> 16x16
|
| 414 |
+
self_attention_layer_name="attn1",
|
| 415 |
+
cross_attention_layer_name="attn2",
|
| 416 |
+
list_of_painta_layer_names=None,
|
| 417 |
+
list_of_rasg_layer_names=None,
|
| 418 |
+
):
|
| 419 |
+
default_processor = AttnProcessor()
|
| 420 |
+
width, height = mask.shape[-2:]
|
| 421 |
+
width, height = width // self.vae_scale_factor, height // self.vae_scale_factor
|
| 422 |
+
|
| 423 |
+
painta_scale_factors = [x * self.vae_scale_factor for x in painta_scale_factors]
|
| 424 |
+
rasg_scale_factor = self.vae_scale_factor * rasg_scale_factor
|
| 425 |
+
|
| 426 |
+
attn_processors = {}
|
| 427 |
+
for x in self.unet.attn_processors:
|
| 428 |
+
if (list_of_painta_layer_names is None and self_attention_layer_name in x) or (
|
| 429 |
+
list_of_painta_layer_names is not None and x in list_of_painta_layer_names
|
| 430 |
+
):
|
| 431 |
+
if use_painta:
|
| 432 |
+
transformer_block = self.unet.get_submodule(x.replace(".attn1.processor", ""))
|
| 433 |
+
attn_processors[x] = PAIntAAttnProcessor(
|
| 434 |
+
transformer_block, mask, token_idx, self.do_classifier_free_guidance, painta_scale_factors
|
| 435 |
+
)
|
| 436 |
+
else:
|
| 437 |
+
attn_processors[x] = default_processor
|
| 438 |
+
elif (list_of_rasg_layer_names is None and cross_attention_layer_name in x) or (
|
| 439 |
+
list_of_rasg_layer_names is not None and x in list_of_rasg_layer_names
|
| 440 |
+
):
|
| 441 |
+
if use_rasg:
|
| 442 |
+
attn_processors[x] = RASGAttnProcessor(mask, token_idx, rasg_scale_factor)
|
| 443 |
+
else:
|
| 444 |
+
attn_processors[x] = default_processor
|
| 445 |
+
|
| 446 |
+
self.unet.set_attn_processor(attn_processors)
|
| 447 |
+
# import json
|
| 448 |
+
# with open('/home/hayk.manukyan/repos/diffusers/debug.txt', 'a') as f:
|
| 449 |
+
# json.dump({x:str(y) for x,y in self.unet.attn_processors.items()}, f, indent=4)
|
| 450 |
+
|
| 451 |
+
@torch.no_grad()
|
| 452 |
+
def __call__(
|
| 453 |
+
self,
|
| 454 |
+
prompt: Union[str, List[str]] = None,
|
| 455 |
+
image: PipelineImageInput = None,
|
| 456 |
+
mask_image: PipelineImageInput = None,
|
| 457 |
+
masked_image_latents: torch.Tensor = None,
|
| 458 |
+
height: Optional[int] = None,
|
| 459 |
+
width: Optional[int] = None,
|
| 460 |
+
padding_mask_crop: Optional[int] = None,
|
| 461 |
+
strength: float = 1.0,
|
| 462 |
+
num_inference_steps: int = 50,
|
| 463 |
+
timesteps: List[int] = None,
|
| 464 |
+
guidance_scale: float = 7.5,
|
| 465 |
+
positive_prompt: Optional[str] = "",
|
| 466 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 467 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 468 |
+
eta: float = 0.01,
|
| 469 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 470 |
+
latents: Optional[torch.Tensor] = None,
|
| 471 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 472 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 473 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 474 |
+
output_type: Optional[str] = "pil",
|
| 475 |
+
return_dict: bool = True,
|
| 476 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 477 |
+
clip_skip: int = None,
|
| 478 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 479 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 480 |
+
use_painta=True,
|
| 481 |
+
use_rasg=True,
|
| 482 |
+
self_attention_layer_name=".attn1",
|
| 483 |
+
cross_attention_layer_name=".attn2",
|
| 484 |
+
painta_scale_factors=[2, 4], # 16 x 16 and 32 x 32
|
| 485 |
+
rasg_scale_factor=4, # 16x16 by default
|
| 486 |
+
list_of_painta_layer_names=None,
|
| 487 |
+
list_of_rasg_layer_names=None,
|
| 488 |
+
**kwargs,
|
| 489 |
+
):
|
| 490 |
+
callback = kwargs.pop("callback", None)
|
| 491 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 492 |
+
|
| 493 |
+
if callback is not None:
|
| 494 |
+
deprecate(
|
| 495 |
+
"callback",
|
| 496 |
+
"1.0.0",
|
| 497 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
|
| 498 |
+
)
|
| 499 |
+
if callback_steps is not None:
|
| 500 |
+
deprecate(
|
| 501 |
+
"callback_steps",
|
| 502 |
+
"1.0.0",
|
| 503 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
|
| 504 |
+
)
|
| 505 |
+
|
| 506 |
+
# 0. Default height and width to unet
|
| 507 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 508 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 509 |
+
|
| 510 |
+
#
|
| 511 |
+
prompt_no_positives = prompt
|
| 512 |
+
if isinstance(prompt, list):
|
| 513 |
+
prompt = [x + positive_prompt for x in prompt]
|
| 514 |
+
else:
|
| 515 |
+
prompt = prompt + positive_prompt
|
| 516 |
+
|
| 517 |
+
# 1. Check inputs
|
| 518 |
+
self.check_inputs(
|
| 519 |
+
prompt,
|
| 520 |
+
image,
|
| 521 |
+
mask_image,
|
| 522 |
+
height,
|
| 523 |
+
width,
|
| 524 |
+
strength,
|
| 525 |
+
callback_steps,
|
| 526 |
+
negative_prompt,
|
| 527 |
+
prompt_embeds,
|
| 528 |
+
negative_prompt_embeds,
|
| 529 |
+
callback_on_step_end_tensor_inputs,
|
| 530 |
+
padding_mask_crop,
|
| 531 |
+
)
|
| 532 |
+
|
| 533 |
+
self._guidance_scale = guidance_scale
|
| 534 |
+
self._clip_skip = clip_skip
|
| 535 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 536 |
+
self._interrupt = False
|
| 537 |
+
|
| 538 |
+
# 2. Define call parameters
|
| 539 |
+
if prompt is not None and isinstance(prompt, str):
|
| 540 |
+
batch_size = 1
|
| 541 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 542 |
+
batch_size = len(prompt)
|
| 543 |
+
else:
|
| 544 |
+
batch_size = prompt_embeds.shape[0]
|
| 545 |
+
|
| 546 |
+
# assert batch_size == 1, "Does not work with batch size > 1 currently"
|
| 547 |
+
|
| 548 |
+
device = self._execution_device
|
| 549 |
+
|
| 550 |
+
# 3. Encode input prompt
|
| 551 |
+
text_encoder_lora_scale = (
|
| 552 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 553 |
+
)
|
| 554 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 555 |
+
prompt,
|
| 556 |
+
device,
|
| 557 |
+
num_images_per_prompt,
|
| 558 |
+
self.do_classifier_free_guidance,
|
| 559 |
+
negative_prompt,
|
| 560 |
+
prompt_embeds=prompt_embeds,
|
| 561 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 562 |
+
lora_scale=text_encoder_lora_scale,
|
| 563 |
+
clip_skip=self.clip_skip,
|
| 564 |
+
)
|
| 565 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 566 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 567 |
+
# to avoid doing two forward passes
|
| 568 |
+
if self.do_classifier_free_guidance:
|
| 569 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 570 |
+
|
| 571 |
+
if ip_adapter_image is not None:
|
| 572 |
+
output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
|
| 573 |
+
image_embeds, negative_image_embeds = self.encode_image(
|
| 574 |
+
ip_adapter_image, device, num_images_per_prompt, output_hidden_state
|
| 575 |
+
)
|
| 576 |
+
if self.do_classifier_free_guidance:
|
| 577 |
+
image_embeds = torch.cat([negative_image_embeds, image_embeds])
|
| 578 |
+
|
| 579 |
+
# 4. set timesteps
|
| 580 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
| 581 |
+
timesteps, num_inference_steps = self.get_timesteps(
|
| 582 |
+
num_inference_steps=num_inference_steps, strength=strength, device=device
|
| 583 |
+
)
|
| 584 |
+
# check that number of inference steps is not < 1 - as this doesn't make sense
|
| 585 |
+
if num_inference_steps < 1:
|
| 586 |
+
raise ValueError(
|
| 587 |
+
f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
|
| 588 |
+
f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
|
| 589 |
+
)
|
| 590 |
+
# at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
|
| 591 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 592 |
+
# create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
|
| 593 |
+
is_strength_max = strength == 1.0
|
| 594 |
+
|
| 595 |
+
# 5. Preprocess mask and image
|
| 596 |
+
|
| 597 |
+
if padding_mask_crop is not None:
|
| 598 |
+
crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
|
| 599 |
+
resize_mode = "fill"
|
| 600 |
+
else:
|
| 601 |
+
crops_coords = None
|
| 602 |
+
resize_mode = "default"
|
| 603 |
+
|
| 604 |
+
original_image = image
|
| 605 |
+
init_image = self.image_processor.preprocess(
|
| 606 |
+
image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
|
| 607 |
+
)
|
| 608 |
+
init_image = init_image.to(dtype=torch.float32)
|
| 609 |
+
|
| 610 |
+
# 6. Prepare latent variables
|
| 611 |
+
num_channels_latents = self.vae.config.latent_channels
|
| 612 |
+
num_channels_unet = self.unet.config.in_channels
|
| 613 |
+
return_image_latents = num_channels_unet == 4
|
| 614 |
+
|
| 615 |
+
latents_outputs = self.prepare_latents(
|
| 616 |
+
batch_size * num_images_per_prompt,
|
| 617 |
+
num_channels_latents,
|
| 618 |
+
height,
|
| 619 |
+
width,
|
| 620 |
+
prompt_embeds.dtype,
|
| 621 |
+
device,
|
| 622 |
+
generator,
|
| 623 |
+
latents,
|
| 624 |
+
image=init_image,
|
| 625 |
+
timestep=latent_timestep,
|
| 626 |
+
is_strength_max=is_strength_max,
|
| 627 |
+
return_noise=True,
|
| 628 |
+
return_image_latents=return_image_latents,
|
| 629 |
+
)
|
| 630 |
+
|
| 631 |
+
if return_image_latents:
|
| 632 |
+
latents, noise, image_latents = latents_outputs
|
| 633 |
+
else:
|
| 634 |
+
latents, noise = latents_outputs
|
| 635 |
+
|
| 636 |
+
# 7. Prepare mask latent variables
|
| 637 |
+
mask_condition = self.mask_processor.preprocess(
|
| 638 |
+
mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
|
| 639 |
+
)
|
| 640 |
+
|
| 641 |
+
if masked_image_latents is None:
|
| 642 |
+
masked_image = init_image * (mask_condition < 0.5)
|
| 643 |
+
else:
|
| 644 |
+
masked_image = masked_image_latents
|
| 645 |
+
|
| 646 |
+
mask, masked_image_latents = self.prepare_mask_latents(
|
| 647 |
+
mask_condition,
|
| 648 |
+
masked_image,
|
| 649 |
+
batch_size * num_images_per_prompt,
|
| 650 |
+
height,
|
| 651 |
+
width,
|
| 652 |
+
prompt_embeds.dtype,
|
| 653 |
+
device,
|
| 654 |
+
generator,
|
| 655 |
+
self.do_classifier_free_guidance,
|
| 656 |
+
)
|
| 657 |
+
|
| 658 |
+
# 7.5 Setting up HD-Painter
|
| 659 |
+
|
| 660 |
+
# Get the indices of the tokens to be modified by both RASG and PAIntA
|
| 661 |
+
token_idx = list(range(1, self.get_tokenized_prompt(prompt_no_positives).index("<|endoftext|>"))) + [
|
| 662 |
+
self.get_tokenized_prompt(prompt).index("<|endoftext|>")
|
| 663 |
+
]
|
| 664 |
+
|
| 665 |
+
# Setting up the attention processors
|
| 666 |
+
self.init_attn_processors(
|
| 667 |
+
mask_condition,
|
| 668 |
+
token_idx,
|
| 669 |
+
use_painta,
|
| 670 |
+
use_rasg,
|
| 671 |
+
painta_scale_factors=painta_scale_factors,
|
| 672 |
+
rasg_scale_factor=rasg_scale_factor,
|
| 673 |
+
self_attention_layer_name=self_attention_layer_name,
|
| 674 |
+
cross_attention_layer_name=cross_attention_layer_name,
|
| 675 |
+
list_of_painta_layer_names=list_of_painta_layer_names,
|
| 676 |
+
list_of_rasg_layer_names=list_of_rasg_layer_names,
|
| 677 |
+
)
|
| 678 |
+
|
| 679 |
+
# 8. Check that sizes of mask, masked image and latents match
|
| 680 |
+
if num_channels_unet == 9:
|
| 681 |
+
# default case for stable-diffusion-v1-5/stable-diffusion-inpainting
|
| 682 |
+
num_channels_mask = mask.shape[1]
|
| 683 |
+
num_channels_masked_image = masked_image_latents.shape[1]
|
| 684 |
+
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
|
| 685 |
+
raise ValueError(
|
| 686 |
+
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
|
| 687 |
+
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
|
| 688 |
+
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
|
| 689 |
+
f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of"
|
| 690 |
+
" `pipeline.unet` or your `mask_image` or `image` input."
|
| 691 |
+
)
|
| 692 |
+
elif num_channels_unet != 4:
|
| 693 |
+
raise ValueError(
|
| 694 |
+
f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
|
| 695 |
+
)
|
| 696 |
+
|
| 697 |
+
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 698 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 699 |
+
|
| 700 |
+
if use_rasg:
|
| 701 |
+
extra_step_kwargs["generator"] = None
|
| 702 |
+
|
| 703 |
+
# 9.1 Add image embeds for IP-Adapter
|
| 704 |
+
added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
|
| 705 |
+
|
| 706 |
+
# 9.2 Optionally get Guidance Scale Embedding
|
| 707 |
+
timestep_cond = None
|
| 708 |
+
if self.unet.config.time_cond_proj_dim is not None:
|
| 709 |
+
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
|
| 710 |
+
timestep_cond = self.get_guidance_scale_embedding(
|
| 711 |
+
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
|
| 712 |
+
).to(device=device, dtype=latents.dtype)
|
| 713 |
+
|
| 714 |
+
# 10. Denoising loop
|
| 715 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 716 |
+
self._num_timesteps = len(timesteps)
|
| 717 |
+
painta_active = True
|
| 718 |
+
|
| 719 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 720 |
+
for i, t in enumerate(timesteps):
|
| 721 |
+
if self.interrupt:
|
| 722 |
+
continue
|
| 723 |
+
|
| 724 |
+
if t < 500 and painta_active:
|
| 725 |
+
self.init_attn_processors(
|
| 726 |
+
mask_condition,
|
| 727 |
+
token_idx,
|
| 728 |
+
False,
|
| 729 |
+
use_rasg,
|
| 730 |
+
painta_scale_factors=painta_scale_factors,
|
| 731 |
+
rasg_scale_factor=rasg_scale_factor,
|
| 732 |
+
self_attention_layer_name=self_attention_layer_name,
|
| 733 |
+
cross_attention_layer_name=cross_attention_layer_name,
|
| 734 |
+
list_of_painta_layer_names=list_of_painta_layer_names,
|
| 735 |
+
list_of_rasg_layer_names=list_of_rasg_layer_names,
|
| 736 |
+
)
|
| 737 |
+
painta_active = False
|
| 738 |
+
|
| 739 |
+
with torch.enable_grad():
|
| 740 |
+
self.unet.zero_grad()
|
| 741 |
+
latents = latents.detach()
|
| 742 |
+
latents.requires_grad = True
|
| 743 |
+
|
| 744 |
+
# expand the latents if we are doing classifier free guidance
|
| 745 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 746 |
+
|
| 747 |
+
# concat latents, mask, masked_image_latents in the channel dimension
|
| 748 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 749 |
+
|
| 750 |
+
if num_channels_unet == 9:
|
| 751 |
+
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
|
| 752 |
+
|
| 753 |
+
self.scheduler.latents = latents
|
| 754 |
+
self.encoder_hidden_states = prompt_embeds
|
| 755 |
+
for attn_processor in self.unet.attn_processors.values():
|
| 756 |
+
attn_processor.encoder_hidden_states = prompt_embeds
|
| 757 |
+
|
| 758 |
+
# predict the noise residual
|
| 759 |
+
noise_pred = self.unet(
|
| 760 |
+
latent_model_input,
|
| 761 |
+
t,
|
| 762 |
+
encoder_hidden_states=prompt_embeds,
|
| 763 |
+
timestep_cond=timestep_cond,
|
| 764 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 765 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 766 |
+
return_dict=False,
|
| 767 |
+
)[0]
|
| 768 |
+
|
| 769 |
+
# perform guidance
|
| 770 |
+
if self.do_classifier_free_guidance:
|
| 771 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 772 |
+
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 773 |
+
|
| 774 |
+
if use_rasg:
|
| 775 |
+
# Perform RASG
|
| 776 |
+
_, _, height, width = mask_condition.shape # 512 x 512
|
| 777 |
+
scale_factor = self.vae_scale_factor * rasg_scale_factor # 8 * 4 = 32
|
| 778 |
+
|
| 779 |
+
# TODO: Fix for > 1 batch_size
|
| 780 |
+
rasg_mask = F.interpolate(
|
| 781 |
+
mask_condition, (height // scale_factor, width // scale_factor), mode="bicubic"
|
| 782 |
+
)[0, 0] # mode is nearest by default, B, H, W
|
| 783 |
+
|
| 784 |
+
# Aggregate the saved attention maps
|
| 785 |
+
attn_map = []
|
| 786 |
+
for processor in self.unet.attn_processors.values():
|
| 787 |
+
if hasattr(processor, "attention_scores") and processor.attention_scores is not None:
|
| 788 |
+
if self.do_classifier_free_guidance:
|
| 789 |
+
attn_map.append(processor.attention_scores.chunk(2)[1]) # (B/2) x H, 256, 77
|
| 790 |
+
else:
|
| 791 |
+
attn_map.append(processor.attention_scores) # B x H, 256, 77 ?
|
| 792 |
+
|
| 793 |
+
attn_map = (
|
| 794 |
+
torch.cat(attn_map)
|
| 795 |
+
.mean(0)
|
| 796 |
+
.permute(1, 0)
|
| 797 |
+
.reshape((-1, height // scale_factor, width // scale_factor))
|
| 798 |
+
) # 77, 16, 16
|
| 799 |
+
|
| 800 |
+
# Compute the attention score
|
| 801 |
+
attn_score = -sum(
|
| 802 |
+
[
|
| 803 |
+
F.binary_cross_entropy_with_logits(x - 1.0, rasg_mask.to(device))
|
| 804 |
+
for x in attn_map[token_idx]
|
| 805 |
+
]
|
| 806 |
+
)
|
| 807 |
+
|
| 808 |
+
# Backward the score and compute the gradients
|
| 809 |
+
attn_score.backward()
|
| 810 |
+
|
| 811 |
+
# Normalize the gradients and compute the noise component
|
| 812 |
+
variance_noise = latents.grad.detach()
|
| 813 |
+
# print("VARIANCE SHAPE", variance_noise.shape)
|
| 814 |
+
variance_noise -= torch.mean(variance_noise, [1, 2, 3], keepdim=True)
|
| 815 |
+
variance_noise /= torch.std(variance_noise, [1, 2, 3], keepdim=True)
|
| 816 |
+
else:
|
| 817 |
+
variance_noise = None
|
| 818 |
+
|
| 819 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 820 |
+
latents = self.scheduler.step(
|
| 821 |
+
noise_pred, t, latents, **extra_step_kwargs, return_dict=False, variance_noise=variance_noise
|
| 822 |
+
)[0]
|
| 823 |
+
|
| 824 |
+
if num_channels_unet == 4:
|
| 825 |
+
init_latents_proper = image_latents
|
| 826 |
+
if self.do_classifier_free_guidance:
|
| 827 |
+
init_mask, _ = mask.chunk(2)
|
| 828 |
+
else:
|
| 829 |
+
init_mask = mask
|
| 830 |
+
|
| 831 |
+
if i < len(timesteps) - 1:
|
| 832 |
+
noise_timestep = timesteps[i + 1]
|
| 833 |
+
init_latents_proper = self.scheduler.add_noise(
|
| 834 |
+
init_latents_proper, noise, torch.tensor([noise_timestep])
|
| 835 |
+
)
|
| 836 |
+
|
| 837 |
+
latents = (1 - init_mask) * init_latents_proper + init_mask * latents
|
| 838 |
+
|
| 839 |
+
if callback_on_step_end is not None:
|
| 840 |
+
callback_kwargs = {}
|
| 841 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 842 |
+
callback_kwargs[k] = locals()[k]
|
| 843 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 844 |
+
|
| 845 |
+
latents = callback_outputs.pop("latents", latents)
|
| 846 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 847 |
+
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
| 848 |
+
mask = callback_outputs.pop("mask", mask)
|
| 849 |
+
masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents)
|
| 850 |
+
|
| 851 |
+
# call the callback, if provided
|
| 852 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 853 |
+
progress_bar.update()
|
| 854 |
+
if callback is not None and i % callback_steps == 0:
|
| 855 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 856 |
+
callback(step_idx, t, latents)
|
| 857 |
+
|
| 858 |
+
if not output_type == "latent":
|
| 859 |
+
condition_kwargs = {}
|
| 860 |
+
if isinstance(self.vae, AsymmetricAutoencoderKL):
|
| 861 |
+
init_image = init_image.to(device=device, dtype=masked_image_latents.dtype)
|
| 862 |
+
init_image_condition = init_image.clone()
|
| 863 |
+
init_image = self._encode_vae_image(init_image, generator=generator)
|
| 864 |
+
mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype)
|
| 865 |
+
condition_kwargs = {"image": init_image_condition, "mask": mask_condition}
|
| 866 |
+
image = self.vae.decode(
|
| 867 |
+
latents / self.vae.config.scaling_factor, return_dict=False, generator=generator, **condition_kwargs
|
| 868 |
+
)[0]
|
| 869 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 870 |
+
else:
|
| 871 |
+
image = latents
|
| 872 |
+
has_nsfw_concept = None
|
| 873 |
+
|
| 874 |
+
if has_nsfw_concept is None:
|
| 875 |
+
do_denormalize = [True] * image.shape[0]
|
| 876 |
+
else:
|
| 877 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 878 |
+
|
| 879 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 880 |
+
|
| 881 |
+
if padding_mask_crop is not None:
|
| 882 |
+
image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image]
|
| 883 |
+
|
| 884 |
+
# Offload all models
|
| 885 |
+
self.maybe_free_model_hooks()
|
| 886 |
+
|
| 887 |
+
if not return_dict:
|
| 888 |
+
return (image, has_nsfw_concept)
|
| 889 |
+
|
| 890 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
| 891 |
+
|
| 892 |
+
|
| 893 |
+
# ============= Utility Functions ============== #


class GaussianSmoothing(nn.Module):
    """
    Apply gaussian smoothing on a
    1d, 2d or 3d tensor. Filtering is performed separately for each channel
    in the input using a depthwise convolution.

    Args:
        channels (`int` or `sequence`):
            Number of channels of the input tensors. The output will have this number of channels as well.
        kernel_size (`int` or `sequence`):
            Size of the Gaussian kernel.
        sigma (`float` or `sequence`):
            Standard deviation of the Gaussian kernel.
        dim (`int`, *optional*, defaults to `2`):
            The number of dimensions of the data. Default is 2 (spatial dimensions).
    """

    def __init__(self, channels, kernel_size, sigma, dim=2):
        super(GaussianSmoothing, self).__init__()
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim

        # The gaussian kernel is the product of the
        # gaussian function of each dimension.
        kernel = 1
        meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2
            kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2))

        # Make sure sum of values in gaussian kernel equals 1.
        kernel = kernel / torch.sum(kernel)

        # Reshape to depthwise convolutional weight
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))

        self.register_buffer("weight", kernel)
        self.groups = channels

        if dim == 1:
            self.conv = F.conv1d
        elif dim == 2:
            self.conv = F.conv2d
        elif dim == 3:
            self.conv = F.conv3d
        else:
            raise RuntimeError("Only 1, 2 and 3 dimensions are supported. Received {}.".format(dim))

    def forward(self, input):
        """
        Apply gaussian filter to input.

        Args:
            input (`torch.Tensor` of shape `(N, C, H, W)`):
                Input to apply Gaussian filter on.

        Returns:
            `torch.Tensor`:
                The filtered output tensor with the same shape as the input.
        """
        return self.conv(input, weight=self.weight.to(input.dtype), groups=self.groups, padding="same")
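A quick, self-contained illustration of the module above; the tensor size and parameter values are arbitrary assumptions, not taken from the pipeline:

```python
# Hypothetical usage of GaussianSmoothing: blur a single-channel, mask-like tensor.
import torch

smoother = GaussianSmoothing(channels=1, kernel_size=3, sigma=0.5)
mask = torch.rand(1, 1, 64, 64)  # (N, C, H, W), arbitrary values
smoothed = smoother(mask)        # depthwise conv with padding="same" keeps the spatial shape
print(smoothed.shape)            # torch.Size([1, 1, 64, 64])
```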
def get_attention_scores(
    self, query: torch.Tensor, key: torch.Tensor, attention_mask: torch.Tensor = None
) -> torch.Tensor:
    r"""
    Compute the attention scores.

    Args:
        query (`torch.Tensor`): The query tensor.
        key (`torch.Tensor`): The key tensor.
        attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied.

    Returns:
        `torch.Tensor`: The attention probabilities/scores.
    """
    if self.upcast_attention:
        query = query.float()
        key = key.float()

    if attention_mask is None:
        baddbmm_input = torch.empty(
            query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device
        )
        beta = 0
    else:
        baddbmm_input = attention_mask
        beta = 1

    attention_scores = torch.baddbmm(
        baddbmm_input,
        query,
        key.transpose(-1, -2),
        beta=beta,
        alpha=self.scale,
    )
    del baddbmm_input

    if self.upcast_softmax:
        attention_scores = attention_scores.float()

    return attention_scores
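A minimal sketch of how this community pipeline is typically loaded and called; the checkpoint name, image URLs, and argument values below are illustrative assumptions rather than anything fixed by the file:

```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# Load a Stable Diffusion inpainting checkpoint with the hd_painter community pipeline.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting",  # assumed checkpoint
    custom_pipeline="hd_painter",
    torch_dtype=torch.float16,
).to("cuda")

init_image = load_image("https://example.com/scene.png")       # hypothetical image
mask_image = load_image("https://example.com/scene_mask.png")  # hypothetical mask

result = pipe(
    prompt="a wooden bench",
    image=init_image,
    mask_image=mask_image,
    use_painta=True,  # PAIntA self-attention rescaling (switched off internally once t < 500)
    use_rasg=True,    # RASG: the attention-score gradient is injected as scheduler variance noise
    generator=torch.Generator("cuda").manual_seed(0),
).images[0]
```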
v0.36.0/iadb.py
ADDED
|
@@ -0,0 +1,149 @@
from typing import List, Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import ConfigMixin
from diffusers.pipelines.pipeline_utils import ImagePipelineOutput
from diffusers.schedulers.scheduling_utils import SchedulerMixin


class IADBScheduler(SchedulerMixin, ConfigMixin):
    """
    IADBScheduler is a scheduler for the Iterative α-(de)Blending denoising method. It is simple and minimalist.

    For more details, see the original paper: https://huggingface.co/papers/2305.03486 and the blog post: https://ggx-research.github.io/publication/2023/05/10/publication-iadb.html
    """

    def step(
        self,
        model_output: torch.Tensor,
        timestep: int,
        x_alpha: torch.Tensor,
    ) -> torch.Tensor:
        """
        Predict the sample at the previous timestep by reversing the ODE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.Tensor`): direct output from learned diffusion model. It is the direction from x0 to x1.
            timestep (`float`): current timestep in the diffusion chain.
            x_alpha (`torch.Tensor`): x_alpha sample for the current timestep

        Returns:
            `torch.Tensor`: the sample at the previous timestep

        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        alpha = timestep / self.num_inference_steps
        alpha_next = (timestep + 1) / self.num_inference_steps

        d = model_output

        x_alpha = x_alpha + (alpha_next - alpha) * d

        return x_alpha

    def set_timesteps(self, num_inference_steps: int):
        self.num_inference_steps = num_inference_steps

    def add_noise(
        self,
        original_samples: torch.Tensor,
        noise: torch.Tensor,
        alpha: torch.Tensor,
    ) -> torch.Tensor:
        return original_samples * alpha + noise * (1 - alpha)

    def __len__(self):
        return self.config.num_train_timesteps


class IADBPipeline(DiffusionPipeline):
    r"""
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
            [`DDPMScheduler`], or [`DDIMScheduler`].
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
            True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images.
        """

        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = torch.randn(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        x_alpha = image.clone()
        for t in self.progress_bar(range(num_inference_steps)):
            alpha = t / num_inference_steps

            # 1. predict noise model_output
            model_output = self.unet(x_alpha, torch.tensor(alpha, device=x_alpha.device)).sample

            # 2. step
            x_alpha = self.scheduler.step(model_output, t, x_alpha)

        image = (x_alpha * 0.5 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
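To make the blending arithmetic above concrete, here is a small numeric check (not part of the file): with an idealized "direction" d = x1 - x0, repeated `step` calls walk the straight blending path from the noise sample to the data sample.

```python
import torch

scheduler = IADBScheduler()
scheduler.set_timesteps(10)

x0 = torch.randn(1, 3, 8, 8)  # pure noise, alpha = 0
x1 = torch.rand(1, 3, 8, 8)   # stand-in for a clean sample, alpha = 1

x_alpha = x0.clone()
for t in range(10):
    d = x1 - x0  # an oracle model output, used only for this check
    x_alpha = scheduler.step(d, t, x_alpha)

# After num_inference_steps steps, x_alpha has moved all the way to x1.
print(torch.allclose(x_alpha, x1, atol=1e-5))  # True
```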
v0.36.0/imagic_stable_diffusion.py
ADDED
|
@@ -0,0 +1,470 @@
|
| 1 |
+
"""
|
| 2 |
+
modeled after the textual_inversion.py / train_dreambooth.py and the work
|
| 3 |
+
of justinpinkney here: https://github.com/justinpinkney/stable-diffusion/blob/main/notebooks/imagic.ipynb
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import inspect
|
| 7 |
+
import warnings
|
| 8 |
+
from typing import List, Optional, Union
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import PIL.Image
|
| 12 |
+
import torch
|
| 13 |
+
import torch.nn.functional as F
|
| 14 |
+
from accelerate import Accelerator
|
| 15 |
+
|
| 16 |
+
# TODO: remove and import from diffusers.utils when the new version of diffusers is released
|
| 17 |
+
from packaging import version
|
| 18 |
+
from tqdm.auto import tqdm
|
| 19 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 20 |
+
|
| 21 |
+
from diffusers import DiffusionPipeline
|
| 22 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 23 |
+
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
|
| 24 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 25 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 26 |
+
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
|
| 27 |
+
from diffusers.utils import logging
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
|
| 31 |
+
PIL_INTERPOLATION = {
|
| 32 |
+
"linear": PIL.Image.Resampling.BILINEAR,
|
| 33 |
+
"bilinear": PIL.Image.Resampling.BILINEAR,
|
| 34 |
+
"bicubic": PIL.Image.Resampling.BICUBIC,
|
| 35 |
+
"lanczos": PIL.Image.Resampling.LANCZOS,
|
| 36 |
+
"nearest": PIL.Image.Resampling.NEAREST,
|
| 37 |
+
}
|
| 38 |
+
else:
|
| 39 |
+
PIL_INTERPOLATION = {
|
| 40 |
+
"linear": PIL.Image.LINEAR,
|
| 41 |
+
"bilinear": PIL.Image.BILINEAR,
|
| 42 |
+
"bicubic": PIL.Image.BICUBIC,
|
| 43 |
+
"lanczos": PIL.Image.LANCZOS,
|
| 44 |
+
"nearest": PIL.Image.NEAREST,
|
| 45 |
+
}
|
| 46 |
+
# ------------------------------------------------------------------------------
|
| 47 |
+
|
| 48 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def preprocess(image):
|
| 52 |
+
w, h = image.size
|
| 53 |
+
w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
|
| 54 |
+
image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
|
| 55 |
+
image = np.array(image).astype(np.float32) / 255.0
|
| 56 |
+
image = image[None].transpose(0, 3, 1, 2)
|
| 57 |
+
image = torch.from_numpy(image)
|
| 58 |
+
return 2.0 * image - 1.0
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class ImagicStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
|
| 62 |
+
r"""
|
| 63 |
+
Pipeline for imagic image editing.
|
| 64 |
+
See paper here: https://huggingface.co/papers/2210.09276
|
| 65 |
+
|
| 66 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 67 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 68 |
+
Args:
|
| 69 |
+
vae ([`AutoencoderKL`]):
|
| 70 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 71 |
+
text_encoder ([`CLIPTextModel`]):
|
| 72 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 73 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 74 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 75 |
+
tokenizer (`CLIPTokenizer`):
|
| 76 |
+
Tokenizer of class
|
| 77 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 78 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 79 |
+
scheduler ([`SchedulerMixin`]):
|
| 80 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 81 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 82 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 83 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 84 |
+
Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
|
| 85 |
+
feature_extractor ([`CLIPImageProcessor`]):
|
| 86 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 87 |
+
"""
|
| 88 |
+
|
| 89 |
+
def __init__(
|
| 90 |
+
self,
|
| 91 |
+
vae: AutoencoderKL,
|
| 92 |
+
text_encoder: CLIPTextModel,
|
| 93 |
+
tokenizer: CLIPTokenizer,
|
| 94 |
+
unet: UNet2DConditionModel,
|
| 95 |
+
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
|
| 96 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 97 |
+
feature_extractor: CLIPImageProcessor,
|
| 98 |
+
):
|
| 99 |
+
super().__init__()
|
| 100 |
+
self.register_modules(
|
| 101 |
+
vae=vae,
|
| 102 |
+
text_encoder=text_encoder,
|
| 103 |
+
tokenizer=tokenizer,
|
| 104 |
+
unet=unet,
|
| 105 |
+
scheduler=scheduler,
|
| 106 |
+
safety_checker=safety_checker,
|
| 107 |
+
feature_extractor=feature_extractor,
|
| 108 |
+
)
|
| 109 |
+
|
| 110 |
+
def train(
|
| 111 |
+
self,
|
| 112 |
+
prompt: Union[str, List[str]],
|
| 113 |
+
image: Union[torch.Tensor, PIL.Image.Image],
|
| 114 |
+
height: Optional[int] = 512,
|
| 115 |
+
width: Optional[int] = 512,
|
| 116 |
+
generator: Optional[torch.Generator] = None,
|
| 117 |
+
embedding_learning_rate: float = 0.001,
|
| 118 |
+
diffusion_model_learning_rate: float = 2e-6,
|
| 119 |
+
text_embedding_optimization_steps: int = 500,
|
| 120 |
+
model_fine_tuning_optimization_steps: int = 1000,
|
| 121 |
+
**kwargs,
|
| 122 |
+
):
|
| 123 |
+
r"""
|
| 124 |
+
Function invoked when calling the pipeline for generation.
|
| 125 |
+
Args:
|
| 126 |
+
prompt (`str` or `List[str]`):
|
| 127 |
+
The prompt or prompts to guide the image generation.
|
| 128 |
+
height (`int`, *optional*, defaults to 512):
|
| 129 |
+
The height in pixels of the generated image.
|
| 130 |
+
width (`int`, *optional*, defaults to 512):
|
| 131 |
+
The width in pixels of the generated image.
|
| 132 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 133 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 134 |
+
expense of slower inference.
|
| 135 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 136 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 137 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 138 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 139 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 140 |
+
usually at the expense of lower image quality.
|
| 141 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 142 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 143 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 144 |
+
generator (`torch.Generator`, *optional*):
|
| 145 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 146 |
+
deterministic.
|
| 147 |
+
latents (`torch.Tensor`, *optional*):
|
| 148 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 149 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 150 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 151 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 152 |
+
The output format of the generate image. Choose between
|
| 153 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
|
| 154 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 155 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 156 |
+
plain tuple.
|
| 157 |
+
Returns:
|
| 158 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 159 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
| 160 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 161 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 162 |
+
(nsfw) content, according to the `safety_checker`.
|
| 163 |
+
"""
|
| 164 |
+
accelerator = Accelerator(
|
| 165 |
+
gradient_accumulation_steps=1,
|
| 166 |
+
mixed_precision="fp16",
|
| 167 |
+
)
|
| 168 |
+
|
| 169 |
+
if "torch_device" in kwargs:
|
| 170 |
+
device = kwargs.pop("torch_device")
|
| 171 |
+
warnings.warn(
|
| 172 |
+
"`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0."
|
| 173 |
+
" Consider using `pipe.to(torch_device)` instead."
|
| 174 |
+
)
|
| 175 |
+
|
| 176 |
+
if device is None:
|
| 177 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 178 |
+
self.to(device)
|
| 179 |
+
|
| 180 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 181 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 182 |
+
|
| 183 |
+
# Freeze vae and unet
|
| 184 |
+
self.vae.requires_grad_(False)
|
| 185 |
+
self.unet.requires_grad_(False)
|
| 186 |
+
self.text_encoder.requires_grad_(False)
|
| 187 |
+
self.unet.eval()
|
| 188 |
+
self.vae.eval()
|
| 189 |
+
self.text_encoder.eval()
|
| 190 |
+
|
| 191 |
+
if accelerator.is_main_process:
|
| 192 |
+
accelerator.init_trackers(
|
| 193 |
+
"imagic",
|
| 194 |
+
config={
|
| 195 |
+
"embedding_learning_rate": embedding_learning_rate,
|
| 196 |
+
"text_embedding_optimization_steps": text_embedding_optimization_steps,
|
| 197 |
+
},
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
# get text embeddings for prompt
|
| 201 |
+
text_input = self.tokenizer(
|
| 202 |
+
prompt,
|
| 203 |
+
padding="max_length",
|
| 204 |
+
max_length=self.tokenizer.model_max_length,
|
| 205 |
+
truncation=True,
|
| 206 |
+
return_tensors="pt",
|
| 207 |
+
)
|
| 208 |
+
text_embeddings = torch.nn.Parameter(
|
| 209 |
+
self.text_encoder(text_input.input_ids.to(self.device))[0], requires_grad=True
|
| 210 |
+
)
|
| 211 |
+
text_embeddings = text_embeddings.detach()
|
| 212 |
+
text_embeddings.requires_grad_()
|
| 213 |
+
text_embeddings_orig = text_embeddings.clone()
|
| 214 |
+
|
| 215 |
+
# Initialize the optimizer
|
| 216 |
+
optimizer = torch.optim.Adam(
|
| 217 |
+
[text_embeddings], # only optimize the embeddings
|
| 218 |
+
lr=embedding_learning_rate,
|
| 219 |
+
)
|
| 220 |
+
|
| 221 |
+
if isinstance(image, PIL.Image.Image):
|
| 222 |
+
image = preprocess(image)
|
| 223 |
+
|
| 224 |
+
latents_dtype = text_embeddings.dtype
|
| 225 |
+
image = image.to(device=self.device, dtype=latents_dtype)
|
| 226 |
+
init_latent_image_dist = self.vae.encode(image).latent_dist
|
| 227 |
+
image_latents = init_latent_image_dist.sample(generator=generator)
|
| 228 |
+
image_latents = 0.18215 * image_latents
|
| 229 |
+
|
| 230 |
+
progress_bar = tqdm(range(text_embedding_optimization_steps), disable=not accelerator.is_local_main_process)
|
| 231 |
+
progress_bar.set_description("Steps")
|
| 232 |
+
|
| 233 |
+
global_step = 0
|
| 234 |
+
|
| 235 |
+
logger.info("First optimizing the text embedding to better reconstruct the init image")
|
| 236 |
+
for _ in range(text_embedding_optimization_steps):
|
| 237 |
+
with accelerator.accumulate(text_embeddings):
|
| 238 |
+
# Sample noise that we'll add to the latents
|
| 239 |
+
noise = torch.randn(image_latents.shape).to(image_latents.device)
|
| 240 |
+
timesteps = torch.randint(1000, (1,), device=image_latents.device)
|
| 241 |
+
|
| 242 |
+
# Add noise to the latents according to the noise magnitude at each timestep
|
| 243 |
+
# (this is the forward diffusion process)
|
| 244 |
+
noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)
|
| 245 |
+
|
| 246 |
+
# Predict the noise residual
|
| 247 |
+
noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample
|
| 248 |
+
|
| 249 |
+
loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
|
| 250 |
+
accelerator.backward(loss)
|
| 251 |
+
|
| 252 |
+
optimizer.step()
|
| 253 |
+
optimizer.zero_grad()
|
| 254 |
+
|
| 255 |
+
# Checks if the accelerator has performed an optimization step behind the scenes
|
| 256 |
+
if accelerator.sync_gradients:
|
| 257 |
+
progress_bar.update(1)
|
| 258 |
+
global_step += 1
|
| 259 |
+
|
| 260 |
+
logs = {"loss": loss.detach().item()} # , "lr": lr_scheduler.get_last_lr()[0]}
|
| 261 |
+
progress_bar.set_postfix(**logs)
|
| 262 |
+
accelerator.log(logs, step=global_step)
|
| 263 |
+
|
| 264 |
+
accelerator.wait_for_everyone()
|
| 265 |
+
|
| 266 |
+
text_embeddings.requires_grad_(False)
|
| 267 |
+
|
| 268 |
+
# Now we fine tune the unet to better reconstruct the image
|
| 269 |
+
self.unet.requires_grad_(True)
|
| 270 |
+
self.unet.train()
|
| 271 |
+
optimizer = torch.optim.Adam(
|
| 272 |
+
self.unet.parameters(), # only optimize unet
|
| 273 |
+
lr=diffusion_model_learning_rate,
|
| 274 |
+
)
|
| 275 |
+
progress_bar = tqdm(range(model_fine_tuning_optimization_steps), disable=not accelerator.is_local_main_process)
|
| 276 |
+
|
| 277 |
+
logger.info("Next fine tuning the entire model to better reconstruct the init image")
|
| 278 |
+
for _ in range(model_fine_tuning_optimization_steps):
|
| 279 |
+
with accelerator.accumulate(self.unet.parameters()):
|
| 280 |
+
# Sample noise that we'll add to the latents
|
| 281 |
+
noise = torch.randn(image_latents.shape).to(image_latents.device)
|
| 282 |
+
timesteps = torch.randint(1000, (1,), device=image_latents.device)
|
| 283 |
+
|
| 284 |
+
# Add noise to the latents according to the noise magnitude at each timestep
|
| 285 |
+
# (this is the forward diffusion process)
|
| 286 |
+
noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)
|
| 287 |
+
|
| 288 |
+
# Predict the noise residual
|
| 289 |
+
noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample
|
| 290 |
+
|
| 291 |
+
loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
|
| 292 |
+
accelerator.backward(loss)
|
| 293 |
+
|
| 294 |
+
optimizer.step()
|
| 295 |
+
optimizer.zero_grad()
|
| 296 |
+
|
| 297 |
+
# Checks if the accelerator has performed an optimization step behind the scenes
|
| 298 |
+
if accelerator.sync_gradients:
|
| 299 |
+
progress_bar.update(1)
|
| 300 |
+
global_step += 1
|
| 301 |
+
|
| 302 |
+
logs = {"loss": loss.detach().item()} # , "lr": lr_scheduler.get_last_lr()[0]}
|
| 303 |
+
progress_bar.set_postfix(**logs)
|
| 304 |
+
accelerator.log(logs, step=global_step)
|
| 305 |
+
|
| 306 |
+
accelerator.wait_for_everyone()
|
| 307 |
+
self.text_embeddings_orig = text_embeddings_orig
|
| 308 |
+
self.text_embeddings = text_embeddings
|
| 309 |
+
|
| 310 |
+
@torch.no_grad()
|
| 311 |
+
def __call__(
|
| 312 |
+
self,
|
| 313 |
+
alpha: float = 1.2,
|
| 314 |
+
height: Optional[int] = 512,
|
| 315 |
+
width: Optional[int] = 512,
|
| 316 |
+
num_inference_steps: Optional[int] = 50,
|
| 317 |
+
generator: Optional[torch.Generator] = None,
|
| 318 |
+
output_type: Optional[str] = "pil",
|
| 319 |
+
return_dict: bool = True,
|
| 320 |
+
guidance_scale: float = 7.5,
|
| 321 |
+
eta: float = 0.0,
|
| 322 |
+
):
|
| 323 |
+
r"""
|
| 324 |
+
Function invoked when calling the pipeline for generation.
|
| 325 |
+
Args:
|
| 326 |
+
alpha (`float`, *optional*, defaults to 1.2):
|
| 327 |
+
The interpolation factor between the original and optimized text embeddings. A value closer to 0
|
| 328 |
+
will resemble the original input image.
|
| 329 |
+
height (`int`, *optional*, defaults to 512):
|
| 330 |
+
The height in pixels of the generated image.
|
| 331 |
+
width (`int`, *optional*, defaults to 512):
|
| 332 |
+
The width in pixels of the generated image.
|
| 333 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 334 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 335 |
+
expense of slower inference.
|
| 336 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 337 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 338 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 339 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 340 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 341 |
+
usually at the expense of lower image quality.
|
| 342 |
+
generator (`torch.Generator`, *optional*):
|
| 343 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 344 |
+
deterministic.
|
| 345 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 346 |
+
The output format of the generate image. Choose between
|
| 347 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
|
| 348 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 349 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 350 |
+
plain tuple.
|
| 351 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 352 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 353 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 354 |
+
Returns:
|
| 355 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 356 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
| 357 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 358 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 359 |
+
(nsfw) content, according to the `safety_checker`.
|
| 360 |
+
"""
|
| 361 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 362 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 363 |
+
if self.text_embeddings is None:
|
| 364 |
+
raise ValueError("Please run the pipe.train() before trying to generate an image.")
|
| 365 |
+
if self.text_embeddings_orig is None:
|
| 366 |
+
raise ValueError("Please run the pipe.train() before trying to generate an image.")
|
| 367 |
+
|
| 368 |
+
text_embeddings = alpha * self.text_embeddings_orig + (1 - alpha) * self.text_embeddings
|
| 369 |
+
|
| 370 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 371 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 372 |
+
# corresponds to doing no classifier free guidance.
|
| 373 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 374 |
+
# get unconditional embeddings for classifier free guidance
|
| 375 |
+
if do_classifier_free_guidance:
|
| 376 |
+
uncond_tokens = [""]
|
| 377 |
+
max_length = self.tokenizer.model_max_length
|
| 378 |
+
uncond_input = self.tokenizer(
|
| 379 |
+
uncond_tokens,
|
| 380 |
+
padding="max_length",
|
| 381 |
+
max_length=max_length,
|
| 382 |
+
truncation=True,
|
| 383 |
+
return_tensors="pt",
|
| 384 |
+
)
|
| 385 |
+
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
| 386 |
+
|
| 387 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 388 |
+
seq_len = uncond_embeddings.shape[1]
|
| 389 |
+
uncond_embeddings = uncond_embeddings.view(1, seq_len, -1)
|
| 390 |
+
|
| 391 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 392 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 393 |
+
# to avoid doing two forward passes
|
| 394 |
+
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
|
| 395 |
+
|
| 396 |
+
# get the initial random noise unless the user supplied it
|
| 397 |
+
|
| 398 |
+
# Unlike in other pipelines, latents need to be generated in the target device
|
| 399 |
+
# for 1-to-1 results reproducibility with the CompVis implementation.
|
| 400 |
+
# However this currently doesn't work in `mps`.
|
| 401 |
+
latents_shape = (1, self.unet.config.in_channels, height // 8, width // 8)
|
| 402 |
+
latents_dtype = text_embeddings.dtype
|
| 403 |
+
if self.device.type == "mps":
|
| 404 |
+
# randn does not exist on mps
|
| 405 |
+
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
|
| 406 |
+
self.device
|
| 407 |
+
)
|
| 408 |
+
else:
|
| 409 |
+
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
|
| 410 |
+
|
| 411 |
+
# set timesteps
|
| 412 |
+
self.scheduler.set_timesteps(num_inference_steps)
|
| 413 |
+
|
| 414 |
+
# Some schedulers like PNDM have timesteps as arrays
|
| 415 |
+
# It's more optimized to move all timesteps to correct device beforehand
|
| 416 |
+
timesteps_tensor = self.scheduler.timesteps.to(self.device)
|
| 417 |
+
|
| 418 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 419 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 420 |
+
|
| 421 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 422 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 423 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 424 |
+
# and should be between [0, 1]
|
| 425 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 426 |
+
extra_step_kwargs = {}
|
| 427 |
+
if accepts_eta:
|
| 428 |
+
extra_step_kwargs["eta"] = eta
|
| 429 |
+
|
| 430 |
+
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
|
| 431 |
+
# expand the latents if we are doing classifier free guidance
|
| 432 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 433 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 434 |
+
|
| 435 |
+
# predict the noise residual
|
| 436 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
|
| 437 |
+
|
| 438 |
+
# perform guidance
|
| 439 |
+
if do_classifier_free_guidance:
|
| 440 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 441 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 442 |
+
|
| 443 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 444 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 445 |
+
|
| 446 |
+
latents = 1 / 0.18215 * latents
|
| 447 |
+
image = self.vae.decode(latents).sample
|
| 448 |
+
|
| 449 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 450 |
+
|
| 451 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 452 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 453 |
+
|
| 454 |
+
if self.safety_checker is not None:
|
| 455 |
+
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
|
| 456 |
+
self.device
|
| 457 |
+
)
|
| 458 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 459 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
|
| 460 |
+
)
|
| 461 |
+
else:
|
| 462 |
+
has_nsfw_concept = None
|
| 463 |
+
|
| 464 |
+
if output_type == "pil":
|
| 465 |
+
image = self.numpy_to_pil(image)
|
| 466 |
+
|
| 467 |
+
if not return_dict:
|
| 468 |
+
return (image, has_nsfw_concept)
|
| 469 |
+
|
| 470 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
v0.36.0/img2img_inpainting.py
ADDED
|
@@ -0,0 +1,437 @@
|
| 1 |
+
import inspect
|
| 2 |
+
from typing import Callable, List, Optional, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import PIL.Image
|
| 6 |
+
import torch
|
| 7 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 8 |
+
|
| 9 |
+
from diffusers import DiffusionPipeline
|
| 10 |
+
from diffusers.configuration_utils import FrozenDict
|
| 11 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 12 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 13 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 14 |
+
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
|
| 15 |
+
from diffusers.utils import deprecate, logging
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def prepare_mask_and_masked_image(image, mask):
|
| 22 |
+
image = np.array(image.convert("RGB"))
|
| 23 |
+
image = image[None].transpose(0, 3, 1, 2)
|
| 24 |
+
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
|
| 25 |
+
|
| 26 |
+
mask = np.array(mask.convert("L"))
|
| 27 |
+
mask = mask.astype(np.float32) / 255.0
|
| 28 |
+
mask = mask[None, None]
|
| 29 |
+
mask[mask < 0.5] = 0
|
| 30 |
+
mask[mask >= 0.5] = 1
|
| 31 |
+
mask = torch.from_numpy(mask)
|
| 32 |
+
|
| 33 |
+
masked_image = image * (mask < 0.5)
|
| 34 |
+
|
| 35 |
+
return mask, masked_image
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def check_size(image, height, width):
|
| 39 |
+
if isinstance(image, PIL.Image.Image):
|
| 40 |
+
w, h = image.size
|
| 41 |
+
elif isinstance(image, torch.Tensor):
|
| 42 |
+
*_, h, w = image.shape
|
| 43 |
+
|
| 44 |
+
if h != height or w != width:
|
| 45 |
+
raise ValueError(f"Image size should be {height}x{width}, but got {h}x{w}")
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def overlay_inner_image(image, inner_image, paste_offset: Tuple[int, ...] = (0, 0)):
|
| 49 |
+
inner_image = inner_image.convert("RGBA")
|
| 50 |
+
image = image.convert("RGB")
|
| 51 |
+
|
| 52 |
+
image.paste(inner_image, paste_offset, inner_image)
|
| 53 |
+
image = image.convert("RGB")
|
| 54 |
+
|
| 55 |
+
return image
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class ImageToImageInpaintingPipeline(DiffusionPipeline):
|
| 59 |
+
r"""
|
| 60 |
+
Pipeline for text-guided image-to-image inpainting using Stable Diffusion. *This is an experimental feature*.
|
| 61 |
+
|
| 62 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 63 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 64 |
+
|
| 65 |
+
Args:
|
| 66 |
+
vae ([`AutoencoderKL`]):
|
| 67 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 68 |
+
text_encoder ([`CLIPTextModel`]):
|
| 69 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 70 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 71 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 72 |
+
tokenizer (`CLIPTokenizer`):
|
| 73 |
+
Tokenizer of class
|
| 74 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 75 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 76 |
+
scheduler ([`SchedulerMixin`]):
|
| 77 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latens. Can be one of
|
| 78 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 79 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 80 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 81 |
+
Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
|
| 82 |
+
feature_extractor ([`CLIPImageProcessor`]):
|
| 83 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 84 |
+
"""
|
| 85 |
+
|
| 86 |
+
def __init__(
|
| 87 |
+
self,
|
| 88 |
+
vae: AutoencoderKL,
|
| 89 |
+
text_encoder: CLIPTextModel,
|
| 90 |
+
tokenizer: CLIPTokenizer,
|
| 91 |
+
unet: UNet2DConditionModel,
|
| 92 |
+
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
|
| 93 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 94 |
+
feature_extractor: CLIPImageProcessor,
|
| 95 |
+
):
|
| 96 |
+
super().__init__()
|
| 97 |
+
|
| 98 |
+
if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
|
| 99 |
+
deprecation_message = (
|
| 100 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 101 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 102 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 103 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 104 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 105 |
+
" file"
|
| 106 |
+
)
|
| 107 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 108 |
+
new_config = dict(scheduler.config)
|
| 109 |
+
new_config["steps_offset"] = 1
|
| 110 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 111 |
+
|
| 112 |
+
if safety_checker is None:
|
| 113 |
+
logger.warning(
|
| 114 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 115 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 116 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 117 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 118 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 119 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
self.register_modules(
|
| 123 |
+
vae=vae,
|
| 124 |
+
text_encoder=text_encoder,
|
| 125 |
+
tokenizer=tokenizer,
|
| 126 |
+
unet=unet,
|
| 127 |
+
scheduler=scheduler,
|
| 128 |
+
safety_checker=safety_checker,
|
| 129 |
+
feature_extractor=feature_extractor,
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
@torch.no_grad()
|
| 133 |
+
def __call__(
|
| 134 |
+
self,
|
| 135 |
+
prompt: Union[str, List[str]],
|
| 136 |
+
image: Union[torch.Tensor, PIL.Image.Image],
|
| 137 |
+
inner_image: Union[torch.Tensor, PIL.Image.Image],
|
| 138 |
+
mask_image: Union[torch.Tensor, PIL.Image.Image],
|
| 139 |
+
height: int = 512,
|
| 140 |
+
width: int = 512,
|
| 141 |
+
num_inference_steps: int = 50,
|
| 142 |
+
guidance_scale: float = 7.5,
|
| 143 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 144 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 145 |
+
eta: float = 0.0,
|
| 146 |
+
generator: Optional[torch.Generator] = None,
|
| 147 |
+
latents: Optional[torch.Tensor] = None,
|
| 148 |
+
output_type: Optional[str] = "pil",
|
| 149 |
+
return_dict: bool = True,
|
| 150 |
+
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
|
| 151 |
+
callback_steps: int = 1,
|
| 152 |
+
**kwargs,
|
| 153 |
+
):
|
| 154 |
+
r"""
|
| 155 |
+
Function invoked when calling the pipeline for generation.
|
| 156 |
+
|
| 157 |
+
Args:
|
| 158 |
+
prompt (`str` or `List[str]`):
|
| 159 |
+
The prompt or prompts to guide the image generation.
|
| 160 |
+
image (`torch.Tensor` or `PIL.Image.Image`):
|
| 161 |
+
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
|
| 162 |
+
be masked out with `mask_image` and repainted according to `prompt`.
|
| 163 |
+
inner_image (`torch.Tensor` or `PIL.Image.Image`):
|
| 164 |
+
`Image`, or tensor representing an image batch which will be overlaid onto `image`. Non-transparent
|
| 165 |
+
regions of `inner_image` must fit inside white pixels in `mask_image`. Expects four channels, with
|
| 166 |
+
the last channel representing the alpha channel, which will be used to blend `inner_image` with
|
| 167 |
+
`image`. If not provided, it will be forcibly cast to RGBA.
|
| 168 |
+
mask_image (`PIL.Image.Image`):
|
| 169 |
+
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
| 170 |
+
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
|
| 171 |
+
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
|
| 172 |
+
instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
| 173 |
+
height (`int`, *optional*, defaults to 512):
|
| 174 |
+
The height in pixels of the generated image.
|
| 175 |
+
width (`int`, *optional*, defaults to 512):
|
| 176 |
+
The width in pixels of the generated image.
|
| 177 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 178 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 179 |
+
expense of slower inference.
|
| 180 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 181 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 182 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 183 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 184 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 185 |
+
usually at the expense of lower image quality.
|
| 186 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 187 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 188 |
+
if `guidance_scale` is less than `1`).
|
| 189 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 190 |
+
The number of images to generate per prompt.
|
| 191 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 192 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 193 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 194 |
+
generator (`torch.Generator`, *optional*):
|
| 195 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 196 |
+
deterministic.
|
| 197 |
+
latents (`torch.Tensor`, *optional*):
|
| 198 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 199 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 200 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 201 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 202 |
+
The output format of the generate image. Choose between
|
| 203 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 204 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 205 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 206 |
+
plain tuple.
|
| 207 |
+
callback (`Callable`, *optional*):
|
| 208 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 209 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
|
| 210 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 211 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 212 |
+
called at every step.
|
| 213 |
+
|
| 214 |
+
Returns:
|
| 215 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 216 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
| 217 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 218 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 219 |
+
(nsfw) content, according to the `safety_checker`.
|
| 220 |
+
"""
|
| 221 |
+
|
| 222 |
+
if isinstance(prompt, str):
|
| 223 |
+
batch_size = 1
|
| 224 |
+
elif isinstance(prompt, list):
|
| 225 |
+
batch_size = len(prompt)
|
| 226 |
+
else:
|
| 227 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 228 |
+
|
| 229 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 230 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 231 |
+
|
| 232 |
+
if (callback_steps is None) or (
|
| 233 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 234 |
+
):
|
| 235 |
+
raise ValueError(
|
| 236 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 237 |
+
f" {type(callback_steps)}."
|
| 238 |
+
)
|
| 239 |
+
|
| 240 |
+
# check if input sizes are correct
|
| 241 |
+
check_size(image, height, width)
|
| 242 |
+
check_size(inner_image, height, width)
|
| 243 |
+
check_size(mask_image, height, width)
|
| 244 |
+
|
| 245 |
+
# get prompt text embeddings
|
| 246 |
+
text_inputs = self.tokenizer(
|
| 247 |
+
prompt,
|
| 248 |
+
padding="max_length",
|
| 249 |
+
max_length=self.tokenizer.model_max_length,
|
| 250 |
+
return_tensors="pt",
|
| 251 |
+
)
|
| 252 |
+
text_input_ids = text_inputs.input_ids
|
| 253 |
+
|
| 254 |
+
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
|
| 255 |
+
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
|
| 256 |
+
logger.warning(
|
| 257 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 258 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 259 |
+
)
|
| 260 |
+
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
|
| 261 |
+
text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
|
| 262 |
+
|
| 263 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 264 |
+
bs_embed, seq_len, _ = text_embeddings.shape
|
| 265 |
+
text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
|
| 266 |
+
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 267 |
+
|
| 268 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 269 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 270 |
+
# corresponds to doing no classifier free guidance.
|
| 271 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 272 |
+
# get unconditional embeddings for classifier free guidance
|
| 273 |
+
if do_classifier_free_guidance:
|
| 274 |
+
uncond_tokens: List[str]
|
| 275 |
+
if negative_prompt is None:
|
| 276 |
+
uncond_tokens = [""]
|
| 277 |
+
elif type(prompt) is not type(negative_prompt):
|
| 278 |
+
raise TypeError(
|
| 279 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 280 |
+
f" {type(prompt)}."
|
| 281 |
+
)
|
| 282 |
+
elif isinstance(negative_prompt, str):
|
| 283 |
+
uncond_tokens = [negative_prompt]
|
| 284 |
+
elif batch_size != len(negative_prompt):
|
| 285 |
+
raise ValueError(
|
| 286 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 287 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 288 |
+
" the batch size of `prompt`."
|
| 289 |
+
)
|
| 290 |
+
else:
|
| 291 |
+
uncond_tokens = negative_prompt
|
| 292 |
+
|
| 293 |
+
max_length = text_input_ids.shape[-1]
|
| 294 |
+
uncond_input = self.tokenizer(
|
| 295 |
+
uncond_tokens,
|
| 296 |
+
padding="max_length",
|
| 297 |
+
max_length=max_length,
|
| 298 |
+
truncation=True,
|
| 299 |
+
return_tensors="pt",
|
| 300 |
+
)
|
| 301 |
+
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
| 302 |
+
|
| 303 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 304 |
+
seq_len = uncond_embeddings.shape[1]
|
| 305 |
+
uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
|
| 306 |
+
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 307 |
+
|
| 308 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 309 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 310 |
+
# to avoid doing two forward passes
|
| 311 |
+
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
|
| 312 |
+
|
| 313 |
+
# get the initial random noise unless the user supplied it
|
| 314 |
+
# Unlike in other pipelines, latents need to be generated in the target device
|
| 315 |
+
# for 1-to-1 results reproducibility with the CompVis implementation.
|
| 316 |
+
# However this currently doesn't work in `mps`.
|
| 317 |
+
num_channels_latents = self.vae.config.latent_channels
|
| 318 |
+
latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8)
|
| 319 |
+
latents_dtype = text_embeddings.dtype
|
| 320 |
+
if latents is None:
|
| 321 |
+
if self.device.type == "mps":
|
| 322 |
+
# randn does not exist on mps
|
| 323 |
+
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
|
| 324 |
+
self.device
|
| 325 |
+
)
|
| 326 |
+
else:
|
| 327 |
+
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
|
| 328 |
+
else:
|
| 329 |
+
if latents.shape != latents_shape:
|
| 330 |
+
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
|
| 331 |
+
latents = latents.to(self.device)
|
| 332 |
+
|
| 333 |
+
# overlay the inner image
|
| 334 |
+
image = overlay_inner_image(image, inner_image)
|
| 335 |
+
|
| 336 |
+
# prepare mask and masked_image
|
| 337 |
+
mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
|
| 338 |
+
mask = mask.to(device=self.device, dtype=text_embeddings.dtype)
|
| 339 |
+
masked_image = masked_image.to(device=self.device, dtype=text_embeddings.dtype)
|
| 340 |
+
|
| 341 |
+
# resize the mask to latents shape as we concatenate the mask to the latents
|
| 342 |
+
mask = torch.nn.functional.interpolate(mask, size=(height // 8, width // 8))
|
| 343 |
+
|
| 344 |
+
# encode the mask image into latents space so we can concatenate it to the latents
|
| 345 |
+
masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
|
| 346 |
+
masked_image_latents = 0.18215 * masked_image_latents
|
| 347 |
+
|
| 348 |
+
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
|
| 349 |
+
mask = mask.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
|
| 350 |
+
masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
|
| 351 |
+
|
| 352 |
+
mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
|
| 353 |
+
masked_image_latents = (
|
| 354 |
+
torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
|
| 355 |
+
)
|
| 356 |
+
|
| 357 |
+
num_channels_mask = mask.shape[1]
|
| 358 |
+
num_channels_masked_image = masked_image_latents.shape[1]
|
| 359 |
+
|
| 360 |
+
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
|
| 361 |
+
raise ValueError(
|
| 362 |
+
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
|
| 363 |
+
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
|
| 364 |
+
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
|
| 365 |
+
f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of"
|
| 366 |
+
" `pipeline.unet` or your `mask_image` or `image` input."
|
| 367 |
+
)
|
| 368 |
+
|
| 369 |
+
# set timesteps
|
| 370 |
+
self.scheduler.set_timesteps(num_inference_steps)
|
| 371 |
+
|
| 372 |
+
# Some schedulers like PNDM have timesteps as arrays
|
| 373 |
+
# It's more optimized to move all timesteps to correct device beforehand
|
| 374 |
+
timesteps_tensor = self.scheduler.timesteps.to(self.device)
|
| 375 |
+
|
| 376 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 377 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 378 |
+
|
| 379 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 380 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 381 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 382 |
+
# and should be between [0, 1]
|
| 383 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 384 |
+
extra_step_kwargs = {}
|
| 385 |
+
if accepts_eta:
|
| 386 |
+
extra_step_kwargs["eta"] = eta
|
| 387 |
+
|
| 388 |
+
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
|
| 389 |
+
# expand the latents if we are doing classifier free guidance
|
| 390 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 391 |
+
|
| 392 |
+
# concat latents, mask, masked_image_latents in the channel dimension
|
| 393 |
+
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
|
| 394 |
+
|
| 395 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 396 |
+
|
| 397 |
+
# predict the noise residual
|
| 398 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
|
| 399 |
+
|
| 400 |
+
# perform guidance
|
| 401 |
+
if do_classifier_free_guidance:
|
| 402 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 403 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 404 |
+
|
| 405 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 406 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 407 |
+
|
| 408 |
+
# call the callback, if provided
|
| 409 |
+
if callback is not None and i % callback_steps == 0:
|
| 410 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 411 |
+
callback(step_idx, t, latents)
|
| 412 |
+
|
| 413 |
+
latents = 1 / 0.18215 * latents
|
| 414 |
+
image = self.vae.decode(latents).sample
|
| 415 |
+
|
| 416 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 417 |
+
|
| 418 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 419 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 420 |
+
|
| 421 |
+
if self.safety_checker is not None:
|
| 422 |
+
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
|
| 423 |
+
self.device
|
| 424 |
+
)
|
| 425 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 426 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
|
| 427 |
+
)
|
| 428 |
+
else:
|
| 429 |
+
has_nsfw_concept = None
|
| 430 |
+
|
| 431 |
+
if output_type == "pil":
|
| 432 |
+
image = self.numpy_to_pil(image)
|
| 433 |
+
|
| 434 |
+
if not return_dict:
|
| 435 |
+
return (image, has_nsfw_concept)
|
| 436 |
+
|
| 437 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
v0.36.0/instaflow_one_step.py
ADDED
|
@@ -0,0 +1,693 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
from packaging import version
|
| 20 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 21 |
+
|
| 22 |
+
from diffusers.configuration_utils import FrozenDict
|
| 23 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 24 |
+
from diffusers.loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
|
| 25 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 26 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 27 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 28 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 29 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 30 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 31 |
+
from diffusers.utils import (
|
| 32 |
+
deprecate,
|
| 33 |
+
logging,
|
| 34 |
+
)
|
| 35 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
| 42 |
+
"""
|
| 43 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
| 44 |
+
Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). See Section 3.4
|
| 45 |
+
"""
|
| 46 |
+
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
|
| 47 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
| 48 |
+
# rescale the results from guidance (fixes overexposure)
|
| 49 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
| 50 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
| 51 |
+
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
| 52 |
+
return noise_cfg
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class InstaFlowPipeline(
|
| 56 |
+
DiffusionPipeline,
|
| 57 |
+
StableDiffusionMixin,
|
| 58 |
+
TextualInversionLoaderMixin,
|
| 59 |
+
StableDiffusionLoraLoaderMixin,
|
| 60 |
+
FromSingleFileMixin,
|
| 61 |
+
):
|
| 62 |
+
r"""
|
| 63 |
+
Pipeline for text-to-image generation using Rectified Flow and Euler discretization.
|
| 64 |
+
This customized pipeline is based on StableDiffusionPipeline from the official Diffusers library (0.21.4)
|
| 65 |
+
|
| 66 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 67 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 68 |
+
|
| 69 |
+
The pipeline also inherits the following loading methods:
|
| 70 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 71 |
+
- [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 72 |
+
- [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 73 |
+
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
|
| 74 |
+
|
| 75 |
+
Args:
|
| 76 |
+
vae ([`AutoencoderKL`]):
|
| 77 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 78 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 79 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 80 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 81 |
+
A `CLIPTokenizer` to tokenize text.
|
| 82 |
+
unet ([`UNet2DConditionModel`]):
|
| 83 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 84 |
+
scheduler ([`SchedulerMixin`]):
|
| 85 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 86 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 87 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 88 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 89 |
+
Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
|
| 90 |
+
about a model's potential harms.
|
| 91 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 92 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 93 |
+
"""
|
| 94 |
+
|
| 95 |
+
model_cpu_offload_seq = "text_encoder->unet->vae"
|
| 96 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 97 |
+
_exclude_from_cpu_offload = ["safety_checker"]
|
| 98 |
+
|
| 99 |
+
def __init__(
|
| 100 |
+
self,
|
| 101 |
+
vae: AutoencoderKL,
|
| 102 |
+
text_encoder: CLIPTextModel,
|
| 103 |
+
tokenizer: CLIPTokenizer,
|
| 104 |
+
unet: UNet2DConditionModel,
|
| 105 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 106 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 107 |
+
feature_extractor: CLIPImageProcessor,
|
| 108 |
+
requires_safety_checker: bool = True,
|
| 109 |
+
):
|
| 110 |
+
super().__init__()
|
| 111 |
+
|
| 112 |
+
if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
|
| 113 |
+
deprecation_message = (
|
| 114 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 115 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 116 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 117 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 118 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 119 |
+
" file"
|
| 120 |
+
)
|
| 121 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 122 |
+
new_config = dict(scheduler.config)
|
| 123 |
+
new_config["steps_offset"] = 1
|
| 124 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 125 |
+
|
| 126 |
+
if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True:
|
| 127 |
+
deprecation_message = (
|
| 128 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 129 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 130 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 131 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 132 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 133 |
+
)
|
| 134 |
+
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
| 135 |
+
new_config = dict(scheduler.config)
|
| 136 |
+
new_config["clip_sample"] = False
|
| 137 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 138 |
+
|
| 139 |
+
if safety_checker is None and requires_safety_checker:
|
| 140 |
+
logger.warning(
|
| 141 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 142 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 143 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 144 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 145 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 146 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
if safety_checker is not None and feature_extractor is None:
|
| 150 |
+
raise ValueError(
|
| 151 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 152 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 153 |
+
)
|
| 154 |
+
|
| 155 |
+
is_unet_version_less_0_9_0 = (
|
| 156 |
+
unet is not None
|
| 157 |
+
and hasattr(unet.config, "_diffusers_version")
|
| 158 |
+
and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
|
| 159 |
+
)
|
| 160 |
+
is_unet_sample_size_less_64 = (
|
| 161 |
+
unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 162 |
+
)
|
| 163 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 164 |
+
deprecation_message = (
|
| 165 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 166 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 167 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 168 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 169 |
+
" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 170 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 171 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 172 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 173 |
+
" the `unet/config.json` file"
|
| 174 |
+
)
|
| 175 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 176 |
+
new_config = dict(unet.config)
|
| 177 |
+
new_config["sample_size"] = 64
|
| 178 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 179 |
+
|
| 180 |
+
self.register_modules(
|
| 181 |
+
vae=vae,
|
| 182 |
+
text_encoder=text_encoder,
|
| 183 |
+
tokenizer=tokenizer,
|
| 184 |
+
unet=unet,
|
| 185 |
+
scheduler=scheduler,
|
| 186 |
+
safety_checker=safety_checker,
|
| 187 |
+
feature_extractor=feature_extractor,
|
| 188 |
+
)
|
| 189 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 190 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 191 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 192 |
+
|
| 193 |
+
def _encode_prompt(
|
| 194 |
+
self,
|
| 195 |
+
prompt,
|
| 196 |
+
device,
|
| 197 |
+
num_images_per_prompt,
|
| 198 |
+
do_classifier_free_guidance,
|
| 199 |
+
negative_prompt=None,
|
| 200 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 201 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 202 |
+
lora_scale: Optional[float] = None,
|
| 203 |
+
):
|
| 204 |
+
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
|
| 205 |
+
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
|
| 206 |
+
|
| 207 |
+
prompt_embeds_tuple = self.encode_prompt(
|
| 208 |
+
prompt=prompt,
|
| 209 |
+
device=device,
|
| 210 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 211 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 212 |
+
negative_prompt=negative_prompt,
|
| 213 |
+
prompt_embeds=prompt_embeds,
|
| 214 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 215 |
+
lora_scale=lora_scale,
|
| 216 |
+
)
|
| 217 |
+
|
| 218 |
+
# concatenate for backwards comp
|
| 219 |
+
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
|
| 220 |
+
|
| 221 |
+
return prompt_embeds
|
| 222 |
+
|
| 223 |
+
def encode_prompt(
|
| 224 |
+
self,
|
| 225 |
+
prompt,
|
| 226 |
+
device,
|
| 227 |
+
num_images_per_prompt,
|
| 228 |
+
do_classifier_free_guidance,
|
| 229 |
+
negative_prompt=None,
|
| 230 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 231 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 232 |
+
lora_scale: Optional[float] = None,
|
| 233 |
+
):
|
| 234 |
+
r"""
|
| 235 |
+
Encodes the prompt into text encoder hidden states.
|
| 236 |
+
|
| 237 |
+
Args:
|
| 238 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 239 |
+
prompt to be encoded
|
| 240 |
+
device: (`torch.device`):
|
| 241 |
+
torch device
|
| 242 |
+
num_images_per_prompt (`int`):
|
| 243 |
+
number of images that should be generated per prompt
|
| 244 |
+
do_classifier_free_guidance (`bool`):
|
| 245 |
+
whether to use classifier free guidance or not
|
| 246 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 247 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 248 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 249 |
+
less than `1`).
|
| 250 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 251 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 252 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 253 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 254 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 255 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 256 |
+
argument.
|
| 257 |
+
lora_scale (`float`, *optional*):
|
| 258 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 259 |
+
"""
|
| 260 |
+
# set lora scale so that monkey patched LoRA
|
| 261 |
+
# function of text encoder can correctly access it
|
| 262 |
+
if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
|
| 263 |
+
self._lora_scale = lora_scale
|
| 264 |
+
|
| 265 |
+
# dynamically adjust the LoRA scale
|
| 266 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 267 |
+
|
| 268 |
+
if prompt is not None and isinstance(prompt, str):
|
| 269 |
+
batch_size = 1
|
| 270 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 271 |
+
batch_size = len(prompt)
|
| 272 |
+
else:
|
| 273 |
+
batch_size = prompt_embeds.shape[0]
|
| 274 |
+
|
| 275 |
+
if prompt_embeds is None:
|
| 276 |
+
# textual inversion: procecss multi-vector tokens if necessary
|
| 277 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 278 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 279 |
+
|
| 280 |
+
text_inputs = self.tokenizer(
|
| 281 |
+
prompt,
|
| 282 |
+
padding="max_length",
|
| 283 |
+
max_length=self.tokenizer.model_max_length,
|
| 284 |
+
truncation=True,
|
| 285 |
+
return_tensors="pt",
|
| 286 |
+
)
|
| 287 |
+
text_input_ids = text_inputs.input_ids
|
| 288 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 289 |
+
|
| 290 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 291 |
+
text_input_ids, untruncated_ids
|
| 292 |
+
):
|
| 293 |
+
removed_text = self.tokenizer.batch_decode(
|
| 294 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 295 |
+
)
|
| 296 |
+
logger.warning(
|
| 297 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 298 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 299 |
+
)
|
| 300 |
+
|
| 301 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 302 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 303 |
+
else:
|
| 304 |
+
attention_mask = None
|
| 305 |
+
|
| 306 |
+
prompt_embeds = self.text_encoder(
|
| 307 |
+
text_input_ids.to(device),
|
| 308 |
+
attention_mask=attention_mask,
|
| 309 |
+
)
|
| 310 |
+
prompt_embeds = prompt_embeds[0]
|
| 311 |
+
|
| 312 |
+
if self.text_encoder is not None:
|
| 313 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 314 |
+
elif self.unet is not None:
|
| 315 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 316 |
+
else:
|
| 317 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 318 |
+
|
| 319 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 320 |
+
|
| 321 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 322 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 323 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 324 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 325 |
+
|
| 326 |
+
# get unconditional embeddings for classifier free guidance
|
| 327 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 328 |
+
uncond_tokens: List[str]
|
| 329 |
+
if negative_prompt is None:
|
| 330 |
+
uncond_tokens = [""] * batch_size
|
| 331 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 332 |
+
raise TypeError(
|
| 333 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 334 |
+
f" {type(prompt)}."
|
| 335 |
+
)
|
| 336 |
+
elif isinstance(negative_prompt, str):
|
| 337 |
+
uncond_tokens = [negative_prompt]
|
| 338 |
+
elif batch_size != len(negative_prompt):
|
| 339 |
+
raise ValueError(
|
| 340 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 341 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 342 |
+
" the batch size of `prompt`."
|
| 343 |
+
)
|
| 344 |
+
else:
|
| 345 |
+
uncond_tokens = negative_prompt
|
| 346 |
+
|
| 347 |
+
# textual inversion: procecss multi-vector tokens if necessary
|
| 348 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 349 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 350 |
+
|
| 351 |
+
max_length = prompt_embeds.shape[1]
|
| 352 |
+
uncond_input = self.tokenizer(
|
| 353 |
+
uncond_tokens,
|
| 354 |
+
padding="max_length",
|
| 355 |
+
max_length=max_length,
|
| 356 |
+
truncation=True,
|
| 357 |
+
return_tensors="pt",
|
| 358 |
+
)
|
| 359 |
+
|
| 360 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 361 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 362 |
+
else:
|
| 363 |
+
attention_mask = None
|
| 364 |
+
|
| 365 |
+
negative_prompt_embeds = self.text_encoder(
|
| 366 |
+
uncond_input.input_ids.to(device),
|
| 367 |
+
attention_mask=attention_mask,
|
| 368 |
+
)
|
| 369 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 370 |
+
|
| 371 |
+
if do_classifier_free_guidance:
|
| 372 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 373 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 374 |
+
|
| 375 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 376 |
+
|
| 377 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 378 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 379 |
+
|
| 380 |
+
return prompt_embeds, negative_prompt_embeds
|
| 381 |
+
|
| 382 |
+
def run_safety_checker(self, image, device, dtype):
|
| 383 |
+
if self.safety_checker is None:
|
| 384 |
+
has_nsfw_concept = None
|
| 385 |
+
else:
|
| 386 |
+
if torch.is_tensor(image):
|
| 387 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 388 |
+
else:
|
| 389 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 390 |
+
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
| 391 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 392 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 393 |
+
)
|
| 394 |
+
return image, has_nsfw_concept
|
| 395 |
+
|
| 396 |
+
def decode_latents(self, latents):
|
| 397 |
+
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
|
| 398 |
+
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
|
| 399 |
+
|
| 400 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 401 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 402 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 403 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 404 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 405 |
+
return image
|
| 406 |
+
|
| 407 |
+
def merge_dW_to_unet(pipe, dW_dict, alpha=1.0):
|
| 408 |
+
_tmp_sd = pipe.unet.state_dict()
|
| 409 |
+
for key in dW_dict.keys():
|
| 410 |
+
_tmp_sd[key] += dW_dict[key] * alpha
|
| 411 |
+
pipe.unet.load_state_dict(_tmp_sd, strict=False)
|
| 412 |
+
return pipe
|
| 413 |
+
|
| 414 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 415 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 416 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 417 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 418 |
+
# and should be between [0, 1]
|
| 419 |
+
|
| 420 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 421 |
+
extra_step_kwargs = {}
|
| 422 |
+
if accepts_eta:
|
| 423 |
+
extra_step_kwargs["eta"] = eta
|
| 424 |
+
|
| 425 |
+
# check if the scheduler accepts generator
|
| 426 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 427 |
+
if accepts_generator:
|
| 428 |
+
extra_step_kwargs["generator"] = generator
|
| 429 |
+
return extra_step_kwargs
|
| 430 |
+
|
| 431 |
+
def check_inputs(
|
| 432 |
+
self,
|
| 433 |
+
prompt,
|
| 434 |
+
height,
|
| 435 |
+
width,
|
| 436 |
+
callback_steps,
|
| 437 |
+
negative_prompt=None,
|
| 438 |
+
prompt_embeds=None,
|
| 439 |
+
negative_prompt_embeds=None,
|
| 440 |
+
):
|
| 441 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 442 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 443 |
+
|
| 444 |
+
if (callback_steps is None) or (
|
| 445 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 446 |
+
):
|
| 447 |
+
raise ValueError(
|
| 448 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 449 |
+
f" {type(callback_steps)}."
|
| 450 |
+
)
|
| 451 |
+
|
| 452 |
+
if prompt is not None and prompt_embeds is not None:
|
| 453 |
+
raise ValueError(
|
| 454 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 455 |
+
" only forward one of the two."
|
| 456 |
+
)
|
| 457 |
+
elif prompt is None and prompt_embeds is None:
|
| 458 |
+
raise ValueError(
|
| 459 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 460 |
+
)
|
| 461 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 462 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 463 |
+
|
| 464 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 465 |
+
raise ValueError(
|
| 466 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 467 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 468 |
+
)
|
| 469 |
+
|
| 470 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 471 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 472 |
+
raise ValueError(
|
| 473 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 474 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 475 |
+
f" {negative_prompt_embeds.shape}."
|
| 476 |
+
)
|
| 477 |
+
|
| 478 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 479 |
+
shape = (
|
| 480 |
+
batch_size,
|
| 481 |
+
num_channels_latents,
|
| 482 |
+
int(height) // self.vae_scale_factor,
|
| 483 |
+
int(width) // self.vae_scale_factor,
|
| 484 |
+
)
|
| 485 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 486 |
+
raise ValueError(
|
| 487 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 488 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 489 |
+
)
|
| 490 |
+
|
| 491 |
+
if latents is None:
|
| 492 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 493 |
+
else:
|
| 494 |
+
latents = latents.to(device)
|
| 495 |
+
|
| 496 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 497 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 498 |
+
return latents
|
| 499 |
+
|
| 500 |
+
@torch.no_grad()
|
| 501 |
+
def __call__(
|
| 502 |
+
self,
|
| 503 |
+
prompt: Union[str, List[str]] = None,
|
| 504 |
+
height: Optional[int] = None,
|
| 505 |
+
width: Optional[int] = None,
|
| 506 |
+
num_inference_steps: int = 50,
|
| 507 |
+
guidance_scale: float = 7.5,
|
| 508 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 509 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 510 |
+
eta: float = 0.0,
|
| 511 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 512 |
+
latents: Optional[torch.Tensor] = None,
|
| 513 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 514 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 515 |
+
output_type: Optional[str] = "pil",
|
| 516 |
+
return_dict: bool = True,
|
| 517 |
+
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
|
| 518 |
+
callback_steps: int = 1,
|
| 519 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 520 |
+
guidance_rescale: float = 0.0,
|
| 521 |
+
):
|
| 522 |
+
r"""
|
| 523 |
+
The call function to the pipeline for generation.
|
| 524 |
+
|
| 525 |
+
Args:
|
| 526 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 527 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 528 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 529 |
+
The height in pixels of the generated image.
|
| 530 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 531 |
+
The width in pixels of the generated image.
|
| 532 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 533 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 534 |
+
expense of slower inference.
|
| 535 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 536 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 537 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 538 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 539 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 540 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 541 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 542 |
+
The number of images to generate per prompt.
|
| 543 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 544 |
+
Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies
|
| 545 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 546 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 547 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 548 |
+
generation deterministic.
|
| 549 |
+
latents (`torch.Tensor`, *optional*):
|
| 550 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 551 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 552 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 553 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 554 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 555 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 556 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 557 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 558 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 559 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 560 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 561 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 562 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 563 |
+
plain tuple.
|
| 564 |
+
callback (`Callable`, *optional*):
|
| 565 |
+
A function called every `callback_steps` steps during inference. The function is called with the
|
| 566 |
+
following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
|
| 567 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 568 |
+
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
| 569 |
+
every step.
|
| 570 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 571 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 572 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 573 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 574 |
+
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
|
| 575 |
+
Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when
|
| 576 |
+
using zero terminal SNR.
|
| 577 |
+
|
| 578 |
+
Examples:
|
| 579 |
+
|
| 580 |
+
Returns:
|
| 581 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 582 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 583 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 584 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 585 |
+
"not-safe-for-work" (nsfw) content.
|
| 586 |
+
"""
|
| 587 |
+
# 0. Default height and width to unet
|
| 588 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 589 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 590 |
+
|
| 591 |
+
# 1. Check inputs. Raise error if not correct
|
| 592 |
+
self.check_inputs(
|
| 593 |
+
prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
|
| 594 |
+
)
|
| 595 |
+
|
| 596 |
+
# 2. Define call parameters
|
| 597 |
+
if prompt is not None and isinstance(prompt, str):
|
| 598 |
+
batch_size = 1
|
| 599 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 600 |
+
batch_size = len(prompt)
|
| 601 |
+
else:
|
| 602 |
+
batch_size = prompt_embeds.shape[0]
|
| 603 |
+
|
| 604 |
+
device = self._execution_device
|
| 605 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 606 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 607 |
+
# corresponds to doing no classifier free guidance.
|
| 608 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 609 |
+
|
| 610 |
+
# 3. Encode input prompt
|
| 611 |
+
text_encoder_lora_scale = (
|
| 612 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 613 |
+
)
|
| 614 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 615 |
+
prompt,
|
| 616 |
+
device,
|
| 617 |
+
num_images_per_prompt,
|
| 618 |
+
do_classifier_free_guidance,
|
| 619 |
+
negative_prompt,
|
| 620 |
+
prompt_embeds=prompt_embeds,
|
| 621 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 622 |
+
lora_scale=text_encoder_lora_scale,
|
| 623 |
+
)
|
| 624 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 625 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 626 |
+
# to avoid doing two forward passes
|
| 627 |
+
if do_classifier_free_guidance:
|
| 628 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 629 |
+
|
| 630 |
+
# 4. Prepare timesteps
|
| 631 |
+
timesteps = [(1.0 - i / num_inference_steps) * 1000.0 for i in range(num_inference_steps)]
|
| 632 |
+
|
| 633 |
+
# 5. Prepare latent variables
|
| 634 |
+
num_channels_latents = self.unet.config.in_channels
|
| 635 |
+
latents = self.prepare_latents(
|
| 636 |
+
batch_size * num_images_per_prompt,
|
| 637 |
+
num_channels_latents,
|
| 638 |
+
height,
|
| 639 |
+
width,
|
| 640 |
+
prompt_embeds.dtype,
|
| 641 |
+
device,
|
| 642 |
+
generator,
|
| 643 |
+
latents,
|
| 644 |
+
)
|
| 645 |
+
|
| 646 |
+
# 6. Prepare the Euler step size (no extra scheduler step kwargs are needed here)
|
| 647 |
+
dt = 1.0 / num_inference_steps
|
| 648 |
+
|
| 649 |
+
# 7. Denoising loop of Euler discretization from t = 0 to t = 1
|
| 650 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 651 |
+
for i, t in enumerate(timesteps):
|
| 652 |
+
# expand the latents if we are doing classifier free guidance
|
| 653 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 654 |
+
|
| 655 |
+
vec_t = torch.ones((latent_model_input.shape[0],), device=latents.device) * t
|
| 656 |
+
|
| 657 |
+
v_pred = self.unet(latent_model_input, vec_t, encoder_hidden_states=prompt_embeds).sample
|
| 658 |
+
|
| 659 |
+
# perform guidance
|
| 660 |
+
if do_classifier_free_guidance:
|
| 661 |
+
v_pred_neg, v_pred_text = v_pred.chunk(2)
|
| 662 |
+
v_pred = v_pred_neg + guidance_scale * (v_pred_text - v_pred_neg)
|
| 663 |
+
|
| 664 |
+
latents = latents + dt * v_pred
|
| 665 |
+
|
| 666 |
+
# call the callback, if provided
|
| 667 |
+
if i == len(timesteps) - 1 or ((i + 1) % self.scheduler.order == 0):
|
| 668 |
+
progress_bar.update()
|
| 669 |
+
if callback is not None and i % callback_steps == 0:
|
| 670 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 671 |
+
callback(step_idx, t, latents)
|
| 672 |
+
|
| 673 |
+
if not output_type == "latent":
|
| 674 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 675 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 676 |
+
else:
|
| 677 |
+
image = latents
|
| 678 |
+
has_nsfw_concept = None
|
| 679 |
+
|
| 680 |
+
if has_nsfw_concept is None:
|
| 681 |
+
do_denormalize = [True] * image.shape[0]
|
| 682 |
+
else:
|
| 683 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 684 |
+
|
| 685 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 686 |
+
|
| 687 |
+
# Offload all models
|
| 688 |
+
self.maybe_free_model_hooks()
|
| 689 |
+
|
| 690 |
+
if not return_dict:
|
| 691 |
+
return (image, has_nsfw_concept)
|
| 692 |
+
|
| 693 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
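A hedged usage sketch for the pipeline whose `__call__` is shown above (Euler integration of the predicted velocity field, usable in a single step). The checkpoint id and the `custom_pipeline` name below are illustrative assumptions, not confirmed by this diff:

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "XCLiu/instaflow_0_9B_from_sd_1_5",    # hypothetical rectified-flow checkpoint
    custom_pipeline="instaflow_one_step",  # community pipeline shown above
    torch_dtype=torch.float16,
)
pipe.to("cuda")

# num_inference_steps=1 exercises a single Euler step of the velocity field.
image = pipe(prompt="A photo of a corgi", num_inference_steps=1, guidance_scale=0.0).images[0]
image.save("corgi.png")
```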
v0.36.0/interpolate_stable_diffusion.py
ADDED
|
@@ -0,0 +1,498 @@
|
| 1 |
+
import inspect
|
| 2 |
+
import time
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
from typing import Callable, List, Optional, Union
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import torch
|
| 8 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 9 |
+
|
| 10 |
+
from diffusers.configuration_utils import FrozenDict
|
| 11 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 12 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 13 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 14 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 15 |
+
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
|
| 16 |
+
from diffusers.utils import deprecate, logging
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
|
| 23 |
+
"""helper function to spherically interpolate two arrays v1 v2"""
|
| 24 |
+
|
| 25 |
+
inputs_are_torch = False  # default for NumPy inputs; avoids a NameError in the conversion below
if not isinstance(v0, np.ndarray):
|
| 26 |
+
inputs_are_torch = True
|
| 27 |
+
input_device = v0.device
|
| 28 |
+
v0 = v0.cpu().numpy()
|
| 29 |
+
v1 = v1.cpu().numpy()
|
| 30 |
+
|
| 31 |
+
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
| 32 |
+
if np.abs(dot) > DOT_THRESHOLD:
|
| 33 |
+
v2 = (1 - t) * v0 + t * v1
|
| 34 |
+
else:
|
| 35 |
+
theta_0 = np.arccos(dot)
|
| 36 |
+
sin_theta_0 = np.sin(theta_0)
|
| 37 |
+
theta_t = theta_0 * t
|
| 38 |
+
sin_theta_t = np.sin(theta_t)
|
| 39 |
+
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
| 40 |
+
s1 = sin_theta_t / sin_theta_0
|
| 41 |
+
v2 = s0 * v0 + s1 * v1
|
| 42 |
+
|
| 43 |
+
if inputs_are_torch:
|
| 44 |
+
v2 = torch.from_numpy(v2).to(input_device)
|
| 45 |
+
|
| 46 |
+
return v2
|
| 47 |
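A quick sanity sketch for the `slerp` helper above, assuming it is in scope: at `t=0` it returns `v0`, at `t=1` it returns `v1`, and intermediate `t` values move along the great-circle arc between them (falling back to linear interpolation for nearly parallel inputs):

```python
import torch

v0 = torch.randn(4)
v1 = torch.randn(4)

start = slerp(0.0, v0, v1)  # equals v0
end = slerp(1.0, v0, v1)    # equals v1
mid = slerp(0.5, v0, v1)    # lies on the arc between v0 and v1

print(torch.allclose(start, v0), torch.allclose(end, v1))  # True True
```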
+
|
| 48 |
+
|
| 49 |
+
class StableDiffusionWalkPipeline(DiffusionPipeline, StableDiffusionMixin):
|
| 50 |
+
r"""
|
| 51 |
+
Pipeline for text-to-image generation using Stable Diffusion.
|
| 52 |
+
|
| 53 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 54 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 55 |
+
|
| 56 |
+
Args:
|
| 57 |
+
vae ([`AutoencoderKL`]):
|
| 58 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 59 |
+
text_encoder ([`CLIPTextModel`]):
|
| 60 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 61 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 62 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 63 |
+
tokenizer (`CLIPTokenizer`):
|
| 64 |
+
Tokenizer of class
|
| 65 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 66 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 67 |
+
scheduler ([`SchedulerMixin`]):
|
| 68 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 69 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 70 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 71 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 72 |
+
Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
|
| 73 |
+
feature_extractor ([`CLIPImageProcessor`]):
|
| 74 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 75 |
+
"""
|
| 76 |
+
|
| 77 |
+
def __init__(
|
| 78 |
+
self,
|
| 79 |
+
vae: AutoencoderKL,
|
| 80 |
+
text_encoder: CLIPTextModel,
|
| 81 |
+
tokenizer: CLIPTokenizer,
|
| 82 |
+
unet: UNet2DConditionModel,
|
| 83 |
+
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
|
| 84 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 85 |
+
feature_extractor: CLIPImageProcessor,
|
| 86 |
+
):
|
| 87 |
+
super().__init__()
|
| 88 |
+
|
| 89 |
+
if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
|
| 90 |
+
deprecation_message = (
|
| 91 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 92 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 93 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 94 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 95 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 96 |
+
" file"
|
| 97 |
+
)
|
| 98 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 99 |
+
new_config = dict(scheduler.config)
|
| 100 |
+
new_config["steps_offset"] = 1
|
| 101 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 102 |
+
|
| 103 |
+
if safety_checker is None:
|
| 104 |
+
logger.warning(
|
| 105 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 106 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 107 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 108 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 109 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 110 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
self.register_modules(
|
| 114 |
+
vae=vae,
|
| 115 |
+
text_encoder=text_encoder,
|
| 116 |
+
tokenizer=tokenizer,
|
| 117 |
+
unet=unet,
|
| 118 |
+
scheduler=scheduler,
|
| 119 |
+
safety_checker=safety_checker,
|
| 120 |
+
feature_extractor=feature_extractor,
|
| 121 |
+
)
|
| 122 |
+
|
| 123 |
+
@torch.no_grad()
|
| 124 |
+
def __call__(
|
| 125 |
+
self,
|
| 126 |
+
prompt: Optional[Union[str, List[str]]] = None,
|
| 127 |
+
height: int = 512,
|
| 128 |
+
width: int = 512,
|
| 129 |
+
num_inference_steps: int = 50,
|
| 130 |
+
guidance_scale: float = 7.5,
|
| 131 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 132 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 133 |
+
eta: float = 0.0,
|
| 134 |
+
generator: Optional[torch.Generator] = None,
|
| 135 |
+
latents: Optional[torch.Tensor] = None,
|
| 136 |
+
output_type: Optional[str] = "pil",
|
| 137 |
+
return_dict: bool = True,
|
| 138 |
+
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
|
| 139 |
+
callback_steps: int = 1,
|
| 140 |
+
text_embeddings: Optional[torch.Tensor] = None,
|
| 141 |
+
**kwargs,
|
| 142 |
+
):
|
| 143 |
+
r"""
|
| 144 |
+
Function invoked when calling the pipeline for generation.
|
| 145 |
+
|
| 146 |
+
Args:
|
| 147 |
+
prompt (`str` or `List[str]`, *optional*, defaults to `None`):
|
| 148 |
+
The prompt or prompts to guide the image generation. If not provided, `text_embeddings` is required.
|
| 149 |
+
height (`int`, *optional*, defaults to 512):
|
| 150 |
+
The height in pixels of the generated image.
|
| 151 |
+
width (`int`, *optional*, defaults to 512):
|
| 152 |
+
The width in pixels of the generated image.
|
| 153 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 154 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 155 |
+
expense of slower inference.
|
| 156 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 157 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 158 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 159 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 160 |
+
1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
|
| 161 |
+
usually at the expense of lower image quality.
|
| 162 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 163 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 164 |
+
if `guidance_scale` is less than `1`).
|
| 165 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 166 |
+
The number of images to generate per prompt.
|
| 167 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 168 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 169 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 170 |
+
generator (`torch.Generator`, *optional*):
|
| 171 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 172 |
+
deterministic.
|
| 173 |
+
latents (`torch.Tensor`, *optional*):
|
| 174 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 175 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 176 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 177 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 178 |
+
The output format of the generate image. Choose between
|
| 179 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 180 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 181 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 182 |
+
plain tuple.
|
| 183 |
+
callback (`Callable`, *optional*):
|
| 184 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 185 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
|
| 186 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 187 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 188 |
+
called at every step.
|
| 189 |
+
text_embeddings (`torch.Tensor`, *optional*, defaults to `None`):
|
| 190 |
+
Pre-generated text embeddings to be used as inputs for image generation. Can be used in place of
|
| 191 |
+
`prompt` to avoid re-computing the embeddings. If not provided, the embeddings will be generated from
|
| 192 |
+
the supplied `prompt`.
|
| 193 |
+
|
| 194 |
+
Returns:
|
| 195 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 196 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 197 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 198 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 199 |
+
(nsfw) content, according to the `safety_checker`.
|
| 200 |
+
"""
|
| 201 |
+
|
| 202 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 203 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 204 |
+
|
| 205 |
+
if (callback_steps is None) or (
|
| 206 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 207 |
+
):
|
| 208 |
+
raise ValueError(
|
| 209 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 210 |
+
f" {type(callback_steps)}."
|
| 211 |
+
)
|
| 212 |
+
|
| 213 |
+
if text_embeddings is None:
|
| 214 |
+
if isinstance(prompt, str):
|
| 215 |
+
batch_size = 1
|
| 216 |
+
elif isinstance(prompt, list):
|
| 217 |
+
batch_size = len(prompt)
|
| 218 |
+
else:
|
| 219 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 220 |
+
|
| 221 |
+
# get prompt text embeddings
|
| 222 |
+
text_inputs = self.tokenizer(
|
| 223 |
+
prompt,
|
| 224 |
+
padding="max_length",
|
| 225 |
+
max_length=self.tokenizer.model_max_length,
|
| 226 |
+
return_tensors="pt",
|
| 227 |
+
)
|
| 228 |
+
text_input_ids = text_inputs.input_ids
|
| 229 |
+
|
| 230 |
+
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
|
| 231 |
+
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
|
| 232 |
+
print(
|
| 233 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 234 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 235 |
+
)
|
| 236 |
+
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
|
| 237 |
+
text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
|
| 238 |
+
else:
|
| 239 |
+
batch_size = text_embeddings.shape[0]
|
| 240 |
+
|
| 241 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 242 |
+
bs_embed, seq_len, _ = text_embeddings.shape
|
| 243 |
+
text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
|
| 244 |
+
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 245 |
+
|
| 246 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 247 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 248 |
+
# corresponds to doing no classifier free guidance.
|
| 249 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 250 |
+
# get unconditional embeddings for classifier free guidance
|
| 251 |
+
if do_classifier_free_guidance:
|
| 252 |
+
uncond_tokens: List[str]
|
| 253 |
+
if negative_prompt is None:
|
| 254 |
+
uncond_tokens = [""] * batch_size
|
| 255 |
+
elif type(prompt) is not type(negative_prompt):
|
| 256 |
+
raise TypeError(
|
| 257 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 258 |
+
f" {type(prompt)}."
|
| 259 |
+
)
|
| 260 |
+
elif isinstance(negative_prompt, str):
|
| 261 |
+
uncond_tokens = [negative_prompt]
|
| 262 |
+
elif batch_size != len(negative_prompt):
|
| 263 |
+
raise ValueError(
|
| 264 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 265 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 266 |
+
" the batch size of `prompt`."
|
| 267 |
+
)
|
| 268 |
+
else:
|
| 269 |
+
uncond_tokens = negative_prompt
|
| 270 |
+
|
| 271 |
+
max_length = self.tokenizer.model_max_length
|
| 272 |
+
uncond_input = self.tokenizer(
|
| 273 |
+
uncond_tokens,
|
| 274 |
+
padding="max_length",
|
| 275 |
+
max_length=max_length,
|
| 276 |
+
truncation=True,
|
| 277 |
+
return_tensors="pt",
|
| 278 |
+
)
|
| 279 |
+
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
| 280 |
+
|
| 281 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 282 |
+
seq_len = uncond_embeddings.shape[1]
|
| 283 |
+
uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
|
| 284 |
+
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 285 |
+
|
| 286 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 287 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 288 |
+
# to avoid doing two forward passes
|
| 289 |
+
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
|
| 290 |
+
|
| 291 |
+
# get the initial random noise unless the user supplied it
|
| 292 |
+
|
| 293 |
+
# Unlike in other pipelines, latents need to be generated in the target device
|
| 294 |
+
# for 1-to-1 results reproducibility with the CompVis implementation.
|
| 295 |
+
# However this currently doesn't work in `mps`.
|
| 296 |
+
latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
|
| 297 |
+
latents_dtype = text_embeddings.dtype
|
| 298 |
+
if latents is None:
|
| 299 |
+
if self.device.type == "mps":
|
| 300 |
+
# randn does not work reproducibly on mps
|
| 301 |
+
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
|
| 302 |
+
self.device
|
| 303 |
+
)
|
| 304 |
+
else:
|
| 305 |
+
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
|
| 306 |
+
else:
|
| 307 |
+
if latents.shape != latents_shape:
|
| 308 |
+
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
|
| 309 |
+
latents = latents.to(self.device)
|
| 310 |
+
|
| 311 |
+
# set timesteps
|
| 312 |
+
self.scheduler.set_timesteps(num_inference_steps)
|
| 313 |
+
|
| 314 |
+
# Some schedulers like PNDM have timesteps as arrays
|
| 315 |
+
# It's more optimized to move all timesteps to correct device beforehand
|
| 316 |
+
timesteps_tensor = self.scheduler.timesteps.to(self.device)
|
| 317 |
+
|
| 318 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 319 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 320 |
+
|
| 321 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 322 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 323 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 324 |
+
# and should be between [0, 1]
|
| 325 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 326 |
+
extra_step_kwargs = {}
|
| 327 |
+
if accepts_eta:
|
| 328 |
+
extra_step_kwargs["eta"] = eta
|
| 329 |
+
|
| 330 |
+
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
|
| 331 |
+
# expand the latents if we are doing classifier free guidance
|
| 332 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 333 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 334 |
+
|
| 335 |
+
# predict the noise residual
|
| 336 |
+
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
|
| 337 |
+
|
| 338 |
+
# perform guidance
|
| 339 |
+
if do_classifier_free_guidance:
|
| 340 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 341 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 342 |
+
|
| 343 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 344 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 345 |
+
|
| 346 |
+
# call the callback, if provided
|
| 347 |
+
if callback is not None and i % callback_steps == 0:
|
| 348 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 349 |
+
callback(step_idx, t, latents)
|
| 350 |
+
|
| 351 |
+
latents = 1 / 0.18215 * latents
|
| 352 |
+
image = self.vae.decode(latents).sample
|
| 353 |
+
|
| 354 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 355 |
+
|
| 356 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 357 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 358 |
+
|
| 359 |
+
if self.safety_checker is not None:
|
| 360 |
+
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
|
| 361 |
+
self.device
|
| 362 |
+
)
|
| 363 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 364 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
|
| 365 |
+
)
|
| 366 |
+
else:
|
| 367 |
+
has_nsfw_concept = None
|
| 368 |
+
|
| 369 |
+
if output_type == "pil":
|
| 370 |
+
image = self.numpy_to_pil(image)
|
| 371 |
+
|
| 372 |
+
if not return_dict:
|
| 373 |
+
return (image, has_nsfw_concept)
|
| 374 |
+
|
| 375 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
| 376 |
+
|
| 377 |
+
def embed_text(self, text):
|
| 378 |
+
"""takes in text and turns it into text embeddings"""
|
| 379 |
+
text_input = self.tokenizer(
|
| 380 |
+
text,
|
| 381 |
+
padding="max_length",
|
| 382 |
+
max_length=self.tokenizer.model_max_length,
|
| 383 |
+
truncation=True,
|
| 384 |
+
return_tensors="pt",
|
| 385 |
+
)
|
| 386 |
+
with torch.no_grad():
|
| 387 |
+
embed = self.text_encoder(text_input.input_ids.to(self.device))[0]
|
| 388 |
+
return embed
|
| 389 |
+
|
| 390 |
+
def get_noise(self, seed, dtype=torch.float32, height=512, width=512):
|
| 391 |
+
"""Takes in random seed and returns corresponding noise vector"""
|
| 392 |
+
return torch.randn(
|
| 393 |
+
(1, self.unet.config.in_channels, height // 8, width // 8),
|
| 394 |
+
generator=torch.Generator(device=self.device).manual_seed(seed),
|
| 395 |
+
device=self.device,
|
| 396 |
+
dtype=dtype,
|
| 397 |
+
)
|
| 398 |
+
|
| 399 |
+
def walk(
|
| 400 |
+
self,
|
| 401 |
+
prompts: List[str],
|
| 402 |
+
seeds: List[int],
|
| 403 |
+
num_interpolation_steps: Optional[int] = 6,
|
| 404 |
+
output_dir: Optional[str] = "./dreams",
|
| 405 |
+
name: Optional[str] = None,
|
| 406 |
+
batch_size: Optional[int] = 1,
|
| 407 |
+
height: Optional[int] = 512,
|
| 408 |
+
width: Optional[int] = 512,
|
| 409 |
+
guidance_scale: Optional[float] = 7.5,
|
| 410 |
+
num_inference_steps: Optional[int] = 50,
|
| 411 |
+
eta: Optional[float] = 0.0,
|
| 412 |
+
) -> List[str]:
|
| 413 |
+
"""
|
| 414 |
+
Walks through a series of prompts and seeds, interpolating between them and saving the results to disk.
|
| 415 |
+
|
| 416 |
+
Args:
|
| 417 |
+
prompts (`List[str]`):
|
| 418 |
+
List of prompts to generate images for.
|
| 419 |
+
seeds (`List[int]`):
|
| 420 |
+
List of seeds corresponding to provided prompts. Must be the same length as prompts.
|
| 421 |
+
num_interpolation_steps (`int`, *optional*, defaults to 6):
|
| 422 |
+
Number of interpolation steps to take between prompts.
|
| 423 |
+
output_dir (`str`, *optional*, defaults to `./dreams`):
|
| 424 |
+
Directory to save the generated images to.
|
| 425 |
+
name (`str`, *optional*, defaults to `None`):
|
| 426 |
+
Subdirectory of `output_dir` to save the generated images to. If `None`, the name will
|
| 427 |
+
be the current time.
|
| 428 |
+
batch_size (`int`, *optional*, defaults to 1):
|
| 429 |
+
Number of images to generate at once.
|
| 430 |
+
height (`int`, *optional*, defaults to 512):
|
| 431 |
+
Height of the generated images.
|
| 432 |
+
width (`int`, *optional*, defaults to 512):
|
| 433 |
+
Width of the generated images.
|
| 434 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 435 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 436 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 437 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 438 |
+
1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
|
| 439 |
+
usually at the expense of lower image quality.
|
| 440 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 441 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 442 |
+
expense of slower inference.
|
| 443 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 444 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 445 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 446 |
+
|
| 447 |
+
Returns:
|
| 448 |
+
`List[str]`: List of paths to the generated images.
|
| 449 |
+
"""
|
| 450 |
+
if not len(prompts) == len(seeds):
|
| 451 |
+
raise ValueError(
|
| 452 |
+
f"Number of prompts and seeds must be equalGot {len(prompts)} prompts and {len(seeds)} seeds"
|
| 453 |
+
)
|
| 454 |
+
|
| 455 |
+
name = name or time.strftime("%Y%m%d-%H%M%S")
|
| 456 |
+
save_path = Path(output_dir) / name
|
| 457 |
+
save_path.mkdir(exist_ok=True, parents=True)
|
| 458 |
+
|
| 459 |
+
frame_idx = 0
|
| 460 |
+
frame_filepaths = []
|
| 461 |
+
for prompt_a, prompt_b, seed_a, seed_b in zip(prompts, prompts[1:], seeds, seeds[1:]):
|
| 462 |
+
# Embed Text
|
| 463 |
+
embed_a = self.embed_text(prompt_a)
|
| 464 |
+
embed_b = self.embed_text(prompt_b)
|
| 465 |
+
|
| 466 |
+
# Get Noise
|
| 467 |
+
noise_dtype = embed_a.dtype
|
| 468 |
+
noise_a = self.get_noise(seed_a, noise_dtype, height, width)
|
| 469 |
+
noise_b = self.get_noise(seed_b, noise_dtype, height, width)
|
| 470 |
+
|
| 471 |
+
noise_batch, embeds_batch = None, None
|
| 472 |
+
T = np.linspace(0.0, 1.0, num_interpolation_steps)
|
| 473 |
+
for i, t in enumerate(T):
|
| 474 |
+
noise = slerp(float(t), noise_a, noise_b)
|
| 475 |
+
embed = torch.lerp(embed_a, embed_b, t)
|
| 476 |
+
|
| 477 |
+
noise_batch = noise if noise_batch is None else torch.cat([noise_batch, noise], dim=0)
|
| 478 |
+
embeds_batch = embed if embeds_batch is None else torch.cat([embeds_batch, embed], dim=0)
|
| 479 |
+
|
| 480 |
+
batch_is_ready = embeds_batch.shape[0] == batch_size or i + 1 == T.shape[0]
|
| 481 |
+
if batch_is_ready:
|
| 482 |
+
outputs = self(
|
| 483 |
+
latents=noise_batch,
|
| 484 |
+
text_embeddings=embeds_batch,
|
| 485 |
+
height=height,
|
| 486 |
+
width=width,
|
| 487 |
+
guidance_scale=guidance_scale,
|
| 488 |
+
eta=eta,
|
| 489 |
+
num_inference_steps=num_inference_steps,
|
| 490 |
+
)
|
| 491 |
+
noise_batch, embeds_batch = None, None
|
| 492 |
+
|
| 493 |
+
for image in outputs["images"]:
|
| 494 |
+
frame_filepath = str(save_path / f"frame_{frame_idx:06d}.png")
|
| 495 |
+
image.save(frame_filepath)
|
| 496 |
+
frame_filepaths.append(frame_filepath)
|
| 497 |
+
frame_idx += 1
|
| 498 |
+
return frame_filepaths
|
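A hedged usage sketch for the `walk` API above. The base checkpoint and the `custom_pipeline` name are illustrative assumptions; adjust them to the checkpoint you actually use:

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",      # assumed base checkpoint
    custom_pipeline="interpolate_stable_diffusion",     # community pipeline shown above
    torch_dtype=torch.float16,
).to("cuda")

frame_paths = pipe.walk(
    prompts=["a photo of a dog", "a photo of a cat"],
    seeds=[42, 1337],            # one seed per prompt
    num_interpolation_steps=8,   # frames generated between each prompt pair
    output_dir="./dreams",
    guidance_scale=7.5,
    num_inference_steps=50,
)
print(frame_paths[:3])
```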
v0.36.0/ip_adapter_face_id.py
ADDED
|
@@ -0,0 +1,1129 @@
|
| 1 |
+
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
import torch.nn.functional as F
|
| 21 |
+
from packaging import version
|
| 22 |
+
from safetensors import safe_open
|
| 23 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
|
| 24 |
+
|
| 25 |
+
from diffusers.configuration_utils import FrozenDict
|
| 26 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 27 |
+
from diffusers.loaders import (
|
| 28 |
+
FromSingleFileMixin,
|
| 29 |
+
IPAdapterMixin,
|
| 30 |
+
StableDiffusionLoraLoaderMixin,
|
| 31 |
+
TextualInversionLoaderMixin,
|
| 32 |
+
)
|
| 33 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 34 |
+
from diffusers.models.attention_processor import (
|
| 35 |
+
AttnProcessor,
|
| 36 |
+
AttnProcessor2_0,
|
| 37 |
+
IPAdapterAttnProcessor,
|
| 38 |
+
IPAdapterAttnProcessor2_0,
|
| 39 |
+
)
|
| 40 |
+
from diffusers.models.embeddings import MultiIPAdapterImageProjection
|
| 41 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 42 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 43 |
+
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
|
| 44 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 45 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 46 |
+
from diffusers.utils import (
|
| 47 |
+
USE_PEFT_BACKEND,
|
| 48 |
+
_get_model_file,
|
| 49 |
+
deprecate,
|
| 50 |
+
logging,
|
| 51 |
+
scale_lora_layers,
|
| 52 |
+
unscale_lora_layers,
|
| 53 |
+
)
|
| 54 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class IPAdapterFullImageProjection(nn.Module):
|
| 61 |
+
def __init__(self, image_embed_dim=1024, cross_attention_dim=1024, mult=1, num_tokens=1):
|
| 62 |
+
super().__init__()
|
| 63 |
+
from diffusers.models.attention import FeedForward
|
| 64 |
+
|
| 65 |
+
self.num_tokens = num_tokens
|
| 66 |
+
self.cross_attention_dim = cross_attention_dim
|
| 67 |
+
self.ff = FeedForward(image_embed_dim, cross_attention_dim * num_tokens, mult=mult, activation_fn="gelu")
|
| 68 |
+
self.norm = nn.LayerNorm(cross_attention_dim)
|
| 69 |
+
|
| 70 |
+
def forward(self, image_embeds: torch.Tensor):
|
| 71 |
+
x = self.ff(image_embeds)
|
| 72 |
+
x = x.reshape(-1, self.num_tokens, self.cross_attention_dim)
|
| 73 |
+
return self.norm(x)
|
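A small shape sketch for the projection module above, assuming the class is in scope: a pooled image or face-ID embedding is expanded into `num_tokens` cross-attention tokens of width `cross_attention_dim`. The 512-dimensional input here is an assumption chosen to match common face-ID embeddings:

```python
import torch

proj = IPAdapterFullImageProjection(image_embed_dim=512, cross_attention_dim=768, num_tokens=4)
face_embeds = torch.randn(2, 512)  # e.g. two pooled face-ID embeddings
tokens = proj(face_embeds)
print(tokens.shape)                # torch.Size([2, 4, 768])
```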
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
|
| 77 |
+
"""
|
| 78 |
+
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
|
| 79 |
+
Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). See Section 3.4
|
| 80 |
+
"""
|
| 81 |
+
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
|
| 82 |
+
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
|
| 83 |
+
# rescale the results from guidance (fixes overexposure)
|
| 84 |
+
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
|
| 85 |
+
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
|
| 86 |
+
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
|
| 87 |
+
return noise_cfg
|
| 88 |
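A numerical sketch of the guidance rescaling above, assuming the function is in scope: at `guidance_rescale=1.0` the rescaled prediction takes on the per-sample standard deviation of the text-conditioned branch, which is what counteracts the overexposure described in the docstring:

```python
import torch

noise_pred_text = torch.randn(1, 4, 8, 8)
noise_pred_uncond = torch.randn(1, 4, 8, 8)
noise_cfg = noise_pred_uncond + 7.5 * (noise_pred_text - noise_pred_uncond)

rescaled = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0)
# The std of the rescaled output matches the text-conditioned prediction (up to float error).
print(rescaled.std().item(), noise_pred_text.std().item())
```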
+
|
| 89 |
+
|
| 90 |
+
def retrieve_timesteps(
|
| 91 |
+
scheduler,
|
| 92 |
+
num_inference_steps: Optional[int] = None,
|
| 93 |
+
device: Optional[Union[str, torch.device]] = None,
|
| 94 |
+
timesteps: Optional[List[int]] = None,
|
| 95 |
+
**kwargs,
|
| 96 |
+
):
|
| 97 |
+
"""
|
| 98 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
| 99 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
| 100 |
+
|
| 101 |
+
Args:
|
| 102 |
+
scheduler (`SchedulerMixin`):
|
| 103 |
+
The scheduler to get timesteps from.
|
| 104 |
+
num_inference_steps (`int`):
|
| 105 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used,
|
| 106 |
+
`timesteps` must be `None`.
|
| 107 |
+
device (`str` or `torch.device`, *optional*):
|
| 108 |
+
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
|
| 109 |
+
timesteps (`List[int]`, *optional*):
|
| 110 |
+
Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
|
| 111 |
+
timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
|
| 112 |
+
must be `None`.
|
| 113 |
+
|
| 114 |
+
Returns:
|
| 115 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
| 116 |
+
second element is the number of inference steps.
|
| 117 |
+
"""
|
| 118 |
+
if timesteps is not None:
|
| 119 |
+
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 120 |
+
if not accepts_timesteps:
|
| 121 |
+
raise ValueError(
|
| 122 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 123 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
| 124 |
+
)
|
| 125 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
| 126 |
+
timesteps = scheduler.timesteps
|
| 127 |
+
num_inference_steps = len(timesteps)
|
| 128 |
+
else:
|
| 129 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
| 130 |
+
timesteps = scheduler.timesteps
|
| 131 |
+
return timesteps, num_inference_steps
|
| 132 |
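A usage sketch for `retrieve_timesteps`, assuming the helper above is in scope; `DDPMScheduler` is an illustrative choice of scheduler:

```python
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)

# Let the scheduler build its own spacing for 10 inference steps.
timesteps, num_steps = retrieve_timesteps(scheduler, num_inference_steps=10, device="cpu")
print(num_steps, timesteps[:3])

# Passing `timesteps=[...]` instead routes through the custom-timesteps branch,
# provided the scheduler's `set_timesteps` accepts a `timesteps` argument.
```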
+
|
| 133 |
+
|
| 134 |
+
class IPAdapterFaceIDStableDiffusionPipeline(
|
| 135 |
+
DiffusionPipeline,
|
| 136 |
+
StableDiffusionMixin,
|
| 137 |
+
TextualInversionLoaderMixin,
|
| 138 |
+
StableDiffusionLoraLoaderMixin,
|
| 139 |
+
IPAdapterMixin,
|
| 140 |
+
FromSingleFileMixin,
|
| 141 |
+
):
|
| 142 |
+
r"""
|
| 143 |
+
Pipeline for text-to-image generation using Stable Diffusion.
|
| 144 |
+
|
| 145 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 146 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 147 |
+
|
| 148 |
+
The pipeline also inherits the following loading methods:
|
| 149 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 150 |
+
- [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 151 |
+
- [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 152 |
+
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
|
| 153 |
+
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
|
| 154 |
+
|
| 155 |
+
Args:
|
| 156 |
+
vae ([`AutoencoderKL`]):
|
| 157 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 158 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 159 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 160 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 161 |
+
A `CLIPTokenizer` to tokenize text.
|
| 162 |
+
unet ([`UNet2DConditionModel`]):
|
| 163 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 164 |
+
scheduler ([`SchedulerMixin`]):
|
| 165 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 166 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 167 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 168 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 169 |
+
Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
|
| 170 |
+
about a model's potential harms.
|
| 171 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 172 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 173 |
+
"""
|
| 174 |
+
|
| 175 |
+
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
|
| 176 |
+
_optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
|
| 177 |
+
_exclude_from_cpu_offload = ["safety_checker"]
|
| 178 |
+
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
|
| 179 |
+
|
| 180 |
+
def __init__(
|
| 181 |
+
self,
|
| 182 |
+
vae: AutoencoderKL,
|
| 183 |
+
text_encoder: CLIPTextModel,
|
| 184 |
+
tokenizer: CLIPTokenizer,
|
| 185 |
+
unet: UNet2DConditionModel,
|
| 186 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 187 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 188 |
+
feature_extractor: CLIPImageProcessor,
|
| 189 |
+
image_encoder: CLIPVisionModelWithProjection = None,
|
| 190 |
+
requires_safety_checker: bool = True,
|
| 191 |
+
):
|
| 192 |
+
super().__init__()
|
| 193 |
+
|
| 194 |
+
if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
|
| 195 |
+
deprecation_message = (
|
| 196 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 197 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 198 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 199 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 200 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 201 |
+
" file"
|
| 202 |
+
)
|
| 203 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 204 |
+
new_config = dict(scheduler.config)
|
| 205 |
+
new_config["steps_offset"] = 1
|
| 206 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 207 |
+
|
| 208 |
+
if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True:
|
| 209 |
+
deprecation_message = (
|
| 210 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 211 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 212 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 213 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 214 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 215 |
+
)
|
| 216 |
+
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
| 217 |
+
new_config = dict(scheduler.config)
|
| 218 |
+
new_config["clip_sample"] = False
|
| 219 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 220 |
+
|
| 221 |
+
if safety_checker is None and requires_safety_checker:
|
| 222 |
+
logger.warning(
|
| 223 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 224 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 225 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 226 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 227 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 228 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 229 |
+
)
|
| 230 |
+
|
| 231 |
+
if safety_checker is not None and feature_extractor is None:
|
| 232 |
+
raise ValueError(
|
| 233 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 234 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
is_unet_version_less_0_9_0 = (
|
| 238 |
+
unet is not None
|
| 239 |
+
and hasattr(unet.config, "_diffusers_version")
|
| 240 |
+
and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
|
| 241 |
+
)
|
| 242 |
+
is_unet_sample_size_less_64 = (
|
| 243 |
+
unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 244 |
+
)
|
| 245 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 246 |
+
deprecation_message = (
|
| 247 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 248 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 249 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 250 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 251 |
+
" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 252 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 253 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 254 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 255 |
+
" the `unet/config.json` file"
|
| 256 |
+
)
|
| 257 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 258 |
+
new_config = dict(unet.config)
|
| 259 |
+
new_config["sample_size"] = 64
|
| 260 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 261 |
+
|
| 262 |
+
self.register_modules(
|
| 263 |
+
vae=vae,
|
| 264 |
+
text_encoder=text_encoder,
|
| 265 |
+
tokenizer=tokenizer,
|
| 266 |
+
unet=unet,
|
| 267 |
+
scheduler=scheduler,
|
| 268 |
+
safety_checker=safety_checker,
|
| 269 |
+
feature_extractor=feature_extractor,
|
| 270 |
+
image_encoder=image_encoder,
|
| 271 |
+
)
|
| 272 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 273 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 274 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 275 |
+
|
| 276 |
+
def load_ip_adapter_face_id(self, pretrained_model_name_or_path_or_dict, weight_name, **kwargs):
|
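        # Illustrative call (the repository and weight file name below are assumptions based on the
        # public IP-Adapter-FaceID release, not something this file pins down):
        #     pipe.load_ip_adapter_face_id("h94/IP-Adapter-FaceID", "ip-adapter-faceid_sd15.bin")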
| 277 |
+
cache_dir = kwargs.pop("cache_dir", None)
|
| 278 |
+
force_download = kwargs.pop("force_download", False)
|
| 279 |
+
proxies = kwargs.pop("proxies", None)
|
| 280 |
+
local_files_only = kwargs.pop("local_files_only", None)
|
| 281 |
+
token = kwargs.pop("token", None)
|
| 282 |
+
revision = kwargs.pop("revision", None)
|
| 283 |
+
subfolder = kwargs.pop("subfolder", None)
|
| 284 |
+
|
| 285 |
+
user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"}
|
| 286 |
+
model_file = _get_model_file(
|
| 287 |
+
pretrained_model_name_or_path_or_dict,
|
| 288 |
+
weights_name=weight_name,
|
| 289 |
+
cache_dir=cache_dir,
|
| 290 |
+
force_download=force_download,
|
| 291 |
+
proxies=proxies,
|
| 292 |
+
local_files_only=local_files_only,
|
| 293 |
+
token=token,
|
| 294 |
+
revision=revision,
|
| 295 |
+
subfolder=subfolder,
|
| 296 |
+
user_agent=user_agent,
|
| 297 |
+
)
|
| 298 |
+
if weight_name.endswith(".safetensors"):
|
| 299 |
+
state_dict = {"image_proj": {}, "ip_adapter": {}}
|
| 300 |
+
with safe_open(model_file, framework="pt", device="cpu") as f:
|
| 301 |
+
for key in f.keys():
|
| 302 |
+
if key.startswith("image_proj."):
|
| 303 |
+
state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
|
| 304 |
+
elif key.startswith("ip_adapter."):
|
| 305 |
+
state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
|
| 306 |
+
else:
|
| 307 |
+
state_dict = torch.load(model_file, map_location="cpu")
|
| 308 |
+
self._load_ip_adapter_weights(state_dict)
|
| 309 |
+
|
| 310 |
+
def convert_ip_adapter_image_proj_to_diffusers(self, state_dict):
|
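        # The FaceID image-projection head is a small MLP whose checkpoint keys are `proj.0.*`,
        # `proj.2.*` and `norm.*`; the renaming below maps them onto the `ff.net.*` layout expected
        # by `IPAdapterFullImageProjection` before loading the weights.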
| 311 |
+
updated_state_dict = {}
|
| 312 |
+
clip_embeddings_dim_in = state_dict["proj.0.weight"].shape[1]
|
| 313 |
+
clip_embeddings_dim_out = state_dict["proj.0.weight"].shape[0]
|
| 314 |
+
multiplier = clip_embeddings_dim_out // clip_embeddings_dim_in
|
| 315 |
+
norm_layer = "norm.weight"
|
| 316 |
+
cross_attention_dim = state_dict[norm_layer].shape[0]
|
| 317 |
+
num_tokens = state_dict["proj.2.weight"].shape[0] // cross_attention_dim
|
| 318 |
+
|
| 319 |
+
image_projection = IPAdapterFullImageProjection(
|
| 320 |
+
cross_attention_dim=cross_attention_dim,
|
| 321 |
+
image_embed_dim=clip_embeddings_dim_in,
|
| 322 |
+
mult=multiplier,
|
| 323 |
+
num_tokens=num_tokens,
|
| 324 |
+
)
|
| 325 |
+
|
| 326 |
+
for key, value in state_dict.items():
|
| 327 |
+
diffusers_name = key.replace("proj.0", "ff.net.0.proj")
|
| 328 |
+
diffusers_name = diffusers_name.replace("proj.2", "ff.net.2")
|
| 329 |
+
updated_state_dict[diffusers_name] = value
|
| 330 |
+
|
| 331 |
+
image_projection.load_state_dict(updated_state_dict)
|
| 332 |
+
return image_projection
|
| 333 |
+
|
| 334 |
+
def _load_ip_adapter_weights(self, state_dict):
|
| 335 |
+
num_image_text_embeds = 4
|
| 336 |
+
|
| 337 |
+
self.unet.encoder_hid_proj = None
|
| 338 |
+
|
| 339 |
+
# set ip-adapter cross-attention processors & load state_dict
|
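        # FaceID checkpoints bundle two kinds of weights under `state_dict["ip_adapter"]`: per-layer
        # LoRA matrices (`*.to_*_lora.{down,up}.weight`) and the decoupled cross-attention projections
        # (`*.to_k_ip.weight` / `*.to_v_ip.weight`). The loop below collects the former into
        # `lora_dict` (loaded later via `load_lora_weights`) and the latter into the IP-Adapter
        # attention processors.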
| 340 |
+
attn_procs = {}
|
| 341 |
+
lora_dict = {}
|
| 342 |
+
key_id = 0
|
| 343 |
+
for name in self.unet.attn_processors.keys():
|
| 344 |
+
cross_attention_dim = None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim
|
| 345 |
+
if name.startswith("mid_block"):
|
| 346 |
+
hidden_size = self.unet.config.block_out_channels[-1]
|
| 347 |
+
elif name.startswith("up_blocks"):
|
| 348 |
+
block_id = int(name[len("up_blocks.")])
|
| 349 |
+
hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id]
|
| 350 |
+
elif name.startswith("down_blocks"):
|
| 351 |
+
block_id = int(name[len("down_blocks.")])
|
| 352 |
+
hidden_size = self.unet.config.block_out_channels[block_id]
|
| 353 |
+
if cross_attention_dim is None or "motion_modules" in name:
|
| 354 |
+
attn_processor_class = (
|
| 355 |
+
AttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else AttnProcessor
|
| 356 |
+
)
|
| 357 |
+
attn_procs[name] = attn_processor_class()
|
| 358 |
+
|
| 359 |
+
lora_dict.update(
|
| 360 |
+
{f"unet.{name}.to_k_lora.down.weight": state_dict["ip_adapter"][f"{key_id}.to_k_lora.down.weight"]}
|
| 361 |
+
)
|
| 362 |
+
lora_dict.update(
|
| 363 |
+
{f"unet.{name}.to_q_lora.down.weight": state_dict["ip_adapter"][f"{key_id}.to_q_lora.down.weight"]}
|
| 364 |
+
)
|
| 365 |
+
lora_dict.update(
|
| 366 |
+
{f"unet.{name}.to_v_lora.down.weight": state_dict["ip_adapter"][f"{key_id}.to_v_lora.down.weight"]}
|
| 367 |
+
)
|
| 368 |
+
lora_dict.update(
|
| 369 |
+
{
|
| 370 |
+
f"unet.{name}.to_out_lora.down.weight": state_dict["ip_adapter"][
|
| 371 |
+
f"{key_id}.to_out_lora.down.weight"
|
| 372 |
+
]
|
| 373 |
+
}
|
| 374 |
+
)
|
| 375 |
+
lora_dict.update(
|
| 376 |
+
{f"unet.{name}.to_k_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_k_lora.up.weight"]}
|
| 377 |
+
)
|
| 378 |
+
lora_dict.update(
|
| 379 |
+
{f"unet.{name}.to_q_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_q_lora.up.weight"]}
|
| 380 |
+
)
|
| 381 |
+
lora_dict.update(
|
| 382 |
+
{f"unet.{name}.to_v_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_v_lora.up.weight"]}
|
| 383 |
+
)
|
| 384 |
+
lora_dict.update(
|
| 385 |
+
{f"unet.{name}.to_out_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_out_lora.up.weight"]}
|
| 386 |
+
)
|
| 387 |
+
key_id += 1
|
| 388 |
+
else:
|
| 389 |
+
attn_processor_class = (
|
| 390 |
+
IPAdapterAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else IPAdapterAttnProcessor
|
| 391 |
+
)
|
| 392 |
+
attn_procs[name] = attn_processor_class(
|
| 393 |
+
hidden_size=hidden_size,
|
| 394 |
+
cross_attention_dim=cross_attention_dim,
|
| 395 |
+
scale=1.0,
|
| 396 |
+
num_tokens=num_image_text_embeds,
|
| 397 |
+
).to(dtype=self.dtype, device=self.device)
|
| 398 |
+
|
| 399 |
+
lora_dict.update(
|
| 400 |
+
{f"unet.{name}.to_k_lora.down.weight": state_dict["ip_adapter"][f"{key_id}.to_k_lora.down.weight"]}
|
| 401 |
+
)
|
| 402 |
+
lora_dict.update(
|
| 403 |
+
{f"unet.{name}.to_q_lora.down.weight": state_dict["ip_adapter"][f"{key_id}.to_q_lora.down.weight"]}
|
| 404 |
+
)
|
| 405 |
+
lora_dict.update(
|
| 406 |
+
{f"unet.{name}.to_v_lora.down.weight": state_dict["ip_adapter"][f"{key_id}.to_v_lora.down.weight"]}
|
| 407 |
+
)
|
| 408 |
+
lora_dict.update(
|
| 409 |
+
{
|
| 410 |
+
f"unet.{name}.to_out_lora.down.weight": state_dict["ip_adapter"][
|
| 411 |
+
f"{key_id}.to_out_lora.down.weight"
|
| 412 |
+
]
|
| 413 |
+
}
|
| 414 |
+
)
|
| 415 |
+
lora_dict.update(
|
| 416 |
+
{f"unet.{name}.to_k_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_k_lora.up.weight"]}
|
| 417 |
+
)
|
| 418 |
+
lora_dict.update(
|
| 419 |
+
{f"unet.{name}.to_q_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_q_lora.up.weight"]}
|
| 420 |
+
)
|
| 421 |
+
lora_dict.update(
|
| 422 |
+
{f"unet.{name}.to_v_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_v_lora.up.weight"]}
|
| 423 |
+
)
|
| 424 |
+
lora_dict.update(
|
| 425 |
+
{f"unet.{name}.to_out_lora.up.weight": state_dict["ip_adapter"][f"{key_id}.to_out_lora.up.weight"]}
|
| 426 |
+
)
|
| 427 |
+
|
| 428 |
+
value_dict = {}
|
| 429 |
+
value_dict.update({"to_k_ip.0.weight": state_dict["ip_adapter"][f"{key_id}.to_k_ip.weight"]})
|
| 430 |
+
value_dict.update({"to_v_ip.0.weight": state_dict["ip_adapter"][f"{key_id}.to_v_ip.weight"]})
|
| 431 |
+
attn_procs[name].load_state_dict(value_dict)
|
| 432 |
+
key_id += 1
|
| 433 |
+
|
| 434 |
+
self.unet.set_attn_processor(attn_procs)
|
| 435 |
+
|
| 436 |
+
self.load_lora_weights(lora_dict, adapter_name="faceid")
|
| 437 |
+
self.set_adapters(["faceid"], adapter_weights=[1.0])
|
| 438 |
+
|
| 439 |
+
# convert IP-Adapter Image Projection layers to diffusers
|
| 440 |
+
image_projection = self.convert_ip_adapter_image_proj_to_diffusers(state_dict["image_proj"])
|
| 441 |
+
image_projection_layers = [image_projection.to(device=self.device, dtype=self.dtype)]
|
| 442 |
+
|
| 443 |
+
self.unet.encoder_hid_proj = MultiIPAdapterImageProjection(image_projection_layers)
|
| 444 |
+
self.unet.config.encoder_hid_dim_type = "ip_image_proj"
|
| 445 |
+
|
| 446 |
+
def set_ip_adapter_scale(self, scale):
|
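        # Usage note (illustrative, not from the original file): after `load_ip_adapter_face_id`,
        # e.g. `pipe.set_ip_adapter_scale(0.7)` weakens the identity conditioning relative to the
        # default scale of 1.0 set when the adapter weights are loaded.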
| 447 |
+
unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
|
| 448 |
+
for attn_processor in unet.attn_processors.values():
|
| 449 |
+
if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
|
| 450 |
+
attn_processor.scale = [scale]
|
| 451 |
+
|
| 452 |
+
def _encode_prompt(
|
| 453 |
+
self,
|
| 454 |
+
prompt,
|
| 455 |
+
device,
|
| 456 |
+
num_images_per_prompt,
|
| 457 |
+
do_classifier_free_guidance,
|
| 458 |
+
negative_prompt=None,
|
| 459 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 460 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 461 |
+
lora_scale: Optional[float] = None,
|
| 462 |
+
**kwargs,
|
| 463 |
+
):
|
| 464 |
+
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
|
| 465 |
+
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
|
| 466 |
+
|
| 467 |
+
prompt_embeds_tuple = self.encode_prompt(
|
| 468 |
+
prompt=prompt,
|
| 469 |
+
device=device,
|
| 470 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 471 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 472 |
+
negative_prompt=negative_prompt,
|
| 473 |
+
prompt_embeds=prompt_embeds,
|
| 474 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 475 |
+
lora_scale=lora_scale,
|
| 476 |
+
**kwargs,
|
| 477 |
+
)
|
| 478 |
+
|
| 479 |
+
# concatenate for backwards compatibility
|
| 480 |
+
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
|
| 481 |
+
|
| 482 |
+
return prompt_embeds
|
| 483 |
+
|
| 484 |
+
def encode_prompt(
|
| 485 |
+
self,
|
| 486 |
+
prompt,
|
| 487 |
+
device,
|
| 488 |
+
num_images_per_prompt,
|
| 489 |
+
do_classifier_free_guidance,
|
| 490 |
+
negative_prompt=None,
|
| 491 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 492 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 493 |
+
lora_scale: Optional[float] = None,
|
| 494 |
+
clip_skip: Optional[int] = None,
|
| 495 |
+
):
|
| 496 |
+
r"""
|
| 497 |
+
Encodes the prompt into text encoder hidden states.
|
| 498 |
+
|
| 499 |
+
Args:
|
| 500 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 501 |
+
prompt to be encoded
|
| 502 |
+
device: (`torch.device`):
|
| 503 |
+
torch device
|
| 504 |
+
num_images_per_prompt (`int`):
|
| 505 |
+
number of images that should be generated per prompt
|
| 506 |
+
do_classifier_free_guidance (`bool`):
|
| 507 |
+
whether to use classifier free guidance or not
|
| 508 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 509 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 510 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 511 |
+
less than `1`).
|
| 512 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 513 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 514 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 515 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 516 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 517 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 518 |
+
argument.
|
| 519 |
+
lora_scale (`float`, *optional*):
|
| 520 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 521 |
+
clip_skip (`int`, *optional*):
|
| 522 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 523 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 524 |
+
"""
|
| 525 |
+
# set lora scale so that monkey patched LoRA
|
| 526 |
+
# function of text encoder can correctly access it
|
| 527 |
+
if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
|
| 528 |
+
self._lora_scale = lora_scale
|
| 529 |
+
|
| 530 |
+
# dynamically adjust the LoRA scale
|
| 531 |
+
if not USE_PEFT_BACKEND:
|
| 532 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 533 |
+
else:
|
| 534 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 535 |
+
|
| 536 |
+
if prompt is not None and isinstance(prompt, str):
|
| 537 |
+
batch_size = 1
|
| 538 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 539 |
+
batch_size = len(prompt)
|
| 540 |
+
else:
|
| 541 |
+
batch_size = prompt_embeds.shape[0]
|
| 542 |
+
|
| 543 |
+
if prompt_embeds is None:
|
| 544 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 545 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 546 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 547 |
+
|
| 548 |
+
text_inputs = self.tokenizer(
|
| 549 |
+
prompt,
|
| 550 |
+
padding="max_length",
|
| 551 |
+
max_length=self.tokenizer.model_max_length,
|
| 552 |
+
truncation=True,
|
| 553 |
+
return_tensors="pt",
|
| 554 |
+
)
|
| 555 |
+
text_input_ids = text_inputs.input_ids
|
| 556 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 557 |
+
|
| 558 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 559 |
+
text_input_ids, untruncated_ids
|
| 560 |
+
):
|
| 561 |
+
removed_text = self.tokenizer.batch_decode(
|
| 562 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 563 |
+
)
|
| 564 |
+
logger.warning(
|
| 565 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 566 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 567 |
+
)
|
| 568 |
+
|
| 569 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 570 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 571 |
+
else:
|
| 572 |
+
attention_mask = None
|
| 573 |
+
|
| 574 |
+
if clip_skip is None:
|
| 575 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 576 |
+
prompt_embeds = prompt_embeds[0]
|
| 577 |
+
else:
|
| 578 |
+
prompt_embeds = self.text_encoder(
|
| 579 |
+
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
|
| 580 |
+
)
|
| 581 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 582 |
+
# all the hidden states from the encoder layers. Then index into
|
| 583 |
+
# the tuple to access the hidden states from the desired layer.
|
| 584 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
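                # `hidden_states` holds the output of every encoder layer (plus the input embeddings),
                # so e.g. `clip_skip=1` selects `hidden_states[-2]`, the penultimate layer's output.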
| 585 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 586 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 587 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 588 |
+
# layer.
|
| 589 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 590 |
+
|
| 591 |
+
if self.text_encoder is not None:
|
| 592 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 593 |
+
elif self.unet is not None:
|
| 594 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 595 |
+
else:
|
| 596 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 597 |
+
|
| 598 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 599 |
+
|
| 600 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 601 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 602 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 603 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 604 |
+
|
| 605 |
+
# get unconditional embeddings for classifier free guidance
|
| 606 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 607 |
+
uncond_tokens: List[str]
|
| 608 |
+
if negative_prompt is None:
|
| 609 |
+
uncond_tokens = [""] * batch_size
|
| 610 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 611 |
+
raise TypeError(
|
| 612 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 613 |
+
f" {type(prompt)}."
|
| 614 |
+
)
|
| 615 |
+
elif isinstance(negative_prompt, str):
|
| 616 |
+
uncond_tokens = [negative_prompt]
|
| 617 |
+
elif batch_size != len(negative_prompt):
|
| 618 |
+
raise ValueError(
|
| 619 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 620 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 621 |
+
" the batch size of `prompt`."
|
| 622 |
+
)
|
| 623 |
+
else:
|
| 624 |
+
uncond_tokens = negative_prompt
|
| 625 |
+
|
| 626 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 627 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 628 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 629 |
+
|
| 630 |
+
max_length = prompt_embeds.shape[1]
|
| 631 |
+
uncond_input = self.tokenizer(
|
| 632 |
+
uncond_tokens,
|
| 633 |
+
padding="max_length",
|
| 634 |
+
max_length=max_length,
|
| 635 |
+
truncation=True,
|
| 636 |
+
return_tensors="pt",
|
| 637 |
+
)
|
| 638 |
+
|
| 639 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 640 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 641 |
+
else:
|
| 642 |
+
attention_mask = None
|
| 643 |
+
|
| 644 |
+
negative_prompt_embeds = self.text_encoder(
|
| 645 |
+
uncond_input.input_ids.to(device),
|
| 646 |
+
attention_mask=attention_mask,
|
| 647 |
+
)
|
| 648 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 649 |
+
|
| 650 |
+
if do_classifier_free_guidance:
|
| 651 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 652 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 653 |
+
|
| 654 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 655 |
+
|
| 656 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 657 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 658 |
+
|
| 659 |
+
if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 660 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 661 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 662 |
+
|
| 663 |
+
return prompt_embeds, negative_prompt_embeds
|
| 664 |
+
|
| 665 |
+
def run_safety_checker(self, image, device, dtype):
|
| 666 |
+
if self.safety_checker is None:
|
| 667 |
+
has_nsfw_concept = None
|
| 668 |
+
else:
|
| 669 |
+
if torch.is_tensor(image):
|
| 670 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 671 |
+
else:
|
| 672 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 673 |
+
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
| 674 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 675 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 676 |
+
)
|
| 677 |
+
return image, has_nsfw_concept
|
| 678 |
+
|
| 679 |
+
def decode_latents(self, latents):
|
| 680 |
+
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
|
| 681 |
+
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
|
| 682 |
+
|
| 683 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 684 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 685 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 686 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 687 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 688 |
+
return image
|
| 689 |
+
|
| 690 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 691 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 692 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 693 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 694 |
+
# and should be between [0, 1]
|
| 695 |
+
|
| 696 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 697 |
+
extra_step_kwargs = {}
|
| 698 |
+
if accepts_eta:
|
| 699 |
+
extra_step_kwargs["eta"] = eta
|
| 700 |
+
|
| 701 |
+
# check if the scheduler accepts generator
|
| 702 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 703 |
+
if accepts_generator:
|
| 704 |
+
extra_step_kwargs["generator"] = generator
|
| 705 |
+
return extra_step_kwargs
|
| 706 |
+
|
| 707 |
+
def check_inputs(
|
| 708 |
+
self,
|
| 709 |
+
prompt,
|
| 710 |
+
height,
|
| 711 |
+
width,
|
| 712 |
+
callback_steps,
|
| 713 |
+
negative_prompt=None,
|
| 714 |
+
prompt_embeds=None,
|
| 715 |
+
negative_prompt_embeds=None,
|
| 716 |
+
callback_on_step_end_tensor_inputs=None,
|
| 717 |
+
):
|
| 718 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 719 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 720 |
+
|
| 721 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 722 |
+
raise ValueError(
|
| 723 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 724 |
+
f" {type(callback_steps)}."
|
| 725 |
+
)
|
| 726 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 727 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 728 |
+
):
|
| 729 |
+
raise ValueError(
|
| 730 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 731 |
+
)
|
| 732 |
+
|
| 733 |
+
if prompt is not None and prompt_embeds is not None:
|
| 734 |
+
raise ValueError(
|
| 735 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 736 |
+
" only forward one of the two."
|
| 737 |
+
)
|
| 738 |
+
elif prompt is None and prompt_embeds is None:
|
| 739 |
+
raise ValueError(
|
| 740 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 741 |
+
)
|
| 742 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 743 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 744 |
+
|
| 745 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 746 |
+
raise ValueError(
|
| 747 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 748 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 749 |
+
)
|
| 750 |
+
|
| 751 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 752 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 753 |
+
raise ValueError(
|
| 754 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 755 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 756 |
+
f" {negative_prompt_embeds.shape}."
|
| 757 |
+
)
|
| 758 |
+
|
| 759 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 760 |
+
shape = (
|
| 761 |
+
batch_size,
|
| 762 |
+
num_channels_latents,
|
| 763 |
+
int(height) // self.vae_scale_factor,
|
| 764 |
+
int(width) // self.vae_scale_factor,
|
| 765 |
+
)
|
| 766 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 767 |
+
raise ValueError(
|
| 768 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 769 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 770 |
+
)
|
| 771 |
+
|
| 772 |
+
if latents is None:
|
| 773 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 774 |
+
else:
|
| 775 |
+
latents = latents.to(device)
|
| 776 |
+
|
| 777 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 778 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 779 |
+
return latents
|
| 780 |
+
|
| 781 |
+
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
|
| 782 |
+
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
|
| 783 |
+
"""
|
| 784 |
+
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
|
| 785 |
+
|
| 786 |
+
Args:
|
| 787 |
+
w (`torch.Tensor`):
|
| 788 |
+
guidance scale values for which to generate embedding vectors
|
| 789 |
+
embedding_dim (`int`, *optional*, defaults to 512):
|
| 790 |
+
dimension of the embeddings to generate
|
| 791 |
+
dtype:
|
| 792 |
+
data type of the generated embeddings
|
| 793 |
+
|
| 794 |
+
Returns:
|
| 795 |
+
`torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
|
| 796 |
+
"""
|
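        # Shape sketch (illustrative): w = torch.tensor([6.5]) with embedding_dim=256 yields an
        # embedding of shape (1, 256), i.e. a sinusoidal encoding of the guidance weight analogous
        # to a timestep embedding.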
| 797 |
+
assert len(w.shape) == 1
|
| 798 |
+
w = w * 1000.0
|
| 799 |
+
|
| 800 |
+
half_dim = embedding_dim // 2
|
| 801 |
+
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
|
| 802 |
+
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
|
| 803 |
+
emb = w.to(dtype)[:, None] * emb[None, :]
|
| 804 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
| 805 |
+
if embedding_dim % 2 == 1: # zero pad
|
| 806 |
+
emb = torch.nn.functional.pad(emb, (0, 1))
|
| 807 |
+
assert emb.shape == (w.shape[0], embedding_dim)
|
| 808 |
+
return emb
|
| 809 |
+
|
| 810 |
+
@property
|
| 811 |
+
def guidance_scale(self):
|
| 812 |
+
return self._guidance_scale
|
| 813 |
+
|
| 814 |
+
@property
|
| 815 |
+
def guidance_rescale(self):
|
| 816 |
+
return self._guidance_rescale
|
| 817 |
+
|
| 818 |
+
@property
|
| 819 |
+
def clip_skip(self):
|
| 820 |
+
return self._clip_skip
|
| 821 |
+
|
| 822 |
+
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
|
| 823 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 824 |
+
# corresponds to doing no classifier free guidance.
|
| 825 |
+
@property
|
| 826 |
+
def do_classifier_free_guidance(self):
|
| 827 |
+
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
|
| 828 |
+
|
| 829 |
+
@property
|
| 830 |
+
def cross_attention_kwargs(self):
|
| 831 |
+
return self._cross_attention_kwargs
|
| 832 |
+
|
| 833 |
+
@property
|
| 834 |
+
def num_timesteps(self):
|
| 835 |
+
return self._num_timesteps
|
| 836 |
+
|
| 837 |
+
@property
|
| 838 |
+
def interrupt(self):
|
| 839 |
+
return self._interrupt
|
| 840 |
+
|
| 841 |
+
@torch.no_grad()
|
| 842 |
+
def __call__(
|
| 843 |
+
self,
|
| 844 |
+
prompt: Union[str, List[str]] = None,
|
| 845 |
+
height: Optional[int] = None,
|
| 846 |
+
width: Optional[int] = None,
|
| 847 |
+
num_inference_steps: int = 50,
|
| 848 |
+
timesteps: List[int] = None,
|
| 849 |
+
guidance_scale: float = 7.5,
|
| 850 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 851 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 852 |
+
eta: float = 0.0,
|
| 853 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 854 |
+
latents: Optional[torch.Tensor] = None,
|
| 855 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 856 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 857 |
+
image_embeds: Optional[torch.Tensor] = None,
|
| 858 |
+
output_type: Optional[str] = "pil",
|
| 859 |
+
return_dict: bool = True,
|
| 860 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 861 |
+
guidance_rescale: float = 0.0,
|
| 862 |
+
clip_skip: Optional[int] = None,
|
| 863 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 864 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 865 |
+
**kwargs,
|
| 866 |
+
):
|
| 867 |
+
r"""
|
| 868 |
+
The call function to the pipeline for generation.
|
| 869 |
+
|
| 870 |
+
Args:
|
| 871 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 872 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 873 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 874 |
+
The height in pixels of the generated image.
|
| 875 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 876 |
+
The width in pixels of the generated image.
|
| 877 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 878 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 879 |
+
expense of slower inference.
|
| 880 |
+
timesteps (`List[int]`, *optional*):
|
| 881 |
+
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
|
| 882 |
+
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
|
| 883 |
+
passed will be used. Must be in descending order.
|
| 884 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 885 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 886 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 887 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 888 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 889 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 890 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 891 |
+
The number of images to generate per prompt.
|
| 892 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 893 |
+
Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies
|
| 894 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 895 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 896 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 897 |
+
generation deterministic.
|
| 898 |
+
latents (`torch.Tensor`, *optional*):
|
| 899 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 900 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 901 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 902 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 903 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 904 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 905 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 906 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 907 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 908 |
+
image_embeds (`torch.Tensor`, *optional*):
|
| 909 |
+
Pre-generated image embeddings.
|
| 910 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 911 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 912 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 913 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 914 |
+
plain tuple.
|
| 915 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 916 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 917 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 918 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 919 |
+
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
|
| 920 |
+
Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when
|
| 921 |
+
using zero terminal SNR.
|
| 922 |
+
clip_skip (`int`, *optional*):
|
| 923 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 924 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 925 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 926 |
+
A function called at the end of each denoising step during inference. The function is called
|
| 927 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 928 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 929 |
+
`callback_on_step_end_tensor_inputs`.
|
| 930 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 931 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 932 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 933 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 934 |
+
|
| 935 |
+
Examples:
|
| 936 |
+
|
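            A minimal usage sketch (the checkpoint names and the insightface-based FaceID embedding
            step are illustrative assumptions, not fixed by this pipeline):

            ```py
            >>> import torch
            >>> from diffusers import DiffusionPipeline

            >>> pipe = DiffusionPipeline.from_pretrained(
            ...     "stable-diffusion-v1-5/stable-diffusion-v1-5",
            ...     custom_pipeline="ip_adapter_face_id",
            ...     torch_dtype=torch.float16,
            ... ).to("cuda")
            >>> pipe.load_ip_adapter_face_id("h94/IP-Adapter-FaceID", "ip-adapter-faceid_sd15.bin")

            >>> # `face_embeds` is a FaceID embedding, e.g. insightface's `normed_embedding` as a (1, 512) tensor
            >>> image = pipe(
            ...     "a photo of a person in a red jacket",
            ...     image_embeds=face_embeds,
            ...     num_inference_steps=30,
            ... ).images[0]
            ```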
| 937 |
+
Returns:
|
| 938 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 939 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 940 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 941 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 942 |
+
"not-safe-for-work" (nsfw) content.
|
| 943 |
+
"""
|
| 944 |
+
|
| 945 |
+
callback = kwargs.pop("callback", None)
|
| 946 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 947 |
+
|
| 948 |
+
if callback is not None:
|
| 949 |
+
deprecate(
|
| 950 |
+
"callback",
|
| 951 |
+
"1.0.0",
|
| 952 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 953 |
+
)
|
| 954 |
+
if callback_steps is not None:
|
| 955 |
+
deprecate(
|
| 956 |
+
"callback_steps",
|
| 957 |
+
"1.0.0",
|
| 958 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 959 |
+
)
|
| 960 |
+
|
| 961 |
+
# 0. Default height and width to unet
|
| 962 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 963 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 964 |
+
# to deal with lora scaling and other possible forward hooks
|
| 965 |
+
|
| 966 |
+
# 1. Check inputs. Raise error if not correct
|
| 967 |
+
self.check_inputs(
|
| 968 |
+
prompt,
|
| 969 |
+
height,
|
| 970 |
+
width,
|
| 971 |
+
callback_steps,
|
| 972 |
+
negative_prompt,
|
| 973 |
+
prompt_embeds,
|
| 974 |
+
negative_prompt_embeds,
|
| 975 |
+
callback_on_step_end_tensor_inputs,
|
| 976 |
+
)
|
| 977 |
+
|
| 978 |
+
self._guidance_scale = guidance_scale
|
| 979 |
+
self._guidance_rescale = guidance_rescale
|
| 980 |
+
self._clip_skip = clip_skip
|
| 981 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 982 |
+
self._interrupt = False
|
| 983 |
+
|
| 984 |
+
# 2. Define call parameters
|
| 985 |
+
if prompt is not None and isinstance(prompt, str):
|
| 986 |
+
batch_size = 1
|
| 987 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 988 |
+
batch_size = len(prompt)
|
| 989 |
+
else:
|
| 990 |
+
batch_size = prompt_embeds.shape[0]
|
| 991 |
+
|
| 992 |
+
device = self._execution_device
|
| 993 |
+
|
| 994 |
+
# 3. Encode input prompt
|
| 995 |
+
lora_scale = (
|
| 996 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 997 |
+
)
|
| 998 |
+
|
| 999 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 1000 |
+
prompt,
|
| 1001 |
+
device,
|
| 1002 |
+
num_images_per_prompt,
|
| 1003 |
+
self.do_classifier_free_guidance,
|
| 1004 |
+
negative_prompt,
|
| 1005 |
+
prompt_embeds=prompt_embeds,
|
| 1006 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1007 |
+
lora_scale=lora_scale,
|
| 1008 |
+
clip_skip=self.clip_skip,
|
| 1009 |
+
)
|
| 1010 |
+
|
| 1011 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 1012 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 1013 |
+
# to avoid doing two forward passes
|
| 1014 |
+
if self.do_classifier_free_guidance:
|
| 1015 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 1016 |
+
|
| 1017 |
+
if image_embeds is not None:
|
| 1018 |
+
image_embeds = torch.stack([image_embeds] * num_images_per_prompt, dim=0).to(
|
| 1019 |
+
device=device, dtype=prompt_embeds.dtype
|
| 1020 |
+
)
|
| 1021 |
+
negative_image_embeds = torch.zeros_like(image_embeds)
|
| 1022 |
+
if self.do_classifier_free_guidance:
|
| 1023 |
+
image_embeds = torch.cat([negative_image_embeds, image_embeds])
|
| 1024 |
+
image_embeds = [image_embeds]
|
| 1025 |
+
# 4. Prepare timesteps
|
| 1026 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
| 1027 |
+
|
| 1028 |
+
# 5. Prepare latent variables
|
| 1029 |
+
num_channels_latents = self.unet.config.in_channels
|
| 1030 |
+
latents = self.prepare_latents(
|
| 1031 |
+
batch_size * num_images_per_prompt,
|
| 1032 |
+
num_channels_latents,
|
| 1033 |
+
height,
|
| 1034 |
+
width,
|
| 1035 |
+
prompt_embeds.dtype,
|
| 1036 |
+
device,
|
| 1037 |
+
generator,
|
| 1038 |
+
latents,
|
| 1039 |
+
)
|
| 1040 |
+
|
| 1041 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1042 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1043 |
+
|
| 1044 |
+
# 6.1 Add image embeds for IP-Adapter
|
| 1045 |
+
added_cond_kwargs = {"image_embeds": image_embeds} if image_embeds is not None else {}
|
| 1046 |
+
|
| 1047 |
+
# 6.2 Optionally get Guidance Scale Embedding
|
| 1048 |
+
timestep_cond = None
|
| 1049 |
+
if self.unet.config.time_cond_proj_dim is not None:
|
| 1050 |
+
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
|
| 1051 |
+
timestep_cond = self.get_guidance_scale_embedding(
|
| 1052 |
+
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
|
| 1053 |
+
).to(device=device, dtype=latents.dtype)
|
| 1054 |
+
|
| 1055 |
+
# 7. Denoising loop
|
| 1056 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 1057 |
+
self._num_timesteps = len(timesteps)
|
| 1058 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1059 |
+
for i, t in enumerate(timesteps):
|
| 1060 |
+
if self.interrupt:
|
| 1061 |
+
continue
|
| 1062 |
+
|
| 1063 |
+
# expand the latents if we are doing classifier free guidance
|
| 1064 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 1065 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1066 |
+
|
| 1067 |
+
# predict the noise residual
|
| 1068 |
+
noise_pred = self.unet(
|
| 1069 |
+
latent_model_input,
|
| 1070 |
+
t,
|
| 1071 |
+
encoder_hidden_states=prompt_embeds,
|
| 1072 |
+
timestep_cond=timestep_cond,
|
| 1073 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 1074 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1075 |
+
return_dict=False,
|
| 1076 |
+
)[0]
|
| 1077 |
+
|
| 1078 |
+
# perform guidance
|
| 1079 |
+
if self.do_classifier_free_guidance:
|
| 1080 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1081 |
+
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1082 |
+
|
| 1083 |
+
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
|
| 1084 |
+
# Based on 3.4. in https://huggingface.co/papers/2305.08891
|
| 1085 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
|
| 1086 |
+
|
| 1087 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1088 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 1089 |
+
|
| 1090 |
+
if callback_on_step_end is not None:
|
| 1091 |
+
callback_kwargs = {}
|
| 1092 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 1093 |
+
callback_kwargs[k] = locals()[k]
|
| 1094 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 1095 |
+
|
| 1096 |
+
latents = callback_outputs.pop("latents", latents)
|
| 1097 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 1098 |
+
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
| 1099 |
+
|
| 1100 |
+
# call the callback, if provided
|
| 1101 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1102 |
+
progress_bar.update()
|
| 1103 |
+
if callback is not None and i % callback_steps == 0:
|
| 1104 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1105 |
+
callback(step_idx, t, latents)
|
| 1106 |
+
|
| 1107 |
+
if not output_type == "latent":
|
| 1108 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
|
| 1109 |
+
0
|
| 1110 |
+
]
|
| 1111 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 1112 |
+
else:
|
| 1113 |
+
image = latents
|
| 1114 |
+
has_nsfw_concept = None
|
| 1115 |
+
|
| 1116 |
+
if has_nsfw_concept is None:
|
| 1117 |
+
do_denormalize = [True] * image.shape[0]
|
| 1118 |
+
else:
|
| 1119 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 1120 |
+
|
| 1121 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 1122 |
+
|
| 1123 |
+
# Offload all models
|
| 1124 |
+
self.maybe_free_model_hooks()
|
| 1125 |
+
|
| 1126 |
+
if not return_dict:
|
| 1127 |
+
return (image, has_nsfw_concept)
|
| 1128 |
+
|
| 1129 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
v0.36.0/kohya_hires_fix.py
ADDED
|
@@ -0,0 +1,468 @@
| 1 |
+
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
import torch.nn as nn
|
| 19 |
+
import torch.utils.checkpoint
|
| 20 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
|
| 21 |
+
|
| 22 |
+
from diffusers.configuration_utils import register_to_config
|
| 23 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 24 |
+
from diffusers.models.autoencoders import AutoencoderKL
|
| 25 |
+
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel, UNet2DConditionOutput
|
| 26 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline
|
| 27 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 28 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 29 |
+
from diffusers.utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class UNet2DConditionModelHighResFix(UNet2DConditionModel):
|
| 36 |
+
r"""
|
| 37 |
+
A conditional 2D UNet model that applies the Kohya fix proposed for high-resolution image generation.
|
| 38 |
+
|
| 39 |
+
This model inherits from [`UNet2DConditionModel`]. Check the superclass documentation for learning about all the parameters.
|
| 40 |
+
|
| 41 |
+
Parameters:
|
| 42 |
+
high_res_fix (`List[Dict]`, *optional*, defaults to `[{'timestep': 600, 'scale_factor': 0.5, 'block_num': 1}]`):
|
| 43 |
+
Enables the Kohya fix for high-resolution generation. The activation maps at the specified `block_num` are rescaled by `scale_factor` until the denoising process reaches `timestep`.
|
| 44 |
+
"""
|
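    # Illustrative reading of the default entry (spelled out from the docstring above, not extra
    # behavior): {"timestep": 600, "scale_factor": 0.5, "block_num": 1} halves the activation maps
    # at down block 1 during the early, high-noise steps (while the timestep is still above 600)
    # and lets the UNet run at full resolution afterwards.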
| 45 |
+
|
| 46 |
+
_supports_gradient_checkpointing = True
|
| 47 |
+
|
| 48 |
+
@register_to_config
|
| 49 |
+
def __init__(self, high_res_fix: List[Dict] = [{"timestep": 600, "scale_factor": 0.5, "block_num": 1}], **kwargs):
|
| 50 |
+
super().__init__(**kwargs)
|
| 51 |
+
if high_res_fix:
|
| 52 |
+
self.config.high_res_fix = sorted(high_res_fix, key=lambda x: x["timestep"], reverse=True)
|
| 53 |
+
|
| 54 |
+
@classmethod
|
| 55 |
+
def _resize(cls, sample, target=None, scale_factor=1, mode="bicubic"):
|
| 56 |
+
dtype = sample.dtype
|
| 57 |
+
if dtype == torch.bfloat16:
|
| 58 |
+
sample = sample.to(torch.float32)
|
| 59 |
+
|
| 60 |
+
if target is not None:
|
| 61 |
+
if sample.shape[-2:] != target.shape[-2:]:
|
| 62 |
+
sample = nn.functional.interpolate(sample, size=target.shape[-2:], mode=mode, align_corners=False)
|
| 63 |
+
elif scale_factor != 1:
|
| 64 |
+
sample = nn.functional.interpolate(sample, scale_factor=scale_factor, mode=mode, align_corners=False)
|
| 65 |
+
|
| 66 |
+
return sample.to(dtype)
|
| 67 |
+
|
| 68 |
+
def forward(
|
| 69 |
+
self,
|
| 70 |
+
sample: torch.FloatTensor,
|
| 71 |
+
timestep: Union[torch.Tensor, float, int],
|
| 72 |
+
encoder_hidden_states: torch.Tensor,
|
| 73 |
+
class_labels: Optional[torch.Tensor] = None,
|
| 74 |
+
timestep_cond: Optional[torch.Tensor] = None,
|
| 75 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 76 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 77 |
+
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
|
| 78 |
+
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
|
| 79 |
+
mid_block_additional_residual: Optional[torch.Tensor] = None,
|
| 80 |
+
down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
|
| 81 |
+
encoder_attention_mask: Optional[torch.Tensor] = None,
|
| 82 |
+
return_dict: bool = True,
|
| 83 |
+
) -> Union[UNet2DConditionOutput, Tuple]:
|
| 84 |
+
r"""
|
| 85 |
+
The [`UNet2DConditionModel`] forward method.
|
| 86 |
+
|
| 87 |
+
Args:
|
| 88 |
+
sample (`torch.FloatTensor`):
|
| 89 |
+
The noisy input tensor with the following shape `(batch, channel, height, width)`.
|
| 90 |
+
timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
|
| 91 |
+
encoder_hidden_states (`torch.FloatTensor`):
|
| 92 |
+
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
|
| 93 |
+
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
|
| 94 |
+
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
|
| 95 |
+
timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
|
| 96 |
+
Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
|
| 97 |
+
through the `self.time_embedding` layer to obtain the timestep embeddings.
|
| 98 |
+
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
|
| 99 |
+
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
|
| 100 |
+
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
|
| 101 |
+
negative values to the attention scores corresponding to "discard" tokens.
|
| 102 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 103 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 104 |
+
`self.processor` in
|
| 105 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 106 |
+
added_cond_kwargs: (`dict`, *optional*):
|
| 107 |
+
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
|
| 108 |
+
are passed along to the UNet blocks.
|
| 109 |
+
down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
|
| 110 |
+
A tuple of tensors that if specified are added to the residuals of down unet blocks.
|
| 111 |
+
mid_block_additional_residual: (`torch.Tensor`, *optional*):
|
| 112 |
+
A tensor that if specified is added to the residual of the middle unet block.
|
| 113 |
+
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
|
| 114 |
+
additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
|
| 115 |
+
encoder_attention_mask (`torch.Tensor`):
|
| 116 |
+
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
|
| 117 |
+
`True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
|
| 118 |
+
which adds large negative values to the attention scores corresponding to "discard" tokens.
|
| 119 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 120 |
+
Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
|
| 121 |
+
tuple.
|
| 122 |
+
|
| 123 |
+
Returns:
|
| 124 |
+
[`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
|
| 125 |
+
If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned,
|
| 126 |
+
otherwise a `tuple` is returned where the first element is the sample tensor.
|
| 127 |
+
"""
|
| 128 |
+
# By default samples have to be AT least a multiple of the overall upsampling factor.
|
| 129 |
+
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
|
| 130 |
+
# However, the upsampling interpolation output size can be forced to fit any upsampling size
|
| 131 |
+
# on the fly if necessary.
|
| 132 |
+
default_overall_up_factor = 2**self.num_upsamplers
|
| 133 |
+
|
| 134 |
+
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
|
| 135 |
+
forward_upsample_size = False
|
| 136 |
+
upsample_size = None
|
| 137 |
+
|
| 138 |
+
for dim in sample.shape[-2:]:
|
| 139 |
+
if dim % default_overall_up_factor != 0:
|
| 140 |
+
# Forward upsample size to force interpolation output size.
|
| 141 |
+
forward_upsample_size = True
|
| 142 |
+
break
|
| 143 |
+
|
| 144 |
+
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension
|
| 145 |
+
# expects mask of shape:
|
| 146 |
+
# [batch, key_tokens]
|
| 147 |
+
# adds singleton query_tokens dimension:
|
| 148 |
+
# [batch, 1, key_tokens]
|
| 149 |
+
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
|
| 150 |
+
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
|
| 151 |
+
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
|
| 152 |
+
if attention_mask is not None:
|
| 153 |
+
# assume that mask is expressed as:
|
| 154 |
+
# (1 = keep, 0 = discard)
|
| 155 |
+
# convert mask into a bias that can be added to attention scores:
|
| 156 |
+
# (keep = +0, discard = -10000.0)
|
| 157 |
+
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
|
| 158 |
+
attention_mask = attention_mask.unsqueeze(1)
|
| 159 |
+
|
| 160 |
+
# convert encoder_attention_mask to a bias the same way we do for attention_mask
|
| 161 |
+
if encoder_attention_mask is not None:
|
| 162 |
+
encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
|
| 163 |
+
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
|
| 164 |
+
|
| 165 |
+
# 0. center input if necessary
|
| 166 |
+
if self.config.center_input_sample:
|
| 167 |
+
sample = 2 * sample - 1.0
|
| 168 |
+
|
| 169 |
+
# 1. time
|
| 170 |
+
t_emb = self.get_time_embed(sample=sample, timestep=timestep)
|
| 171 |
+
emb = self.time_embedding(t_emb, timestep_cond)
|
| 172 |
+
aug_emb = None
|
| 173 |
+
|
| 174 |
+
class_emb = self.get_class_embed(sample=sample, class_labels=class_labels)
|
| 175 |
+
if class_emb is not None:
|
| 176 |
+
if self.config.class_embeddings_concat:
|
| 177 |
+
emb = torch.cat([emb, class_emb], dim=-1)
|
| 178 |
+
else:
|
| 179 |
+
emb = emb + class_emb
|
| 180 |
+
|
| 181 |
+
aug_emb = self.get_aug_embed(
|
| 182 |
+
emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
|
| 183 |
+
)
|
| 184 |
+
if self.config.addition_embed_type == "image_hint":
|
| 185 |
+
aug_emb, hint = aug_emb
|
| 186 |
+
sample = torch.cat([sample, hint], dim=1)
|
| 187 |
+
|
| 188 |
+
emb = emb + aug_emb if aug_emb is not None else emb
|
| 189 |
+
|
| 190 |
+
if self.time_embed_act is not None:
|
| 191 |
+
emb = self.time_embed_act(emb)
|
| 192 |
+
|
| 193 |
+
encoder_hidden_states = self.process_encoder_hidden_states(
|
| 194 |
+
encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
|
| 195 |
+
)
|
| 196 |
+
|
| 197 |
+
# 2. pre-process
|
| 198 |
+
sample = self.conv_in(sample)
|
| 199 |
+
|
| 200 |
+
# 2.5 GLIGEN position net
|
| 201 |
+
if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
|
| 202 |
+
cross_attention_kwargs = cross_attention_kwargs.copy()
|
| 203 |
+
gligen_args = cross_attention_kwargs.pop("gligen")
|
| 204 |
+
cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
|
| 205 |
+
|
| 206 |
+
# 3. down
|
| 207 |
+
# we're popping the `scale` instead of getting it because otherwise `scale` will be propagated
|
| 208 |
+
# to the internal blocks and will raise deprecation warnings. this will be confusing for our users.
|
| 209 |
+
if cross_attention_kwargs is not None:
|
| 210 |
+
cross_attention_kwargs = cross_attention_kwargs.copy()
|
| 211 |
+
lora_scale = cross_attention_kwargs.pop("scale", 1.0)
|
| 212 |
+
else:
|
| 213 |
+
lora_scale = 1.0
|
| 214 |
+
|
| 215 |
+
if USE_PEFT_BACKEND:
|
| 216 |
+
# weight the lora layers by setting `lora_scale` for each PEFT layer
|
| 217 |
+
scale_lora_layers(self, lora_scale)
|
| 218 |
+
|
| 219 |
+
is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
|
| 220 |
+
# using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
|
| 221 |
+
is_adapter = down_intrablock_additional_residuals is not None
|
| 222 |
+
# maintain backward compatibility for legacy usage, where
|
| 223 |
+
# T2I-Adapter and ControlNet both use down_block_additional_residuals arg
|
| 224 |
+
# but can only use one or the other
|
| 225 |
+
if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
|
| 226 |
+
deprecate(
|
| 227 |
+
"T2I should not use down_block_additional_residuals",
|
| 228 |
+
"1.3.0",
|
| 229 |
+
"Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
|
| 230 |
+
and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
|
| 231 |
+
for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. ",
|
| 232 |
+
standard_warn=False,
|
| 233 |
+
)
|
| 234 |
+
down_intrablock_additional_residuals = down_block_additional_residuals
|
| 235 |
+
is_adapter = True
|
| 236 |
+
|
| 237 |
+
down_block_res_samples = (sample,)
|
| 238 |
+
for down_i, downsample_block in enumerate(self.down_blocks):
|
| 239 |
+
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
|
| 240 |
+
# For t2i-adapter CrossAttnDownBlock2D
|
| 241 |
+
additional_residuals = {}
|
| 242 |
+
if is_adapter and len(down_intrablock_additional_residuals) > 0:
|
| 243 |
+
additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
|
| 244 |
+
|
| 245 |
+
sample, res_samples = downsample_block(
|
| 246 |
+
hidden_states=sample,
|
| 247 |
+
temb=emb,
|
| 248 |
+
encoder_hidden_states=encoder_hidden_states,
|
| 249 |
+
attention_mask=attention_mask,
|
| 250 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 251 |
+
encoder_attention_mask=encoder_attention_mask,
|
| 252 |
+
**additional_residuals,
|
| 253 |
+
)
|
| 254 |
+
|
| 255 |
+
else:
|
| 256 |
+
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
|
| 257 |
+
if is_adapter and len(down_intrablock_additional_residuals) > 0:
|
| 258 |
+
sample += down_intrablock_additional_residuals.pop(0)
|
| 259 |
+
|
| 260 |
+
down_block_res_samples += res_samples
|
| 261 |
+
|
| 262 |
+
# kohya high res fix
|
| 263 |
+
if self.config.high_res_fix:
|
| 264 |
+
for high_res_fix in self.config.high_res_fix:
|
| 265 |
+
if timestep > high_res_fix["timestep"] and down_i == high_res_fix["block_num"]:
|
| 266 |
+
sample = self.__class__._resize(sample, scale_factor=high_res_fix["scale_factor"])
|
| 267 |
+
break
|
| 268 |
+
|
| 269 |
+
if is_controlnet:
|
| 270 |
+
new_down_block_res_samples = ()
|
| 271 |
+
|
| 272 |
+
for down_block_res_sample, down_block_additional_residual in zip(
|
| 273 |
+
down_block_res_samples, down_block_additional_residuals
|
| 274 |
+
):
|
| 275 |
+
down_block_res_sample = down_block_res_sample + down_block_additional_residual
|
| 276 |
+
new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
|
| 277 |
+
|
| 278 |
+
down_block_res_samples = new_down_block_res_samples
|
| 279 |
+
|
| 280 |
+
# 4. mid
|
| 281 |
+
if self.mid_block is not None:
|
| 282 |
+
if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
|
| 283 |
+
sample = self.mid_block(
|
| 284 |
+
sample,
|
| 285 |
+
emb,
|
| 286 |
+
encoder_hidden_states=encoder_hidden_states,
|
| 287 |
+
attention_mask=attention_mask,
|
| 288 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 289 |
+
encoder_attention_mask=encoder_attention_mask,
|
| 290 |
+
)
|
| 291 |
+
else:
|
| 292 |
+
sample = self.mid_block(sample, emb)
|
| 293 |
+
|
| 294 |
+
# To support T2I-Adapter-XL
|
| 295 |
+
if (
|
| 296 |
+
is_adapter
|
| 297 |
+
and len(down_intrablock_additional_residuals) > 0
|
| 298 |
+
and sample.shape == down_intrablock_additional_residuals[0].shape
|
| 299 |
+
):
|
| 300 |
+
sample += down_intrablock_additional_residuals.pop(0)
|
| 301 |
+
|
| 302 |
+
if is_controlnet:
|
| 303 |
+
sample = sample + mid_block_additional_residual
|
| 304 |
+
|
| 305 |
+
# 5. up
|
| 306 |
+
for i, upsample_block in enumerate(self.up_blocks):
|
| 307 |
+
is_final_block = i == len(self.up_blocks) - 1
|
| 308 |
+
|
| 309 |
+
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
|
| 310 |
+
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
|
| 311 |
+
|
| 312 |
+
# up scaling of kohya high res fix
|
| 313 |
+
if self.config.high_res_fix is not None:
|
| 314 |
+
if res_samples[0].shape[-2:] != sample.shape[-2:]:
|
| 315 |
+
sample = self.__class__._resize(sample, target=res_samples[0])
|
| 316 |
+
res_samples_up_sampled = (res_samples[0],)
|
| 317 |
+
for res_sample in res_samples[1:]:
|
| 318 |
+
res_samples_up_sampled += (self.__class__._resize(res_sample, target=res_samples[0]),)
|
| 319 |
+
res_samples = res_samples_up_sampled
|
| 320 |
+
|
| 321 |
+
# if we have not reached the final block and need to forward the
|
| 322 |
+
# upsample size, we do it here
|
| 323 |
+
if not is_final_block and forward_upsample_size:
|
| 324 |
+
upsample_size = down_block_res_samples[-1].shape[2:]
|
| 325 |
+
|
| 326 |
+
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
|
| 327 |
+
sample = upsample_block(
|
| 328 |
+
hidden_states=sample,
|
| 329 |
+
temb=emb,
|
| 330 |
+
res_hidden_states_tuple=res_samples,
|
| 331 |
+
encoder_hidden_states=encoder_hidden_states,
|
| 332 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 333 |
+
upsample_size=upsample_size,
|
| 334 |
+
attention_mask=attention_mask,
|
| 335 |
+
encoder_attention_mask=encoder_attention_mask,
|
| 336 |
+
)
|
| 337 |
+
else:
|
| 338 |
+
sample = upsample_block(
|
| 339 |
+
hidden_states=sample,
|
| 340 |
+
temb=emb,
|
| 341 |
+
res_hidden_states_tuple=res_samples,
|
| 342 |
+
upsample_size=upsample_size,
|
| 343 |
+
)
|
| 344 |
+
|
| 345 |
+
# 6. post-process
|
| 346 |
+
if self.conv_norm_out:
|
| 347 |
+
sample = self.conv_norm_out(sample)
|
| 348 |
+
sample = self.conv_act(sample)
|
| 349 |
+
sample = self.conv_out(sample)
|
| 350 |
+
|
| 351 |
+
if USE_PEFT_BACKEND:
|
| 352 |
+
# remove `lora_scale` from each PEFT layer
|
| 353 |
+
unscale_lora_layers(self, lora_scale)
|
| 354 |
+
|
| 355 |
+
if not return_dict:
|
| 356 |
+
return (sample,)
|
| 357 |
+
|
| 358 |
+
return UNet2DConditionOutput(sample=sample)
|
| 359 |
+
|
| 360 |
+
@classmethod
|
| 361 |
+
def from_unet(cls, unet: UNet2DConditionModel, high_res_fix: list):
|
| 362 |
+
config = dict((unet.config))
|
| 363 |
+
config["high_res_fix"] = high_res_fix
|
| 364 |
+
unet_high_res = cls(**config)
|
| 365 |
+
unet_high_res.load_state_dict(unet.state_dict())
|
| 366 |
+
unet_high_res.to(unet.dtype)
|
| 367 |
+
return unet_high_res
|
| 368 |
+
|
| 369 |
+
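# Note on the effect of the default configuration: with
# high_res_fix=[{"timestep": 600, "scale_factor": 0.5, "block_num": 1}], the forward pass above
# bicubically downscales the activation map by 0.5 right after down block index 1 for as long as
# the current timestep is greater than 600 (the early, high-noise steps). In the up path, hidden
# states and skip connections are resized back to a common spatial size via `_resize`, so the
# final output resolution is unchanged.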
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import DiffusionPipeline

        >>> pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",
        ...                                          custom_pipeline="kohya_hires_fix",
        ...                                          torch_dtype=torch.float16,
        ...                                          high_res_fix=[{'timestep': 600,
        ...                                                         'scale_factor': 0.5,
        ...                                                         'block_num': 1}])
        >>> pipe = pipe.to("cuda")

        >>> prompt = "a photo of an astronaut riding a horse on mars"
        >>> image = pipe(prompt, height=1000, width=1600).images[0]
        ```
"""


class StableDiffusionHighResFixPipeline(StableDiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion with Kohya fix for high resolution generation.

    This model inherits from [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods.

    The pipeline also inherits the following loading methods:
        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
        - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
        - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
            about a model's potential harms.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
        high_res_fix (`List[Dict]`, *optional*, defaults to `[{'timestep': 600, 'scale_factor': 0.5, 'block_num': 1}]`):
            Enables Kohya fix for high resolution generation. The activation maps are scaled based on the scale_factor up to the timestep at specified block_num.
    """

    model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
    _exclude_from_cpu_offload = ["safety_checker"]
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        image_encoder: CLIPVisionModelWithProjection = None,
        requires_safety_checker: bool = True,
        high_res_fix: List[Dict] = [{"timestep": 600, "scale_factor": 0.5, "block_num": 1}],
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
            requires_safety_checker=requires_safety_checker,
        )

        unet = UNet2DConditionModelHighResFix.from_unet(unet=unet, high_res_fix=high_res_fix)
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(requires_safety_checker=requires_safety_checker)
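Besides the `custom_pipeline` route shown in `EXAMPLE_DOC_STRING`, the `from_unet` helper defined above can also wrap the UNet of an already-instantiated pipeline. The following is a minimal, illustrative sketch only (it reuses the checkpoint and default `high_res_fix` from the example docstring and simply swaps the UNet attribute, so the serialized pipeline config is not updated):

```py
import torch
from diffusers import StableDiffusionPipeline

# Load a regular Stable Diffusion pipeline, then replace its UNet with the high-res-fix variant.
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
high_res_unet = UNet2DConditionModelHighResFix.from_unet(
    unet=pipe.unet, high_res_fix=[{"timestep": 600, "scale_factor": 0.5, "block_num": 1}]
)
pipe.unet = high_res_unet
pipe = pipe.to("cuda")

image = pipe("a photo of an astronaut riding a horse on mars", height=1000, width=1600).images[0]
```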
v0.36.0/latent_consistency_img2img.py
ADDED
@@ -0,0 +1,821 @@
# Copyright 2025 Stanford University Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

import math
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging
from diffusers.configuration_utils import register_to_config
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import BaseOutput
from diffusers.utils.torch_utils import randn_tensor


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LatentConsistencyModelImg2ImgPipeline(DiffusionPipeline):
    _optional_components = ["scheduler"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: "LCMSchedulerWithTimestamp",
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        scheduler = (
            scheduler
            if scheduler is not None
            else LCMSchedulerWithTimestamp(
                beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon"
            )
        )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        prompt_embeds: None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.
        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
        """

        if prompt is not None and isinstance(prompt, str):
            pass
        elif prompt is not None and isinstance(prompt, list):
            len(prompt)
        else:
            prompt_embeds.shape[0]

        if prompt_embeds is None:
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # Don't need to get uncond prompt embedding because of LCM Guided Distillation
        return prompt_embeds

    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    def prepare_latents(
        self,
        image,
        timestep,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        latents=None,
        generator=None,
    ):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )

        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        # batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image

        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            elif isinstance(generator, list):
                init_latents = [
                    self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.vae.encode(image).latent_dist.sample(generator)

            init_latents = self.vae.config.scaling_factor * init_latents

        if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
            # expand init_latents for batch_size
            (
                f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
                " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
                " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
                " your script to pass as many initial images as text prompts to suppress this warning."
            )
            # deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
            additional_image_per_prompt = batch_size // init_latents.shape[0]
            init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
        elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
            raise ValueError(
                f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
            )
        else:
            init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
        Args:
            timesteps: torch.Tensor: generate embedding vectors at these timesteps
            embedding_dim: int: dimension of the embeddings to generate
            dtype: data type of the generated embeddings
        Returns:
            embedding vectors with shape `(len(timesteps), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

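    # For reference, in __call__ below this embedding is built as
    #     w = torch.tensor(guidance_scale).repeat(batch_size * num_images_per_prompt)
    #     w_embedding = self.get_w_embedding(w, embedding_dim=256)
    # i.e. a 1-D tensor of guidance scales of shape (bs,) is mapped to a sinusoidal embedding of
    # shape (bs, 256) (sin and cos halves concatenated), which is then passed to the UNet as
    # `timestep_cond`. No separate unconditional forward pass is run, because the guidance scale
    # is an input of the LCM-distilled model rather than applied as classifier-free guidance.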
|
| 266 |
+
def get_timesteps(self, num_inference_steps, strength, device):
|
| 267 |
+
# get the original timestep using init_timestep
|
| 268 |
+
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
| 269 |
+
|
| 270 |
+
t_start = max(num_inference_steps - init_timestep, 0)
|
| 271 |
+
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
|
| 272 |
+
|
| 273 |
+
return timesteps, num_inference_steps - t_start
|
| 274 |
+
|
| 275 |
+
@torch.no_grad()
|
| 276 |
+
def __call__(
|
| 277 |
+
self,
|
| 278 |
+
prompt: Union[str, List[str]] = None,
|
| 279 |
+
image: PipelineImageInput = None,
|
| 280 |
+
strength: float = 0.8,
|
| 281 |
+
height: Optional[int] = 768,
|
| 282 |
+
width: Optional[int] = 768,
|
| 283 |
+
guidance_scale: float = 7.5,
|
| 284 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 285 |
+
latents: Optional[torch.Tensor] = None,
|
| 286 |
+
num_inference_steps: int = 4,
|
| 287 |
+
lcm_origin_steps: int = 50,
|
| 288 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 289 |
+
output_type: Optional[str] = "pil",
|
| 290 |
+
return_dict: bool = True,
|
| 291 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 292 |
+
):
|
| 293 |
+
# 0. Default height and width to unet
|
| 294 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 295 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 296 |
+
|
| 297 |
+
# 2. Define call parameters
|
| 298 |
+
if prompt is not None and isinstance(prompt, str):
|
| 299 |
+
batch_size = 1
|
| 300 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 301 |
+
batch_size = len(prompt)
|
| 302 |
+
else:
|
| 303 |
+
batch_size = prompt_embeds.shape[0]
|
| 304 |
+
|
| 305 |
+
device = self._execution_device
|
| 306 |
+
# do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)
|
| 307 |
+
|
| 308 |
+
# 3. Encode input prompt
|
| 309 |
+
prompt_embeds = self._encode_prompt(
|
| 310 |
+
prompt,
|
| 311 |
+
device,
|
| 312 |
+
num_images_per_prompt,
|
| 313 |
+
prompt_embeds=prompt_embeds,
|
| 314 |
+
)
|
| 315 |
+
|
| 316 |
+
# 3.5 encode image
|
| 317 |
+
image = self.image_processor.preprocess(image)
|
| 318 |
+
|
| 319 |
+
# 4. Prepare timesteps
|
| 320 |
+
self.scheduler.set_timesteps(strength, num_inference_steps, lcm_origin_steps)
|
| 321 |
+
# timesteps = self.scheduler.timesteps
|
| 322 |
+
# timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, 1.0, device)
|
| 323 |
+
timesteps = self.scheduler.timesteps
|
| 324 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 325 |
+
|
| 326 |
+
print("timesteps: ", timesteps)
|
| 327 |
+
|
| 328 |
+
# 5. Prepare latent variable
|
| 329 |
+
num_channels_latents = self.unet.config.in_channels
|
| 330 |
+
if latents is None:
|
| 331 |
+
latents = self.prepare_latents(
|
| 332 |
+
image,
|
| 333 |
+
latent_timestep,
|
| 334 |
+
batch_size * num_images_per_prompt,
|
| 335 |
+
num_channels_latents,
|
| 336 |
+
height,
|
| 337 |
+
width,
|
| 338 |
+
prompt_embeds.dtype,
|
| 339 |
+
device,
|
| 340 |
+
latents,
|
| 341 |
+
)
|
| 342 |
+
bs = batch_size * num_images_per_prompt
|
| 343 |
+
|
| 344 |
+
# 6. Get Guidance Scale Embedding
|
| 345 |
+
w = torch.tensor(guidance_scale).repeat(bs)
|
| 346 |
+
w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)
|
| 347 |
+
|
| 348 |
+
# 7. LCM MultiStep Sampling Loop:
|
| 349 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 350 |
+
for i, t in enumerate(timesteps):
|
| 351 |
+
ts = torch.full((bs,), t, device=device, dtype=torch.long)
|
| 352 |
+
latents = latents.to(prompt_embeds.dtype)
|
| 353 |
+
|
| 354 |
+
# model prediction (v-prediction, eps, x)
|
| 355 |
+
model_pred = self.unet(
|
| 356 |
+
latents,
|
| 357 |
+
ts,
|
| 358 |
+
timestep_cond=w_embedding,
|
| 359 |
+
encoder_hidden_states=prompt_embeds,
|
| 360 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 361 |
+
return_dict=False,
|
| 362 |
+
)[0]
|
| 363 |
+
|
| 364 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 365 |
+
latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)
|
| 366 |
+
|
| 367 |
+
# # call the callback, if provided
|
| 368 |
+
# if i == len(timesteps) - 1:
|
| 369 |
+
progress_bar.update()
|
| 370 |
+
|
| 371 |
+
denoised = denoised.to(prompt_embeds.dtype)
|
| 372 |
+
if not output_type == "latent":
|
| 373 |
+
image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 374 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 375 |
+
else:
|
| 376 |
+
image = denoised
|
| 377 |
+
has_nsfw_concept = None
|
| 378 |
+
|
| 379 |
+
if has_nsfw_concept is None:
|
| 380 |
+
do_denormalize = [True] * image.shape[0]
|
| 381 |
+
else:
|
| 382 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 383 |
+
|
| 384 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 385 |
+
|
| 386 |
+
if not return_dict:
|
| 387 |
+
return (image, has_nsfw_concept)
|
| 388 |
+
|
| 389 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
@dataclass
|
| 393 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
|
| 394 |
+
class LCMSchedulerOutput(BaseOutput):
|
| 395 |
+
"""
|
| 396 |
+
Output class for the scheduler's `step` function output.
|
| 397 |
+
Args:
|
| 398 |
+
prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
|
| 399 |
+
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
|
| 400 |
+
denoising loop.
|
| 401 |
+
pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
|
| 402 |
+
The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
|
| 403 |
+
`pred_original_sample` can be used to preview progress or for guidance.
|
| 404 |
+
"""
|
| 405 |
+
|
| 406 |
+
prev_sample: torch.Tensor
|
| 407 |
+
denoised: Optional[torch.Tensor] = None
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
|
| 411 |
+
def betas_for_alpha_bar(
|
| 412 |
+
num_diffusion_timesteps,
|
| 413 |
+
max_beta=0.999,
|
| 414 |
+
alpha_transform_type="cosine",
|
| 415 |
+
):
|
| 416 |
+
"""
|
| 417 |
+
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
|
| 418 |
+
(1-beta) over time from t = [0,1].
|
| 419 |
+
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
|
| 420 |
+
to that part of the diffusion process.
|
| 421 |
+
Args:
|
| 422 |
+
num_diffusion_timesteps (`int`): the number of betas to produce.
|
| 423 |
+
max_beta (`float`): the maximum beta to use; use values lower than 1 to
|
| 424 |
+
prevent singularities.
|
| 425 |
+
alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
|
| 426 |
+
Choose from `cosine` or `exp`
|
| 427 |
+
Returns:
|
| 428 |
+
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
|
| 429 |
+
"""
|
| 430 |
+
if alpha_transform_type == "cosine":
|
| 431 |
+
|
| 432 |
+
def alpha_bar_fn(t):
|
| 433 |
+
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
|
| 434 |
+
|
| 435 |
+
elif alpha_transform_type == "exp":
|
| 436 |
+
|
| 437 |
+
def alpha_bar_fn(t):
|
| 438 |
+
return math.exp(t * -12.0)
|
| 439 |
+
|
| 440 |
+
else:
|
| 441 |
+
raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
|
| 442 |
+
|
| 443 |
+
betas = []
|
| 444 |
+
for i in range(num_diffusion_timesteps):
|
| 445 |
+
t1 = i / num_diffusion_timesteps
|
| 446 |
+
t2 = (i + 1) / num_diffusion_timesteps
|
| 447 |
+
betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
|
| 448 |
+
return torch.tensor(betas, dtype=torch.float32)
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
def rescale_zero_terminal_snr(betas):
|
| 452 |
+
"""
|
| 453 |
+
Rescales betas to have zero terminal SNR Based on https://huggingface.co/papers/2305.08891 (Algorithm 1)
|
| 454 |
+
Args:
|
| 455 |
+
betas (`torch.Tensor`):
|
| 456 |
+
the betas that the scheduler is being initialized with.
|
| 457 |
+
Returns:
|
| 458 |
+
`torch.Tensor`: rescaled betas with zero terminal SNR
|
| 459 |
+
"""
|
| 460 |
+
# Convert betas to alphas_bar_sqrt
|
| 461 |
+
alphas = 1.0 - betas
|
| 462 |
+
alphas_cumprod = torch.cumprod(alphas, dim=0)
|
| 463 |
+
alphas_bar_sqrt = alphas_cumprod.sqrt()
|
| 464 |
+
|
| 465 |
+
# Store old values.
|
| 466 |
+
alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
|
| 467 |
+
alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
|
| 468 |
+
|
| 469 |
+
# Shift so the last timestep is zero.
|
| 470 |
+
alphas_bar_sqrt -= alphas_bar_sqrt_T
|
| 471 |
+
|
| 472 |
+
# Scale so the first timestep is back to the old value.
|
| 473 |
+
alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
|
| 474 |
+
|
| 475 |
+
# Convert alphas_bar_sqrt to betas
|
| 476 |
+
alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
|
| 477 |
+
alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
|
| 478 |
+
alphas = torch.cat([alphas_bar[0:1], alphas])
|
| 479 |
+
betas = 1 - alphas
|
| 480 |
+
|
| 481 |
+
return betas
|
| 482 |
+
|
| 483 |
+
|
| 484 |
+
class LCMSchedulerWithTimestamp(SchedulerMixin, ConfigMixin):
|
| 485 |
+
"""
|
| 486 |
+
This class modifies LCMScheduler to add a timestamp argument to set_timesteps
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
`LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
|
| 490 |
+
non-Markovian guidance.
|
| 491 |
+
This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
|
| 492 |
+
methods the library implements for all schedulers such as loading and saving.
|
| 493 |
+
Args:
|
| 494 |
+
num_train_timesteps (`int`, defaults to 1000):
|
| 495 |
+
The number of diffusion steps to train the model.
|
| 496 |
+
beta_start (`float`, defaults to 0.0001):
|
| 497 |
+
The starting `beta` value of inference.
|
| 498 |
+
beta_end (`float`, defaults to 0.02):
|
| 499 |
+
The final `beta` value.
|
| 500 |
+
beta_schedule (`str`, defaults to `"linear"`):
|
| 501 |
+
The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
|
| 502 |
+
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
|
| 503 |
+
trained_betas (`np.ndarray`, *optional*):
|
| 504 |
+
Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
|
| 505 |
+
clip_sample (`bool`, defaults to `True`):
|
| 506 |
+
Clip the predicted sample for numerical stability.
|
| 507 |
+
clip_sample_range (`float`, defaults to 1.0):
|
| 508 |
+
The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
|
| 509 |
+
set_alpha_to_one (`bool`, defaults to `True`):
|
| 510 |
+
Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
|
| 511 |
+
there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
|
| 512 |
+
otherwise it uses the alpha value at step 0.
|
| 513 |
+
steps_offset (`int`, defaults to 0):
|
| 514 |
+
An offset added to the inference steps, as required by some model families.
|
| 515 |
+
prediction_type (`str`, defaults to `epsilon`, *optional*):
|
| 516 |
+
Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
|
| 517 |
+
`sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
|
| 518 |
+
Video](https://imagen.research.google/video/paper.pdf) paper).
|
| 519 |
+
thresholding (`bool`, defaults to `False`):
|
| 520 |
+
Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
|
| 521 |
+
as Stable Diffusion.
|
| 522 |
+
dynamic_thresholding_ratio (`float`, defaults to 0.995):
|
| 523 |
+
The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
|
| 524 |
+
sample_max_value (`float`, defaults to 1.0):
|
| 525 |
+
The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
|
| 526 |
+
timestep_spacing (`str`, defaults to `"leading"`):
|
| 527 |
+
The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
|
| 528 |
+
Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
|
| 529 |
+
rescale_betas_zero_snr (`bool`, defaults to `False`):
|
| 530 |
+
Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
|
| 531 |
+
dark samples instead of limiting it to samples with medium brightness. Loosely related to
|
| 532 |
+
[`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
|
| 533 |
+
"""
|
| 534 |
+
|
| 535 |
+
# _compatibles = [e.name for e in KarrasDiffusionSchedulers]
|
| 536 |
+
order = 1
|
| 537 |
+
|
| 538 |
+
@register_to_config
|
| 539 |
+
def __init__(
|
| 540 |
+
self,
|
| 541 |
+
num_train_timesteps: int = 1000,
|
| 542 |
+
beta_start: float = 0.0001,
|
| 543 |
+
beta_end: float = 0.02,
|
| 544 |
+
beta_schedule: str = "linear",
|
| 545 |
+
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
|
| 546 |
+
clip_sample: bool = True,
|
| 547 |
+
set_alpha_to_one: bool = True,
|
| 548 |
+
steps_offset: int = 0,
|
| 549 |
+
prediction_type: str = "epsilon",
|
| 550 |
+
thresholding: bool = False,
|
| 551 |
+
dynamic_thresholding_ratio: float = 0.995,
|
| 552 |
+
clip_sample_range: float = 1.0,
|
| 553 |
+
sample_max_value: float = 1.0,
|
| 554 |
+
timestep_spacing: str = "leading",
|
| 555 |
+
rescale_betas_zero_snr: bool = False,
|
| 556 |
+
):
|
| 557 |
+
if trained_betas is not None:
|
| 558 |
+
self.betas = torch.tensor(trained_betas, dtype=torch.float32)
|
| 559 |
+
elif beta_schedule == "linear":
|
| 560 |
+
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
|
| 561 |
+
elif beta_schedule == "scaled_linear":
|
| 562 |
+
# this schedule is very specific to the latent diffusion model.
|
| 563 |
+
self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
|
| 564 |
+
elif beta_schedule == "squaredcos_cap_v2":
|
| 565 |
+
# Glide cosine schedule
|
| 566 |
+
self.betas = betas_for_alpha_bar(num_train_timesteps)
|
| 567 |
+
else:
|
| 568 |
+
raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
|
| 569 |
+
|
| 570 |
+
# Rescale for zero SNR
|
| 571 |
+
if rescale_betas_zero_snr:
|
| 572 |
+
self.betas = rescale_zero_terminal_snr(self.betas)
|
| 573 |
+
|
| 574 |
+
self.alphas = 1.0 - self.betas
|
| 575 |
+
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
|
| 576 |
+
|
| 577 |
+
# At every step in ddim, we are looking into the previous alphas_cumprod
|
| 578 |
+
# For the final step, there is no previous alphas_cumprod because we are already at 0
|
| 579 |
+
# `set_alpha_to_one` decides whether we set this parameter simply to one or
|
| 580 |
+
# whether we use the final alpha of the "non-previous" one.
|
| 581 |
+
self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
|
| 582 |
+
|
| 583 |
+
# standard deviation of the initial noise distribution
|
| 584 |
+
self.init_noise_sigma = 1.0
|
| 585 |
+
|
| 586 |
+
# setable values
|
| 587 |
+
self.num_inference_steps = None
|
| 588 |
+
self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
|
| 589 |
+
|
| 590 |
+
def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor:
|
| 591 |
+
"""
|
| 592 |
+
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
|
| 593 |
+
current timestep.
|
| 594 |
+
Args:
|
| 595 |
+
sample (`torch.Tensor`):
|
| 596 |
+
The input sample.
|
| 597 |
+
timestep (`int`, *optional*):
|
| 598 |
+
The current timestep in the diffusion chain.
|
| 599 |
+
Returns:
|
| 600 |
+
`torch.Tensor`:
|
| 601 |
+
A scaled input sample.
|
| 602 |
+
"""
|
| 603 |
+
return sample
|
| 604 |
+
|
| 605 |
+
def _get_variance(self, timestep, prev_timestep):
|
| 606 |
+
alpha_prod_t = self.alphas_cumprod[timestep]
|
| 607 |
+
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
|
| 608 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 609 |
+
beta_prod_t_prev = 1 - alpha_prod_t_prev
|
| 610 |
+
|
| 611 |
+
variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
|
| 612 |
+
|
| 613 |
+
return variance
|
| 614 |
+
|
| 615 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
|
| 616 |
+
def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:
|
| 617 |
+
"""
|
| 618 |
+
"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
|
| 619 |
+
prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
|
| 620 |
+
s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
|
| 621 |
+
pixels from saturation at each step. We find that dynamic thresholding results in significantly better
|
| 622 |
+
photorealism as well as better image-text alignment, especially when using very large guidance weights."
|
| 623 |
+
https://huggingface.co/papers/2205.11487
|
| 624 |
+
"""
|
| 625 |
+
dtype = sample.dtype
|
| 626 |
+
batch_size, channels, height, width = sample.shape
|
| 627 |
+
|
| 628 |
+
if dtype not in (torch.float32, torch.float64):
|
| 629 |
+
sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
|
| 630 |
+
|
| 631 |
+
# Flatten sample for doing quantile calculation along each image
|
| 632 |
+
sample = sample.reshape(batch_size, channels * height * width)
|
| 633 |
+
|
| 634 |
+
abs_sample = sample.abs() # "a certain percentile absolute pixel value"
|
| 635 |
+
|
| 636 |
+
s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
|
| 637 |
+
s = torch.clamp(
|
| 638 |
+
s, min=1, max=self.config.sample_max_value
|
| 639 |
+
) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
|
| 640 |
+
|
| 641 |
+
s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
|
| 642 |
+
sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
|
| 643 |
+
|
| 644 |
+
sample = sample.reshape(batch_size, channels, height, width)
|
| 645 |
+
sample = sample.to(dtype)
|
| 646 |
+
|
| 647 |
+
return sample
|
| 648 |
+
|
| 649 |
+
def set_timesteps(
|
| 650 |
+
self, strength, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None
|
| 651 |
+
):
|
| 652 |
+
"""
|
| 653 |
+
Sets the discrete timesteps used for the diffusion chain (to be run before inference).
|
| 654 |
+
Args:
|
| 655 |
+
num_inference_steps (`int`):
|
| 656 |
+
The number of diffusion steps used when generating samples with a pre-trained model.
|
| 657 |
+
"""
|
| 658 |
+
|
| 659 |
+
if num_inference_steps > self.config.num_train_timesteps:
|
| 660 |
+
raise ValueError(
|
| 661 |
+
f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
|
| 662 |
+
f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
|
| 663 |
+
f" maximal {self.config.num_train_timesteps} timesteps."
|
| 664 |
+
)
|
| 665 |
+
|
| 666 |
+
self.num_inference_steps = num_inference_steps
|
| 667 |
+
|
| 668 |
+
# LCM Timesteps Setting (Linear Spacing)
|
| 669 |
+
c = self.config.num_train_timesteps // lcm_origin_steps
|
| 670 |
+
lcm_origin_timesteps = (
|
| 671 |
+
np.asarray(list(range(1, int(lcm_origin_steps * strength) + 1))) * c - 1
|
| 672 |
+
) # LCM Training Steps Schedule
|
| 673 |
+
skipping_step = len(lcm_origin_timesteps) // num_inference_steps
|
| 674 |
+
timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule
|
| 675 |
+
|
| 676 |
+
self.timesteps = torch.from_numpy(timesteps.copy()).to(device)
|
| 677 |
+
|
| 678 |
+
def get_scalings_for_boundary_condition_discrete(self, t):
|
| 679 |
+
self.sigma_data = 0.5 # Default: 0.5
|
| 680 |
+
|
| 681 |
+
# Dividing t by 0.1 makes c_skip close to a delta function at t = 0.
|
| 682 |
+
c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
|
| 683 |
+
c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
|
| 684 |
+
return c_skip, c_out
|
| 685 |
+
|
| 686 |
+
def step(
|
| 687 |
+
self,
|
| 688 |
+
model_output: torch.Tensor,
|
| 689 |
+
timeindex: int,
|
| 690 |
+
timestep: int,
|
| 691 |
+
sample: torch.Tensor,
|
| 692 |
+
eta: float = 0.0,
|
| 693 |
+
use_clipped_model_output: bool = False,
|
| 694 |
+
generator=None,
|
| 695 |
+
variance_noise: Optional[torch.Tensor] = None,
|
| 696 |
+
return_dict: bool = True,
|
| 697 |
+
) -> Union[LCMSchedulerOutput, Tuple]:
|
| 698 |
+
"""
|
| 699 |
+
Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
|
| 700 |
+
process from the learned model outputs (most often the predicted noise).
|
| 701 |
+
Args:
|
| 702 |
+
model_output (`torch.Tensor`):
|
| 703 |
+
The direct output from learned diffusion model.
|
| 704 |
+
timestep (`float`):
|
| 705 |
+
The current discrete timestep in the diffusion chain.
|
| 706 |
+
sample (`torch.Tensor`):
|
| 707 |
+
A current instance of a sample created by the diffusion process.
|
| 708 |
+
eta (`float`):
|
| 709 |
+
The weight of noise for added noise in diffusion step.
|
| 710 |
+
use_clipped_model_output (`bool`, defaults to `False`):
|
| 711 |
+
If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
|
| 712 |
+
because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
|
| 713 |
+
clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
|
| 714 |
+
`use_clipped_model_output` has no effect.
|
| 715 |
+
generator (`torch.Generator`, *optional*):
|
| 716 |
+
A random number generator.
|
| 717 |
+
variance_noise (`torch.Tensor`):
|
| 718 |
+
Alternative to generating noise with `generator` by directly providing the noise for the variance
|
| 719 |
+
itself. Useful for methods such as [`CycleDiffusion`].
|
| 720 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 721 |
+
Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
|
| 722 |
+
Returns:
|
| 723 |
+
[`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
|
| 724 |
+
If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
|
| 725 |
+
tuple is returned where the first element is the sample tensor.
|
| 726 |
+
"""
|
| 727 |
+
if self.num_inference_steps is None:
|
| 728 |
+
raise ValueError(
|
| 729 |
+
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
|
| 730 |
+
)
|
| 731 |
+
|
| 732 |
+
# 1. get previous step value
|
| 733 |
+
prev_timeindex = timeindex + 1
|
| 734 |
+
if prev_timeindex < len(self.timesteps):
|
| 735 |
+
prev_timestep = self.timesteps[prev_timeindex]
|
| 736 |
+
else:
|
| 737 |
+
prev_timestep = timestep
|
| 738 |
+
|
| 739 |
+
# 2. compute alphas, betas
|
| 740 |
+
alpha_prod_t = self.alphas_cumprod[timestep]
|
| 741 |
+
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
|
| 742 |
+
|
| 743 |
+
beta_prod_t = 1 - alpha_prod_t
|
| 744 |
+
beta_prod_t_prev = 1 - alpha_prod_t_prev
|
| 745 |
+
|
| 746 |
+
# 3. Get scalings for boundary conditions
|
| 747 |
+
c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
|
| 748 |
+
|
| 749 |
+
# 4. Different Parameterization:
|
| 750 |
+
parameterization = self.config.prediction_type
|
| 751 |
+
|
| 752 |
+
if parameterization == "epsilon": # noise-prediction
|
| 753 |
+
pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
|
| 754 |
+
|
| 755 |
+
elif parameterization == "sample": # x-prediction
|
| 756 |
+
pred_x0 = model_output
|
| 757 |
+
|
| 758 |
+
elif parameterization == "v_prediction": # v-prediction
|
| 759 |
+
pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
|
| 760 |
+
|
| 761 |
+
# 5. Denoise model output using boundary conditions
|
| 762 |
+
denoised = c_out * pred_x0 + c_skip * sample
|
| 763 |
+
|
| 764 |
+
# 6. Sample z ~ N(0, I) for multi-step inference
|
| 765 |
+
# Noise is not used for one-step sampling.
|
| 766 |
+
if len(self.timesteps) > 1:
|
| 767 |
+
noise = torch.randn(model_output.shape).to(model_output.device)
|
| 768 |
+
prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
|
| 769 |
+
else:
|
| 770 |
+
prev_sample = denoised
|
| 771 |
+
|
| 772 |
+
if not return_dict:
|
| 773 |
+
return (prev_sample, denoised)
|
| 774 |
+
|
| 775 |
+
return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
|
| 776 |
+
|
| 777 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
|
| 778 |
+
def add_noise(
|
| 779 |
+
self,
|
| 780 |
+
original_samples: torch.Tensor,
|
| 781 |
+
noise: torch.Tensor,
|
| 782 |
+
timesteps: torch.IntTensor,
|
| 783 |
+
) -> torch.Tensor:
|
| 784 |
+
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
|
| 785 |
+
alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
|
| 786 |
+
timesteps = timesteps.to(original_samples.device)
|
| 787 |
+
|
| 788 |
+
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
|
| 789 |
+
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
| 790 |
+
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
|
| 791 |
+
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
| 792 |
+
|
| 793 |
+
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
|
| 794 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
| 795 |
+
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
|
| 796 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
| 797 |
+
|
| 798 |
+
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
|
| 799 |
+
return noisy_samples
|
| 800 |
+
|
| 801 |
+
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
|
| 802 |
+
def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor:
|
| 803 |
+
# Make sure alphas_cumprod and timestep have same device and dtype as sample
|
| 804 |
+
alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
|
| 805 |
+
timesteps = timesteps.to(sample.device)
|
| 806 |
+
|
| 807 |
+
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
|
| 808 |
+
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
|
| 809 |
+
while len(sqrt_alpha_prod.shape) < len(sample.shape):
|
| 810 |
+
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
|
| 811 |
+
|
| 812 |
+
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
|
| 813 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
|
| 814 |
+
while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
|
| 815 |
+
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
|
| 816 |
+
|
| 817 |
+
velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
|
| 818 |
+
return velocity
|
| 819 |
+
|
| 820 |
+
def __len__(self):
|
| 821 |
+
return self.config.num_train_timesteps
|
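Before moving on to the interpolation pipeline, a small standalone sketch of what the scheduler above computes: the skipping-step timestep schedule from `set_timesteps` and the boundary-condition scalings from `get_scalings_for_boundary_condition_discrete`. The config values below (`num_train_timesteps=1000`, `lcm_origin_steps=50`, `strength=1.0`) are illustrative assumptions, not values taken from this diff.

```py
import numpy as np

# Assumed config values, for illustration only
num_train_timesteps = 1000
lcm_origin_steps = 50
num_inference_steps = 4
strength = 1.0
sigma_data = 0.5

# Linear spacing over the LCM training schedule, then skip down to num_inference_steps
c = num_train_timesteps // lcm_origin_steps
lcm_origin_timesteps = np.asarray(range(1, int(lcm_origin_steps * strength) + 1)) * c - 1
skipping_step = len(lcm_origin_timesteps) // num_inference_steps
timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]
print(timesteps)  # [999 759 519 279]

# Boundary-condition scalings used in step() to mix the x0 prediction with the sample
t = timesteps[0]
c_skip = sigma_data**2 / ((t / 0.1) ** 2 + sigma_data**2)
c_out = (t / 0.1) / ((t / 0.1) ** 2 + sigma_data**2) ** 0.5
print(c_skip, c_out)  # c_skip is ~0 at large t, c_out is ~1
```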
v0.36.0/latent_consistency_interpolate.py
ADDED
|
@@ -0,0 +1,999 @@
| 1 |
+
import inspect
|
| 2 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 7 |
+
|
| 8 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 9 |
+
from diffusers.loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
|
| 10 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 11 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 12 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 13 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
|
| 14 |
+
from diffusers.schedulers import LCMScheduler
|
| 15 |
+
from diffusers.utils import (
|
| 16 |
+
USE_PEFT_BACKEND,
|
| 17 |
+
deprecate,
|
| 18 |
+
logging,
|
| 19 |
+
replace_example_docstring,
|
| 20 |
+
scale_lora_layers,
|
| 21 |
+
unscale_lora_layers,
|
| 22 |
+
)
|
| 23 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 27 |
+
|
| 28 |
+
EXAMPLE_DOC_STRING = """
|
| 29 |
+
Examples:
|
| 30 |
+
```py
|
| 31 |
+
>>> import torch
|
| 32 |
+
>>> import numpy as np
|
| 33 |
+
|
| 34 |
+
>>> from diffusers import DiffusionPipeline
|
| 35 |
+
|
| 36 |
+
>>> pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_interpolate")
|
| 37 |
+
>>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality.
|
| 38 |
+
>>> pipe.to(torch_device="cuda", torch_dtype=torch.float32)
|
| 39 |
+
|
| 40 |
+
>>> prompts = ["A cat", "A dog", "A horse"]
|
| 41 |
+
>>> num_inference_steps = 4
|
| 42 |
+
>>> num_interpolation_steps = 24
|
| 43 |
+
>>> seed = 1337
|
| 44 |
+
|
| 45 |
+
>>> torch.manual_seed(seed)
|
| 46 |
+
>>> np.random.seed(seed)
|
| 47 |
+
|
| 48 |
+
>>> images = pipe(
|
| 49 |
+
prompt=prompts,
|
| 50 |
+
height=512,
|
| 51 |
+
width=512,
|
| 52 |
+
num_inference_steps=num_inference_steps,
|
| 53 |
+
num_interpolation_steps=num_interpolation_steps,
|
| 54 |
+
guidance_scale=8.0,
|
| 55 |
+
embedding_interpolation_type="lerp",
|
| 56 |
+
latent_interpolation_type="slerp",
|
| 57 |
+
process_batch_size=4, # Make it higher or lower based on your GPU memory
|
| 58 |
+
generator=torch.Generator().manual_seed(seed),
|
| 59 |
+
)
|
| 60 |
+
|
| 61 |
+
>>> # Save the images as a video
|
| 62 |
+
>>> import imageio
|
| 63 |
+
>>> from PIL import Image
|
| 64 |
+
|
| 65 |
+
>>> def pil_to_video(images: list[Image.Image], filename: str, fps: int = 60) -> None:
|
| 66 |
+
frames = [np.array(image) for image in images]
|
| 67 |
+
with imageio.get_writer(filename, fps=fps) as video_writer:
|
| 68 |
+
for frame in frames:
|
| 69 |
+
video_writer.append_data(frame)
|
| 70 |
+
|
| 71 |
+
>>> pil_to_video(images, "lcm_interpolate.mp4", fps=24)
|
| 72 |
+
```
|
| 73 |
+
"""
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def lerp(
|
| 77 |
+
v0: Union[torch.Tensor, np.ndarray],
|
| 78 |
+
v1: Union[torch.Tensor, np.ndarray],
|
| 79 |
+
t: Union[float, torch.Tensor, np.ndarray],
|
| 80 |
+
) -> Union[torch.Tensor, np.ndarray]:
|
| 81 |
+
"""
|
| 82 |
+
Linearly interpolate between two vectors/tensors.
|
| 83 |
+
|
| 84 |
+
Args:
|
| 85 |
+
v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor.
|
| 86 |
+
v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor.
|
| 87 |
+
t (`float`, `torch.Tensor`, or `np.ndarray`):
|
| 88 |
+
Interpolation factor. If float, must be between 0 and 1. If np.ndarray or
|
| 89 |
+
torch.Tensor, must be one dimensional with values between 0 and 1.
|
| 90 |
+
|
| 91 |
+
Returns:
|
| 92 |
+
Union[torch.Tensor, np.ndarray]
|
| 93 |
+
Interpolated vector/tensor between v0 and v1.
|
| 94 |
+
"""
|
| 95 |
+
inputs_are_torch = False
|
| 96 |
+
t_is_float = False
|
| 97 |
+
|
| 98 |
+
if isinstance(v0, torch.Tensor):
|
| 99 |
+
inputs_are_torch = True
|
| 100 |
+
input_device = v0.device
|
| 101 |
+
v0 = v0.cpu().numpy()
|
| 102 |
+
v1 = v1.cpu().numpy()
|
| 103 |
+
|
| 104 |
+
if isinstance(t, torch.Tensor):
|
| 105 |
+
inputs_are_torch = True
|
| 106 |
+
input_device = t.device
|
| 107 |
+
t = t.cpu().numpy()
|
| 108 |
+
elif isinstance(t, float):
|
| 109 |
+
t_is_float = True
|
| 110 |
+
t = np.array([t])
|
| 111 |
+
|
| 112 |
+
t = t[..., None]
|
| 113 |
+
v0 = v0[None, ...]
|
| 114 |
+
v1 = v1[None, ...]
|
| 115 |
+
v2 = (1 - t) * v0 + t * v1
|
| 116 |
+
|
| 117 |
+
if t_is_float and v0.ndim > 1:
|
| 118 |
+
assert v2.shape[0] == 1
|
| 119 |
+
v2 = np.squeeze(v2, axis=0)
|
| 120 |
+
if inputs_are_torch:
|
| 121 |
+
v2 = torch.from_numpy(v2).to(input_device)
|
| 122 |
+
|
| 123 |
+
return v2
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def slerp(
|
| 127 |
+
v0: Union[torch.Tensor, np.ndarray],
|
| 128 |
+
v1: Union[torch.Tensor, np.ndarray],
|
| 129 |
+
t: Union[float, torch.Tensor, np.ndarray],
|
| 130 |
+
DOT_THRESHOLD=0.9995,
|
| 131 |
+
) -> Union[torch.Tensor, np.ndarray]:
|
| 132 |
+
"""
|
| 133 |
+
Spherical linear interpolation between two vectors/tensors.
|
| 134 |
+
|
| 135 |
+
Args:
|
| 136 |
+
v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor.
|
| 137 |
+
v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor.
|
| 138 |
+
t (`float`, `torch.Tensor`, or `np.ndarray`):
|
| 139 |
+
Interpolation factor. If float, must be between 0 and 1. If np.ndarray or
|
| 140 |
+
torch.Tensor, must be one dimensional with values between 0 and 1.
|
| 141 |
+
DOT_THRESHOLD (`float`, *optional*, default=0.9995):
|
| 142 |
+
Threshold for when to use linear interpolation instead of spherical interpolation.
|
| 143 |
+
|
| 144 |
+
Returns:
|
| 145 |
+
`torch.Tensor` or `np.ndarray`:
|
| 146 |
+
Interpolated vector/tensor between v0 and v1.
|
| 147 |
+
"""
|
| 148 |
+
inputs_are_torch = False
|
| 149 |
+
t_is_float = False
|
| 150 |
+
|
| 151 |
+
if isinstance(v0, torch.Tensor):
|
| 152 |
+
inputs_are_torch = True
|
| 153 |
+
input_device = v0.device
|
| 154 |
+
v0 = v0.cpu().numpy()
|
| 155 |
+
v1 = v1.cpu().numpy()
|
| 156 |
+
|
| 157 |
+
if isinstance(t, torch.Tensor):
|
| 158 |
+
inputs_are_torch = True
|
| 159 |
+
input_device = t.device
|
| 160 |
+
t = t.cpu().numpy()
|
| 161 |
+
elif isinstance(t, float):
|
| 162 |
+
t_is_float = True
|
| 163 |
+
t = np.array([t], dtype=v0.dtype)
|
| 164 |
+
|
| 165 |
+
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
| 166 |
+
if np.abs(dot) > DOT_THRESHOLD:
|
| 167 |
+
# v0 and v1 are close to parallel
|
| 168 |
+
# Use linear interpolation instead
|
| 169 |
+
v2 = lerp(v0, v1, t)
|
| 170 |
+
else:
|
| 171 |
+
theta_0 = np.arccos(dot)
|
| 172 |
+
sin_theta_0 = np.sin(theta_0)
|
| 173 |
+
theta_t = theta_0 * t
|
| 174 |
+
sin_theta_t = np.sin(theta_t)
|
| 175 |
+
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
| 176 |
+
s1 = sin_theta_t / sin_theta_0
|
| 177 |
+
s0 = s0[..., None]
|
| 178 |
+
s1 = s1[..., None]
|
| 179 |
+
v0 = v0[None, ...]
|
| 180 |
+
v1 = v1[None, ...]
|
| 181 |
+
v2 = s0 * v0 + s1 * v1
|
| 182 |
+
|
| 183 |
+
if t_is_float and v0.ndim > 1:
|
| 184 |
+
assert v2.shape[0] == 1
|
| 185 |
+
v2 = np.squeeze(v2, axis=0)
|
| 186 |
+
if inputs_are_torch:
|
| 187 |
+
v2 = torch.from_numpy(v2).to(input_device)
|
| 188 |
+
|
| 189 |
+
return v2
|
| 190 |
+
|
| 191 |
+
|
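As a side note on the two helpers above: the practical difference is that `slerp` keeps interpolants on the arc between the endpoints instead of cutting through the chord, which is why it is the default for latents. A minimal NumPy sketch, independent of the helpers in this file and with illustrative values only:

```py
import numpy as np

v0 = np.array([1.0, 0.0])
v1 = np.array([0.0, 1.0])
t = 0.5

# Linear interpolation shortens the vector ...
lerped = (1 - t) * v0 + t * v1
# ... while spherical interpolation keeps it on the unit circle
theta = np.arccos(np.clip(np.dot(v0, v1), -1.0, 1.0))
slerped = (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)

print(np.linalg.norm(lerped))   # ~0.707
print(np.linalg.norm(slerped))  # ~1.0
```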
| 192 |
+
class LatentConsistencyModelWalkPipeline(
|
| 193 |
+
DiffusionPipeline,
|
| 194 |
+
StableDiffusionMixin,
|
| 195 |
+
TextualInversionLoaderMixin,
|
| 196 |
+
StableDiffusionLoraLoaderMixin,
|
| 197 |
+
FromSingleFileMixin,
|
| 198 |
+
):
|
| 199 |
+
r"""
|
| 200 |
+
Pipeline for text-to-image generation using a latent consistency model.
|
| 201 |
+
|
| 202 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 203 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 204 |
+
|
| 205 |
+
The pipeline also inherits the following loading methods:
|
| 206 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 207 |
+
- [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 208 |
+
- [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 209 |
+
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
|
| 210 |
+
|
| 211 |
+
Args:
|
| 212 |
+
vae ([`AutoencoderKL`]):
|
| 213 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 214 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 215 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 216 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 217 |
+
A `CLIPTokenizer` to tokenize text.
|
| 218 |
+
unet ([`UNet2DConditionModel`]):
|
| 219 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 220 |
+
scheduler ([`SchedulerMixin`]):
|
| 221 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only
|
| 222 |
+
supports [`LCMScheduler`].
|
| 223 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 224 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 225 |
+
Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
|
| 226 |
+
about a model's potential harms.
|
| 227 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 228 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 229 |
+
requires_safety_checker (`bool`, *optional*, defaults to `True`):
|
| 230 |
+
Whether the pipeline requires a safety checker component.
|
| 231 |
+
"""
|
| 232 |
+
|
| 233 |
+
model_cpu_offload_seq = "text_encoder->unet->vae"
|
| 234 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 235 |
+
_exclude_from_cpu_offload = ["safety_checker"]
|
| 236 |
+
_callback_tensor_inputs = ["latents", "denoised", "prompt_embeds", "w_embedding"]
|
| 237 |
+
|
| 238 |
+
def __init__(
|
| 239 |
+
self,
|
| 240 |
+
vae: AutoencoderKL,
|
| 241 |
+
text_encoder: CLIPTextModel,
|
| 242 |
+
tokenizer: CLIPTokenizer,
|
| 243 |
+
unet: UNet2DConditionModel,
|
| 244 |
+
scheduler: LCMScheduler,
|
| 245 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 246 |
+
feature_extractor: CLIPImageProcessor,
|
| 247 |
+
requires_safety_checker: bool = True,
|
| 248 |
+
):
|
| 249 |
+
super().__init__()
|
| 250 |
+
|
| 251 |
+
if safety_checker is None and requires_safety_checker:
|
| 252 |
+
logger.warning(
|
| 253 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 254 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 255 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 256 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 257 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 258 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 259 |
+
)
|
| 260 |
+
|
| 261 |
+
if safety_checker is not None and feature_extractor is None:
|
| 262 |
+
raise ValueError(
|
| 263 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 264 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 265 |
+
)
|
| 266 |
+
|
| 267 |
+
self.register_modules(
|
| 268 |
+
vae=vae,
|
| 269 |
+
text_encoder=text_encoder,
|
| 270 |
+
tokenizer=tokenizer,
|
| 271 |
+
unet=unet,
|
| 272 |
+
scheduler=scheduler,
|
| 273 |
+
safety_checker=safety_checker,
|
| 274 |
+
feature_extractor=feature_extractor,
|
| 275 |
+
)
|
| 276 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 277 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 278 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 279 |
+
|
| 280 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
|
| 281 |
+
def encode_prompt(
|
| 282 |
+
self,
|
| 283 |
+
prompt,
|
| 284 |
+
device,
|
| 285 |
+
num_images_per_prompt,
|
| 286 |
+
do_classifier_free_guidance,
|
| 287 |
+
negative_prompt=None,
|
| 288 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 289 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 290 |
+
lora_scale: Optional[float] = None,
|
| 291 |
+
clip_skip: Optional[int] = None,
|
| 292 |
+
):
|
| 293 |
+
r"""
|
| 294 |
+
Encodes the prompt into text encoder hidden states.
|
| 295 |
+
|
| 296 |
+
Args:
|
| 297 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 298 |
+
prompt to be encoded
|
| 299 |
+
device: (`torch.device`):
|
| 300 |
+
torch device
|
| 301 |
+
num_images_per_prompt (`int`):
|
| 302 |
+
number of images that should be generated per prompt
|
| 303 |
+
do_classifier_free_guidance (`bool`):
|
| 304 |
+
whether to use classifier free guidance or not
|
| 305 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 306 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 307 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 308 |
+
less than `1`).
|
| 309 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 310 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 311 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 312 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 313 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 314 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 315 |
+
argument.
|
| 316 |
+
lora_scale (`float`, *optional*):
|
| 317 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 318 |
+
clip_skip (`int`, *optional*):
|
| 319 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 320 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 321 |
+
"""
|
| 322 |
+
# set lora scale so that monkey patched LoRA
|
| 323 |
+
# function of text encoder can correctly access it
|
| 324 |
+
if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
|
| 325 |
+
self._lora_scale = lora_scale
|
| 326 |
+
|
| 327 |
+
# dynamically adjust the LoRA scale
|
| 328 |
+
if not USE_PEFT_BACKEND:
|
| 329 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 330 |
+
else:
|
| 331 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 332 |
+
|
| 333 |
+
if prompt is not None and isinstance(prompt, str):
|
| 334 |
+
batch_size = 1
|
| 335 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 336 |
+
batch_size = len(prompt)
|
| 337 |
+
else:
|
| 338 |
+
batch_size = prompt_embeds.shape[0]
|
| 339 |
+
|
| 340 |
+
if prompt_embeds is None:
|
| 341 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 342 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 343 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 344 |
+
|
| 345 |
+
text_inputs = self.tokenizer(
|
| 346 |
+
prompt,
|
| 347 |
+
padding="max_length",
|
| 348 |
+
max_length=self.tokenizer.model_max_length,
|
| 349 |
+
truncation=True,
|
| 350 |
+
return_tensors="pt",
|
| 351 |
+
)
|
| 352 |
+
text_input_ids = text_inputs.input_ids
|
| 353 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 354 |
+
|
| 355 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 356 |
+
text_input_ids, untruncated_ids
|
| 357 |
+
):
|
| 358 |
+
removed_text = self.tokenizer.batch_decode(
|
| 359 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 360 |
+
)
|
| 361 |
+
logger.warning(
|
| 362 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 363 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 364 |
+
)
|
| 365 |
+
|
| 366 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 367 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 368 |
+
else:
|
| 369 |
+
attention_mask = None
|
| 370 |
+
|
| 371 |
+
if clip_skip is None:
|
| 372 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 373 |
+
prompt_embeds = prompt_embeds[0]
|
| 374 |
+
else:
|
| 375 |
+
prompt_embeds = self.text_encoder(
|
| 376 |
+
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
|
| 377 |
+
)
|
| 378 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 379 |
+
# all the hidden states from the encoder layers. Then index into
|
| 380 |
+
# the tuple to access the hidden states from the desired layer.
|
| 381 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 382 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 383 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 384 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 385 |
+
# layer.
|
| 386 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 387 |
+
|
| 388 |
+
if self.text_encoder is not None:
|
| 389 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 390 |
+
elif self.unet is not None:
|
| 391 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 392 |
+
else:
|
| 393 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 394 |
+
|
| 395 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 396 |
+
|
| 397 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 398 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 399 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 400 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 401 |
+
|
| 402 |
+
# get unconditional embeddings for classifier free guidance
|
| 403 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 404 |
+
uncond_tokens: List[str]
|
| 405 |
+
if negative_prompt is None:
|
| 406 |
+
uncond_tokens = [""] * batch_size
|
| 407 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 408 |
+
raise TypeError(
|
| 409 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 410 |
+
f" {type(prompt)}."
|
| 411 |
+
)
|
| 412 |
+
elif isinstance(negative_prompt, str):
|
| 413 |
+
uncond_tokens = [negative_prompt]
|
| 414 |
+
elif batch_size != len(negative_prompt):
|
| 415 |
+
raise ValueError(
|
| 416 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 417 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 418 |
+
" the batch size of `prompt`."
|
| 419 |
+
)
|
| 420 |
+
else:
|
| 421 |
+
uncond_tokens = negative_prompt
|
| 422 |
+
|
| 423 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 424 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 425 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 426 |
+
|
| 427 |
+
max_length = prompt_embeds.shape[1]
|
| 428 |
+
uncond_input = self.tokenizer(
|
| 429 |
+
uncond_tokens,
|
| 430 |
+
padding="max_length",
|
| 431 |
+
max_length=max_length,
|
| 432 |
+
truncation=True,
|
| 433 |
+
return_tensors="pt",
|
| 434 |
+
)
|
| 435 |
+
|
| 436 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 437 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 438 |
+
else:
|
| 439 |
+
attention_mask = None
|
| 440 |
+
|
| 441 |
+
negative_prompt_embeds = self.text_encoder(
|
| 442 |
+
uncond_input.input_ids.to(device),
|
| 443 |
+
attention_mask=attention_mask,
|
| 444 |
+
)
|
| 445 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 446 |
+
|
| 447 |
+
if do_classifier_free_guidance:
|
| 448 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 449 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 450 |
+
|
| 451 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 452 |
+
|
| 453 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 454 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 455 |
+
|
| 456 |
+
if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 457 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 458 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 459 |
+
|
| 460 |
+
return prompt_embeds, negative_prompt_embeds
|
| 461 |
+
|
| 462 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
|
| 463 |
+
def run_safety_checker(self, image, device, dtype):
|
| 464 |
+
if self.safety_checker is None:
|
| 465 |
+
has_nsfw_concept = None
|
| 466 |
+
else:
|
| 467 |
+
if torch.is_tensor(image):
|
| 468 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 469 |
+
else:
|
| 470 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 471 |
+
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
| 472 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 473 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 474 |
+
)
|
| 475 |
+
return image, has_nsfw_concept
|
| 476 |
+
|
| 477 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 478 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 479 |
+
shape = (
|
| 480 |
+
batch_size,
|
| 481 |
+
num_channels_latents,
|
| 482 |
+
int(height) // self.vae_scale_factor,
|
| 483 |
+
int(width) // self.vae_scale_factor,
|
| 484 |
+
)
|
| 485 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 486 |
+
raise ValueError(
|
| 487 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 488 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 489 |
+
)
|
| 490 |
+
|
| 491 |
+
if latents is None:
|
| 492 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 493 |
+
else:
|
| 494 |
+
latents = latents.to(device)
|
| 495 |
+
|
| 496 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 497 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 498 |
+
return latents
|
| 499 |
+
|
| 500 |
+
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
|
| 501 |
+
"""
|
| 502 |
+
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
|
| 503 |
+
|
| 504 |
+
Args:
|
| 505 |
+
w (`torch.Tensor`):
|
| 506 |
+
guidance scale values at which to generate embedding vectors
|
| 507 |
+
embedding_dim (`int`, *optional*, defaults to 512):
|
| 508 |
+
dimension of the embeddings to generate
|
| 509 |
+
dtype:
|
| 510 |
+
data type of the generated embeddings
|
| 511 |
+
|
| 512 |
+
Returns:
|
| 513 |
+
`torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`
|
| 514 |
+
"""
|
| 515 |
+
assert len(w.shape) == 1
|
| 516 |
+
w = w * 1000.0
|
| 517 |
+
|
| 518 |
+
half_dim = embedding_dim // 2
|
| 519 |
+
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
|
| 520 |
+
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
|
| 521 |
+
emb = w.to(dtype)[:, None] * emb[None, :]
|
| 522 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
| 523 |
+
if embedding_dim % 2 == 1: # zero pad
|
| 524 |
+
emb = torch.nn.functional.pad(emb, (0, 1))
|
| 525 |
+
assert emb.shape == (w.shape[0], embedding_dim)
|
| 526 |
+
return emb
|
| 527 |
+
|
| 528 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 529 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 530 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 531 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 532 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 533 |
+
# and should be between [0, 1]
|
| 534 |
+
|
| 535 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 536 |
+
extra_step_kwargs = {}
|
| 537 |
+
if accepts_eta:
|
| 538 |
+
extra_step_kwargs["eta"] = eta
|
| 539 |
+
|
| 540 |
+
# check if the scheduler accepts generator
|
| 541 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 542 |
+
if accepts_generator:
|
| 543 |
+
extra_step_kwargs["generator"] = generator
|
| 544 |
+
return extra_step_kwargs
|
| 545 |
+
|
| 546 |
+
# Currently StableDiffusionPipeline.check_inputs with negative prompt stuff removed
|
| 547 |
+
def check_inputs(
|
| 548 |
+
self,
|
| 549 |
+
prompt: Union[str, List[str]],
|
| 550 |
+
height: int,
|
| 551 |
+
width: int,
|
| 552 |
+
callback_steps: int,
|
| 553 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 554 |
+
callback_on_step_end_tensor_inputs=None,
|
| 555 |
+
):
|
| 556 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 557 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 558 |
+
|
| 559 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 560 |
+
raise ValueError(
|
| 561 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 562 |
+
f" {type(callback_steps)}."
|
| 563 |
+
)
|
| 564 |
+
|
| 565 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 566 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 567 |
+
):
|
| 568 |
+
raise ValueError(
|
| 569 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 570 |
+
)
|
| 571 |
+
|
| 572 |
+
if prompt is not None and prompt_embeds is not None:
|
| 573 |
+
raise ValueError(
|
| 574 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 575 |
+
" only forward one of the two."
|
| 576 |
+
)
|
| 577 |
+
elif prompt is None and prompt_embeds is None:
|
| 578 |
+
raise ValueError(
|
| 579 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 580 |
+
)
|
| 581 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 582 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 583 |
+
|
| 584 |
+
@torch.no_grad()
|
| 585 |
+
def interpolate_embedding(
|
| 586 |
+
self,
|
| 587 |
+
start_embedding: torch.Tensor,
|
| 588 |
+
end_embedding: torch.Tensor,
|
| 589 |
+
num_interpolation_steps: Union[int, List[int]],
|
| 590 |
+
interpolation_type: str,
|
| 591 |
+
) -> torch.Tensor:
|
| 592 |
+
if interpolation_type == "lerp":
|
| 593 |
+
interpolation_fn = lerp
|
| 594 |
+
elif interpolation_type == "slerp":
|
| 595 |
+
interpolation_fn = slerp
|
| 596 |
+
else:
|
| 597 |
+
raise ValueError(
|
| 598 |
+
f"embedding_interpolation_type must be one of ['lerp', 'slerp'], got {interpolation_type}."
|
| 599 |
+
)
|
| 600 |
+
|
| 601 |
+
embedding = torch.cat([start_embedding, end_embedding])
|
| 602 |
+
steps = torch.linspace(0, 1, num_interpolation_steps, dtype=embedding.dtype).cpu().numpy()
|
| 603 |
+
steps = np.expand_dims(steps, axis=tuple(range(1, embedding.ndim)))
|
| 604 |
+
interpolations = []
|
| 605 |
+
|
| 606 |
+
# Interpolate between text embeddings
|
| 607 |
+
# TODO(aryan): Think of a better way of doing this
|
| 608 |
+
# See if it can be done in parallel instead
|
| 609 |
+
for i in range(embedding.shape[0] - 1):
|
| 610 |
+
interpolations.append(interpolation_fn(embedding[i], embedding[i + 1], steps).squeeze(dim=1))
|
| 611 |
+
|
| 612 |
+
interpolations = torch.cat(interpolations)
|
| 613 |
+
return interpolations
|
| 614 |
+
|
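For orientation, a rough shape sketch of what `interpolate_embedding` returns for one pair of prompts, which `__call__` later consumes one frame at a time. The `(1, 77, 768)` shape assumes a CLIP ViT-L/14 text encoder and is illustrative only:

```py
import torch

start_embedding = torch.randn(1, 77, 768)  # embedding of prompt i (assumed CLIP ViT-L/14 shape)
end_embedding = torch.randn(1, 77, 768)    # embedding of prompt i + 1
num_interpolation_steps = 24

steps = torch.linspace(0, 1, num_interpolation_steps).view(-1, 1, 1)
# Plain lerp, matching embedding_interpolation_type="lerp"
frames = (1 - steps) * start_embedding + steps * end_embedding
print(frames.shape)  # torch.Size([24, 77, 768]) -> one text embedding per output frame
```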
| 615 |
+
@torch.no_grad()
|
| 616 |
+
def interpolate_latent(
|
| 617 |
+
self,
|
| 618 |
+
start_latent: torch.Tensor,
|
| 619 |
+
end_latent: torch.Tensor,
|
| 620 |
+
num_interpolation_steps: Union[int, List[int]],
|
| 621 |
+
interpolation_type: str,
|
| 622 |
+
) -> torch.Tensor:
|
| 623 |
+
if interpolation_type == "lerp":
|
| 624 |
+
interpolation_fn = lerp
|
| 625 |
+
elif interpolation_type == "slerp":
|
| 626 |
+
interpolation_fn = slerp
|
| 627 |
+
|
| 628 |
+
latent = torch.cat([start_latent, end_latent])
|
| 629 |
+
steps = torch.linspace(0, 1, num_interpolation_steps, dtype=latent.dtype).cpu().numpy()
|
| 630 |
+
steps = np.expand_dims(steps, axis=tuple(range(1, latent.ndim)))
|
| 631 |
+
interpolations = []
|
| 632 |
+
|
| 633 |
+
# Interpolate between latents
|
| 634 |
+
# TODO: Think of a better way of doing this
|
| 635 |
+
# See if it can be done in parallel instead
|
| 636 |
+
for i in range(latent.shape[0] - 1):
|
| 637 |
+
interpolations.append(interpolation_fn(latent[i], latent[i + 1], steps).squeeze(dim=1))
|
| 638 |
+
|
| 639 |
+
return torch.cat(interpolations)
|
| 640 |
+
|
| 641 |
+
@property
|
| 642 |
+
def guidance_scale(self):
|
| 643 |
+
return self._guidance_scale
|
| 644 |
+
|
| 645 |
+
@property
|
| 646 |
+
def cross_attention_kwargs(self):
|
| 647 |
+
return self._cross_attention_kwargs
|
| 648 |
+
|
| 649 |
+
@property
|
| 650 |
+
def clip_skip(self):
|
| 651 |
+
return self._clip_skip
|
| 652 |
+
|
| 653 |
+
@property
|
| 654 |
+
def num_timesteps(self):
|
| 655 |
+
return self._num_timesteps
|
| 656 |
+
|
| 657 |
+
@torch.no_grad()
|
| 658 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 659 |
+
def __call__(
|
| 660 |
+
self,
|
| 661 |
+
prompt: Union[str, List[str]] = None,
|
| 662 |
+
height: Optional[int] = None,
|
| 663 |
+
width: Optional[int] = None,
|
| 664 |
+
num_inference_steps: int = 4,
|
| 665 |
+
num_interpolation_steps: int = 8,
|
| 666 |
+
original_inference_steps: int = None,
|
| 667 |
+
guidance_scale: float = 8.5,
|
| 668 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 669 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 670 |
+
latents: Optional[torch.Tensor] = None,
|
| 671 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 672 |
+
output_type: Optional[str] = "pil",
|
| 673 |
+
return_dict: bool = True,
|
| 674 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 675 |
+
clip_skip: Optional[int] = None,
|
| 676 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 677 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 678 |
+
embedding_interpolation_type: str = "lerp",
|
| 679 |
+
latent_interpolation_type: str = "slerp",
|
| 680 |
+
process_batch_size: int = 4,
|
| 681 |
+
**kwargs,
|
| 682 |
+
):
|
| 683 |
+
r"""
|
| 684 |
+
The call function to the pipeline for generation.
|
| 685 |
+
|
| 686 |
+
Args:
|
| 687 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 688 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 689 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 690 |
+
The height in pixels of the generated image.
|
| 691 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 692 |
+
The width in pixels of the generated image.
|
| 693 |
+
num_inference_steps (`int`, *optional*, defaults to 4):
|
| 694 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 695 |
+
expense of slower inference.
|
| 696 |
+
original_inference_steps (`int`, *optional*):
|
| 697 |
+
The original number of inference steps used to generate a linearly-spaced timestep schedule, from which
|
| 698 |
+
we will draw `num_inference_steps` evenly spaced timesteps as our final timestep schedule,
|
| 699 |
+
following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the
|
| 700 |
+
scheduler's `original_inference_steps` attribute.
|
| 701 |
+
guidance_scale (`float`, *optional*, defaults to 8.5):
|
| 702 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 703 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 704 |
+
Note that the original latent consistency models paper uses a different CFG formulation where the
|
| 705 |
+
guidance scales are decreased by 1 (so in the paper formulation CFG is enabled when `guidance_scale >
|
| 706 |
+
0`).
|
| 707 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 708 |
+
The number of images to generate per prompt.
|
| 709 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 710 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 711 |
+
generation deterministic.
|
| 712 |
+
latents (`torch.Tensor`, *optional*):
|
| 713 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 714 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 715 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 716 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 717 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 718 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 719 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 720 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 721 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 722 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 723 |
+
plain tuple.
|
| 724 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 725 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 726 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 727 |
+
clip_skip (`int`, *optional*):
|
| 728 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 729 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 730 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 731 |
+
A function that is called at the end of each denoising step during inference. The function is called
|
| 732 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 733 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 734 |
+
`callback_on_step_end_tensor_inputs`.
|
| 735 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 736 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 737 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 738 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 739 |
+
embedding_interpolation_type (`str`, *optional*, defaults to `"lerp"`):
|
| 740 |
+
The type of interpolation to use for interpolating between text embeddings. Choose between `"lerp"` and `"slerp"`.
|
| 741 |
+
latent_interpolation_type (`str`, *optional*, defaults to `"slerp"`):
|
| 742 |
+
The type of interpolation to use for interpolating between latents. Choose between `"lerp"` and `"slerp"`.
|
| 743 |
+
process_batch_size (`int`, *optional*, defaults to 4):
|
| 744 |
+
The batch size to use for processing the images. This is useful when generating a large number of images
|
| 745 |
+
and you want to avoid running out of memory.
|
| 746 |
+
|
| 747 |
+
Examples:
|
| 748 |
+
|
| 749 |
+
Returns:
|
| 750 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 751 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 752 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 753 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 754 |
+
"not-safe-for-work" (nsfw) content.
|
| 755 |
+
"""
|
| 756 |
+
|
| 757 |
+
callback = kwargs.pop("callback", None)
|
| 758 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 759 |
+
|
| 760 |
+
if callback is not None:
|
| 761 |
+
deprecate(
|
| 762 |
+
"callback",
|
| 763 |
+
"1.0.0",
|
| 764 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
|
| 765 |
+
)
|
| 766 |
+
if callback_steps is not None:
|
| 767 |
+
deprecate(
|
| 768 |
+
"callback_steps",
|
| 769 |
+
"1.0.0",
|
| 770 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
|
| 771 |
+
)
|
| 772 |
+
|
| 773 |
+
# 0. Default height and width to unet
|
| 774 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 775 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 776 |
+
|
| 777 |
+
# 1. Check inputs. Raise error if not correct
|
| 778 |
+
self.check_inputs(prompt, height, width, callback_steps, prompt_embeds, callback_on_step_end_tensor_inputs)
|
| 779 |
+
self._guidance_scale = guidance_scale
|
| 780 |
+
self._clip_skip = clip_skip
|
| 781 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 782 |
+
|
| 783 |
+
# 2. Define call parameters
|
| 784 |
+
if prompt is not None and isinstance(prompt, str):
|
| 785 |
+
batch_size = 1
|
| 786 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 787 |
+
batch_size = len(prompt)
|
| 788 |
+
else:
|
| 789 |
+
batch_size = prompt_embeds.shape[0]
|
| 790 |
+
if batch_size < 2:
|
| 791 |
+
raise ValueError(f"`prompt` must have length of at least 2 but found {batch_size}")
|
| 792 |
+
if num_images_per_prompt != 1:
|
| 793 |
+
raise ValueError("`num_images_per_prompt` must be `1` as no other value is supported yet")
|
| 794 |
+
if prompt_embeds is not None:
|
| 795 |
+
raise ValueError("`prompt_embeds` must be None since it is not supported yet")
|
| 796 |
+
if latents is not None:
|
| 797 |
+
raise ValueError("`latents` must be None since it is not supported yet")
|
| 798 |
+
|
| 799 |
+
device = self._execution_device
|
| 800 |
+
# do_classifier_free_guidance = guidance_scale > 1.0
|
| 801 |
+
|
| 802 |
+
lora_scale = (
|
| 803 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 804 |
+
)
|
| 805 |
+
|
| 806 |
+
self.scheduler.set_timesteps(num_inference_steps, device, original_inference_steps=original_inference_steps)
|
| 807 |
+
timesteps = self.scheduler.timesteps
|
| 808 |
+
num_channels_latents = self.unet.config.in_channels
|
| 809 |
+
# bs = batch_size * num_images_per_prompt
|
| 810 |
+
|
| 811 |
+
# 3. Encode initial input prompt
|
| 812 |
+
prompt_embeds_1, _ = self.encode_prompt(
|
| 813 |
+
prompt[:1],
|
| 814 |
+
device,
|
| 815 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 816 |
+
do_classifier_free_guidance=False,
|
| 817 |
+
negative_prompt=None,
|
| 818 |
+
prompt_embeds=prompt_embeds,
|
| 819 |
+
negative_prompt_embeds=None,
|
| 820 |
+
lora_scale=lora_scale,
|
| 821 |
+
clip_skip=self.clip_skip,
|
| 822 |
+
)
|
| 823 |
+
|
| 824 |
+
# 4. Prepare initial latent variables
|
| 825 |
+
latents_1 = self.prepare_latents(
|
| 826 |
+
1,
|
| 827 |
+
num_channels_latents,
|
| 828 |
+
height,
|
| 829 |
+
width,
|
| 830 |
+
prompt_embeds_1.dtype,
|
| 831 |
+
device,
|
| 832 |
+
generator,
|
| 833 |
+
latents,
|
| 834 |
+
)
|
| 835 |
+
|
| 836 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None)
|
| 837 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 838 |
+
self._num_timesteps = len(timesteps)
|
| 839 |
+
images = []
|
| 840 |
+
|
| 841 |
+
# 5. Iterate over prompts and perform latent walk. Note that we do this two prompts at a time
|
| 842 |
+
# otherwise the memory usage ends up being too high.
|
| 843 |
+
with self.progress_bar(total=batch_size - 1) as prompt_progress_bar:
|
| 844 |
+
for i in range(1, batch_size):
|
| 845 |
+
# 6. Encode current prompt
|
| 846 |
+
prompt_embeds_2, _ = self.encode_prompt(
|
| 847 |
+
prompt[i : i + 1],
|
| 848 |
+
device,
|
| 849 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 850 |
+
do_classifier_free_guidance=False,
|
| 851 |
+
negative_prompt=None,
|
| 852 |
+
prompt_embeds=prompt_embeds,
|
| 853 |
+
negative_prompt_embeds=None,
|
| 854 |
+
lora_scale=lora_scale,
|
| 855 |
+
clip_skip=self.clip_skip,
|
| 856 |
+
)
|
| 857 |
+
|
| 858 |
+
# 7. Prepare current latent variables
|
| 859 |
+
latents_2 = self.prepare_latents(
|
| 860 |
+
1,
|
| 861 |
+
num_channels_latents,
|
| 862 |
+
height,
|
| 863 |
+
width,
|
| 864 |
+
prompt_embeds_2.dtype,
|
| 865 |
+
device,
|
| 866 |
+
generator,
|
| 867 |
+
latents,
|
| 868 |
+
)
|
| 869 |
+
|
| 870 |
+
# 8. Interpolate between previous and current prompt embeddings and latents
|
| 871 |
+
inference_embeddings = self.interpolate_embedding(
|
| 872 |
+
start_embedding=prompt_embeds_1,
|
| 873 |
+
end_embedding=prompt_embeds_2,
|
| 874 |
+
num_interpolation_steps=num_interpolation_steps,
|
| 875 |
+
interpolation_type=embedding_interpolation_type,
|
| 876 |
+
)
|
| 877 |
+
inference_latents = self.interpolate_latent(
|
| 878 |
+
start_latent=latents_1,
|
| 879 |
+
end_latent=latents_2,
|
| 880 |
+
num_interpolation_steps=num_interpolation_steps,
|
| 881 |
+
interpolation_type=latent_interpolation_type,
|
| 882 |
+
)
|
| 883 |
+
next_prompt_embeds = inference_embeddings[-1:].detach().clone()
|
| 884 |
+
next_latents = inference_latents[-1:].detach().clone()
|
| 885 |
+
bs = num_interpolation_steps
|
| 886 |
+
|
| 887 |
+
# 9. Perform inference in batches. Note the use of `process_batch_size` to control the batch size
|
| 888 |
+
# of the inference. This is useful for reducing memory usage and can be configured based on the
|
| 889 |
+
# available GPU memory.
|
| 890 |
+
with self.progress_bar(
|
| 891 |
+
total=(bs + process_batch_size - 1) // process_batch_size
|
| 892 |
+
) as batch_progress_bar:
|
| 893 |
+
for batch_index in range(0, bs, process_batch_size):
|
| 894 |
+
batch_inference_latents = inference_latents[batch_index : batch_index + process_batch_size]
|
| 895 |
+
batch_inference_embeddings = inference_embeddings[
|
| 896 |
+
batch_index : batch_index + process_batch_size
|
| 897 |
+
]
|
| 898 |
+
|
| 899 |
+
self.scheduler.set_timesteps(
|
| 900 |
+
num_inference_steps, device, original_inference_steps=original_inference_steps
|
| 901 |
+
)
|
| 902 |
+
timesteps = self.scheduler.timesteps
|
| 903 |
+
|
| 904 |
+
current_bs = batch_inference_embeddings.shape[0]
|
| 905 |
+
w = torch.tensor(self.guidance_scale - 1).repeat(current_bs)
|
| 906 |
+
w_embedding = self.get_guidance_scale_embedding(
|
| 907 |
+
w, embedding_dim=self.unet.config.time_cond_proj_dim
|
| 908 |
+
).to(device=device, dtype=latents_1.dtype)
|
| 909 |
+
|
| 910 |
+
# 10. Perform inference for current batch
|
| 911 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 912 |
+
for index, t in enumerate(timesteps):
|
| 913 |
+
batch_inference_latents = batch_inference_latents.to(batch_inference_embeddings.dtype)
|
| 914 |
+
|
| 915 |
+
# model prediction (v-prediction, eps, x)
|
| 916 |
+
model_pred = self.unet(
|
| 917 |
+
batch_inference_latents,
|
| 918 |
+
t,
|
| 919 |
+
timestep_cond=w_embedding,
|
| 920 |
+
encoder_hidden_states=batch_inference_embeddings,
|
| 921 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 922 |
+
return_dict=False,
|
| 923 |
+
)[0]
|
| 924 |
+
|
| 925 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 926 |
+
batch_inference_latents, denoised = self.scheduler.step(
|
| 927 |
+
model_pred, t, batch_inference_latents, **extra_step_kwargs, return_dict=False
|
| 928 |
+
)
|
| 929 |
+
if callback_on_step_end is not None:
|
| 930 |
+
callback_kwargs = {}
|
| 931 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 932 |
+
callback_kwargs[k] = locals()[k]
|
| 933 |
+
callback_outputs = callback_on_step_end(self, index, t, callback_kwargs)
|
| 934 |
+
|
| 935 |
+
batch_inference_latents = callback_outputs.pop("latents", batch_inference_latents)
|
| 936 |
+
batch_inference_embeddings = callback_outputs.pop(
|
| 937 |
+
"prompt_embeds", batch_inference_embeddings
|
| 938 |
+
)
|
| 939 |
+
w_embedding = callback_outputs.pop("w_embedding", w_embedding)
|
| 940 |
+
denoised = callback_outputs.pop("denoised", denoised)
|
| 941 |
+
|
| 942 |
+
# call the callback, if provided
|
| 943 |
+
if index == len(timesteps) - 1 or (
|
| 944 |
+
(index + 1) > num_warmup_steps and (index + 1) % self.scheduler.order == 0
|
| 945 |
+
):
|
| 946 |
+
progress_bar.update()
|
| 947 |
+
if callback is not None and index % callback_steps == 0:
|
| 948 |
+
step_idx = index // getattr(self.scheduler, "order", 1)
|
| 949 |
+
callback(step_idx, t, batch_inference_latents)
|
| 950 |
+
|
| 951 |
+
denoised = denoised.to(batch_inference_embeddings.dtype)
|
| 952 |
+
|
| 953 |
+
# Note: This is not supported because you would get black images in your latent walk if
|
| 954 |
+
# NSFW concept is detected
|
| 955 |
+
# if not output_type == "latent":
|
| 956 |
+
# image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 957 |
+
# image, has_nsfw_concept = self.run_safety_checker(image, device, inference_embeddings.dtype)
|
| 958 |
+
# else:
|
| 959 |
+
# image = denoised
|
| 960 |
+
# has_nsfw_concept = None
|
| 961 |
+
|
| 962 |
+
# if has_nsfw_concept is None:
|
| 963 |
+
# do_denormalize = [True] * image.shape[0]
|
| 964 |
+
# else:
|
| 965 |
+
# do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 966 |
+
|
| 967 |
+
image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 968 |
+
do_denormalize = [True] * image.shape[0]
|
| 969 |
+
has_nsfw_concept = None
|
| 970 |
+
|
| 971 |
+
image = self.image_processor.postprocess(
|
| 972 |
+
image, output_type=output_type, do_denormalize=do_denormalize
|
| 973 |
+
)
|
| 974 |
+
images.append(image)
|
| 975 |
+
|
| 976 |
+
batch_progress_bar.update()
|
| 977 |
+
|
| 978 |
+
prompt_embeds_1 = next_prompt_embeds
|
| 979 |
+
latents_1 = next_latents
|
| 980 |
+
|
| 981 |
+
prompt_progress_bar.update()
|
| 982 |
+
|
| 983 |
+
# 11. Determine what should be returned
|
| 984 |
+
if output_type == "pil":
|
| 985 |
+
images = [image for image_list in images for image in image_list]
|
| 986 |
+
elif output_type == "np":
|
| 987 |
+
images = np.concatenate(images)
|
| 988 |
+
elif output_type == "pt":
|
| 989 |
+
images = torch.cat(images)
|
| 990 |
+
else:
|
| 991 |
+
raise ValueError("`output_type` must be one of 'pil', 'np' or 'pt'.")
|
| 992 |
+
|
| 993 |
+
# Offload all models
|
| 994 |
+
self.maybe_free_model_hooks()
|
| 995 |
+
|
| 996 |
+
if not return_dict:
|
| 997 |
+
return (images, has_nsfw_concept)
|
| 998 |
+
|
| 999 |
+
return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
|
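The `__call__` above walks the latent space by interpolating both the prompt embeddings and the initial latents between consecutive prompts, then denoising each interpolated batch of at most `process_batch_size` frames. A minimal usage sketch for this community pipeline follows; the checkpoint id and the parameter values are illustrative assumptions, not taken from this diff:

```py
>>> import torch
>>> from diffusers import DiffusionPipeline

>>> # Illustrative LCM-distilled checkpoint; substitute any compatible model.
>>> pipe = DiffusionPipeline.from_pretrained(
...     "SimianLuo/LCM_Dreamshaper_v7",
...     custom_pipeline="latent_consistency_interpolate",
...     torch_dtype=torch.float16,
... ).to("cuda")

>>> prompts = ["a photo of a cat", "a photo of a dog", "a photo of a fox"]
>>> images = pipe(
...     prompt=prompts,                 # at least two prompts are required
...     num_inference_steps=4,
...     num_interpolation_steps=8,      # frames generated between each pair of prompts
...     process_batch_size=4,           # lower this to reduce peak GPU memory
... ).images
```

Lowering `process_batch_size` trades throughput for memory, which is why the loop above re-runs `set_timesteps` and rebuilds `w_embedding` once per batch.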
v0.36.0/latent_consistency_txt2img.py
ADDED
@@ -0,0 +1,729 @@
# Copyright 2025 Stanford University Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

import math
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging
from diffusers.configuration_utils import register_to_config
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import BaseOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LatentConsistencyModelPipeline(DiffusionPipeline):
    _optional_components = ["scheduler"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: "LCMScheduler",
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        scheduler = (
            scheduler
            if scheduler is not None
            else LCMScheduler(
                beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon"
            )
        )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        prompt_embeds: None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.
        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
        """

        if prompt is not None and isinstance(prompt, str):
            pass
        elif prompt is not None and isinstance(prompt, list):
            len(prompt)
        else:
            prompt_embeds.shape[0]

        if prompt_embeds is None:
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # Don't need to get uncond prompt embedding because of LCM Guided Distillation
        return prompt_embeds

    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if latents is None:
            latents = torch.randn(shape, dtype=dtype).to(device)
        else:
            latents = latents.to(device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
        Args:
            timesteps: torch.Tensor: generate embedding vectors at these timesteps
            embedding_dim: int: dimension of the embeddings to generate
            dtype: data type of the generated embeddings
        Returns:
            embedding vectors with shape `(len(timesteps), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        height: Optional[int] = 768,
        width: Optional[int] = 768,
        guidance_scale: float = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        latents: Optional[torch.Tensor] = None,
        num_inference_steps: int = 4,
        lcm_origin_steps: int = 50,
        prompt_embeds: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ):
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        # do_classifier_free_guidance = guidance_scale > 0.0  # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)

        # 3. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            prompt_embeds=prompt_embeds,
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, lcm_origin_steps)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variable
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            latents,
        )
        bs = batch_size * num_images_per_prompt

        # 6. Get Guidance Scale Embedding
        w = torch.tensor(guidance_scale).repeat(bs)
        w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)

        # 7. LCM MultiStep Sampling Loop:
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                ts = torch.full((bs,), t, device=device, dtype=torch.long)
                latents = latents.to(prompt_embeds.dtype)

                # model prediction (v-prediction, eps, x)
                model_pred = self.unet(
                    latents,
                    ts,
                    timestep_cond=w_embedding,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    return_dict=False,
                )[0]

                # compute the previous noisy sample x_t -> x_t-1
                latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)

                # # call the callback, if provided
                # if i == len(timesteps) - 1:
                progress_bar.update()

        denoised = denoised.to(prompt_embeds.dtype)
        if not output_type == "latent":
            image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
        else:
            image = denoised
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class LCMSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function output.
    Args:
        prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    """

    prev_sample: torch.Tensor
    denoised: Optional[torch.Tensor] = None


# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].
    Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
    to that part of the diffusion process.
    Args:
        num_diffusion_timesteps (`int`): the number of betas to produce.
        max_beta (`float`): the maximum beta to use; use values lower than 1 to
            prevent singularities.
        alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
            Choose from `cosine` or `exp`
    Returns:
        betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


def rescale_zero_terminal_snr(betas):
    """
    Rescales betas to have zero terminal SNR Based on https://huggingface.co/papers/2305.08891 (Algorithm 1)
    Args:
        betas (`torch.Tensor`):
            the betas that the scheduler is being initialized with.
    Returns:
        `torch.Tensor`: rescaled betas with zero terminal SNR
    """
    # Convert betas to alphas_bar_sqrt
    alphas = 1.0 - betas
    alphas_cumprod = torch.cumprod(alphas, dim=0)
    alphas_bar_sqrt = alphas_cumprod.sqrt()

    # Store old values.
    alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
    alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()

    # Shift so the last timestep is zero.
    alphas_bar_sqrt -= alphas_bar_sqrt_T

    # Scale so the first timestep is back to the old value.
    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)

    # Convert alphas_bar_sqrt to betas
    alphas_bar = alphas_bar_sqrt**2  # Revert sqrt
    alphas = alphas_bar[1:] / alphas_bar[:-1]  # Revert cumprod
    alphas = torch.cat([alphas_bar[0:1], alphas])
    betas = 1 - alphas

    return betas


class LCMScheduler(SchedulerMixin, ConfigMixin):
    """
    `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
    non-Markovian guidance.
    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
    methods the library implements for all schedulers such as loading and saving.
    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        beta_start (`float`, defaults to 0.0001):
            The starting `beta` value of inference.
        beta_end (`float`, defaults to 0.02):
            The final `beta` value.
        beta_schedule (`str`, defaults to `"linear"`):
            The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
            `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
        trained_betas (`np.ndarray`, *optional*):
            Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
        clip_sample (`bool`, defaults to `True`):
            Clip the predicted sample for numerical stability.
        clip_sample_range (`float`, defaults to 1.0):
            The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
        set_alpha_to_one (`bool`, defaults to `True`):
            Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
            there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
            otherwise it uses the alpha value at step 0.
        steps_offset (`int`, defaults to 0):
            An offset added to the inference steps, as required by some model families.
        prediction_type (`str`, defaults to `epsilon`, *optional*):
            Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
            `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
            Video](https://imagen.research.google/video/paper.pdf) paper).
        thresholding (`bool`, defaults to `False`):
            Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
            as Stable Diffusion.
        dynamic_thresholding_ratio (`float`, defaults to 0.995):
            The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
        sample_max_value (`float`, defaults to 1.0):
            The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
        timestep_spacing (`str`, defaults to `"leading"`):
            The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
            Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
        rescale_betas_zero_snr (`bool`, defaults to `False`):
            Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
            dark samples instead of limiting it to samples with medium brightness. Loosely related to
            [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
    """

    # _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_one: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        thresholding: bool = False,
        dynamic_thresholding_ratio: float = 0.995,
        clip_sample_range: float = 1.0,
        sample_max_value: float = 1.0,
        timestep_spacing: str = "leading",
        rescale_betas_zero_snr: bool = False,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        # Rescale for zero SNR
        if rescale_betas_zero_snr:
            self.betas = rescale_zero_terminal_snr(self.betas)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in ddim, we are looking into the previous alphas_cumprod
        # For the final step, there is no previous alphas_cumprod because we are already at 0
        # `set_alpha_to_one` decides whether we set this parameter simply to one or
        # whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))

    def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep.
        Args:
            sample (`torch.Tensor`):
                The input sample.
            timestep (`int`, *optional*):
                The current timestep in the diffusion chain.
        Returns:
            `torch.Tensor`:
                A scaled input sample.
        """
        return sample

    def _get_variance(self, timestep, prev_timestep):
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)

        return variance

    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
    def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:
        """
        "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
        prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
        s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
        pixels from saturation at each step. We find that dynamic thresholding results in significantly better
        photorealism as well as better image-text alignment, especially when using very large guidance weights."
        https://huggingface.co/papers/2205.11487
        """
        dtype = sample.dtype
        batch_size, channels, height, width = sample.shape

        if dtype not in (torch.float32, torch.float64):
            sample = sample.float()  # upcast for quantile calculation, and clamp not implemented for cpu half

        # Flatten sample for doing quantile calculation along each image
        sample = sample.reshape(batch_size, channels * height * width)

        abs_sample = sample.abs()  # "a certain percentile absolute pixel value"

        s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
        s = torch.clamp(
            s, min=1, max=self.config.sample_max_value
        )  # When clamped to min=1, equivalent to standard clipping to [-1, 1]

        s = s.unsqueeze(1)  # (batch_size, 1) because clamp will broadcast along dim=0
        sample = torch.clamp(sample, -s, s) / s  # "we threshold xt0 to the range [-s, s] and then divide by s"

        sample = sample.reshape(batch_size, channels, height, width)
        sample = sample.to(dtype)

        return sample

    def set_timesteps(self, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).
        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
        """

        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps

        # LCM Timesteps Setting:  # Linear Spacing
        c = self.config.num_train_timesteps // lcm_origin_steps
        lcm_origin_timesteps = np.asarray(list(range(1, lcm_origin_steps + 1))) * c - 1  # LCM Training Steps Schedule
        skipping_step = len(lcm_origin_timesteps) // num_inference_steps
        timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]  # LCM Inference Steps Schedule

        self.timesteps = torch.from_numpy(timesteps.copy()).to(device)

    def get_scalings_for_boundary_condition_discrete(self, t):
        self.sigma_data = 0.5  # Default: 0.5

        # By dividing 0.1: This is almost a delta function at t=0.
        c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
        c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
        return c_skip, c_out

    def step(
        self,
        model_output: torch.Tensor,
        timeindex: int,
        timestep: int,
        sample: torch.Tensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        generator=None,
        variance_noise: Optional[torch.Tensor] = None,
        return_dict: bool = True,
    ) -> Union[LCMSchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).
        Args:
            model_output (`torch.Tensor`):
                The direct output from learned diffusion model.
            timestep (`float`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.Tensor`):
                A current instance of a sample created by the diffusion process.
            eta (`float`):
                The weight of noise for added noise in diffusion step.
            use_clipped_model_output (`bool`, defaults to `False`):
                If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
                because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
                clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
                `use_clipped_model_output` has no effect.
            generator (`torch.Generator`, *optional*):
                A random number generator.
            variance_noise (`torch.Tensor`):
                Alternative to generating noise with `generator` by directly providing the noise for the variance
                itself. Useful for methods such as [`CycleDiffusion`].
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
        Returns:
            [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
                If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
                tuple is returned where the first element is the sample tensor.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        # 1. get previous step value
        prev_timeindex = timeindex + 1
        if prev_timeindex < len(self.timesteps):
            prev_timestep = self.timesteps[prev_timeindex]
        else:
            prev_timestep = timestep

        # 2. compute alphas, betas
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 3. Get scalings for boundary conditions
        c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)

        # 4. Different Parameterization:
        parameterization = self.config.prediction_type

        if parameterization == "epsilon":  # noise-prediction
            pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()

        elif parameterization == "sample":  # x-prediction
            pred_x0 = model_output

        elif parameterization == "v_prediction":  # v-prediction
            pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output

        # 4. Denoise model output using boundary conditions
        denoised = c_out * pred_x0 + c_skip * sample

        # 5. Sample z ~ N(0, I), For MultiStep Inference
        # Noise is not used for one-step sampling.
        if len(self.timesteps) > 1:
            noise = torch.randn(model_output.shape).to(model_output.device)
            prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
        else:
            prev_sample = denoised

        if not return_dict:
            return (prev_sample, denoised)

        return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)

    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
    def add_noise(
        self,
        original_samples: torch.Tensor,
        noise: torch.Tensor,
        timesteps: torch.IntTensor,
    ) -> torch.Tensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples

    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
    def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as sample
        alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
        timesteps = timesteps.to(sample.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(sample.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
        return velocity

    def __len__(self):
        return self.config.num_train_timesteps
v0.36.0/llm_grounded_diffusion.py
ADDED
@@ -0,0 +1,1567 @@
# Copyright 2025 Long Lian, the GLIGEN Authors, and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is a single file implementation of LMD+. See README.md for examples.

import ast
import gc
import inspect
import math
import warnings
from collections.abc import Iterable
from typing import Any, Callable, Dict, List, Optional, Union

import torch
import torch.nn.functional as F
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import (
    FromSingleFileMixin,
    IPAdapterMixin,
    StableDiffusionLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention import Attention, GatedSelfAttentionDense
from diffusers.models.attention_processor import AttnProcessor2_0
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.pipelines import DiffusionPipeline
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    USE_PEFT_BACKEND,
    deprecate,
    logging,
    replace_example_docstring,
    scale_lora_layers,
    unscale_lora_layers,
)
from diffusers.utils.torch_utils import randn_tensor


EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import DiffusionPipeline

        >>> pipe = DiffusionPipeline.from_pretrained(
        ...     "longlian/lmd_plus",
        ...     custom_pipeline="llm_grounded_diffusion",
        ...     custom_revision="main",
        ...     variant="fp16", torch_dtype=torch.float16
        ... )
        >>> pipe.enable_model_cpu_offload()

        >>> # Generate an image described by the prompt and
        >>> # insert objects described by text at the region defined by bounding boxes
        >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage"
        >>> boxes = [[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]]
        >>> phrases = ["a waterfall", "a modern high speed train"]

        >>> images = pipe(
        ...     prompt=prompt,
        ...     phrases=phrases,
        ...     boxes=boxes,
        ...     gligen_scheduled_sampling_beta=0.4,
        ...     output_type="pil",
        ...     num_inference_steps=50,
        ...     lmd_guidance_kwargs={}
        ... ).images

        >>> images[0].save("./lmd_plus_generation.jpg")

        >>> # Generate directly from a text prompt and an LLM response
|
| 91 |
+
>>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage"
|
| 92 |
+
>>> phrases, boxes, bg_prompt, neg_prompt = pipe.parse_llm_response(\"""
|
| 93 |
+
[('a waterfall', [71, 105, 148, 258]), ('a modern high speed train', [255, 223, 181, 149])]
|
| 94 |
+
Background prompt: A beautiful forest with fall foliage
|
| 95 |
+
Negative prompt:
|
| 96 |
+
\""")
|
| 97 |
+
|
| 98 |
+
        >>> images = pipe(
|
| 99 |
+
... prompt=prompt,
|
| 100 |
+
... negative_prompt=neg_prompt,
|
| 101 |
+
... phrases=phrases,
|
| 102 |
+
... boxes=boxes,
|
| 103 |
+
... gligen_scheduled_sampling_beta=0.4,
|
| 104 |
+
... output_type="pil",
|
| 105 |
+
... num_inference_steps=50,
|
| 106 |
+
... lmd_guidance_kwargs={}
|
| 107 |
+
... ).images
|
| 108 |
+
|
| 109 |
+
>>> images[0].save("./lmd_plus_generation.jpg")
|
| 110 |
+
|
| 111 |
+
        >>> images[0]
|
| 112 |
+
|
| 113 |
+
```
|
| 114 |
+
"""
|
| 115 |
+
|
| 116 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 117 |
+
|
| 118 |
+
# All keys in Stable Diffusion models: [('down', 0, 0, 0), ('down', 0, 1, 0), ('down', 1, 0, 0), ('down', 1, 1, 0), ('down', 2, 0, 0), ('down', 2, 1, 0), ('mid', 0, 0, 0), ('up', 1, 0, 0), ('up', 1, 1, 0), ('up', 1, 2, 0), ('up', 2, 0, 0), ('up', 2, 1, 0), ('up', 2, 2, 0), ('up', 3, 0, 0), ('up', 3, 1, 0), ('up', 3, 2, 0)]
|
| 119 |
+
# Note that the first up block is `UpBlock2D` rather than `CrossAttnUpBlock2D` and does not have attention. The last index is always 0 in our case since we have one `BasicTransformerBlock` in each `Transformer2DModel`.
|
| 120 |
+
DEFAULT_GUIDANCE_ATTN_KEYS = [
|
| 121 |
+
("mid", 0, 0, 0),
|
| 122 |
+
("up", 1, 0, 0),
|
| 123 |
+
("up", 1, 1, 0),
|
| 124 |
+
("up", 1, 2, 0),
|
| 125 |
+
]
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def convert_attn_keys(key):
|
| 129 |
+
"""Convert the attention key from tuple format to the torch state format"""
|
| 130 |
+
|
| 131 |
+
if key[0] == "mid":
|
| 132 |
+
assert key[1] == 0, f"mid block only has one block but the index is {key[1]}"
|
| 133 |
+
return f"{key[0]}_block.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor"
|
| 134 |
+
|
| 135 |
+
return f"{key[0]}_blocks.{key[1]}.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor"
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
DEFAULT_GUIDANCE_ATTN_KEYS = [convert_attn_keys(key) for key in DEFAULT_GUIDANCE_ATTN_KEYS]
|
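# Illustrative note (not in the upstream file): after this conversion, a tuple key such as ("up", 1, 0, 0)
# becomes the processor name "up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor", and
# ("mid", 0, 0, 0) becomes "mid_block.attentions.0.transformer_blocks.0.attn2.processor".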
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def scale_proportion(obj_box, H, W):
|
| 142 |
+
# Separately rounding box_w and box_h to allow shift invariant box sizes. Otherwise box sizes may change when both coordinates being rounded end with ".5".
|
| 143 |
+
x_min, y_min = round(obj_box[0] * W), round(obj_box[1] * H)
|
| 144 |
+
box_w, box_h = round((obj_box[2] - obj_box[0]) * W), round((obj_box[3] - obj_box[1]) * H)
|
| 145 |
+
x_max, y_max = x_min + box_w, y_min + box_h
|
| 146 |
+
|
| 147 |
+
x_min, y_min = max(x_min, 0), max(y_min, 0)
|
| 148 |
+
x_max, y_max = min(x_max, W), min(y_max, H)
|
| 149 |
+
|
| 150 |
+
return x_min, y_min, x_max, y_max
|
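    # Worked example (illustrative numbers): obj_box = (0.25, 0.25, 0.75, 0.5) on a 16x16 latent grid gives
    # x_min = round(0.25 * 16) = 4, y_min = 4, box_w = round(0.5 * 16) = 8, box_h = round(0.25 * 16) = 4,
    # so the clipped box is (4, 4, 12, 8).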
| 151 |
+
|
| 152 |
+
|
| 153 |
+
# Adapted from the parent class `AttnProcessor2_0`
|
| 154 |
+
class AttnProcessorWithHook(AttnProcessor2_0):
|
| 155 |
+
def __init__(
|
| 156 |
+
self,
|
| 157 |
+
attn_processor_key,
|
| 158 |
+
hidden_size,
|
| 159 |
+
cross_attention_dim,
|
| 160 |
+
hook=None,
|
| 161 |
+
fast_attn=True,
|
| 162 |
+
enabled=True,
|
| 163 |
+
):
|
| 164 |
+
super().__init__()
|
| 165 |
+
self.attn_processor_key = attn_processor_key
|
| 166 |
+
self.hidden_size = hidden_size
|
| 167 |
+
self.cross_attention_dim = cross_attention_dim
|
| 168 |
+
self.hook = hook
|
| 169 |
+
self.fast_attn = fast_attn
|
| 170 |
+
self.enabled = enabled
|
| 171 |
+
|
| 172 |
+
def __call__(
|
| 173 |
+
self,
|
| 174 |
+
attn: Attention,
|
| 175 |
+
hidden_states,
|
| 176 |
+
encoder_hidden_states=None,
|
| 177 |
+
attention_mask=None,
|
| 178 |
+
temb=None,
|
| 179 |
+
scale: float = 1.0,
|
| 180 |
+
):
|
| 181 |
+
residual = hidden_states
|
| 182 |
+
|
| 183 |
+
if attn.spatial_norm is not None:
|
| 184 |
+
hidden_states = attn.spatial_norm(hidden_states, temb)
|
| 185 |
+
|
| 186 |
+
input_ndim = hidden_states.ndim
|
| 187 |
+
|
| 188 |
+
if input_ndim == 4:
|
| 189 |
+
batch_size, channel, height, width = hidden_states.shape
|
| 190 |
+
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
|
| 191 |
+
|
| 192 |
+
batch_size, sequence_length, _ = (
|
| 193 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
| 194 |
+
)
|
| 195 |
+
|
| 196 |
+
if attention_mask is not None:
|
| 197 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
| 198 |
+
|
| 199 |
+
if attn.group_norm is not None:
|
| 200 |
+
hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
|
| 201 |
+
|
| 202 |
+
args = () if USE_PEFT_BACKEND else (scale,)
|
| 203 |
+
query = attn.to_q(hidden_states, *args)
|
| 204 |
+
|
| 205 |
+
if encoder_hidden_states is None:
|
| 206 |
+
encoder_hidden_states = hidden_states
|
| 207 |
+
elif attn.norm_cross:
|
| 208 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
| 209 |
+
|
| 210 |
+
key = attn.to_k(encoder_hidden_states, *args)
|
| 211 |
+
value = attn.to_v(encoder_hidden_states, *args)
|
| 212 |
+
|
| 213 |
+
inner_dim = key.shape[-1]
|
| 214 |
+
head_dim = inner_dim // attn.heads
|
| 215 |
+
|
| 216 |
+
if (self.hook is not None and self.enabled) or not self.fast_attn:
|
| 217 |
+
query_batch_dim = attn.head_to_batch_dim(query)
|
| 218 |
+
key_batch_dim = attn.head_to_batch_dim(key)
|
| 219 |
+
value_batch_dim = attn.head_to_batch_dim(value)
|
| 220 |
+
attention_probs = attn.get_attention_scores(query_batch_dim, key_batch_dim, attention_mask)
|
| 221 |
+
|
| 222 |
+
if self.hook is not None and self.enabled:
|
| 223 |
+
# Call the hook with query, key, value, and attention maps
|
| 224 |
+
self.hook(
|
| 225 |
+
self.attn_processor_key,
|
| 226 |
+
query_batch_dim,
|
| 227 |
+
key_batch_dim,
|
| 228 |
+
value_batch_dim,
|
| 229 |
+
attention_probs,
|
| 230 |
+
)
|
| 231 |
+
|
| 232 |
+
if self.fast_attn:
|
| 233 |
+
query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 234 |
+
|
| 235 |
+
key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 236 |
+
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
|
| 237 |
+
|
| 238 |
+
if attention_mask is not None:
|
| 239 |
+
# scaled_dot_product_attention expects attention_mask shape to be
|
| 240 |
+
# (batch, heads, source_length, target_length)
|
| 241 |
+
attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
|
| 242 |
+
|
| 243 |
+
# the output of sdp = (batch, num_heads, seq_len, head_dim)
|
| 244 |
+
# TODO: add support for attn.scale when we move to Torch 2.1
|
| 245 |
+
hidden_states = F.scaled_dot_product_attention(
|
| 246 |
+
query,
|
| 247 |
+
key,
|
| 248 |
+
value,
|
| 249 |
+
attn_mask=attention_mask,
|
| 250 |
+
dropout_p=0.0,
|
| 251 |
+
is_causal=False,
|
| 252 |
+
)
|
| 253 |
+
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
|
| 254 |
+
hidden_states = hidden_states.to(query.dtype)
|
| 255 |
+
else:
|
| 256 |
+
hidden_states = torch.bmm(attention_probs, value)
|
| 257 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
| 258 |
+
|
| 259 |
+
# linear proj
|
| 260 |
+
hidden_states = attn.to_out[0](hidden_states, *args)
|
| 261 |
+
# dropout
|
| 262 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 263 |
+
|
| 264 |
+
if input_ndim == 4:
|
| 265 |
+
hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
|
| 266 |
+
|
| 267 |
+
if attn.residual_connection:
|
| 268 |
+
hidden_states = hidden_states + residual
|
| 269 |
+
|
| 270 |
+
hidden_states = hidden_states / attn.rescale_output_factor
|
| 271 |
+
|
| 272 |
+
return hidden_states
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
class LLMGroundedDiffusionPipeline(
|
| 276 |
+
DiffusionPipeline,
|
| 277 |
+
StableDiffusionMixin,
|
| 278 |
+
TextualInversionLoaderMixin,
|
| 279 |
+
StableDiffusionLoraLoaderMixin,
|
| 280 |
+
IPAdapterMixin,
|
| 281 |
+
FromSingleFileMixin,
|
| 282 |
+
):
|
| 283 |
+
r"""
|
| 284 |
+
Pipeline for layout-grounded text-to-image generation using LLM-grounded Diffusion (LMD+): https://huggingface.co/papers/2305.13655.
|
| 285 |
+
|
| 286 |
+
This model inherits from [`StableDiffusionPipeline`] and aims at implementing the pipeline with minimal modifications. Check the superclass documentation for the generic methods
|
| 287 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 288 |
+
|
| 289 |
+
This is a simplified implementation that does not perform latent or attention transfer from single object generation to overall generation. The final image is generated directly with attention and adapters control.
|
| 290 |
+
|
| 291 |
+
Args:
|
| 292 |
+
vae ([`AutoencoderKL`]):
|
| 293 |
+
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
| 294 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 295 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 296 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 297 |
+
A `CLIPTokenizer` to tokenize text.
|
| 298 |
+
unet ([`UNet2DConditionModel`]):
|
| 299 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 300 |
+
scheduler ([`SchedulerMixin`]):
|
| 301 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 302 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 303 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 304 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 305 |
+
Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
|
| 306 |
+
about a model's potential harms.
|
| 307 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 308 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 309 |
+
requires_safety_checker (bool):
|
| 310 |
+
Whether a safety checker is needed for this pipeline.
|
| 311 |
+
"""
|
| 312 |
+
|
| 313 |
+
model_cpu_offload_seq = "text_encoder->unet->vae"
|
| 314 |
+
_optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
|
| 315 |
+
_exclude_from_cpu_offload = ["safety_checker"]
|
| 316 |
+
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
|
| 317 |
+
|
| 318 |
+
objects_text = "Objects: "
|
| 319 |
+
bg_prompt_text = "Background prompt: "
|
| 320 |
+
bg_prompt_text_no_trailing_space = bg_prompt_text.rstrip()
|
| 321 |
+
neg_prompt_text = "Negative prompt: "
|
| 322 |
+
neg_prompt_text_no_trailing_space = neg_prompt_text.rstrip()
|
| 323 |
+
|
| 324 |
+
def __init__(
|
| 325 |
+
self,
|
| 326 |
+
vae: AutoencoderKL,
|
| 327 |
+
text_encoder: CLIPTextModel,
|
| 328 |
+
tokenizer: CLIPTokenizer,
|
| 329 |
+
unet: UNet2DConditionModel,
|
| 330 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 331 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 332 |
+
feature_extractor: CLIPImageProcessor,
|
| 333 |
+
image_encoder: CLIPVisionModelWithProjection = None,
|
| 334 |
+
requires_safety_checker: bool = True,
|
| 335 |
+
):
|
| 336 |
+
        # This is copied from StableDiffusionPipeline, with hook initializations for LMD+.
|
| 337 |
+
super().__init__()
|
| 338 |
+
|
| 339 |
+
if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
|
| 340 |
+
deprecation_message = (
|
| 341 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 342 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 343 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 344 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 345 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 346 |
+
" file"
|
| 347 |
+
)
|
| 348 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 349 |
+
new_config = dict(scheduler.config)
|
| 350 |
+
new_config["steps_offset"] = 1
|
| 351 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 352 |
+
|
| 353 |
+
if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True:
|
| 354 |
+
deprecation_message = (
|
| 355 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 356 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 357 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 358 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 359 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 360 |
+
)
|
| 361 |
+
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
| 362 |
+
new_config = dict(scheduler.config)
|
| 363 |
+
new_config["clip_sample"] = False
|
| 364 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 365 |
+
|
| 366 |
+
if safety_checker is None and requires_safety_checker:
|
| 367 |
+
logger.warning(
|
| 368 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 369 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 370 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 371 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 372 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 373 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 374 |
+
)
|
| 375 |
+
|
| 376 |
+
if safety_checker is not None and feature_extractor is None:
|
| 377 |
+
raise ValueError(
|
| 378 |
+
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 379 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 380 |
+
)
|
| 381 |
+
|
| 382 |
+
is_unet_version_less_0_9_0 = (
|
| 383 |
+
unet is not None
|
| 384 |
+
and hasattr(unet.config, "_diffusers_version")
|
| 385 |
+
and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
|
| 386 |
+
)
|
| 387 |
+
is_unet_sample_size_less_64 = (
|
| 388 |
+
unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 389 |
+
)
|
| 390 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 391 |
+
deprecation_message = (
|
| 392 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 393 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 394 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 395 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 396 |
+
" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 397 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 398 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 399 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 400 |
+
" the `unet/config.json` file"
|
| 401 |
+
)
|
| 402 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 403 |
+
new_config = dict(unet.config)
|
| 404 |
+
new_config["sample_size"] = 64
|
| 405 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 406 |
+
|
| 407 |
+
self.register_modules(
|
| 408 |
+
vae=vae,
|
| 409 |
+
text_encoder=text_encoder,
|
| 410 |
+
tokenizer=tokenizer,
|
| 411 |
+
unet=unet,
|
| 412 |
+
scheduler=scheduler,
|
| 413 |
+
safety_checker=safety_checker,
|
| 414 |
+
feature_extractor=feature_extractor,
|
| 415 |
+
image_encoder=image_encoder,
|
| 416 |
+
)
|
| 417 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 418 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 419 |
+
self.register_to_config(requires_safety_checker=requires_safety_checker)
|
| 420 |
+
|
| 421 |
+
# Initialize the attention hooks for LLM-grounded Diffusion
|
| 422 |
+
self.register_attn_hooks(unet)
|
| 423 |
+
self._saved_attn = None
|
| 424 |
+
|
| 425 |
+
def attn_hook(self, name, query, key, value, attention_probs):
|
| 426 |
+
if name in DEFAULT_GUIDANCE_ATTN_KEYS:
|
| 427 |
+
self._saved_attn[name] = attention_probs
|
| 428 |
+
|
| 429 |
+
@classmethod
|
| 430 |
+
def convert_box(cls, box, height, width):
|
| 431 |
+
# box: x, y, w, h (in 512 format) -> x_min, y_min, x_max, y_max
|
| 432 |
+
x_min, y_min = box[0] / width, box[1] / height
|
| 433 |
+
w_box, h_box = box[2] / width, box[3] / height
|
| 434 |
+
|
| 435 |
+
x_max, y_max = x_min + w_box, y_min + h_box
|
| 436 |
+
|
| 437 |
+
return x_min, y_min, x_max, y_max
|
| 438 |
+
|
| 439 |
+
@classmethod
|
| 440 |
+
def _parse_response_with_negative(cls, text):
|
| 441 |
+
if not text:
|
| 442 |
+
raise ValueError("LLM response is empty")
|
| 443 |
+
|
| 444 |
+
if cls.objects_text in text:
|
| 445 |
+
text = text.split(cls.objects_text)[1]
|
| 446 |
+
|
| 447 |
+
text_split = text.split(cls.bg_prompt_text_no_trailing_space)
|
| 448 |
+
if len(text_split) == 2:
|
| 449 |
+
gen_boxes, text_rem = text_split
|
| 450 |
+
else:
|
| 451 |
+
raise ValueError(f"LLM response is incomplete: {text}")
|
| 452 |
+
|
| 453 |
+
text_split = text_rem.split(cls.neg_prompt_text_no_trailing_space)
|
| 454 |
+
|
| 455 |
+
if len(text_split) == 2:
|
| 456 |
+
bg_prompt, neg_prompt = text_split
|
| 457 |
+
else:
|
| 458 |
+
raise ValueError(f"LLM response is incomplete: {text}")
|
| 459 |
+
|
| 460 |
+
try:
|
| 461 |
+
gen_boxes = ast.literal_eval(gen_boxes)
|
| 462 |
+
except SyntaxError as e:
|
| 463 |
+
# Sometimes the response is in plain text
|
| 464 |
+
if "No objects" in gen_boxes or gen_boxes.strip() == "":
|
| 465 |
+
gen_boxes = []
|
| 466 |
+
else:
|
| 467 |
+
raise e
|
| 468 |
+
bg_prompt = bg_prompt.strip()
|
| 469 |
+
neg_prompt = neg_prompt.strip()
|
| 470 |
+
|
| 471 |
+
# LLM may return "None" to mean no negative prompt provided.
|
| 472 |
+
if neg_prompt == "None":
|
| 473 |
+
neg_prompt = ""
|
| 474 |
+
|
| 475 |
+
return gen_boxes, bg_prompt, neg_prompt
|
| 476 |
+
|
| 477 |
+
@classmethod
|
| 478 |
+
def parse_llm_response(cls, response, canvas_height=512, canvas_width=512):
|
| 479 |
+
# Infer from spec
|
| 480 |
+
gen_boxes, bg_prompt, neg_prompt = cls._parse_response_with_negative(text=response)
|
| 481 |
+
|
| 482 |
+
gen_boxes = sorted(gen_boxes, key=lambda gen_box: gen_box[0])
|
| 483 |
+
|
| 484 |
+
phrases = [name for name, _ in gen_boxes]
|
| 485 |
+
boxes = [cls.convert_box(box, height=canvas_height, width=canvas_width) for _, box in gen_boxes]
|
| 486 |
+
|
| 487 |
+
return phrases, boxes, bg_prompt, neg_prompt
|
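        # Example (numbers taken from the docstring above): for the response
        #   [('a waterfall', [71, 105, 148, 258]), ('a modern high speed train', [255, 223, 181, 149])]
        # on a 512x512 canvas, each x/y/w/h box is converted to fractional corners, e.g.
        #   [71, 105, 148, 258] -> (71/512, 105/512, (71+148)/512, (105+258)/512) = approximately (0.1387, 0.2051, 0.4277, 0.7090).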
| 488 |
+
|
| 489 |
+
def check_inputs(
|
| 490 |
+
self,
|
| 491 |
+
prompt,
|
| 492 |
+
height,
|
| 493 |
+
width,
|
| 494 |
+
callback_steps,
|
| 495 |
+
phrases,
|
| 496 |
+
boxes,
|
| 497 |
+
negative_prompt=None,
|
| 498 |
+
prompt_embeds=None,
|
| 499 |
+
negative_prompt_embeds=None,
|
| 500 |
+
phrase_indices=None,
|
| 501 |
+
):
|
| 502 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 503 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 504 |
+
|
| 505 |
+
if (callback_steps is None) or (
|
| 506 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 507 |
+
):
|
| 508 |
+
raise ValueError(
|
| 509 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 510 |
+
f" {type(callback_steps)}."
|
| 511 |
+
)
|
| 512 |
+
|
| 513 |
+
if prompt is not None and prompt_embeds is not None:
|
| 514 |
+
raise ValueError(
|
| 515 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 516 |
+
" only forward one of the two."
|
| 517 |
+
)
|
| 518 |
+
elif prompt is None and prompt_embeds is None:
|
| 519 |
+
raise ValueError(
|
| 520 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 521 |
+
)
|
| 522 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 523 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 524 |
+
elif prompt is None and phrase_indices is None:
|
| 525 |
+
raise ValueError("If the prompt is None, the phrase_indices cannot be None")
|
| 526 |
+
|
| 527 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 528 |
+
raise ValueError(
|
| 529 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 530 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 531 |
+
)
|
| 532 |
+
|
| 533 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 534 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 535 |
+
raise ValueError(
|
| 536 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 537 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 538 |
+
f" {negative_prompt_embeds.shape}."
|
| 539 |
+
)
|
| 540 |
+
|
| 541 |
+
if len(phrases) != len(boxes):
|
| 542 |
+
raise ValueError(
|
| 543 |
+
"length of `phrases` and `boxes` has to be same, but"
|
| 544 |
+
f" got: `phrases` {len(phrases)} != `boxes` {len(boxes)}"
|
| 545 |
+
)
|
| 546 |
+
|
| 547 |
+
def register_attn_hooks(self, unet):
|
| 548 |
+
"""Registering hooks to obtain the attention maps for guidance"""
|
| 549 |
+
|
| 550 |
+
attn_procs = {}
|
| 551 |
+
|
| 552 |
+
for name in unet.attn_processors.keys():
|
| 553 |
+
# Only obtain the queries and keys from cross-attention
|
| 554 |
+
if name.endswith("attn1.processor") or name.endswith("fuser.attn.processor"):
|
| 555 |
+
# Keep the same attn_processors for self-attention (no hooks for self-attention)
|
| 556 |
+
attn_procs[name] = unet.attn_processors[name]
|
| 557 |
+
continue
|
| 558 |
+
|
| 559 |
+
cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
|
| 560 |
+
|
| 561 |
+
if name.startswith("mid_block"):
|
| 562 |
+
hidden_size = unet.config.block_out_channels[-1]
|
| 563 |
+
elif name.startswith("up_blocks"):
|
| 564 |
+
block_id = int(name[len("up_blocks.")])
|
| 565 |
+
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
|
| 566 |
+
elif name.startswith("down_blocks"):
|
| 567 |
+
block_id = int(name[len("down_blocks.")])
|
| 568 |
+
hidden_size = unet.config.block_out_channels[block_id]
|
| 569 |
+
|
| 570 |
+
attn_procs[name] = AttnProcessorWithHook(
|
| 571 |
+
attn_processor_key=name,
|
| 572 |
+
hidden_size=hidden_size,
|
| 573 |
+
cross_attention_dim=cross_attention_dim,
|
| 574 |
+
hook=self.attn_hook,
|
| 575 |
+
fast_attn=True,
|
| 576 |
+
# Not enabled by default
|
| 577 |
+
enabled=False,
|
| 578 |
+
)
|
| 579 |
+
|
| 580 |
+
unet.set_attn_processor(attn_procs)
|
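        # Note (explanatory, not upstream documentation): every cross-attention (attn2) layer now uses
        # AttnProcessorWithHook, but the hook starts disabled (`enabled=False`), so attention maps are only
        # materialized while `latent_lmd_guidance` temporarily calls `enable_attn_hook(enabled=True)`.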
| 581 |
+
|
| 582 |
+
def enable_fuser(self, enabled=True):
|
| 583 |
+
for module in self.unet.modules():
|
| 584 |
+
if isinstance(module, GatedSelfAttentionDense):
|
| 585 |
+
module.enabled = enabled
|
| 586 |
+
|
| 587 |
+
def enable_attn_hook(self, enabled=True):
|
| 588 |
+
for module in self.unet.attn_processors.values():
|
| 589 |
+
if isinstance(module, AttnProcessorWithHook):
|
| 590 |
+
module.enabled = enabled
|
| 591 |
+
|
| 592 |
+
def get_token_map(self, prompt, padding="do_not_pad", verbose=False):
|
| 593 |
+
"""Get a list of mapping: prompt index to str (prompt in a list of token str)"""
|
| 594 |
+
fg_prompt_tokens = self.tokenizer([prompt], padding=padding, max_length=77, return_tensors="np")
|
| 595 |
+
input_ids = fg_prompt_tokens["input_ids"][0]
|
| 596 |
+
|
| 597 |
+
token_map = []
|
| 598 |
+
for ind, item in enumerate(input_ids.tolist()):
|
| 599 |
+
token = self.tokenizer._convert_id_to_token(item)
|
| 600 |
+
|
| 601 |
+
if verbose:
|
| 602 |
+
logger.info(f"{ind}, {token} ({item})")
|
| 603 |
+
|
| 604 |
+
token_map.append(token)
|
| 605 |
+
|
| 606 |
+
return token_map
|
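        # Rough sketch of the output (exact strings depend on the CLIP tokenizer vocabulary): for the prompt
        # "a waterfall" this is roughly ['<|startoftext|>', 'a</w>', 'waterfall</w>', '<|endoftext|>'], so the
        # phrase tokens sit between the <bos>/<eos> markers that `get_phrase_indices` strips off.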
| 607 |
+
|
| 608 |
+
def get_phrase_indices(
|
| 609 |
+
self,
|
| 610 |
+
prompt,
|
| 611 |
+
phrases,
|
| 612 |
+
token_map=None,
|
| 613 |
+
add_suffix_if_not_found=False,
|
| 614 |
+
verbose=False,
|
| 615 |
+
):
|
| 616 |
+
for obj in phrases:
|
| 617 |
+
# Suffix the prompt with object name for attention guidance if object is not in the prompt, using "|" to separate the prompt and the suffix
|
| 618 |
+
if obj not in prompt:
|
| 619 |
+
prompt += "| " + obj
|
| 620 |
+
|
| 621 |
+
if token_map is None:
|
| 622 |
+
# We allow using a pre-computed token map.
|
| 623 |
+
token_map = self.get_token_map(prompt=prompt, padding="do_not_pad", verbose=verbose)
|
| 624 |
+
token_map_str = " ".join(token_map)
|
| 625 |
+
|
| 626 |
+
phrase_indices = []
|
| 627 |
+
|
| 628 |
+
for obj in phrases:
|
| 629 |
+
phrase_token_map = self.get_token_map(prompt=obj, padding="do_not_pad", verbose=verbose)
|
| 630 |
+
# Remove <bos> and <eos> in substr
|
| 631 |
+
phrase_token_map = phrase_token_map[1:-1]
|
| 632 |
+
phrase_token_map_len = len(phrase_token_map)
|
| 633 |
+
phrase_token_map_str = " ".join(phrase_token_map)
|
| 634 |
+
|
| 635 |
+
if verbose:
|
| 636 |
+
                logger.info(
|
| 637 |
+
                    "Full str: %s, "
|
| 638 |
+
                    "Substr: %s, "
|
| 639 |
+
                    "Phrase: %s",
|
| 640 |
+
                    token_map_str,
|
| 641 |
+
                    phrase_token_map_str,
|
| 642 |
+
                    phrases,
|
| 643 |
+
                )
|
| 644 |
+
|
| 645 |
+
            # Count the number of tokens before the substring
|
| 646 |
+
# The substring comes with a trailing space that needs to be removed by minus one in the index.
|
| 647 |
+
obj_first_index = len(token_map_str[: token_map_str.index(phrase_token_map_str) - 1].split(" "))
|
| 648 |
+
|
| 649 |
+
obj_position = list(range(obj_first_index, obj_first_index + phrase_token_map_len))
|
| 650 |
+
phrase_indices.append(obj_position)
|
| 651 |
+
|
| 652 |
+
if add_suffix_if_not_found:
|
| 653 |
+
return phrase_indices, prompt
|
| 654 |
+
|
| 655 |
+
return phrase_indices
|
| 656 |
+
|
| 657 |
+
def add_ca_loss_per_attn_map_to_loss(
|
| 658 |
+
self,
|
| 659 |
+
loss,
|
| 660 |
+
attn_map,
|
| 661 |
+
object_number,
|
| 662 |
+
bboxes,
|
| 663 |
+
phrase_indices,
|
| 664 |
+
fg_top_p=0.2,
|
| 665 |
+
bg_top_p=0.2,
|
| 666 |
+
fg_weight=1.0,
|
| 667 |
+
bg_weight=1.0,
|
| 668 |
+
):
|
| 669 |
+
# b is the number of heads, not batch
|
| 670 |
+
b, i, j = attn_map.shape
|
| 671 |
+
H = W = int(math.sqrt(i))
|
| 672 |
+
for obj_idx in range(object_number):
|
| 673 |
+
obj_loss = 0
|
| 674 |
+
mask = torch.zeros(size=(H, W), device="cuda")
|
| 675 |
+
obj_boxes = bboxes[obj_idx]
|
| 676 |
+
|
| 677 |
+
# We support two level (one box per phrase) and three level (multiple boxes per phrase)
|
| 678 |
+
if not isinstance(obj_boxes[0], Iterable):
|
| 679 |
+
obj_boxes = [obj_boxes]
|
| 680 |
+
|
| 681 |
+
for obj_box in obj_boxes:
|
| 682 |
+
# x_min, y_min, x_max, y_max = int(obj_box[0] * W), int(obj_box[1] * H), int(obj_box[2] * W), int(obj_box[3] * H)
|
| 683 |
+
x_min, y_min, x_max, y_max = scale_proportion(obj_box, H=H, W=W)
|
| 684 |
+
mask[y_min:y_max, x_min:x_max] = 1
|
| 685 |
+
|
| 686 |
+
for obj_position in phrase_indices[obj_idx]:
|
| 687 |
+
# Could potentially optimize to compute this for loop in batch.
|
| 688 |
+
# Could crop the ref cross attention before saving to save memory.
|
| 689 |
+
|
| 690 |
+
ca_map_obj = attn_map[:, :, obj_position].reshape(b, H, W)
|
| 691 |
+
|
| 692 |
+
# shape: (b, H * W)
|
| 693 |
+
ca_map_obj = attn_map[:, :, obj_position] # .reshape(b, H, W)
|
| 694 |
+
k_fg = (mask.sum() * fg_top_p).long().clamp_(min=1)
|
| 695 |
+
k_bg = ((1 - mask).sum() * bg_top_p).long().clamp_(min=1)
|
| 696 |
+
|
| 697 |
+
mask_1d = mask.view(1, -1)
|
| 698 |
+
|
| 699 |
+
# Max-based loss function
|
| 700 |
+
|
| 701 |
+
# Take the topk over spatial dimension, and then take the sum over heads dim
|
| 702 |
+
# The mean is over k_fg and k_bg dimension, so we don't need to sum and divide on our own.
|
| 703 |
+
obj_loss += (1 - (ca_map_obj * mask_1d).topk(k=k_fg).values.mean(dim=1)).sum(dim=0) * fg_weight
|
| 704 |
+
obj_loss += ((ca_map_obj * (1 - mask_1d)).topk(k=k_bg).values.mean(dim=1)).sum(dim=0) * bg_weight
|
| 705 |
+
|
| 706 |
+
loss += obj_loss / len(phrase_indices[obj_idx])
|
| 707 |
+
|
| 708 |
+
return loss
|
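        # In words (explanatory note, not upstream documentation): for every phrase/box pair, the loss pushes the
        # top-k cross-attention values inside the box toward 1 (foreground term) and the top-k values outside the
        # box toward 0 (background term); each term is averaged over its top-k positions and summed over heads.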
| 709 |
+
|
| 710 |
+
def compute_ca_loss(
|
| 711 |
+
self,
|
| 712 |
+
saved_attn,
|
| 713 |
+
bboxes,
|
| 714 |
+
phrase_indices,
|
| 715 |
+
guidance_attn_keys,
|
| 716 |
+
verbose=False,
|
| 717 |
+
**kwargs,
|
| 718 |
+
):
|
| 719 |
+
"""
|
| 720 |
+
        The `saved_attn` is supposed to be passed to `save_attn_to_dict` in `cross_attention_kwargs` prior to computing the loss.
|
| 721 |
+
`AttnProcessor` will put attention maps into the `save_attn_to_dict`.
|
| 722 |
+
|
| 723 |
+
`index` is the timestep.
|
| 724 |
+
`ref_ca_word_token_only`: This has precedence over `ref_ca_last_token_only` (i.e., if both are enabled, we take the token from word rather than the last token).
|
| 725 |
+
`ref_ca_last_token_only`: `ref_ca_saved_attn` comes from the attention map of the last token of the phrase in single object generation, so we apply it only to the last token of the phrase in overall generation if this is set to True. If set to False, `ref_ca_saved_attn` will be applied to all the text tokens.
|
| 726 |
+
"""
|
| 727 |
+
loss = torch.tensor(0).float().cuda()
|
| 728 |
+
object_number = len(bboxes)
|
| 729 |
+
if object_number == 0:
|
| 730 |
+
return loss
|
| 731 |
+
|
| 732 |
+
for attn_key in guidance_attn_keys:
|
| 733 |
+
# We only have 1 cross attention for mid.
|
| 734 |
+
|
| 735 |
+
attn_map_integrated = saved_attn[attn_key]
|
| 736 |
+
if not attn_map_integrated.is_cuda:
|
| 737 |
+
attn_map_integrated = attn_map_integrated.cuda()
|
| 738 |
+
# Example dimension: [20, 64, 77]
|
| 739 |
+
attn_map = attn_map_integrated.squeeze(dim=0)
|
| 740 |
+
|
| 741 |
+
loss = self.add_ca_loss_per_attn_map_to_loss(
|
| 742 |
+
loss, attn_map, object_number, bboxes, phrase_indices, **kwargs
|
| 743 |
+
)
|
| 744 |
+
|
| 745 |
+
num_attn = len(guidance_attn_keys)
|
| 746 |
+
|
| 747 |
+
if num_attn > 0:
|
| 748 |
+
loss = loss / (object_number * num_attn)
|
| 749 |
+
|
| 750 |
+
return loss
|
| 751 |
+
|
| 752 |
+
@torch.no_grad()
|
| 753 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 754 |
+
def __call__(
|
| 755 |
+
self,
|
| 756 |
+
prompt: Union[str, List[str]] = None,
|
| 757 |
+
height: Optional[int] = None,
|
| 758 |
+
width: Optional[int] = None,
|
| 759 |
+
num_inference_steps: int = 50,
|
| 760 |
+
guidance_scale: float = 7.5,
|
| 761 |
+
gligen_scheduled_sampling_beta: float = 0.3,
|
| 762 |
+
phrases: List[str] = None,
|
| 763 |
+
boxes: List[List[float]] = None,
|
| 764 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 765 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 766 |
+
eta: float = 0.0,
|
| 767 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 768 |
+
latents: Optional[torch.Tensor] = None,
|
| 769 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 770 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 771 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 772 |
+
output_type: Optional[str] = "pil",
|
| 773 |
+
return_dict: bool = True,
|
| 774 |
+
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
|
| 775 |
+
callback_steps: int = 1,
|
| 776 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 777 |
+
clip_skip: Optional[int] = None,
|
| 778 |
+
lmd_guidance_kwargs: Optional[Dict[str, Any]] = {},
|
| 779 |
+
phrase_indices: Optional[List[int]] = None,
|
| 780 |
+
):
|
| 781 |
+
r"""
|
| 782 |
+
The call function to the pipeline for generation.
|
| 783 |
+
|
| 784 |
+
Args:
|
| 785 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 786 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 787 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 788 |
+
The height in pixels of the generated image.
|
| 789 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 790 |
+
The width in pixels of the generated image.
|
| 791 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 792 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 793 |
+
expense of slower inference.
|
| 794 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 795 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 796 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 797 |
+
phrases (`List[str]`):
|
| 798 |
+
The phrases to guide what to include in each of the regions defined by the corresponding
|
| 799 |
+
`boxes`. There should only be one phrase per bounding box.
|
| 800 |
+
boxes (`List[List[float]]`):
|
| 801 |
+
The bounding boxes that identify rectangular regions of the image that are going to be filled with the
|
| 802 |
+
content described by the corresponding `phrases`. Each rectangular box is defined as a
|
| 803 |
+
`List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1].
|
| 804 |
+
gligen_scheduled_sampling_beta (`float`, defaults to 0.3):
|
| 805 |
+
Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image
|
| 806 |
+
Generation](https://huggingface.co/papers/2301.07093). Scheduled Sampling factor is only varied for
|
| 807 |
+
scheduled sampling during inference for improved quality and controllability.
|
| 808 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 809 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 810 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 811 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 812 |
+
The number of images to generate per prompt.
|
| 813 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 814 |
+
Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies
|
| 815 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 816 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 817 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 818 |
+
generation deterministic.
|
| 819 |
+
latents (`torch.Tensor`, *optional*):
|
| 820 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
|
| 821 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 822 |
+
tensor is generated by sampling using the supplied random `generator`.
|
| 823 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 824 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 825 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 826 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 827 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 828 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 829 |
+
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
|
| 830 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 831 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 832 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 833 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 834 |
+
plain tuple.
|
| 835 |
+
callback (`Callable`, *optional*):
|
| 836 |
+
A function that calls every `callback_steps` steps during inference. The function is called with the
|
| 837 |
+
following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
|
| 838 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 839 |
+
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
| 840 |
+
every step.
|
| 841 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 842 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 843 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 844 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 845 |
+
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
|
| 846 |
+
Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when
|
| 847 |
+
using zero terminal SNR.
|
| 848 |
+
clip_skip (`int`, *optional*):
|
| 849 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 850 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 851 |
+
lmd_guidance_kwargs (`dict`, *optional*):
|
| 852 |
+
A kwargs dictionary that if specified is passed along to `latent_lmd_guidance` function. Useful keys include `loss_scale` (the guidance strength), `loss_threshold` (when loss is lower than this value, the guidance is not applied anymore), `max_iter` (the number of iterations of guidance for each step), and `guidance_timesteps` (the number of diffusion timesteps to apply guidance on). See `latent_lmd_guidance` for implementation details.
|
| 853 |
+
phrase_indices (`list` of `list`, *optional*): The indices of the tokens of each phrase in the overall prompt. If omitted, the pipeline will match the first token subsequence. The pipeline will append the missing phrases to the end of the prompt by default.
|
| 854 |
+
Examples:
|
| 855 |
+
|
| 856 |
+
Returns:
|
| 857 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 858 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 859 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 860 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 861 |
+
"not-safe-for-work" (nsfw) content.
|
| 862 |
+
"""
|
| 863 |
+
# 0. Default height and width to unet
|
| 864 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 865 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 866 |
+
|
| 867 |
+
# 1. Check inputs. Raise error if not correct
|
| 868 |
+
self.check_inputs(
|
| 869 |
+
prompt,
|
| 870 |
+
height,
|
| 871 |
+
width,
|
| 872 |
+
callback_steps,
|
| 873 |
+
phrases,
|
| 874 |
+
boxes,
|
| 875 |
+
negative_prompt,
|
| 876 |
+
prompt_embeds,
|
| 877 |
+
negative_prompt_embeds,
|
| 878 |
+
phrase_indices,
|
| 879 |
+
)
|
| 880 |
+
|
| 881 |
+
# 2. Define call parameters
|
| 882 |
+
if prompt is not None and isinstance(prompt, str):
|
| 883 |
+
batch_size = 1
|
| 884 |
+
if phrase_indices is None:
|
| 885 |
+
phrase_indices, prompt = self.get_phrase_indices(prompt, phrases, add_suffix_if_not_found=True)
|
| 886 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 887 |
+
batch_size = len(prompt)
|
| 888 |
+
if phrase_indices is None:
|
| 889 |
+
phrase_indices = []
|
| 890 |
+
prompt_parsed = []
|
| 891 |
+
for prompt_item in prompt:
|
| 892 |
+
(
|
| 893 |
+
phrase_indices_parsed_item,
|
| 894 |
+
prompt_parsed_item,
|
| 895 |
+
) = self.get_phrase_indices(prompt_item, add_suffix_if_not_found=True)
|
| 896 |
+
phrase_indices.append(phrase_indices_parsed_item)
|
| 897 |
+
prompt_parsed.append(prompt_parsed_item)
|
| 898 |
+
prompt = prompt_parsed
|
| 899 |
+
else:
|
| 900 |
+
batch_size = prompt_embeds.shape[0]
|
| 901 |
+
|
| 902 |
+
device = self._execution_device
|
| 903 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 904 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 905 |
+
# corresponds to doing no classifier free guidance.
|
| 906 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 907 |
+
|
| 908 |
+
# 3. Encode input prompt
|
| 909 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 910 |
+
prompt,
|
| 911 |
+
device,
|
| 912 |
+
num_images_per_prompt,
|
| 913 |
+
do_classifier_free_guidance,
|
| 914 |
+
negative_prompt,
|
| 915 |
+
prompt_embeds=prompt_embeds,
|
| 916 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 917 |
+
clip_skip=clip_skip,
|
| 918 |
+
)
|
| 919 |
+
|
| 920 |
+
cond_prompt_embeds = prompt_embeds
|
| 921 |
+
|
| 922 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 923 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 924 |
+
# to avoid doing two forward passes
|
| 925 |
+
if do_classifier_free_guidance:
|
| 926 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 927 |
+
|
| 928 |
+
if ip_adapter_image is not None:
|
| 929 |
+
image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
|
| 930 |
+
if self.do_classifier_free_guidance:
|
| 931 |
+
image_embeds = torch.cat([negative_image_embeds, image_embeds])
|
| 932 |
+
|
| 933 |
+
# 4. Prepare timesteps
|
| 934 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 935 |
+
timesteps = self.scheduler.timesteps
|
| 936 |
+
|
| 937 |
+
# 5. Prepare latent variables
|
| 938 |
+
num_channels_latents = self.unet.config.in_channels
|
| 939 |
+
latents = self.prepare_latents(
|
| 940 |
+
batch_size * num_images_per_prompt,
|
| 941 |
+
num_channels_latents,
|
| 942 |
+
height,
|
| 943 |
+
width,
|
| 944 |
+
prompt_embeds.dtype,
|
| 945 |
+
device,
|
| 946 |
+
generator,
|
| 947 |
+
latents,
|
| 948 |
+
)
|
| 949 |
+
|
| 950 |
+
# 5.1 Prepare GLIGEN variables
|
| 951 |
+
max_objs = 30
|
| 952 |
+
if len(boxes) > max_objs:
|
| 953 |
+
warnings.warn(
|
| 954 |
+
f"More that {max_objs} objects found. Only first {max_objs} objects will be processed.",
|
| 955 |
+
FutureWarning,
|
| 956 |
+
)
|
| 957 |
+
phrases = phrases[:max_objs]
|
| 958 |
+
boxes = boxes[:max_objs]
|
| 959 |
+
|
| 960 |
+
n_objs = len(boxes)
|
| 961 |
+
if n_objs:
|
| 962 |
+
# prepare batched input to the PositionNet (boxes, phrases, mask)
|
| 963 |
+
# Get tokens for phrases from pre-trained CLIPTokenizer
|
| 964 |
+
tokenizer_inputs = self.tokenizer(phrases, padding=True, return_tensors="pt").to(device)
|
| 965 |
+
# For the token, we use the same pre-trained text encoder
|
| 966 |
+
# to obtain its text feature
|
| 967 |
+
_text_embeddings = self.text_encoder(**tokenizer_inputs).pooler_output
|
| 968 |
+
|
| 969 |
+
        # Each entity described in `phrases` is denoted by a bounding box;
|
| 970 |
+
        # we represent the location information as (xmin, ymin, xmax, ymax)
|
| 971 |
+
cond_boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype)
|
| 972 |
+
if n_objs:
|
| 973 |
+
cond_boxes[:n_objs] = torch.tensor(boxes)
|
| 974 |
+
text_embeddings = torch.zeros(
|
| 975 |
+
max_objs,
|
| 976 |
+
self.unet.config.cross_attention_dim,
|
| 977 |
+
device=device,
|
| 978 |
+
dtype=self.text_encoder.dtype,
|
| 979 |
+
)
|
| 980 |
+
if n_objs:
|
| 981 |
+
text_embeddings[:n_objs] = _text_embeddings
|
| 982 |
+
        # Generate a mask for each object, i.e., each entity described by `phrases`
|
| 983 |
+
masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype)
|
| 984 |
+
masks[:n_objs] = 1
|
| 985 |
+
|
| 986 |
+
repeat_batch = batch_size * num_images_per_prompt
|
| 987 |
+
cond_boxes = cond_boxes.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
|
| 988 |
+
text_embeddings = text_embeddings.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
|
| 989 |
+
masks = masks.unsqueeze(0).expand(repeat_batch, -1).clone()
|
| 990 |
+
if do_classifier_free_guidance:
|
| 991 |
+
repeat_batch = repeat_batch * 2
|
| 992 |
+
cond_boxes = torch.cat([cond_boxes] * 2)
|
| 993 |
+
text_embeddings = torch.cat([text_embeddings] * 2)
|
| 994 |
+
masks = torch.cat([masks] * 2)
|
| 995 |
+
masks[: repeat_batch // 2] = 0
|
| 996 |
+
if cross_attention_kwargs is None:
|
| 997 |
+
cross_attention_kwargs = {}
|
| 998 |
+
cross_attention_kwargs["gligen"] = {
|
| 999 |
+
"boxes": cond_boxes,
|
| 1000 |
+
"positive_embeddings": text_embeddings,
|
| 1001 |
+
"masks": masks,
|
| 1002 |
+
}
|
| 1003 |
+
|
| 1004 |
+
num_grounding_steps = int(gligen_scheduled_sampling_beta * len(timesteps))
|
| 1005 |
+
self.enable_fuser(True)
|
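        # Scheduled sampling, illustrated with the defaults (illustrative numbers): with
        # gligen_scheduled_sampling_beta=0.3 and num_inference_steps=50, the GLIGEN fuser layers stay enabled
        # for the first int(0.3 * 50) = 15 denoising steps and are switched off afterwards
        # (see the `i == num_grounding_steps` check in the loop below).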
| 1006 |
+
|
| 1007 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1008 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1009 |
+
|
| 1010 |
+
# 6.1 Add image embeds for IP-Adapter
|
| 1011 |
+
added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
|
| 1012 |
+
|
| 1013 |
+
loss_attn = torch.tensor(10000.0)
|
| 1014 |
+
|
| 1015 |
+
# 7. Denoising loop
|
| 1016 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 1017 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1018 |
+
for i, t in enumerate(timesteps):
|
| 1019 |
+
# Scheduled sampling
|
| 1020 |
+
if i == num_grounding_steps:
|
| 1021 |
+
self.enable_fuser(False)
|
| 1022 |
+
|
| 1023 |
+
if latents.shape[1] != 4:
|
| 1024 |
+
latents = torch.randn_like(latents[:, :4])
|
| 1025 |
+
|
| 1026 |
+
# 7.1 Perform LMD guidance
|
| 1027 |
+
if boxes:
|
| 1028 |
+
latents, loss_attn = self.latent_lmd_guidance(
|
| 1029 |
+
cond_prompt_embeds,
|
| 1030 |
+
index=i,
|
| 1031 |
+
boxes=boxes,
|
| 1032 |
+
phrase_indices=phrase_indices,
|
| 1033 |
+
t=t,
|
| 1034 |
+
latents=latents,
|
| 1035 |
+
loss=loss_attn,
|
| 1036 |
+
**lmd_guidance_kwargs,
|
| 1037 |
+
)
|
| 1038 |
+
|
| 1039 |
+
# expand the latents if we are doing classifier free guidance
|
| 1040 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 1041 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1042 |
+
|
| 1043 |
+
# predict the noise residual
|
| 1044 |
+
noise_pred = self.unet(
|
| 1045 |
+
latent_model_input,
|
| 1046 |
+
t,
|
| 1047 |
+
encoder_hidden_states=prompt_embeds,
|
| 1048 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1049 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1050 |
+
).sample
|
| 1051 |
+
|
| 1052 |
+
# perform guidance
|
| 1053 |
+
if do_classifier_free_guidance:
|
| 1054 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1055 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1056 |
+
|
| 1057 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1058 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 1059 |
+
|
| 1060 |
+
# call the callback, if provided
|
| 1061 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1062 |
+
progress_bar.update()
|
| 1063 |
+
if callback is not None and i % callback_steps == 0:
|
| 1064 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1065 |
+
callback(step_idx, t, latents)
|
| 1066 |
+
|
| 1067 |
+
if not output_type == "latent":
|
| 1068 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1069 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 1070 |
+
else:
|
| 1071 |
+
image = latents
|
| 1072 |
+
has_nsfw_concept = None
|
| 1073 |
+
|
| 1074 |
+
if has_nsfw_concept is None:
|
| 1075 |
+
do_denormalize = [True] * image.shape[0]
|
| 1076 |
+
else:
|
| 1077 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 1078 |
+
|
| 1079 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 1080 |
+
|
| 1081 |
+
# Offload last model to CPU
|
| 1082 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 1083 |
+
self.final_offload_hook.offload()
|
| 1084 |
+
|
| 1085 |
+
if not return_dict:
|
| 1086 |
+
return (image, has_nsfw_concept)
|
| 1087 |
+
|
| 1088 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
| 1089 |
+
|
| 1090 |
+
@torch.set_grad_enabled(True)
|
| 1091 |
+
def latent_lmd_guidance(
|
| 1092 |
+
self,
|
| 1093 |
+
cond_embeddings,
|
| 1094 |
+
index,
|
| 1095 |
+
boxes,
|
| 1096 |
+
phrase_indices,
|
| 1097 |
+
t,
|
| 1098 |
+
latents,
|
| 1099 |
+
loss,
|
| 1100 |
+
*,
|
| 1101 |
+
loss_scale=20,
|
| 1102 |
+
loss_threshold=5.0,
|
| 1103 |
+
max_iter=[3] * 5 + [2] * 5 + [1] * 5,
|
| 1104 |
+
guidance_timesteps=15,
|
| 1105 |
+
cross_attention_kwargs=None,
|
| 1106 |
+
guidance_attn_keys=DEFAULT_GUIDANCE_ATTN_KEYS,
|
| 1107 |
+
verbose=False,
|
| 1108 |
+
clear_cache=False,
|
| 1109 |
+
unet_additional_kwargs={},
|
| 1110 |
+
guidance_callback=None,
|
| 1111 |
+
**kwargs,
|
| 1112 |
+
):
|
| 1113 |
+
scheduler, unet = self.scheduler, self.unet
|
| 1114 |
+
|
| 1115 |
+
iteration = 0
|
| 1116 |
+
|
| 1117 |
+
if index < guidance_timesteps:
|
| 1118 |
+
if isinstance(max_iter, list):
|
| 1119 |
+
max_iter = max_iter[index]
|
| 1120 |
+
|
| 1121 |
+
if verbose:
|
| 1122 |
+
logger.info(
|
| 1123 |
+
f"time index {index}, loss: {loss.item() / loss_scale:.3f} (de-scaled with scale {loss_scale:.1f}), loss threshold: {loss_threshold:.3f}"
|
| 1124 |
+
)
|
| 1125 |
+
|
| 1126 |
+
try:
|
| 1127 |
+
self.enable_attn_hook(enabled=True)
|
| 1128 |
+
|
| 1129 |
+
while (
|
| 1130 |
+
loss.item() / loss_scale > loss_threshold and iteration < max_iter and index < guidance_timesteps
|
| 1131 |
+
):
|
| 1132 |
+
self._saved_attn = {}
|
| 1133 |
+
|
| 1134 |
+
latents.requires_grad_(True)
|
| 1135 |
+
latent_model_input = latents
|
| 1136 |
+
latent_model_input = scheduler.scale_model_input(latent_model_input, t)
|
| 1137 |
+
|
| 1138 |
+
unet(
|
| 1139 |
+
latent_model_input,
|
| 1140 |
+
t,
|
| 1141 |
+
encoder_hidden_states=cond_embeddings,
|
| 1142 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1143 |
+
**unet_additional_kwargs,
|
| 1144 |
+
)
|
| 1145 |
+
|
| 1146 |
+
# update latents with guidance
|
| 1147 |
+
loss = (
|
| 1148 |
+
self.compute_ca_loss(
|
| 1149 |
+
saved_attn=self._saved_attn,
|
| 1150 |
+
bboxes=boxes,
|
| 1151 |
+
phrase_indices=phrase_indices,
|
| 1152 |
+
guidance_attn_keys=guidance_attn_keys,
|
| 1153 |
+
verbose=verbose,
|
| 1154 |
+
**kwargs,
|
| 1155 |
+
)
|
| 1156 |
+
* loss_scale
|
| 1157 |
+
)
|
| 1158 |
+
|
| 1159 |
+
if torch.isnan(loss):
|
| 1160 |
+
raise RuntimeError("**Loss is NaN**")
|
| 1161 |
+
|
| 1162 |
+
# This callback allows visualizations.
|
| 1163 |
+
if guidance_callback is not None:
|
| 1164 |
+
guidance_callback(self, latents, loss, iteration, index)
|
| 1165 |
+
|
| 1166 |
+
self._saved_attn = None
|
| 1167 |
+
|
| 1168 |
+
grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents])[0]
|
| 1169 |
+
|
| 1170 |
+
latents.requires_grad_(False)
|
| 1171 |
+
|
| 1172 |
+
# Scaling with classifier guidance
|
| 1173 |
+
alpha_prod_t = scheduler.alphas_cumprod[t]
|
| 1174 |
+
# Classifier guidance: https://huggingface.co/papers/2105.05233
|
| 1175 |
+
# DDIM: https://huggingface.co/papers/2010.02502
|
| 1176 |
+
scale = (1 - alpha_prod_t) ** (0.5)
|
| 1177 |
+
latents = latents - scale * grad_cond
|
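                    # Update rule used here (a sketch, mirroring classifier guidance; alpha_bar_t denotes
                    # scheduler.alphas_cumprod[t]):
                    #   latents <- latents - sqrt(1 - alpha_bar_t) * d(loss)/d(latents)
                    # i.e. the loss gradient is scaled like the DDIM noise term before being subtracted.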
| 1178 |
+
|
| 1179 |
+
iteration += 1
|
| 1180 |
+
|
| 1181 |
+
if clear_cache:
|
| 1182 |
+
gc.collect()
|
| 1183 |
+
torch.cuda.empty_cache()
|
| 1184 |
+
|
| 1185 |
+
if verbose:
|
| 1186 |
+
logger.info(
|
| 1187 |
+
f"time index {index}, loss: {loss.item() / loss_scale:.3f}, loss threshold: {loss_threshold:.3f}, iteration: {iteration}"
|
| 1188 |
+
)
|
| 1189 |
+
|
| 1190 |
+
finally:
|
| 1191 |
+
self.enable_attn_hook(enabled=False)
|
| 1192 |
+
|
| 1193 |
+
return latents, loss
|
| 1194 |
+
|
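The loop above is essentially gradient descent on a cross-attention loss, followed by a classifier-guidance style step whose size is tied to the noise level. A minimal, self-contained sketch of that latent update, assuming a toy differentiable loss and a hypothetical alphas_cumprod table (names here are illustrative, not pipeline attributes):

import torch

# Hypothetical stand-ins: a noise-schedule table and a toy loss on the latents.
alphas_cumprod = torch.linspace(0.9999, 0.01, 1000)

def guidance_step(latents, t, loss_fn, loss_scale=20.0):
    latents = latents.detach().requires_grad_(True)
    loss = loss_fn(latents) * loss_scale
    grad = torch.autograd.grad(loss, [latents])[0]
    # Step size follows sqrt(1 - alpha_cumprod_t), mirroring `scale = (1 - alpha_prod_t) ** 0.5` above.
    scale = (1 - alphas_cumprod[t]) ** 0.5
    return (latents - scale * grad).detach()

latents = torch.randn(1, 4, 64, 64)
latents = guidance_step(latents, t=500, loss_fn=lambda x: (x ** 2).mean())

In the real method the loss comes from compute_ca_loss over the saved cross-attention maps, and the step is repeated up to max_iter[index] times per guidance timestep.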
| 1195 |
+
# Below are methods copied from StableDiffusionPipeline
|
| 1196 |
+
# The design choice of not inheriting from StableDiffusionPipeline is discussed here: https://github.com/huggingface/diffusers/pull/5993#issuecomment-1834258517
|
| 1197 |
+
|
| 1198 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
| 1199 |
+
def _encode_prompt(
|
| 1200 |
+
self,
|
| 1201 |
+
prompt,
|
| 1202 |
+
device,
|
| 1203 |
+
num_images_per_prompt,
|
| 1204 |
+
do_classifier_free_guidance,
|
| 1205 |
+
negative_prompt=None,
|
| 1206 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 1207 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 1208 |
+
lora_scale: Optional[float] = None,
|
| 1209 |
+
**kwargs,
|
| 1210 |
+
):
|
| 1211 |
+
deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
|
| 1212 |
+
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
|
| 1213 |
+
|
| 1214 |
+
prompt_embeds_tuple = self.encode_prompt(
|
| 1215 |
+
prompt=prompt,
|
| 1216 |
+
device=device,
|
| 1217 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1218 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 1219 |
+
negative_prompt=negative_prompt,
|
| 1220 |
+
prompt_embeds=prompt_embeds,
|
| 1221 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1222 |
+
lora_scale=lora_scale,
|
| 1223 |
+
**kwargs,
|
| 1224 |
+
)
|
| 1225 |
+
|
| 1226 |
+
# concatenate for backwards comp
|
| 1227 |
+
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
|
| 1228 |
+
|
| 1229 |
+
return prompt_embeds
|
| 1230 |
+
|
| 1231 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
|
| 1232 |
+
def encode_prompt(
|
| 1233 |
+
self,
|
| 1234 |
+
prompt,
|
| 1235 |
+
device,
|
| 1236 |
+
num_images_per_prompt,
|
| 1237 |
+
do_classifier_free_guidance,
|
| 1238 |
+
negative_prompt=None,
|
| 1239 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 1240 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 1241 |
+
lora_scale: Optional[float] = None,
|
| 1242 |
+
clip_skip: Optional[int] = None,
|
| 1243 |
+
):
|
| 1244 |
+
r"""
|
| 1245 |
+
Encodes the prompt into text encoder hidden states.
|
| 1246 |
+
|
| 1247 |
+
Args:
|
| 1248 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 1249 |
+
prompt to be encoded
|
| 1250 |
+
device: (`torch.device`):
|
| 1251 |
+
torch device
|
| 1252 |
+
num_images_per_prompt (`int`):
|
| 1253 |
+
number of images that should be generated per prompt
|
| 1254 |
+
do_classifier_free_guidance (`bool`):
|
| 1255 |
+
whether to use classifier free guidance or not
|
| 1256 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1257 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 1258 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 1259 |
+
less than `1`).
|
| 1260 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 1261 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 1262 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 1263 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 1264 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1265 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 1266 |
+
argument.
|
| 1267 |
+
lora_scale (`float`, *optional*):
|
| 1268 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 1269 |
+
clip_skip (`int`, *optional*):
|
| 1270 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 1271 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 1272 |
+
"""
|
| 1273 |
+
# set lora scale so that monkey patched LoRA
|
| 1274 |
+
# function of text encoder can correctly access it
|
| 1275 |
+
if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
|
| 1276 |
+
self._lora_scale = lora_scale
|
| 1277 |
+
|
| 1278 |
+
# dynamically adjust the LoRA scale
|
| 1279 |
+
if not USE_PEFT_BACKEND:
|
| 1280 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 1281 |
+
else:
|
| 1282 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 1283 |
+
|
| 1284 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1285 |
+
batch_size = 1
|
| 1286 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1287 |
+
batch_size = len(prompt)
|
| 1288 |
+
else:
|
| 1289 |
+
batch_size = prompt_embeds.shape[0]
|
| 1290 |
+
|
| 1291 |
+
if prompt_embeds is None:
|
| 1292 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 1293 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 1294 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 1295 |
+
|
| 1296 |
+
text_inputs = self.tokenizer(
|
| 1297 |
+
prompt,
|
| 1298 |
+
padding="max_length",
|
| 1299 |
+
max_length=self.tokenizer.model_max_length,
|
| 1300 |
+
truncation=True,
|
| 1301 |
+
return_tensors="pt",
|
| 1302 |
+
)
|
| 1303 |
+
text_input_ids = text_inputs.input_ids
|
| 1304 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 1305 |
+
|
| 1306 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 1307 |
+
text_input_ids, untruncated_ids
|
| 1308 |
+
):
|
| 1309 |
+
removed_text = self.tokenizer.batch_decode(
|
| 1310 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 1311 |
+
)
|
| 1312 |
+
logger.warning(
|
| 1313 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 1314 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 1315 |
+
)
|
| 1316 |
+
|
| 1317 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 1318 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 1319 |
+
else:
|
| 1320 |
+
attention_mask = None
|
| 1321 |
+
|
| 1322 |
+
if clip_skip is None:
|
| 1323 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 1324 |
+
prompt_embeds = prompt_embeds[0]
|
| 1325 |
+
else:
|
| 1326 |
+
prompt_embeds = self.text_encoder(
|
| 1327 |
+
text_input_ids.to(device),
|
| 1328 |
+
attention_mask=attention_mask,
|
| 1329 |
+
output_hidden_states=True,
|
| 1330 |
+
)
|
| 1331 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 1332 |
+
# all the hidden states from the encoder layers. Then index into
|
| 1333 |
+
# the tuple to access the hidden states from the desired layer.
|
| 1334 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 1335 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 1336 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 1337 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 1338 |
+
# layer.
|
| 1339 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 1340 |
+
|
| 1341 |
+
if self.text_encoder is not None:
|
| 1342 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 1343 |
+
elif self.unet is not None:
|
| 1344 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 1345 |
+
else:
|
| 1346 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 1347 |
+
|
| 1348 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 1349 |
+
|
| 1350 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 1351 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 1352 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 1353 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 1354 |
+
|
| 1355 |
+
# get unconditional embeddings for classifier free guidance
|
| 1356 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 1357 |
+
uncond_tokens: List[str]
|
| 1358 |
+
if negative_prompt is None:
|
| 1359 |
+
uncond_tokens = [""] * batch_size
|
| 1360 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 1361 |
+
raise TypeError(
|
| 1362 |
+
f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
|
| 1363 |
+
f" {type(prompt)}."
|
| 1364 |
+
)
|
| 1365 |
+
elif isinstance(negative_prompt, str):
|
| 1366 |
+
uncond_tokens = [negative_prompt]
|
| 1367 |
+
elif batch_size != len(negative_prompt):
|
| 1368 |
+
raise ValueError(
|
| 1369 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 1370 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 1371 |
+
" the batch size of `prompt`."
|
| 1372 |
+
)
|
| 1373 |
+
else:
|
| 1374 |
+
uncond_tokens = negative_prompt
|
| 1375 |
+
|
| 1376 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 1377 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 1378 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 1379 |
+
|
| 1380 |
+
max_length = prompt_embeds.shape[1]
|
| 1381 |
+
uncond_input = self.tokenizer(
|
| 1382 |
+
uncond_tokens,
|
| 1383 |
+
padding="max_length",
|
| 1384 |
+
max_length=max_length,
|
| 1385 |
+
truncation=True,
|
| 1386 |
+
return_tensors="pt",
|
| 1387 |
+
)
|
| 1388 |
+
|
| 1389 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 1390 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 1391 |
+
else:
|
| 1392 |
+
attention_mask = None
|
| 1393 |
+
|
| 1394 |
+
negative_prompt_embeds = self.text_encoder(
|
| 1395 |
+
uncond_input.input_ids.to(device),
|
| 1396 |
+
attention_mask=attention_mask,
|
| 1397 |
+
)
|
| 1398 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 1399 |
+
|
| 1400 |
+
if do_classifier_free_guidance:
|
| 1401 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 1402 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 1403 |
+
|
| 1404 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 1405 |
+
|
| 1406 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 1407 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 1408 |
+
|
| 1409 |
+
if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 1410 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 1411 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 1412 |
+
|
| 1413 |
+
return prompt_embeds, negative_prompt_embeds
|
| 1414 |
+
|
| 1415 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
|
| 1416 |
+
def encode_image(self, image, device, num_images_per_prompt):
|
| 1417 |
+
dtype = next(self.image_encoder.parameters()).dtype
|
| 1418 |
+
|
| 1419 |
+
if not isinstance(image, torch.Tensor):
|
| 1420 |
+
image = self.feature_extractor(image, return_tensors="pt").pixel_values
|
| 1421 |
+
|
| 1422 |
+
image = image.to(device=device, dtype=dtype)
|
| 1423 |
+
image_embeds = self.image_encoder(image).image_embeds
|
| 1424 |
+
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
|
| 1425 |
+
|
| 1426 |
+
uncond_image_embeds = torch.zeros_like(image_embeds)
|
| 1427 |
+
return image_embeds, uncond_image_embeds
|
| 1428 |
+
|
| 1429 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
|
| 1430 |
+
def run_safety_checker(self, image, device, dtype):
|
| 1431 |
+
if self.safety_checker is None:
|
| 1432 |
+
has_nsfw_concept = None
|
| 1433 |
+
else:
|
| 1434 |
+
if torch.is_tensor(image):
|
| 1435 |
+
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
|
| 1436 |
+
else:
|
| 1437 |
+
feature_extractor_input = self.image_processor.numpy_to_pil(image)
|
| 1438 |
+
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
|
| 1439 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 1440 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 1441 |
+
)
|
| 1442 |
+
return image, has_nsfw_concept
|
| 1443 |
+
|
| 1444 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
|
| 1445 |
+
def decode_latents(self, latents):
|
| 1446 |
+
deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
|
| 1447 |
+
deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
|
| 1448 |
+
|
| 1449 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 1450 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 1451 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 1452 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 1453 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 1454 |
+
return image
|
| 1455 |
+
|
| 1456 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 1457 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 1458 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 1459 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 1460 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 1461 |
+
# and should be between [0, 1]
|
| 1462 |
+
|
| 1463 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 1464 |
+
extra_step_kwargs = {}
|
| 1465 |
+
if accepts_eta:
|
| 1466 |
+
extra_step_kwargs["eta"] = eta
|
| 1467 |
+
|
| 1468 |
+
# check if the scheduler accepts generator
|
| 1469 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 1470 |
+
if accepts_generator:
|
| 1471 |
+
extra_step_kwargs["generator"] = generator
|
| 1472 |
+
return extra_step_kwargs
|
| 1473 |
+
|
| 1474 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 1475 |
+
def prepare_latents(
|
| 1476 |
+
self,
|
| 1477 |
+
batch_size,
|
| 1478 |
+
num_channels_latents,
|
| 1479 |
+
height,
|
| 1480 |
+
width,
|
| 1481 |
+
dtype,
|
| 1482 |
+
device,
|
| 1483 |
+
generator,
|
| 1484 |
+
latents=None,
|
| 1485 |
+
):
|
| 1486 |
+
shape = (
|
| 1487 |
+
batch_size,
|
| 1488 |
+
num_channels_latents,
|
| 1489 |
+
height // self.vae_scale_factor,
|
| 1490 |
+
width // self.vae_scale_factor,
|
| 1491 |
+
)
|
| 1492 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 1493 |
+
raise ValueError(
|
| 1494 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 1495 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 1496 |
+
)
|
| 1497 |
+
|
| 1498 |
+
if latents is None:
|
| 1499 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 1500 |
+
else:
|
| 1501 |
+
latents = latents.to(device)
|
| 1502 |
+
|
| 1503 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 1504 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 1505 |
+
return latents
|
| 1506 |
+
|
| 1507 |
+
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
|
| 1508 |
+
def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
|
| 1509 |
+
"""
|
| 1510 |
+
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
|
| 1511 |
+
|
| 1512 |
+
Args:
|
| 1513 |
+
timesteps (`torch.Tensor`):
|
| 1514 |
+
generate embedding vectors at these timesteps
|
| 1515 |
+
embedding_dim (`int`, *optional*, defaults to 512):
|
| 1516 |
+
dimension of the embeddings to generate
|
| 1517 |
+
dtype:
|
| 1518 |
+
data type of the generated embeddings
|
| 1519 |
+
|
| 1520 |
+
Returns:
|
| 1521 |
+
`torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
|
| 1522 |
+
"""
|
| 1523 |
+
assert len(w.shape) == 1
|
| 1524 |
+
w = w * 1000.0
|
| 1525 |
+
|
| 1526 |
+
half_dim = embedding_dim // 2
|
| 1527 |
+
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
|
| 1528 |
+
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
|
| 1529 |
+
emb = w.to(dtype)[:, None] * emb[None, :]
|
| 1530 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
| 1531 |
+
if embedding_dim % 2 == 1: # zero pad
|
| 1532 |
+
emb = torch.nn.functional.pad(emb, (0, 1))
|
| 1533 |
+
assert emb.shape == (w.shape[0], embedding_dim)
|
| 1534 |
+
return emb
|
| 1535 |
+
|
| 1536 |
+
@property
|
| 1537 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale
|
| 1538 |
+
def guidance_scale(self):
|
| 1539 |
+
return self._guidance_scale
|
| 1540 |
+
|
| 1541 |
+
@property
|
| 1542 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_rescale
|
| 1543 |
+
def guidance_rescale(self):
|
| 1544 |
+
return self._guidance_rescale
|
| 1545 |
+
|
| 1546 |
+
@property
|
| 1547 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip
|
| 1548 |
+
def clip_skip(self):
|
| 1549 |
+
return self._clip_skip
|
| 1550 |
+
|
| 1551 |
+
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
|
| 1552 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 1553 |
+
# corresponds to doing no classifier free guidance.
|
| 1554 |
+
@property
|
| 1555 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance
|
| 1556 |
+
def do_classifier_free_guidance(self):
|
| 1557 |
+
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
|
| 1558 |
+
|
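For reference, classifier-free guidance combines the unconditional and text-conditioned noise predictions with the weight `w` mentioned above; the property returns False for `guidance_scale <= 1` because the combination then adds nothing over the conditional prediction. A short illustrative sketch (random tensors stand in for the UNet outputs):

import torch

noise_pred_uncond = torch.randn(1, 4, 64, 64)
noise_pred_text = torch.randn(1, 4, 64, 64)
guidance_scale = 7.5

noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# With guidance_scale == 1 this collapses to noise_pred_text, i.e. no extra guidance.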
| 1559 |
+
@property
|
| 1560 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs
|
| 1561 |
+
def cross_attention_kwargs(self):
|
| 1562 |
+
return self._cross_attention_kwargs
|
| 1563 |
+
|
| 1564 |
+
@property
|
| 1565 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps
|
| 1566 |
+
def num_timesteps(self):
|
| 1567 |
+
return self._num_timesteps
|
v0.36.0/lpw_stable_diffusion.py
ADDED
|
@@ -0,0 +1,1431 @@
|
| 1 |
+
import inspect
|
| 2 |
+
import re
|
| 3 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import PIL.Image
|
| 7 |
+
import torch
|
| 8 |
+
from packaging import version
|
| 9 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 10 |
+
|
| 11 |
+
from diffusers import DiffusionPipeline
|
| 12 |
+
from diffusers.configuration_utils import FrozenDict
|
| 13 |
+
from diffusers.image_processor import VaeImageProcessor
|
| 14 |
+
from diffusers.loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
|
| 15 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 16 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 17 |
+
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
|
| 18 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
|
| 19 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 20 |
+
from diffusers.utils import (
|
| 21 |
+
PIL_INTERPOLATION,
|
| 22 |
+
USE_PEFT_BACKEND,
|
| 23 |
+
deprecate,
|
| 24 |
+
logging,
|
| 25 |
+
scale_lora_layers,
|
| 26 |
+
unscale_lora_layers,
|
| 27 |
+
)
|
| 28 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# ------------------------------------------------------------------------------
|
| 32 |
+
|
| 33 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 34 |
+
|
| 35 |
+
re_attention = re.compile(
|
| 36 |
+
r"""
|
| 37 |
+
\\\(|
|
| 38 |
+
\\\)|
|
| 39 |
+
\\\[|
|
| 40 |
+
\\]|
|
| 41 |
+
\\\\|
|
| 42 |
+
\\|
|
| 43 |
+
\(|
|
| 44 |
+
\[|
|
| 45 |
+
:([+-]?[.\d]+)\)|
|
| 46 |
+
\)|
|
| 47 |
+
]|
|
| 48 |
+
[^\\()\[\]:]+|
|
| 49 |
+
:
|
| 50 |
+
""",
|
| 51 |
+
re.X,
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def parse_prompt_attention(text):
|
| 56 |
+
"""
|
| 57 |
+
Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
|
| 58 |
+
Accepted tokens are:
|
| 59 |
+
(abc) - increases attention to abc by a multiplier of 1.1
|
| 60 |
+
(abc:3.12) - increases attention to abc by a multiplier of 3.12
|
| 61 |
+
[abc] - decreases attention to abc by a multiplier of 1.1
|
| 62 |
+
\\( - literal character '('
|
| 63 |
+
\\[ - literal character '['
|
| 64 |
+
\\) - literal character ')'
|
| 65 |
+
\\] - literal character ']'
|
| 66 |
+
\\ - literal character '\'
|
| 67 |
+
anything else - just text
|
| 68 |
+
>>> parse_prompt_attention('normal text')
|
| 69 |
+
[['normal text', 1.0]]
|
| 70 |
+
>>> parse_prompt_attention('an (important) word')
|
| 71 |
+
[['an ', 1.0], ['important', 1.1], [' word', 1.0]]
|
| 72 |
+
>>> parse_prompt_attention('(unbalanced')
|
| 73 |
+
[['unbalanced', 1.1]]
|
| 74 |
+
>>> parse_prompt_attention('\\(literal\\]')
|
| 75 |
+
[['(literal]', 1.0]]
|
| 76 |
+
>>> parse_prompt_attention('(unnecessary)(parens)')
|
| 77 |
+
[['unnecessaryparens', 1.1]]
|
| 78 |
+
>>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
|
| 79 |
+
[['a ', 1.0],
|
| 80 |
+
['house', 1.5730000000000004],
|
| 81 |
+
[' ', 1.1],
|
| 82 |
+
['on', 1.0],
|
| 83 |
+
[' a ', 1.1],
|
| 84 |
+
['hill', 0.55],
|
| 85 |
+
[', sun, ', 1.1],
|
| 86 |
+
['sky', 1.4641000000000006],
|
| 87 |
+
['.', 1.1]]
|
| 88 |
+
"""
|
| 89 |
+
|
| 90 |
+
res = []
|
| 91 |
+
round_brackets = []
|
| 92 |
+
square_brackets = []
|
| 93 |
+
|
| 94 |
+
round_bracket_multiplier = 1.1
|
| 95 |
+
square_bracket_multiplier = 1 / 1.1
|
| 96 |
+
|
| 97 |
+
def multiply_range(start_position, multiplier):
|
| 98 |
+
for p in range(start_position, len(res)):
|
| 99 |
+
res[p][1] *= multiplier
|
| 100 |
+
|
| 101 |
+
for m in re_attention.finditer(text):
|
| 102 |
+
text = m.group(0)
|
| 103 |
+
weight = m.group(1)
|
| 104 |
+
|
| 105 |
+
if text.startswith("\\"):
|
| 106 |
+
res.append([text[1:], 1.0])
|
| 107 |
+
elif text == "(":
|
| 108 |
+
round_brackets.append(len(res))
|
| 109 |
+
elif text == "[":
|
| 110 |
+
square_brackets.append(len(res))
|
| 111 |
+
elif weight is not None and len(round_brackets) > 0:
|
| 112 |
+
multiply_range(round_brackets.pop(), float(weight))
|
| 113 |
+
elif text == ")" and len(round_brackets) > 0:
|
| 114 |
+
multiply_range(round_brackets.pop(), round_bracket_multiplier)
|
| 115 |
+
elif text == "]" and len(square_brackets) > 0:
|
| 116 |
+
multiply_range(square_brackets.pop(), square_bracket_multiplier)
|
| 117 |
+
else:
|
| 118 |
+
res.append([text, 1.0])
|
| 119 |
+
|
| 120 |
+
for pos in round_brackets:
|
| 121 |
+
multiply_range(pos, round_bracket_multiplier)
|
| 122 |
+
|
| 123 |
+
for pos in square_brackets:
|
| 124 |
+
multiply_range(pos, square_bracket_multiplier)
|
| 125 |
+
|
| 126 |
+
if len(res) == 0:
|
| 127 |
+
res = [["", 1.0]]
|
| 128 |
+
|
| 129 |
+
# merge runs of identical weights
|
| 130 |
+
i = 0
|
| 131 |
+
while i + 1 < len(res):
|
| 132 |
+
if res[i][1] == res[i + 1][1]:
|
| 133 |
+
res[i][0] += res[i + 1][0]
|
| 134 |
+
res.pop(i + 1)
|
| 135 |
+
else:
|
| 136 |
+
i += 1
|
| 137 |
+
|
| 138 |
+
return res
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
|
| 142 |
+
r"""
|
| 143 |
+
Tokenize a list of prompts and return their tokens together with the weight of each token.
|
| 144 |
+
|
| 145 |
+
No padding, starting or ending token is included.
|
| 146 |
+
"""
|
| 147 |
+
tokens = []
|
| 148 |
+
weights = []
|
| 149 |
+
truncated = False
|
| 150 |
+
for text in prompt:
|
| 151 |
+
texts_and_weights = parse_prompt_attention(text)
|
| 152 |
+
text_token = []
|
| 153 |
+
text_weight = []
|
| 154 |
+
for word, weight in texts_and_weights:
|
| 155 |
+
# tokenize and discard the starting and the ending token
|
| 156 |
+
token = pipe.tokenizer(word).input_ids[1:-1]
|
| 157 |
+
text_token += token
|
| 158 |
+
# copy the weight by length of token
|
| 159 |
+
text_weight += [weight] * len(token)
|
| 160 |
+
# stop if the text is too long (longer than truncation limit)
|
| 161 |
+
if len(text_token) > max_length:
|
| 162 |
+
truncated = True
|
| 163 |
+
break
|
| 164 |
+
# truncate
|
| 165 |
+
if len(text_token) > max_length:
|
| 166 |
+
truncated = True
|
| 167 |
+
text_token = text_token[:max_length]
|
| 168 |
+
text_weight = text_weight[:max_length]
|
| 169 |
+
tokens.append(text_token)
|
| 170 |
+
weights.append(text_weight)
|
| 171 |
+
if truncated:
|
| 172 |
+
logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
|
| 173 |
+
return tokens, weights
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
|
| 177 |
+
r"""
|
| 178 |
+
Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
|
| 179 |
+
"""
|
| 180 |
+
max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
|
| 181 |
+
weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
|
| 182 |
+
for i in range(len(tokens)):
|
| 183 |
+
tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
|
| 184 |
+
if no_boseos_middle:
|
| 185 |
+
weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
|
| 186 |
+
else:
|
| 187 |
+
w = []
|
| 188 |
+
if len(weights[i]) == 0:
|
| 189 |
+
w = [1.0] * weights_length
|
| 190 |
+
else:
|
| 191 |
+
for j in range(max_embeddings_multiples):
|
| 192 |
+
w.append(1.0) # weight for starting token in this chunk
|
| 193 |
+
w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
|
| 194 |
+
w.append(1.0) # weight for ending token in this chunk
|
| 195 |
+
w += [1.0] * (weights_length - len(w))
|
| 196 |
+
weights[i] = w[:]
|
| 197 |
+
|
| 198 |
+
return tokens, weights
|
| 199 |
+
|
| 200 |
+
|
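A small worked example of the padding step, assuming CLIP-style special tokens (bos=49406, eos=49407; the pad id is shown as the eos id, matching the `getattr(..., "pad_token_id", eos)` fallback used later in this file) and the default chunk length of 77. The token ids and weights are made up for illustration:

# Two prompts of different lengths, already tokenized without bos/eos.
tokens = [[320, 1125, 539], [320, 1125]]
weights = [[1.0, 1.3, 1.3], [1.0, 1.0]]

bos, eos, pad = 49406, 49407, 49407
padded_tokens, padded_weights = pad_tokens_and_weights(
    tokens, weights, max_length=77, bos=bos, eos=eos, pad=pad, no_boseos_middle=True, chunk_length=77
)
# Each row is now [bos] + tokens + [pad] * (77 - 2 - len(tokens)) + [eos], and the
# weights are padded with 1.0 to the same length of 77.
assert len(padded_tokens[0]) == 77 and len(padded_weights[0]) == 77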
| 201 |
+
def get_unweighted_text_embeddings(
|
| 202 |
+
pipe: DiffusionPipeline,
|
| 203 |
+
text_input: torch.Tensor,
|
| 204 |
+
chunk_length: int,
|
| 205 |
+
no_boseos_middle: Optional[bool] = True,
|
| 206 |
+
clip_skip: Optional[int] = None,
|
| 207 |
+
):
|
| 208 |
+
"""
|
| 209 |
+
When the length of tokens is a multiple of the capacity of the text encoder,
|
| 210 |
+
it should be split into chunks and sent to the text encoder individually.
|
| 211 |
+
"""
|
| 212 |
+
max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
|
| 213 |
+
if max_embeddings_multiples > 1:
|
| 214 |
+
text_embeddings = []
|
| 215 |
+
for i in range(max_embeddings_multiples):
|
| 216 |
+
# extract the i-th chunk
|
| 217 |
+
text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
|
| 218 |
+
|
| 219 |
+
# cover the head and the tail by the starting and the ending tokens
|
| 220 |
+
text_input_chunk[:, 0] = text_input[0, 0]
|
| 221 |
+
text_input_chunk[:, -1] = text_input[0, -1]
|
| 222 |
+
if clip_skip is None:
|
| 223 |
+
prompt_embeds = pipe.text_encoder(text_input_chunk.to(pipe.device))
|
| 224 |
+
text_embedding = prompt_embeds[0]
|
| 225 |
+
else:
|
| 226 |
+
prompt_embeds = pipe.text_encoder(text_input_chunk.to(pipe.device), output_hidden_states=True)
|
| 227 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 228 |
+
# all the hidden states from the encoder layers. Then index into
|
| 229 |
+
# the tuple to access the hidden states from the desired layer.
|
| 230 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 231 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 232 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 233 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 234 |
+
# layer.
|
| 235 |
+
text_embedding = pipe.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 236 |
+
|
| 237 |
+
if no_boseos_middle:
|
| 238 |
+
if i == 0:
|
| 239 |
+
# discard the ending token
|
| 240 |
+
text_embedding = text_embedding[:, :-1]
|
| 241 |
+
elif i == max_embeddings_multiples - 1:
|
| 242 |
+
# discard the starting token
|
| 243 |
+
text_embedding = text_embedding[:, 1:]
|
| 244 |
+
else:
|
| 245 |
+
# discard both starting and ending tokens
|
| 246 |
+
text_embedding = text_embedding[:, 1:-1]
|
| 247 |
+
|
| 248 |
+
text_embeddings.append(text_embedding)
|
| 249 |
+
text_embeddings = torch.concat(text_embeddings, axis=1)
|
| 250 |
+
else:
|
| 251 |
+
if clip_skip is None:
|
| 252 |
+
clip_skip = 0
|
| 253 |
+
prompt_embeds = pipe.text_encoder(text_input, output_hidden_states=True)[-1][-(clip_skip + 1)]
|
| 254 |
+
text_embeddings = pipe.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 255 |
+
return text_embeddings
|
| 256 |
+
|
| 257 |
+
|
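The chunk slicing above can be checked with plain index arithmetic: a padded input of length (chunk_length - 2) * k + 2 yields k windows of exactly chunk_length tokens, each of which gets the global starting and ending token written into its first and last position. A minimal index-only sketch with fake token ids (no text encoder involved):

import torch

chunk_length = 77
k = 3  # max_embeddings_multiples
text_input = torch.arange((chunk_length - 2) * k + 2).unsqueeze(0)  # shape (1, 227)

chunks = []
for i in range(k):
    chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
    chunk[:, 0] = text_input[0, 0]    # shared starting token
    chunk[:, -1] = text_input[0, -1]  # shared ending token
    chunks.append(chunk)

print([c.shape[1] for c in chunks])  # [77, 77, 77]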
| 258 |
+
def get_weighted_text_embeddings(
|
| 259 |
+
pipe: DiffusionPipeline,
|
| 260 |
+
prompt: Union[str, List[str]],
|
| 261 |
+
uncond_prompt: Optional[Union[str, List[str]]] = None,
|
| 262 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 263 |
+
no_boseos_middle: Optional[bool] = False,
|
| 264 |
+
skip_parsing: Optional[bool] = False,
|
| 265 |
+
skip_weighting: Optional[bool] = False,
|
| 266 |
+
clip_skip=None,
|
| 267 |
+
lora_scale=None,
|
| 268 |
+
):
|
| 269 |
+
r"""
|
| 270 |
+
Prompts can be assigned with local weights using brackets. For example,
|
| 271 |
+
prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
|
| 272 |
+
and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
|
| 273 |
+
|
| 274 |
+
Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.
|
| 275 |
+
|
| 276 |
+
Args:
|
| 277 |
+
pipe (`DiffusionPipeline`):
|
| 278 |
+
Pipe to provide access to the tokenizer and the text encoder.
|
| 279 |
+
prompt (`str` or `List[str]`):
|
| 280 |
+
The prompt or prompts to guide the image generation.
|
| 281 |
+
uncond_prompt (`str` or `List[str]`):
|
| 282 |
+
The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
|
| 283 |
+
is provided, the embeddings of prompt and uncond_prompt are concatenated.
|
| 284 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 285 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 286 |
+
no_boseos_middle (`bool`, *optional*, defaults to `False`):
|
| 287 |
+
If the length of the text tokens is a multiple of the capacity of the text encoder, whether to keep the starting and
|
| 288 |
+
ending tokens in each chunk in the middle.
|
| 289 |
+
skip_parsing (`bool`, *optional*, defaults to `False`):
|
| 290 |
+
Skip the parsing of brackets.
|
| 291 |
+
skip_weighting (`bool`, *optional*, defaults to `False`):
|
| 292 |
+
Skip the weighting. When parsing is skipped, this is forced to True.
|
| 293 |
+
"""
|
| 294 |
+
# set lora scale so that monkey patched LoRA
|
| 295 |
+
# function of text encoder can correctly access it
|
| 296 |
+
if lora_scale is not None and isinstance(pipe, StableDiffusionLoraLoaderMixin):
|
| 297 |
+
pipe._lora_scale = lora_scale
|
| 298 |
+
|
| 299 |
+
# dynamically adjust the LoRA scale
|
| 300 |
+
if not USE_PEFT_BACKEND:
|
| 301 |
+
adjust_lora_scale_text_encoder(pipe.text_encoder, lora_scale)
|
| 302 |
+
else:
|
| 303 |
+
scale_lora_layers(pipe.text_encoder, lora_scale)
|
| 304 |
+
max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
|
| 305 |
+
if isinstance(prompt, str):
|
| 306 |
+
prompt = [prompt]
|
| 307 |
+
|
| 308 |
+
if not skip_parsing:
|
| 309 |
+
prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
|
| 310 |
+
if uncond_prompt is not None:
|
| 311 |
+
if isinstance(uncond_prompt, str):
|
| 312 |
+
uncond_prompt = [uncond_prompt]
|
| 313 |
+
uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
|
| 314 |
+
else:
|
| 315 |
+
prompt_tokens = [
|
| 316 |
+
token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
|
| 317 |
+
]
|
| 318 |
+
prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
|
| 319 |
+
if uncond_prompt is not None:
|
| 320 |
+
if isinstance(uncond_prompt, str):
|
| 321 |
+
uncond_prompt = [uncond_prompt]
|
| 322 |
+
uncond_tokens = [
|
| 323 |
+
token[1:-1]
|
| 324 |
+
for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
|
| 325 |
+
]
|
| 326 |
+
uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
|
| 327 |
+
|
| 328 |
+
# round up the longest length of tokens to a multiple of (model_max_length - 2)
|
| 329 |
+
max_length = max([len(token) for token in prompt_tokens])
|
| 330 |
+
if uncond_prompt is not None:
|
| 331 |
+
max_length = max(max_length, max([len(token) for token in uncond_tokens]))
|
| 332 |
+
|
| 333 |
+
max_embeddings_multiples = min(
|
| 334 |
+
max_embeddings_multiples,
|
| 335 |
+
(max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
|
| 336 |
+
)
|
| 337 |
+
max_embeddings_multiples = max(1, max_embeddings_multiples)
|
| 338 |
+
max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
|
| 339 |
+
|
| 340 |
+
# pad the length of tokens and weights
|
| 341 |
+
bos = pipe.tokenizer.bos_token_id
|
| 342 |
+
eos = pipe.tokenizer.eos_token_id
|
| 343 |
+
pad = getattr(pipe.tokenizer, "pad_token_id", eos)
|
| 344 |
+
prompt_tokens, prompt_weights = pad_tokens_and_weights(
|
| 345 |
+
prompt_tokens,
|
| 346 |
+
prompt_weights,
|
| 347 |
+
max_length,
|
| 348 |
+
bos,
|
| 349 |
+
eos,
|
| 350 |
+
pad,
|
| 351 |
+
no_boseos_middle=no_boseos_middle,
|
| 352 |
+
chunk_length=pipe.tokenizer.model_max_length,
|
| 353 |
+
)
|
| 354 |
+
prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
|
| 355 |
+
if uncond_prompt is not None:
|
| 356 |
+
uncond_tokens, uncond_weights = pad_tokens_and_weights(
|
| 357 |
+
uncond_tokens,
|
| 358 |
+
uncond_weights,
|
| 359 |
+
max_length,
|
| 360 |
+
bos,
|
| 361 |
+
eos,
|
| 362 |
+
pad,
|
| 363 |
+
no_boseos_middle=no_boseos_middle,
|
| 364 |
+
chunk_length=pipe.tokenizer.model_max_length,
|
| 365 |
+
)
|
| 366 |
+
uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)
|
| 367 |
+
|
| 368 |
+
# get the embeddings
|
| 369 |
+
text_embeddings = get_unweighted_text_embeddings(
|
| 370 |
+
pipe, prompt_tokens, pipe.tokenizer.model_max_length, no_boseos_middle=no_boseos_middle, clip_skip=clip_skip
|
| 371 |
+
)
|
| 372 |
+
prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
|
| 373 |
+
if uncond_prompt is not None:
|
| 374 |
+
uncond_embeddings = get_unweighted_text_embeddings(
|
| 375 |
+
pipe,
|
| 376 |
+
uncond_tokens,
|
| 377 |
+
pipe.tokenizer.model_max_length,
|
| 378 |
+
no_boseos_middle=no_boseos_middle,
|
| 379 |
+
clip_skip=clip_skip,
|
| 380 |
+
)
|
| 381 |
+
uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)
|
| 382 |
+
|
| 383 |
+
# assign weights to the prompts and normalize in the sense of mean
|
| 384 |
+
# TODO: should we normalize by chunk or in a whole (current implementation)?
|
| 385 |
+
if (not skip_parsing) and (not skip_weighting):
|
| 386 |
+
previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
|
| 387 |
+
text_embeddings *= prompt_weights.unsqueeze(-1)
|
| 388 |
+
current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
|
| 389 |
+
text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
|
| 390 |
+
if uncond_prompt is not None:
|
| 391 |
+
previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
|
| 392 |
+
uncond_embeddings *= uncond_weights.unsqueeze(-1)
|
| 393 |
+
current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
|
| 394 |
+
uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
|
| 395 |
+
|
| 396 |
+
if pipe.text_encoder is not None:
|
| 397 |
+
if isinstance(pipe, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 398 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 399 |
+
unscale_lora_layers(pipe.text_encoder, lora_scale)
|
| 400 |
+
|
| 401 |
+
if uncond_prompt is not None:
|
| 402 |
+
return text_embeddings, uncond_embeddings
|
| 403 |
+
return text_embeddings, None
|
| 404 |
+
|
| 405 |
+
|
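A hedged usage sketch of get_weighted_text_embeddings: it only needs a pipeline exposing a CLIP tokenizer and text encoder, so a plain Stable Diffusion 1.x checkpoint works; the model id and prompt below are examples only.

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

prompt_embeds, negative_prompt_embeds = get_weighted_text_embeddings(
    pipe,
    prompt="a photo of a (red:1.3) vintage car, highly detailed",
    uncond_prompt="blurry, low quality",
    max_embeddings_multiples=3,
)
print(prompt_embeds.shape)  # e.g. torch.Size([1, 77, 768]) for a short prompt

The returned tensors can then be passed to the pipeline as prompt_embeds / negative_prompt_embeds instead of raw strings.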
| 406 |
+
def preprocess_image(image, batch_size):
|
| 407 |
+
w, h = image.size
|
| 408 |
+
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
|
| 409 |
+
image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
|
| 410 |
+
image = np.array(image).astype(np.float32) / 255.0
|
| 411 |
+
image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
|
| 412 |
+
image = torch.from_numpy(image)
|
| 413 |
+
return 2.0 * image - 1.0
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def preprocess_mask(mask, batch_size, scale_factor=8):
|
| 417 |
+
if not isinstance(mask, torch.Tensor):
|
| 418 |
+
mask = mask.convert("L")
|
| 419 |
+
w, h = mask.size
|
| 420 |
+
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
|
| 421 |
+
mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
|
| 422 |
+
mask = np.array(mask).astype(np.float32) / 255.0
|
| 423 |
+
mask = np.tile(mask, (4, 1, 1))
|
| 424 |
+
mask = np.vstack([mask[None]] * batch_size)
|
| 425 |
+
mask = 1 - mask # repaint white, keep black
|
| 426 |
+
mask = torch.from_numpy(mask)
|
| 427 |
+
return mask
|
| 428 |
+
|
| 429 |
+
else:
|
| 430 |
+
valid_mask_channel_sizes = [1, 3]
|
| 431 |
+
# if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W)
|
| 432 |
+
if mask.shape[3] in valid_mask_channel_sizes:
|
| 433 |
+
mask = mask.permute(0, 3, 1, 2)
|
| 434 |
+
elif mask.shape[1] not in valid_mask_channel_sizes:
|
| 435 |
+
raise ValueError(
|
| 436 |
+
f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension,"
|
| 437 |
+
f" but received mask of shape {tuple(mask.shape)}"
|
| 438 |
+
)
|
| 439 |
+
# (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape
|
| 440 |
+
mask = mask.mean(dim=1, keepdim=True)
|
| 441 |
+
h, w = mask.shape[-2:]
|
| 442 |
+
h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8
|
| 443 |
+
mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor))
|
| 444 |
+
return mask
|
| 445 |
+
|
| 446 |
+
|
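To make the mask convention concrete (white marks the region to repaint, black is kept, and the `1 - mask` flip means kept regions end up with value 1), here is a tiny illustrative example with a synthetic PIL mask:

import numpy as np
import PIL.Image

# Illustrative 64x64 mask: a white square in the centre marks the region to repaint.
arr = np.zeros((64, 64), dtype=np.uint8)
arr[16:48, 16:48] = 255
mask_image = PIL.Image.fromarray(arr)

mask = preprocess_mask(mask_image, batch_size=1)
# Downsampled by the VAE scale factor and broadcast over the 4 latent channels.
print(mask.shape)  # torch.Size([1, 4, 8, 8])
# White input pixels became 0 (repaint there), black pixels became 1 (keep original).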
| 447 |
+
class StableDiffusionLongPromptWeightingPipeline(
|
| 448 |
+
DiffusionPipeline,
|
| 449 |
+
StableDiffusionMixin,
|
| 450 |
+
TextualInversionLoaderMixin,
|
| 451 |
+
StableDiffusionLoraLoaderMixin,
|
| 452 |
+
FromSingleFileMixin,
|
| 453 |
+
):
|
| 454 |
+
r"""
|
| 455 |
+
Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for parsing
|
| 456 |
+
weights in the prompt.
|
| 457 |
+
|
| 458 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 459 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 460 |
+
|
| 461 |
+
Args:
|
| 462 |
+
vae ([`AutoencoderKL`]):
|
| 463 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 464 |
+
text_encoder ([`CLIPTextModel`]):
|
| 465 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 466 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 467 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 468 |
+
tokenizer (`CLIPTokenizer`):
|
| 469 |
+
Tokenizer of class
|
| 470 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 471 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 472 |
+
scheduler ([`SchedulerMixin`]):
|
| 473 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 474 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 475 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 476 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 477 |
+
Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
|
| 478 |
+
feature_extractor ([`CLIPImageProcessor`]):
|
| 479 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 480 |
+
"""
|
| 481 |
+
|
| 482 |
+
model_cpu_offload_seq = "text_encoder->unet->vae"
|
| 483 |
+
_optional_components = ["safety_checker", "feature_extractor"]
|
| 484 |
+
_exclude_from_cpu_offload = ["safety_checker"]
|
| 485 |
+
|
| 486 |
+
def __init__(
|
| 487 |
+
self,
|
| 488 |
+
vae: AutoencoderKL,
|
| 489 |
+
text_encoder: CLIPTextModel,
|
| 490 |
+
tokenizer: CLIPTokenizer,
|
| 491 |
+
unet: UNet2DConditionModel,
|
| 492 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 493 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 494 |
+
feature_extractor: CLIPImageProcessor,
|
| 495 |
+
requires_safety_checker: bool = True,
|
| 496 |
+
):
|
| 497 |
+
super().__init__()
|
| 498 |
+
|
| 499 |
+
if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
|
| 500 |
+
deprecation_message = (
|
| 501 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 502 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 503 |
+
"to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
|
| 504 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 505 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 506 |
+
" file"
|
| 507 |
+
)
|
| 508 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 509 |
+
new_config = dict(scheduler.config)
|
| 510 |
+
new_config["steps_offset"] = 1
|
| 511 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 512 |
+
|
| 513 |
+
if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True:
|
| 514 |
+
deprecation_message = (
|
| 515 |
+
f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
|
| 516 |
+
" `clip_sample` should be set to False in the configuration file. Please make sure to update the"
|
| 517 |
+
" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
|
| 518 |
+
" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
|
| 519 |
+
" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
|
| 520 |
+
)
|
| 521 |
+
deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
|
| 522 |
+
new_config = dict(scheduler.config)
|
| 523 |
+
new_config["clip_sample"] = False
|
| 524 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 525 |
+
|
| 526 |
+
if safety_checker is None and requires_safety_checker:
|
| 527 |
+
logger.warning(
|
| 528 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 529 |
+
" that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 530 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 531 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 532 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 533 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 534 |
+
)
|
| 535 |
+
|
| 536 |
+
if safety_checker is not None and feature_extractor is None:
|
| 537 |
+
raise ValueError(
|
| 538 |
+
f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
|
| 539 |
+
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
|
| 540 |
+
)
|
| 541 |
+
|
| 542 |
+
is_unet_version_less_0_9_0 = (
|
| 543 |
+
unet is not None
|
| 544 |
+
and hasattr(unet.config, "_diffusers_version")
|
| 545 |
+
and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
|
| 546 |
+
)
|
| 547 |
+
is_unet_sample_size_less_64 = (
|
| 548 |
+
unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 549 |
+
)
|
| 550 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 551 |
+
deprecation_message = (
|
| 552 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 553 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 554 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 555 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 556 |
+
" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 557 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 558 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 559 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 560 |
+
" the `unet/config.json` file"
|
| 561 |
+
)
|
| 562 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 563 |
+
new_config = dict(unet.config)
|
| 564 |
+
new_config["sample_size"] = 64
|
| 565 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 566 |
+
self.register_modules(
|
| 567 |
+
vae=vae,
|
| 568 |
+
text_encoder=text_encoder,
|
| 569 |
+
tokenizer=tokenizer,
|
| 570 |
+
unet=unet,
|
| 571 |
+
scheduler=scheduler,
|
| 572 |
+
safety_checker=safety_checker,
|
| 573 |
+
feature_extractor=feature_extractor,
|
| 574 |
+
)
|
| 575 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 576 |
+
|
| 577 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 578 |
+
self.register_to_config(
|
| 579 |
+
requires_safety_checker=requires_safety_checker,
|
| 580 |
+
)
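# --- Illustrative note (not part of the original pipeline code) ---
# The `vae_scale_factor` computed in `__init__` above links latent resolution to pixel
# resolution. A minimal sketch of the arithmetic; the four-entry `block_out_channels`
# list is an assumption matching the standard SD 1.x autoencoder config:
block_out_channels = [128, 256, 512, 512]              # hypothetical VAE config
vae_scale_factor = 2 ** (len(block_out_channels) - 1)  # 2**3
print(vae_scale_factor)          # 8
print(512 // vae_scale_factor)   # 64: a 512x512 image maps to a 64x64 latent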
|
| 581 |
+
|
| 582 |
+
def _encode_prompt(
|
| 583 |
+
self,
|
| 584 |
+
prompt,
|
| 585 |
+
device,
|
| 586 |
+
num_images_per_prompt,
|
| 587 |
+
do_classifier_free_guidance,
|
| 588 |
+
negative_prompt=None,
|
| 589 |
+
max_embeddings_multiples=3,
|
| 590 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 591 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 592 |
+
clip_skip: Optional[int] = None,
|
| 593 |
+
lora_scale: Optional[float] = None,
|
| 594 |
+
):
|
| 595 |
+
r"""
|
| 596 |
+
Encodes the prompt into text encoder hidden states.
|
| 597 |
+
|
| 598 |
+
Args:
|
| 599 |
+
prompt (`str` or `list(int)`):
|
| 600 |
+
prompt to be encoded
|
| 601 |
+
device: (`torch.device`):
|
| 602 |
+
torch device
|
| 603 |
+
num_images_per_prompt (`int`):
|
| 604 |
+
number of images that should be generated per prompt
|
| 605 |
+
do_classifier_free_guidance (`bool`):
|
| 606 |
+
whether to use classifier free guidance or not
|
| 607 |
+
negative_prompt (`str` or `List[str]`):
|
| 608 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 609 |
+
if `guidance_scale` is less than `1`).
|
| 610 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 611 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 612 |
+
"""
|
| 613 |
+
if prompt is not None and isinstance(prompt, str):
|
| 614 |
+
batch_size = 1
|
| 615 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 616 |
+
batch_size = len(prompt)
|
| 617 |
+
else:
|
| 618 |
+
batch_size = prompt_embeds.shape[0]
|
| 619 |
+
|
| 620 |
+
if negative_prompt_embeds is None:
|
| 621 |
+
if negative_prompt is None:
|
| 622 |
+
negative_prompt = [""] * batch_size
|
| 623 |
+
elif isinstance(negative_prompt, str):
|
| 624 |
+
negative_prompt = [negative_prompt] * batch_size
|
| 625 |
+
if batch_size != len(negative_prompt):
|
| 626 |
+
raise ValueError(
|
| 627 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 628 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 629 |
+
" the batch size of `prompt`."
|
| 630 |
+
)
|
| 631 |
+
if prompt_embeds is None or negative_prompt_embeds is None:
|
| 632 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 633 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 634 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 635 |
+
negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer)
|
| 636 |
+
|
| 637 |
+
prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings(
|
| 638 |
+
pipe=self,
|
| 639 |
+
prompt=prompt,
|
| 640 |
+
uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
|
| 641 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 642 |
+
clip_skip=clip_skip,
|
| 643 |
+
lora_scale=lora_scale,
|
| 644 |
+
)
|
| 645 |
+
if prompt_embeds is None:
|
| 646 |
+
prompt_embeds = prompt_embeds1
|
| 647 |
+
if negative_prompt_embeds is None:
|
| 648 |
+
negative_prompt_embeds = negative_prompt_embeds1
|
| 649 |
+
|
| 650 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 651 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 652 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 653 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 654 |
+
|
| 655 |
+
if do_classifier_free_guidance:
|
| 656 |
+
bs_embed, seq_len, _ = negative_prompt_embeds.shape
|
| 657 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 658 |
+
negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 659 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 660 |
+
|
| 661 |
+
return prompt_embeds
|
| 662 |
+
|
| 663 |
+
def check_inputs(
|
| 664 |
+
self,
|
| 665 |
+
prompt,
|
| 666 |
+
height,
|
| 667 |
+
width,
|
| 668 |
+
strength,
|
| 669 |
+
callback_steps,
|
| 670 |
+
negative_prompt=None,
|
| 671 |
+
prompt_embeds=None,
|
| 672 |
+
negative_prompt_embeds=None,
|
| 673 |
+
):
|
| 674 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 675 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 676 |
+
|
| 677 |
+
if strength < 0 or strength > 1:
|
| 678 |
+
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
|
| 679 |
+
|
| 680 |
+
if (callback_steps is None) or (
|
| 681 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 682 |
+
):
|
| 683 |
+
raise ValueError(
|
| 684 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 685 |
+
f" {type(callback_steps)}."
|
| 686 |
+
)
|
| 687 |
+
|
| 688 |
+
if prompt is not None and prompt_embeds is not None:
|
| 689 |
+
raise ValueError(
|
| 690 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 691 |
+
" only forward one of the two."
|
| 692 |
+
)
|
| 693 |
+
elif prompt is None and prompt_embeds is None:
|
| 694 |
+
raise ValueError(
|
| 695 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 696 |
+
)
|
| 697 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 698 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 699 |
+
|
| 700 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 701 |
+
raise ValueError(
|
| 702 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 703 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 704 |
+
)
|
| 705 |
+
|
| 706 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 707 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 708 |
+
raise ValueError(
|
| 709 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 710 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 711 |
+
f" {negative_prompt_embeds.shape}."
|
| 712 |
+
)
|
| 713 |
+
|
| 714 |
+
def get_timesteps(self, num_inference_steps, strength, device, is_text2img):
|
| 715 |
+
if is_text2img:
|
| 716 |
+
return self.scheduler.timesteps.to(device), num_inference_steps
|
| 717 |
+
else:
|
| 718 |
+
# get the original timestep using init_timestep
|
| 719 |
+
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
| 720 |
+
|
| 721 |
+
t_start = max(num_inference_steps - init_timestep, 0)
|
| 722 |
+
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
|
| 723 |
+
|
| 724 |
+
return timesteps, num_inference_steps - t_start
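# --- Illustrative note (not part of the original pipeline code) ---
# To make the `strength` arithmetic above concrete, this standalone sketch mirrors the
# truncation applied on the img2img/inpaint path (a scheduler `order` of 1 is assumed):
num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10
print(t_start, num_inference_steps - t_start)  # 10 40 -> only the last 40 steps run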
|
| 725 |
+
|
| 726 |
+
def run_safety_checker(self, image, device, dtype):
|
| 727 |
+
if self.safety_checker is not None:
|
| 728 |
+
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
|
| 729 |
+
image, has_nsfw_concept = self.safety_checker(
|
| 730 |
+
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
|
| 731 |
+
)
|
| 732 |
+
else:
|
| 733 |
+
has_nsfw_concept = None
|
| 734 |
+
return image, has_nsfw_concept
|
| 735 |
+
|
| 736 |
+
def decode_latents(self, latents):
|
| 737 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 738 |
+
image = self.vae.decode(latents).sample
|
| 739 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 740 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 741 |
+
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
|
| 742 |
+
return image
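# --- Illustrative note (not part of the original pipeline code) ---
# The post-processing in `decode_latents` can be checked in isolation; here a random
# tensor stands in for `self.vae.decode(latents).sample` just to show the shape and
# range handling:
import torch
fake_decoded = torch.randn(1, 3, 512, 512)                # pretend VAE output in [-1, 1]
image = (fake_decoded / 2 + 0.5).clamp(0, 1)              # rescale to [0, 1]
image = image.cpu().permute(0, 2, 3, 1).float().numpy()   # NCHW -> NHWC, float32 numpy
print(image.shape, image.dtype)                           # (1, 512, 512, 3) float32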
|
| 743 |
+
|
| 744 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 745 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 746 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 747 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 748 |
+
# and should be between [0, 1]
|
| 749 |
+
|
| 750 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 751 |
+
extra_step_kwargs = {}
|
| 752 |
+
if accepts_eta:
|
| 753 |
+
extra_step_kwargs["eta"] = eta
|
| 754 |
+
|
| 755 |
+
# check if the scheduler accepts generator
|
| 756 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 757 |
+
if accepts_generator:
|
| 758 |
+
extra_step_kwargs["generator"] = generator
|
| 759 |
+
return extra_step_kwargs
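# --- Illustrative note (not part of the original pipeline code) ---
# The signature inspection above is what keeps the pipeline scheduler-agnostic: `eta` is
# only forwarded to schedulers whose `step()` accepts it. The same check applied to two
# stock schedulers:
import inspect
from diffusers import DDIMScheduler, PNDMScheduler
print("eta" in inspect.signature(DDIMScheduler.step).parameters)  # True
print("eta" in inspect.signature(PNDMScheduler.step).parameters)  # False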
|
| 760 |
+
|
| 761 |
+
def prepare_latents(
|
| 762 |
+
self,
|
| 763 |
+
image,
|
| 764 |
+
timestep,
|
| 765 |
+
num_images_per_prompt,
|
| 766 |
+
batch_size,
|
| 767 |
+
num_channels_latents,
|
| 768 |
+
height,
|
| 769 |
+
width,
|
| 770 |
+
dtype,
|
| 771 |
+
device,
|
| 772 |
+
generator,
|
| 773 |
+
latents=None,
|
| 774 |
+
):
|
| 775 |
+
if image is None:
|
| 776 |
+
batch_size = batch_size * num_images_per_prompt
|
| 777 |
+
shape = (
|
| 778 |
+
batch_size,
|
| 779 |
+
num_channels_latents,
|
| 780 |
+
int(height) // self.vae_scale_factor,
|
| 781 |
+
int(width) // self.vae_scale_factor,
|
| 782 |
+
)
|
| 783 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 784 |
+
raise ValueError(
|
| 785 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 786 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 787 |
+
)
|
| 788 |
+
|
| 789 |
+
if latents is None:
|
| 790 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 791 |
+
else:
|
| 792 |
+
latents = latents.to(device)
|
| 793 |
+
|
| 794 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 795 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 796 |
+
return latents, None, None
|
| 797 |
+
else:
|
| 798 |
+
image = image.to(device=self.device, dtype=dtype)
|
| 799 |
+
init_latent_dist = self.vae.encode(image).latent_dist
|
| 800 |
+
init_latents = init_latent_dist.sample(generator=generator)
|
| 801 |
+
init_latents = self.vae.config.scaling_factor * init_latents
|
| 802 |
+
|
| 803 |
+
# Expand init_latents for batch_size and num_images_per_prompt
|
| 804 |
+
init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
|
| 805 |
+
init_latents_orig = init_latents
|
| 806 |
+
|
| 807 |
+
# add noise to latents using the timesteps
|
| 808 |
+
noise = randn_tensor(init_latents.shape, generator=generator, device=self.device, dtype=dtype)
|
| 809 |
+
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
| 810 |
+
latents = init_latents
|
| 811 |
+
return latents, init_latents_orig, noise
|
| 812 |
+
|
| 813 |
+
@torch.no_grad()
|
| 814 |
+
def __call__(
|
| 815 |
+
self,
|
| 816 |
+
prompt: Union[str, List[str]],
|
| 817 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 818 |
+
image: Union[torch.Tensor, PIL.Image.Image] = None,
|
| 819 |
+
mask_image: Union[torch.Tensor, PIL.Image.Image] = None,
|
| 820 |
+
height: int = 512,
|
| 821 |
+
width: int = 512,
|
| 822 |
+
num_inference_steps: int = 50,
|
| 823 |
+
guidance_scale: float = 7.5,
|
| 824 |
+
strength: float = 0.8,
|
| 825 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 826 |
+
add_predicted_noise: Optional[bool] = False,
|
| 827 |
+
eta: float = 0.0,
|
| 828 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 829 |
+
latents: Optional[torch.Tensor] = None,
|
| 830 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 831 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 832 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 833 |
+
output_type: Optional[str] = "pil",
|
| 834 |
+
return_dict: bool = True,
|
| 835 |
+
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
|
| 836 |
+
is_cancelled_callback: Optional[Callable[[], bool]] = None,
|
| 837 |
+
clip_skip: Optional[int] = None,
|
| 838 |
+
callback_steps: int = 1,
|
| 839 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 840 |
+
):
|
| 841 |
+
r"""
|
| 842 |
+
Function invoked when calling the pipeline for generation.
|
| 843 |
+
|
| 844 |
+
Args:
|
| 845 |
+
prompt (`str` or `List[str]`):
|
| 846 |
+
The prompt or prompts to guide the image generation.
|
| 847 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 848 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 849 |
+
if `guidance_scale` is less than `1`).
|
| 850 |
+
image (`torch.Tensor` or `PIL.Image.Image`):
|
| 851 |
+
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
| 852 |
+
process.
|
| 853 |
+
mask_image (`torch.Tensor` or `PIL.Image.Image`):
|
| 854 |
+
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
| 855 |
+
replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
|
| 856 |
+
PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
|
| 857 |
+
contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
| 858 |
+
height (`int`, *optional*, defaults to 512):
|
| 859 |
+
The height in pixels of the generated image.
|
| 860 |
+
width (`int`, *optional*, defaults to 512):
|
| 861 |
+
The width in pixels of the generated image.
|
| 862 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 863 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 864 |
+
expense of slower inference.
|
| 865 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 866 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 867 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 868 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 869 |
+
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 870 |
+
usually at the expense of lower image quality.
|
| 871 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 872 |
+
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
|
| 873 |
+
`image` will be used as a starting point, adding more noise to it the larger the `strength`. The
|
| 874 |
+
number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
|
| 875 |
+
noise will be maximum and the denoising process will run for the full number of iterations specified in
|
| 876 |
+
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
|
| 877 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 878 |
+
The number of images to generate per prompt.
|
| 879 |
+
add_predicted_noise (`bool`, *optional*, defaults to False):
|
| 880 |
+
Use predicted noise instead of random noise when constructing noisy versions of the original image in
|
| 881 |
+
the reverse diffusion process
|
| 882 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 883 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 884 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 885 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 886 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 887 |
+
to make generation deterministic.
|
| 888 |
+
latents (`torch.Tensor`, *optional*):
|
| 889 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 890 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 891 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 892 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 893 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 894 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 895 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 896 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 897 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 898 |
+
argument.
|
| 899 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 900 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 901 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 902 |
+
The output format of the generated image. Choose between
|
| 903 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 904 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 905 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 906 |
+
plain tuple.
|
| 907 |
+
callback (`Callable`, *optional*):
|
| 908 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 909 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
|
| 910 |
+
is_cancelled_callback (`Callable`, *optional*):
|
| 911 |
+
A function that will be called every `callback_steps` steps during inference. If the function returns
|
| 912 |
+
`True`, the inference will be cancelled.
|
| 913 |
+
clip_skip (`int`, *optional*):
|
| 914 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 915 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 916 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 917 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 918 |
+
called at every step.
|
| 919 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 920 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 921 |
+
`self.processor` in
|
| 922 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 923 |
+
|
| 924 |
+
Returns:
|
| 925 |
+
`None` if cancelled by `is_cancelled_callback`,
|
| 926 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 927 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 928 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 929 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 930 |
+
(nsfw) content, according to the `safety_checker`.
|
| 931 |
+
"""
|
| 932 |
+
# 0. Default height and width to unet
|
| 933 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 934 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 935 |
+
|
| 936 |
+
# 1. Check inputs. Raise error if not correct
|
| 937 |
+
self.check_inputs(
|
| 938 |
+
prompt, height, width, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
|
| 939 |
+
)
|
| 940 |
+
|
| 941 |
+
# 2. Define call parameters
|
| 942 |
+
if prompt is not None and isinstance(prompt, str):
|
| 943 |
+
batch_size = 1
|
| 944 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 945 |
+
batch_size = len(prompt)
|
| 946 |
+
else:
|
| 947 |
+
batch_size = prompt_embeds.shape[0]
|
| 948 |
+
|
| 949 |
+
device = self._execution_device
|
| 950 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 951 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 952 |
+
# corresponds to doing no classifier free guidance.
|
| 953 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 954 |
+
lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 955 |
+
|
| 956 |
+
# 3. Encode input prompt
|
| 957 |
+
prompt_embeds = self._encode_prompt(
|
| 958 |
+
prompt,
|
| 959 |
+
device,
|
| 960 |
+
num_images_per_prompt,
|
| 961 |
+
do_classifier_free_guidance,
|
| 962 |
+
negative_prompt,
|
| 963 |
+
max_embeddings_multiples,
|
| 964 |
+
prompt_embeds=prompt_embeds,
|
| 965 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 966 |
+
clip_skip=clip_skip,
|
| 967 |
+
lora_scale=lora_scale,
|
| 968 |
+
)
|
| 969 |
+
dtype = prompt_embeds.dtype
|
| 970 |
+
|
| 971 |
+
# 4. Preprocess image and mask
|
| 972 |
+
if isinstance(image, PIL.Image.Image):
|
| 973 |
+
image = preprocess_image(image, batch_size)
|
| 974 |
+
if image is not None:
|
| 975 |
+
image = image.to(device=self.device, dtype=dtype)
|
| 976 |
+
if isinstance(mask_image, PIL.Image.Image):
|
| 977 |
+
mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor)
|
| 978 |
+
if mask_image is not None:
|
| 979 |
+
mask = mask_image.to(device=self.device, dtype=dtype)
|
| 980 |
+
mask = torch.cat([mask] * num_images_per_prompt)
|
| 981 |
+
else:
|
| 982 |
+
mask = None
|
| 983 |
+
|
| 984 |
+
# 5. set timesteps
|
| 985 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 986 |
+
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None)
|
| 987 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 988 |
+
|
| 989 |
+
# 6. Prepare latent variables
|
| 990 |
+
latents, init_latents_orig, noise = self.prepare_latents(
|
| 991 |
+
image,
|
| 992 |
+
latent_timestep,
|
| 993 |
+
num_images_per_prompt,
|
| 994 |
+
batch_size,
|
| 995 |
+
self.unet.config.in_channels,
|
| 996 |
+
height,
|
| 997 |
+
width,
|
| 998 |
+
dtype,
|
| 999 |
+
device,
|
| 1000 |
+
generator,
|
| 1001 |
+
latents,
|
| 1002 |
+
)
|
| 1003 |
+
|
| 1004 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1005 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1006 |
+
|
| 1007 |
+
# 8. Denoising loop
|
| 1008 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 1009 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1010 |
+
for i, t in enumerate(timesteps):
|
| 1011 |
+
# expand the latents if we are doing classifier free guidance
|
| 1012 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 1013 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1014 |
+
|
| 1015 |
+
# predict the noise residual
|
| 1016 |
+
noise_pred = self.unet(
|
| 1017 |
+
latent_model_input,
|
| 1018 |
+
t,
|
| 1019 |
+
encoder_hidden_states=prompt_embeds,
|
| 1020 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1021 |
+
).sample
|
| 1022 |
+
|
| 1023 |
+
# perform guidance
|
| 1024 |
+
if do_classifier_free_guidance:
|
| 1025 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1026 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1027 |
+
|
| 1028 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1029 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 1030 |
+
|
| 1031 |
+
if mask is not None:
|
| 1032 |
+
# masking
|
| 1033 |
+
if add_predicted_noise:
|
| 1034 |
+
init_latents_proper = self.scheduler.add_noise(
|
| 1035 |
+
init_latents_orig, noise_pred_uncond, torch.tensor([t])
|
| 1036 |
+
)
|
| 1037 |
+
else:
|
| 1038 |
+
init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
|
| 1039 |
+
latents = (init_latents_proper * mask) + (latents * (1 - mask))
|
| 1040 |
+
|
| 1041 |
+
# call the callback, if provided
|
| 1042 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1043 |
+
progress_bar.update()
|
| 1044 |
+
if i % callback_steps == 0:
|
| 1045 |
+
if callback is not None:
|
| 1046 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1047 |
+
callback(step_idx, t, latents)
|
| 1048 |
+
if is_cancelled_callback is not None and is_cancelled_callback():
|
| 1049 |
+
return None
|
| 1050 |
+
|
| 1051 |
+
if output_type == "latent":
|
| 1052 |
+
image = latents
|
| 1053 |
+
has_nsfw_concept = None
|
| 1054 |
+
elif output_type == "pil":
|
| 1055 |
+
# 9. Post-processing
|
| 1056 |
+
image = self.decode_latents(latents)
|
| 1057 |
+
|
| 1058 |
+
# 10. Run safety checker
|
| 1059 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 1060 |
+
|
| 1061 |
+
# 11. Convert to PIL
|
| 1062 |
+
image = self.numpy_to_pil(image)
|
| 1063 |
+
else:
|
| 1064 |
+
# 9. Post-processing
|
| 1065 |
+
image = self.decode_latents(latents)
|
| 1066 |
+
|
| 1067 |
+
# 10. Run safety checker
|
| 1068 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 1069 |
+
|
| 1070 |
+
# Offload last model to CPU
|
| 1071 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 1072 |
+
self.final_offload_hook.offload()
|
| 1073 |
+
|
| 1074 |
+
if not return_dict:
|
| 1075 |
+
return image, has_nsfw_concept
|
| 1076 |
+
|
| 1077 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
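# --- Illustrative usage sketch (not part of the original pipeline code) ---
# Driving `__call__` end to end. The checkpoint id is taken from the deprecation message
# earlier in this file; the `custom_pipeline` name assumes this file is registered as the
# `lpw_stable_diffusion` community pipeline.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# Weighted tokens use the (token:weight) syntax handled by get_weighted_text_embeddings.
result = pipe(
    prompt="a (photorealistic:1.3) mountain cabin at sunset, (cinematic lighting:1.2)",
    negative_prompt="lowres, blurry",
    num_inference_steps=30,
    guidance_scale=7.5,
)
result.images[0].save("cabin.png")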
|
| 1078 |
+
|
| 1079 |
+
def text2img(
|
| 1080 |
+
self,
|
| 1081 |
+
prompt: Union[str, List[str]],
|
| 1082 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1083 |
+
height: int = 512,
|
| 1084 |
+
width: int = 512,
|
| 1085 |
+
num_inference_steps: int = 50,
|
| 1086 |
+
guidance_scale: float = 7.5,
|
| 1087 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1088 |
+
eta: float = 0.0,
|
| 1089 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 1090 |
+
latents: Optional[torch.Tensor] = None,
|
| 1091 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 1092 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 1093 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 1094 |
+
output_type: Optional[str] = "pil",
|
| 1095 |
+
return_dict: bool = True,
|
| 1096 |
+
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
|
| 1097 |
+
is_cancelled_callback: Optional[Callable[[], bool]] = None,
|
| 1098 |
+
clip_skip: Optional[int] = None,
|
| 1099 |
+
callback_steps: int = 1,
|
| 1100 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 1101 |
+
):
|
| 1102 |
+
r"""
|
| 1103 |
+
Function for text-to-image generation.
|
| 1104 |
+
Args:
|
| 1105 |
+
prompt (`str` or `List[str]`):
|
| 1106 |
+
The prompt or prompts to guide the image generation.
|
| 1107 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1108 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 1109 |
+
if `guidance_scale` is less than `1`).
|
| 1110 |
+
height (`int`, *optional*, defaults to 512):
|
| 1111 |
+
The height in pixels of the generated image.
|
| 1112 |
+
width (`int`, *optional*, defaults to 512):
|
| 1113 |
+
The width in pixels of the generated image.
|
| 1114 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1115 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 1116 |
+
expense of slower inference.
|
| 1117 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 1118 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 1119 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1120 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 1121 |
+
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 1122 |
+
usually at the expense of lower image quality.
|
| 1123 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1124 |
+
The number of images to generate per prompt.
|
| 1125 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1126 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 1127 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1128 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 1129 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 1130 |
+
to make generation deterministic.
|
| 1131 |
+
latents (`torch.Tensor`, *optional*):
|
| 1132 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 1133 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 1134 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 1135 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 1136 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 1137 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 1138 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 1139 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1140 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 1141 |
+
argument.
|
| 1142 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 1143 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 1144 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1145 |
+
The output format of the generated image. Choose between
|
| 1146 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1147 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1148 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1149 |
+
plain tuple.
|
| 1150 |
+
callback (`Callable`, *optional*):
|
| 1151 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 1152 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
|
| 1153 |
+
is_cancelled_callback (`Callable`, *optional*):
|
| 1154 |
+
A function that will be called every `callback_steps` steps during inference. If the function returns
|
| 1155 |
+
`True`, the inference will be cancelled.
|
| 1156 |
+
clip_skip (`int`, *optional*):
|
| 1157 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 1158 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 1159 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 1160 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 1161 |
+
called at every step.
|
| 1162 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 1163 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 1164 |
+
`self.processor` in
|
| 1165 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 1166 |
+
|
| 1167 |
+
Returns:
|
| 1168 |
+
`None` if cancelled by `is_cancelled_callback`,
|
| 1169 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 1170 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 1171 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 1172 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 1173 |
+
(nsfw) content, according to the `safety_checker`.
|
| 1174 |
+
"""
|
| 1175 |
+
return self.__call__(
|
| 1176 |
+
prompt=prompt,
|
| 1177 |
+
negative_prompt=negative_prompt,
|
| 1178 |
+
height=height,
|
| 1179 |
+
width=width,
|
| 1180 |
+
num_inference_steps=num_inference_steps,
|
| 1181 |
+
guidance_scale=guidance_scale,
|
| 1182 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1183 |
+
eta=eta,
|
| 1184 |
+
generator=generator,
|
| 1185 |
+
latents=latents,
|
| 1186 |
+
prompt_embeds=prompt_embeds,
|
| 1187 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1188 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 1189 |
+
output_type=output_type,
|
| 1190 |
+
return_dict=return_dict,
|
| 1191 |
+
callback=callback,
|
| 1192 |
+
is_cancelled_callback=is_cancelled_callback,
|
| 1193 |
+
clip_skip=clip_skip,
|
| 1194 |
+
callback_steps=callback_steps,
|
| 1195 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1196 |
+
)
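# --- Illustrative usage sketch (not part of the original pipeline code) ---
# `text2img` is a thin wrapper over `__call__` with the image/mask arguments omitted;
# `pipe` is assumed to be loaded as in the sketch after `__call__` above.
images = pipe.text2img(
    prompt="an (isometric:1.2) watercolor city at night",
    height=512,
    width=512,
    num_inference_steps=30,
).images
images[0].save("city.png")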
|
| 1197 |
+
|
| 1198 |
+
def img2img(
|
| 1199 |
+
self,
|
| 1200 |
+
image: Union[torch.Tensor, PIL.Image.Image],
|
| 1201 |
+
prompt: Union[str, List[str]],
|
| 1202 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1203 |
+
strength: float = 0.8,
|
| 1204 |
+
num_inference_steps: Optional[int] = 50,
|
| 1205 |
+
guidance_scale: Optional[float] = 7.5,
|
| 1206 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1207 |
+
eta: Optional[float] = 0.0,
|
| 1208 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 1209 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 1210 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 1211 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 1212 |
+
output_type: Optional[str] = "pil",
|
| 1213 |
+
return_dict: bool = True,
|
| 1214 |
+
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
|
| 1215 |
+
is_cancelled_callback: Optional[Callable[[], bool]] = None,
|
| 1216 |
+
callback_steps: int = 1,
|
| 1217 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 1218 |
+
):
|
| 1219 |
+
r"""
|
| 1220 |
+
Function for image-to-image generation.
|
| 1221 |
+
Args:
|
| 1222 |
+
image (`torch.Tensor` or `PIL.Image.Image`):
|
| 1223 |
+
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
| 1224 |
+
process.
|
| 1225 |
+
prompt (`str` or `List[str]`):
|
| 1226 |
+
The prompt or prompts to guide the image generation.
|
| 1227 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1228 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 1229 |
+
if `guidance_scale` is less than `1`).
|
| 1230 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 1231 |
+
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
|
| 1232 |
+
`image` will be used as a starting point, adding more noise to it the larger the `strength`. The
|
| 1233 |
+
number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
|
| 1234 |
+
noise will be maximum and the denoising process will run for the full number of iterations specified in
|
| 1235 |
+
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
|
| 1236 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1237 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 1238 |
+
expense of slower inference. This parameter will be modulated by `strength`.
|
| 1239 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 1240 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 1241 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1242 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 1243 |
+
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 1244 |
+
usually at the expense of lower image quality.
|
| 1245 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1246 |
+
The number of images to generate per prompt.
|
| 1247 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1248 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 1249 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1250 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 1251 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 1252 |
+
to make generation deterministic.
|
| 1253 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 1254 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 1255 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 1256 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 1257 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1258 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 1259 |
+
argument.
|
| 1260 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 1261 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 1262 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1263 |
+
The output format of the generated image. Choose between
|
| 1264 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1265 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1266 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1267 |
+
plain tuple.
|
| 1268 |
+
callback (`Callable`, *optional*):
|
| 1269 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 1270 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
|
| 1271 |
+
is_cancelled_callback (`Callable`, *optional*):
|
| 1272 |
+
A function that will be called every `callback_steps` steps during inference. If the function returns
|
| 1273 |
+
`True`, the inference will be cancelled.
|
| 1274 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 1275 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 1276 |
+
called at every step.
|
| 1277 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 1278 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 1279 |
+
`self.processor` in
|
| 1280 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 1281 |
+
|
| 1282 |
+
Returns:
|
| 1283 |
+
`None` if cancelled by `is_cancelled_callback`,
|
| 1284 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 1285 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 1286 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 1287 |
+
(nsfw) content, according to the `safety_checker`.
|
| 1288 |
+
"""
|
| 1289 |
+
return self.__call__(
|
| 1290 |
+
prompt=prompt,
|
| 1291 |
+
negative_prompt=negative_prompt,
|
| 1292 |
+
image=image,
|
| 1293 |
+
num_inference_steps=num_inference_steps,
|
| 1294 |
+
guidance_scale=guidance_scale,
|
| 1295 |
+
strength=strength,
|
| 1296 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1297 |
+
eta=eta,
|
| 1298 |
+
generator=generator,
|
| 1299 |
+
prompt_embeds=prompt_embeds,
|
| 1300 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1301 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 1302 |
+
output_type=output_type,
|
| 1303 |
+
return_dict=return_dict,
|
| 1304 |
+
callback=callback,
|
| 1305 |
+
is_cancelled_callback=is_cancelled_callback,
|
| 1306 |
+
callback_steps=callback_steps,
|
| 1307 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1308 |
+
)
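# --- Illustrative usage sketch (not part of the original pipeline code) ---
# `img2img` forwards to `__call__` with an init `image`, so `strength` decides how much
# of the input survives; `pipe` is assumed to be loaded as in the earlier sketch.
from PIL import Image

init_image = Image.open("cabin.png").convert("RGB").resize((512, 512))
out = pipe.img2img(
    image=init_image,
    prompt="the same cabin in (heavy snowfall:1.2)",
    strength=0.6,              # lower strength keeps more of the input image
    num_inference_steps=50,
)
out.images[0].save("cabin_snow.png")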
|
| 1309 |
+
|
| 1310 |
+
def inpaint(
|
| 1311 |
+
self,
|
| 1312 |
+
image: Union[torch.Tensor, PIL.Image.Image],
|
| 1313 |
+
mask_image: Union[torch.Tensor, PIL.Image.Image],
|
| 1314 |
+
prompt: Union[str, List[str]],
|
| 1315 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1316 |
+
strength: float = 0.8,
|
| 1317 |
+
num_inference_steps: Optional[int] = 50,
|
| 1318 |
+
guidance_scale: Optional[float] = 7.5,
|
| 1319 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1320 |
+
add_predicted_noise: Optional[bool] = False,
|
| 1321 |
+
eta: Optional[float] = 0.0,
|
| 1322 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 1323 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 1324 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 1325 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 1326 |
+
output_type: Optional[str] = "pil",
|
| 1327 |
+
return_dict: bool = True,
|
| 1328 |
+
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
|
| 1329 |
+
is_cancelled_callback: Optional[Callable[[], bool]] = None,
|
| 1330 |
+
callback_steps: int = 1,
|
| 1331 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 1332 |
+
):
|
| 1333 |
+
r"""
|
| 1334 |
+
Function for inpainting.
|
| 1335 |
+
Args:
|
| 1336 |
+
image (`torch.Tensor` or `PIL.Image.Image`):
|
| 1337 |
+
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
| 1338 |
+
process. This is the image whose masked region will be inpainted.
|
| 1339 |
+
mask_image (`torch.Tensor` or `PIL.Image.Image`):
|
| 1340 |
+
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
| 1341 |
+
replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
|
| 1342 |
+
PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
|
| 1343 |
+
contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
| 1344 |
+
prompt (`str` or `List[str]`):
|
| 1345 |
+
The prompt or prompts to guide the image generation.
|
| 1346 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1347 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 1348 |
+
if `guidance_scale` is less than `1`).
|
| 1349 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 1350 |
+
Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
|
| 1351 |
+
is 1, the denoising process will be run on the masked area for the full number of iterations specified
|
| 1352 |
+
in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
|
| 1353 |
+
noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
|
| 1354 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1355 |
+
The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
|
| 1356 |
+
the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
|
| 1357 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 1358 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 1359 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1360 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 1361 |
+
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 1362 |
+
usually at the expense of lower image quality.
|
| 1363 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1364 |
+
The number of images to generate per prompt.
|
| 1365 |
+
add_predicted_noise (`bool`, *optional*, defaults to False):
|
| 1366 |
+
Use predicted noise instead of random noise when constructing noisy versions of the original image in
|
| 1367 |
+
the reverse diffusion process
|
| 1368 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1369 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 1370 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1371 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 1372 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 1373 |
+
to make generation deterministic.
|
| 1374 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 1375 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 1376 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 1377 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 1378 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1379 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 1380 |
+
argument.
|
| 1381 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 1382 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 1383 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1384 |
+
The output format of the generated image. Choose between
|
| 1385 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1386 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1387 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1388 |
+
plain tuple.
|
| 1389 |
+
callback (`Callable`, *optional*):
|
| 1390 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 1391 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
|
| 1392 |
+
is_cancelled_callback (`Callable`, *optional*):
|
| 1393 |
+
A function that will be called every `callback_steps` steps during inference. If the function returns
|
| 1394 |
+
`True`, the inference will be cancelled.
|
| 1395 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 1396 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 1397 |
+
called at every step.
|
| 1398 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 1399 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 1400 |
+
`self.processor` in
|
| 1401 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 1402 |
+
|
| 1403 |
+
Returns:
|
| 1404 |
+
`None` if cancelled by `is_cancelled_callback`,
|
| 1405 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 1406 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 1407 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 1408 |
+
(nsfw) content, according to the `safety_checker`.
|
| 1409 |
+
"""
|
| 1410 |
+
return self.__call__(
|
| 1411 |
+
prompt=prompt,
|
| 1412 |
+
negative_prompt=negative_prompt,
|
| 1413 |
+
image=image,
|
| 1414 |
+
mask_image=mask_image,
|
| 1415 |
+
num_inference_steps=num_inference_steps,
|
| 1416 |
+
guidance_scale=guidance_scale,
|
| 1417 |
+
strength=strength,
|
| 1418 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1419 |
+
add_predicted_noise=add_predicted_noise,
|
| 1420 |
+
eta=eta,
|
| 1421 |
+
generator=generator,
|
| 1422 |
+
prompt_embeds=prompt_embeds,
|
| 1423 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1424 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 1425 |
+
output_type=output_type,
|
| 1426 |
+
return_dict=return_dict,
|
| 1427 |
+
callback=callback,
|
| 1428 |
+
is_cancelled_callback=is_cancelled_callback,
|
| 1429 |
+
callback_steps=callback_steps,
|
| 1430 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1431 |
+
)
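# --- Illustrative usage sketch (not part of the original pipeline code) ---
# `inpaint` adds a `mask_image` on top of the img2img path: white pixels are repainted,
# black pixels are preserved, as described in the docstring. `pipe` is assumed to be
# loaded as in the earlier sketch.
from PIL import Image, ImageDraw

init_image = Image.open("cabin.png").convert("RGB").resize((512, 512))
mask = Image.new("L", (512, 512), 0)                            # black = keep
ImageDraw.Draw(mask).rectangle([156, 156, 356, 356], fill=255)  # white square = repaint

out = pipe.inpaint(
    image=init_image,
    mask_image=mask,
    prompt="a (stone chimney:1.2) on the cabin roof",
    strength=0.75,
    num_inference_steps=50,
)
out.images[0].save("cabin_inpainted.png")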
|
v0.36.0/lpw_stable_diffusion_onnx.py
ADDED
|
@@ -0,0 +1,1148 @@
| 1 |
+
import inspect
|
| 2 |
+
import re
|
| 3 |
+
from typing import Callable, List, Optional, Union
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import PIL.Image
|
| 7 |
+
import torch
|
| 8 |
+
from packaging import version
|
| 9 |
+
from transformers import CLIPImageProcessor, CLIPTokenizer
|
| 10 |
+
|
| 11 |
+
import diffusers
|
| 12 |
+
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, SchedulerMixin
|
| 13 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 14 |
+
from diffusers.utils import logging
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
try:
|
| 18 |
+
from diffusers.pipelines.onnx_utils import ORT_TO_NP_TYPE
|
| 19 |
+
except ImportError:
|
| 20 |
+
ORT_TO_NP_TYPE = {
|
| 21 |
+
"tensor(bool)": np.bool_,
|
| 22 |
+
"tensor(int8)": np.int8,
|
| 23 |
+
"tensor(uint8)": np.uint8,
|
| 24 |
+
"tensor(int16)": np.int16,
|
| 25 |
+
"tensor(uint16)": np.uint16,
|
| 26 |
+
"tensor(int32)": np.int32,
|
| 27 |
+
"tensor(uint32)": np.uint32,
|
| 28 |
+
"tensor(int64)": np.int64,
|
| 29 |
+
"tensor(uint64)": np.uint64,
|
| 30 |
+
"tensor(float16)": np.float16,
|
| 31 |
+
"tensor(float)": np.float32,
|
| 32 |
+
"tensor(double)": np.float64,
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
try:
|
| 36 |
+
from diffusers.utils import PIL_INTERPOLATION
|
| 37 |
+
except ImportError:
|
| 38 |
+
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
|
| 39 |
+
PIL_INTERPOLATION = {
|
| 40 |
+
"linear": PIL.Image.Resampling.BILINEAR,
|
| 41 |
+
"bilinear": PIL.Image.Resampling.BILINEAR,
|
| 42 |
+
"bicubic": PIL.Image.Resampling.BICUBIC,
|
| 43 |
+
"lanczos": PIL.Image.Resampling.LANCZOS,
|
| 44 |
+
"nearest": PIL.Image.Resampling.NEAREST,
|
| 45 |
+
}
|
| 46 |
+
else:
|
| 47 |
+
PIL_INTERPOLATION = {
|
| 48 |
+
"linear": PIL.Image.LINEAR,
|
| 49 |
+
"bilinear": PIL.Image.BILINEAR,
|
| 50 |
+
"bicubic": PIL.Image.BICUBIC,
|
| 51 |
+
"lanczos": PIL.Image.LANCZOS,
|
| 52 |
+
"nearest": PIL.Image.NEAREST,
|
| 53 |
+
}
|
| 54 |
+
# ------------------------------------------------------------------------------
|
| 55 |
+
|
| 56 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 57 |
+
|
| 58 |
+
re_attention = re.compile(
|
| 59 |
+
r"""
|
| 60 |
+
\\\(|
|
| 61 |
+
\\\)|
|
| 62 |
+
\\\[|
|
| 63 |
+
\\]|
|
| 64 |
+
\\\\|
|
| 65 |
+
\\|
|
| 66 |
+
\(|
|
| 67 |
+
\[|
|
| 68 |
+
:([+-]?[.\d]+)\)|
|
| 69 |
+
\)|
|
| 70 |
+
]|
|
| 71 |
+
[^\\()\[\]:]+|
|
| 72 |
+
:
|
| 73 |
+
""",
|
| 74 |
+
re.X,
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def parse_prompt_attention(text):
|
| 79 |
+
"""
|
| 80 |
+
Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
|
| 81 |
+
Accepted tokens are:
|
| 82 |
+
(abc) - increases attention to abc by a multiplier of 1.1
|
| 83 |
+
(abc:3.12) - increases attention to abc by a multiplier of 3.12
|
| 84 |
+
[abc] - decreases attention to abc by a multiplier of 1.1
|
| 85 |
+
\\( - literal character '('
|
| 86 |
+
\\[ - literal character '['
|
| 87 |
+
\\) - literal character ')'
|
| 88 |
+
\\] - literal character ']'
|
| 89 |
+
\\ - literal character '\'
|
| 90 |
+
anything else - just text
|
| 91 |
+
>>> parse_prompt_attention('normal text')
|
| 92 |
+
[['normal text', 1.0]]
|
| 93 |
+
>>> parse_prompt_attention('an (important) word')
|
| 94 |
+
[['an ', 1.0], ['important', 1.1], [' word', 1.0]]
|
| 95 |
+
>>> parse_prompt_attention('(unbalanced')
|
| 96 |
+
[['unbalanced', 1.1]]
|
| 97 |
+
>>> parse_prompt_attention('\\(literal\\]')
|
| 98 |
+
[['(literal]', 1.0]]
|
| 99 |
+
>>> parse_prompt_attention('(unnecessary)(parens)')
|
| 100 |
+
[['unnecessaryparens', 1.1]]
|
| 101 |
+
>>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
|
| 102 |
+
[['a ', 1.0],
|
| 103 |
+
['house', 1.5730000000000004],
|
| 104 |
+
[' ', 1.1],
|
| 105 |
+
['on', 1.0],
|
| 106 |
+
[' a ', 1.1],
|
| 107 |
+
['hill', 0.55],
|
| 108 |
+
[', sun, ', 1.1],
|
| 109 |
+
['sky', 1.4641000000000006],
|
| 110 |
+
['.', 1.1]]
|
| 111 |
+
"""
|
| 112 |
+
|
| 113 |
+
res = []
|
| 114 |
+
round_brackets = []
|
| 115 |
+
square_brackets = []
|
| 116 |
+
|
| 117 |
+
round_bracket_multiplier = 1.1
|
| 118 |
+
square_bracket_multiplier = 1 / 1.1
|
| 119 |
+
|
| 120 |
+
def multiply_range(start_position, multiplier):
|
| 121 |
+
for p in range(start_position, len(res)):
|
| 122 |
+
res[p][1] *= multiplier
|
| 123 |
+
|
| 124 |
+
for m in re_attention.finditer(text):
|
| 125 |
+
text = m.group(0)
|
| 126 |
+
weight = m.group(1)
|
| 127 |
+
|
| 128 |
+
if text.startswith("\\"):
|
| 129 |
+
res.append([text[1:], 1.0])
|
| 130 |
+
elif text == "(":
|
| 131 |
+
round_brackets.append(len(res))
|
| 132 |
+
elif text == "[":
|
| 133 |
+
square_brackets.append(len(res))
|
| 134 |
+
elif weight is not None and len(round_brackets) > 0:
|
| 135 |
+
multiply_range(round_brackets.pop(), float(weight))
|
| 136 |
+
elif text == ")" and len(round_brackets) > 0:
|
| 137 |
+
multiply_range(round_brackets.pop(), round_bracket_multiplier)
|
| 138 |
+
elif text == "]" and len(square_brackets) > 0:
|
| 139 |
+
multiply_range(square_brackets.pop(), square_bracket_multiplier)
|
| 140 |
+
else:
|
| 141 |
+
res.append([text, 1.0])
|
| 142 |
+
|
| 143 |
+
for pos in round_brackets:
|
| 144 |
+
multiply_range(pos, round_bracket_multiplier)
|
| 145 |
+
|
| 146 |
+
for pos in square_brackets:
|
| 147 |
+
multiply_range(pos, square_bracket_multiplier)
|
| 148 |
+
|
| 149 |
+
if len(res) == 0:
|
| 150 |
+
res = [["", 1.0]]
|
| 151 |
+
|
| 152 |
+
# merge runs of identical weights
|
| 153 |
+
i = 0
|
| 154 |
+
while i + 1 < len(res):
|
| 155 |
+
if res[i][1] == res[i + 1][1]:
|
| 156 |
+
res[i][0] += res[i + 1][0]
|
| 157 |
+
res.pop(i + 1)
|
| 158 |
+
else:
|
| 159 |
+
i += 1
|
| 160 |
+
|
| 161 |
+
return res
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def get_prompts_with_weights(pipe, prompt: List[str], max_length: int):
|
| 165 |
+
r"""
|
| 166 |
+
Tokenize a list of prompts and return their tokens along with the weight of each token.
|
| 167 |
+
|
| 168 |
+
No padding, starting or ending token is included.
|
| 169 |
+
"""
|
| 170 |
+
tokens = []
|
| 171 |
+
weights = []
|
| 172 |
+
truncated = False
|
| 173 |
+
for text in prompt:
|
| 174 |
+
texts_and_weights = parse_prompt_attention(text)
|
| 175 |
+
text_token = []
|
| 176 |
+
text_weight = []
|
| 177 |
+
for word, weight in texts_and_weights:
|
| 178 |
+
# tokenize and discard the starting and the ending token
|
| 179 |
+
token = pipe.tokenizer(word, return_tensors="np").input_ids[0, 1:-1]
|
| 180 |
+
text_token += list(token)
|
| 181 |
+
# copy the weight by length of token
|
| 182 |
+
text_weight += [weight] * len(token)
|
| 183 |
+
# stop if the text is too long (longer than truncation limit)
|
| 184 |
+
if len(text_token) > max_length:
|
| 185 |
+
truncated = True
|
| 186 |
+
break
|
| 187 |
+
# truncate
|
| 188 |
+
if len(text_token) > max_length:
|
| 189 |
+
truncated = True
|
| 190 |
+
text_token = text_token[:max_length]
|
| 191 |
+
text_weight = text_weight[:max_length]
|
| 192 |
+
tokens.append(text_token)
|
| 193 |
+
weights.append(text_weight)
|
| 194 |
+
if truncated:
|
| 195 |
+
logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
|
| 196 |
+
return tokens, weights
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
|
| 200 |
+
r"""
|
| 201 |
+
Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
|
| 202 |
+
"""
|
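# e.g. with chunk_length=77 (CLIP) and max_length=227, this gives (227 - 2) // (77 - 2) = 3 chunks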
| 203 |
+
max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
|
| 204 |
+
weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
|
| 205 |
+
for i in range(len(tokens)):
|
| 206 |
+
tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
|
| 207 |
+
if no_boseos_middle:
|
| 208 |
+
weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
|
| 209 |
+
else:
|
| 210 |
+
w = []
|
| 211 |
+
if len(weights[i]) == 0:
|
| 212 |
+
w = [1.0] * weights_length
|
| 213 |
+
else:
|
| 214 |
+
for j in range(max_embeddings_multiples):
|
| 215 |
+
w.append(1.0) # weight for starting token in this chunk
|
| 216 |
+
w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
|
| 217 |
+
w.append(1.0) # weight for ending token in this chunk
|
| 218 |
+
w += [1.0] * (weights_length - len(w))
|
| 219 |
+
weights[i] = w[:]
|
| 220 |
+
|
| 221 |
+
return tokens, weights
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def get_unweighted_text_embeddings(
|
| 225 |
+
pipe,
|
| 226 |
+
text_input: np.array,
|
| 227 |
+
chunk_length: int,
|
| 228 |
+
no_boseos_middle: Optional[bool] = True,
|
| 229 |
+
):
|
| 230 |
+
"""
|
| 231 |
+
When the length of the tokens exceeds the capacity of the text encoder,
|
| 232 |
+
the input is split into chunks and each chunk is sent to the text encoder individually.
|
| 233 |
+
"""
|
| 234 |
+
max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
|
| 235 |
+
if max_embeddings_multiples > 1:
|
| 236 |
+
text_embeddings = []
|
| 237 |
+
for i in range(max_embeddings_multiples):
|
| 238 |
+
# extract the i-th chunk
|
| 239 |
+
text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].copy()
|
| 240 |
+
|
| 241 |
+
# cover the head and the tail by the starting and the ending tokens
|
| 242 |
+
text_input_chunk[:, 0] = text_input[0, 0]
|
| 243 |
+
text_input_chunk[:, -1] = text_input[0, -1]
|
| 244 |
+
|
| 245 |
+
text_embedding = pipe.text_encoder(input_ids=text_input_chunk)[0]
|
| 246 |
+
|
| 247 |
+
if no_boseos_middle:
|
| 248 |
+
if i == 0:
|
| 249 |
+
# discard the ending token
|
| 250 |
+
text_embedding = text_embedding[:, :-1]
|
| 251 |
+
elif i == max_embeddings_multiples - 1:
|
| 252 |
+
# discard the starting token
|
| 253 |
+
text_embedding = text_embedding[:, 1:]
|
| 254 |
+
else:
|
| 255 |
+
# discard both starting and ending tokens
|
| 256 |
+
text_embedding = text_embedding[:, 1:-1]
|
| 257 |
+
|
| 258 |
+
text_embeddings.append(text_embedding)
|
| 259 |
+
text_embeddings = np.concatenate(text_embeddings, axis=1)
|
| 260 |
+
else:
|
| 261 |
+
text_embeddings = pipe.text_encoder(input_ids=text_input)[0]
|
| 262 |
+
return text_embeddings
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def get_weighted_text_embeddings(
|
| 266 |
+
pipe,
|
| 267 |
+
prompt: Union[str, List[str]],
|
| 268 |
+
uncond_prompt: Optional[Union[str, List[str]]] = None,
|
| 269 |
+
max_embeddings_multiples: Optional[int] = 4,
|
| 270 |
+
no_boseos_middle: Optional[bool] = False,
|
| 271 |
+
skip_parsing: Optional[bool] = False,
|
| 272 |
+
skip_weighting: Optional[bool] = False,
|
| 273 |
+
**kwargs,
|
| 274 |
+
):
|
| 275 |
+
r"""
|
| 276 |
+
Prompts can be assigned with local weights using brackets. For example,
|
| 277 |
+
prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
|
| 278 |
+
and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
|
| 279 |
+
|
| 280 |
+
Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.
|
| 281 |
+
|
| 282 |
+
Args:
|
| 283 |
+
pipe (`OnnxStableDiffusionPipeline`):
|
| 284 |
+
Pipe to provide access to the tokenizer and the text encoder.
|
| 285 |
+
prompt (`str` or `List[str]`):
|
| 286 |
+
The prompt or prompts to guide the image generation.
|
| 287 |
+
uncond_prompt (`str` or `List[str]`):
|
| 288 |
+
The unconditional prompt or prompts used for classifier-free guidance. If an unconditional prompt
|
| 289 |
+
is provided, the embeddings of prompt and uncond_prompt are concatenated.
|
| 290 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `4`):
|
| 291 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 292 |
+
no_boseos_middle (`bool`, *optional*, defaults to `False`):
|
| 293 |
+
If the length of the text tokens is a multiple of the capacity of the text encoder, this controls whether the
|
| 294 |
+
starting and ending tokens are kept in each of the middle chunks.
|
| 295 |
+
skip_parsing (`bool`, *optional*, defaults to `False`):
|
| 296 |
+
Skip the parsing of brackets.
|
| 297 |
+
skip_weighting (`bool`, *optional*, defaults to `False`):
|
| 298 |
+
Skip the weighting. When the parsing is skipped, it is forced True.
|
| 299 |
+
"""
|
| 300 |
+
max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
|
| 301 |
+
if isinstance(prompt, str):
|
| 302 |
+
prompt = [prompt]
|
| 303 |
+
|
| 304 |
+
if not skip_parsing:
|
| 305 |
+
prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
|
| 306 |
+
if uncond_prompt is not None:
|
| 307 |
+
if isinstance(uncond_prompt, str):
|
| 308 |
+
uncond_prompt = [uncond_prompt]
|
| 309 |
+
uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
|
| 310 |
+
else:
|
| 311 |
+
prompt_tokens = [
|
| 312 |
+
token[1:-1]
|
| 313 |
+
for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True, return_tensors="np").input_ids
|
| 314 |
+
]
|
| 315 |
+
prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
|
| 316 |
+
if uncond_prompt is not None:
|
| 317 |
+
if isinstance(uncond_prompt, str):
|
| 318 |
+
uncond_prompt = [uncond_prompt]
|
| 319 |
+
uncond_tokens = [
|
| 320 |
+
token[1:-1]
|
| 321 |
+
for token in pipe.tokenizer(
|
| 322 |
+
uncond_prompt,
|
| 323 |
+
max_length=max_length,
|
| 324 |
+
truncation=True,
|
| 325 |
+
return_tensors="np",
|
| 326 |
+
).input_ids
|
| 327 |
+
]
|
| 328 |
+
uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
|
| 329 |
+
|
| 330 |
+
# round up the longest length of tokens to a multiple of (model_max_length - 2)
|
| 331 |
+
max_length = max([len(token) for token in prompt_tokens])
|
| 332 |
+
if uncond_prompt is not None:
|
| 333 |
+
max_length = max(max_length, max([len(token) for token in uncond_tokens]))
|
| 334 |
+
|
| 335 |
+
max_embeddings_multiples = min(
|
| 336 |
+
max_embeddings_multiples,
|
| 337 |
+
(max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
|
| 338 |
+
)
|
| 339 |
+
max_embeddings_multiples = max(1, max_embeddings_multiples)
|
| 340 |
+
max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
|
| 341 |
+
|
| 342 |
+
# pad the length of tokens and weights
|
| 343 |
+
bos = pipe.tokenizer.bos_token_id
|
| 344 |
+
eos = pipe.tokenizer.eos_token_id
|
| 345 |
+
pad = getattr(pipe.tokenizer, "pad_token_id", eos)
|
| 346 |
+
prompt_tokens, prompt_weights = pad_tokens_and_weights(
|
| 347 |
+
prompt_tokens,
|
| 348 |
+
prompt_weights,
|
| 349 |
+
max_length,
|
| 350 |
+
bos,
|
| 351 |
+
eos,
|
| 352 |
+
pad,
|
| 353 |
+
no_boseos_middle=no_boseos_middle,
|
| 354 |
+
chunk_length=pipe.tokenizer.model_max_length,
|
| 355 |
+
)
|
| 356 |
+
prompt_tokens = np.array(prompt_tokens, dtype=np.int32)
|
| 357 |
+
if uncond_prompt is not None:
|
| 358 |
+
uncond_tokens, uncond_weights = pad_tokens_and_weights(
|
| 359 |
+
uncond_tokens,
|
| 360 |
+
uncond_weights,
|
| 361 |
+
max_length,
|
| 362 |
+
bos,
|
| 363 |
+
eos,
|
| 364 |
+
pad,
|
| 365 |
+
no_boseos_middle=no_boseos_middle,
|
| 366 |
+
chunk_length=pipe.tokenizer.model_max_length,
|
| 367 |
+
)
|
| 368 |
+
uncond_tokens = np.array(uncond_tokens, dtype=np.int32)
|
| 369 |
+
|
| 370 |
+
# get the embeddings
|
| 371 |
+
text_embeddings = get_unweighted_text_embeddings(
|
| 372 |
+
pipe,
|
| 373 |
+
prompt_tokens,
|
| 374 |
+
pipe.tokenizer.model_max_length,
|
| 375 |
+
no_boseos_middle=no_boseos_middle,
|
| 376 |
+
)
|
| 377 |
+
prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype)
|
| 378 |
+
if uncond_prompt is not None:
|
| 379 |
+
uncond_embeddings = get_unweighted_text_embeddings(
|
| 380 |
+
pipe,
|
| 381 |
+
uncond_tokens,
|
| 382 |
+
pipe.tokenizer.model_max_length,
|
| 383 |
+
no_boseos_middle=no_boseos_middle,
|
| 384 |
+
)
|
| 385 |
+
uncond_weights = np.array(uncond_weights, dtype=uncond_embeddings.dtype)
|
| 386 |
+
|
| 387 |
+
# assign weights to the prompts and normalize in the sense of mean
|
| 388 |
+
# TODO: should we normalize by chunk or in a whole (current implementation)?
|
| 389 |
+
if (not skip_parsing) and (not skip_weighting):
|
| 390 |
+
previous_mean = text_embeddings.mean(axis=(-2, -1))
|
| 391 |
+
text_embeddings *= prompt_weights[:, :, None]
|
| 392 |
+
text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None]
|
| 393 |
+
if uncond_prompt is not None:
|
| 394 |
+
previous_mean = uncond_embeddings.mean(axis=(-2, -1))
|
| 395 |
+
uncond_embeddings *= uncond_weights[:, :, None]
|
| 396 |
+
uncond_embeddings *= (previous_mean / uncond_embeddings.mean(axis=(-2, -1)))[:, None, None]
|
| 397 |
+
|
| 398 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 399 |
+
# The caller later concatenates the unconditional and text embeddings into a single batch
|
| 400 |
+
# (see `_encode_prompt`) to avoid doing two forward passes
|
| 401 |
+
if uncond_prompt is not None:
|
| 402 |
+
return text_embeddings, uncond_embeddings
|
| 403 |
+
|
| 404 |
+
return text_embeddings
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def preprocess_image(image):
|
| 408 |
+
w, h = image.size
|
| 409 |
+
w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
|
| 410 |
+
image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
|
| 411 |
+
image = np.array(image).astype(np.float32) / 255.0
|
| 412 |
+
image = image[None].transpose(0, 3, 1, 2)
|
| 413 |
+
return 2.0 * image - 1.0
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
def preprocess_mask(mask, scale_factor=8):
|
| 417 |
+
mask = mask.convert("L")
|
| 418 |
+
w, h = mask.size
|
| 419 |
+
w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
|
| 420 |
+
mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
|
| 421 |
+
mask = np.array(mask).astype(np.float32) / 255.0
|
| 422 |
+
mask = np.tile(mask, (4, 1, 1))
|
| 423 |
+
mask = mask[None].transpose(0, 1, 2, 3)  # add a batch dimension; the transpose is an identity permutation (no-op)
|
| 424 |
+
mask = 1 - mask # repaint white, keep black
|
| 425 |
+
return mask
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline):
|
| 429 |
+
r"""
|
| 430 |
+
Pipeline for text-to-image generation using Stable Diffusion, without a token length limit and with support for
|
| 431 |
+
parsing weights in the prompt.
|
| 432 |
+
|
| 433 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 434 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 435 |
+
"""
|
| 436 |
+
|
| 437 |
+
if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"):
|
| 438 |
+
|
| 439 |
+
def __init__(
|
| 440 |
+
self,
|
| 441 |
+
vae_encoder: OnnxRuntimeModel,
|
| 442 |
+
vae_decoder: OnnxRuntimeModel,
|
| 443 |
+
text_encoder: OnnxRuntimeModel,
|
| 444 |
+
tokenizer: CLIPTokenizer,
|
| 445 |
+
unet: OnnxRuntimeModel,
|
| 446 |
+
scheduler: SchedulerMixin,
|
| 447 |
+
safety_checker: OnnxRuntimeModel,
|
| 448 |
+
feature_extractor: CLIPImageProcessor,
|
| 449 |
+
requires_safety_checker: bool = True,
|
| 450 |
+
):
|
| 451 |
+
super().__init__(
|
| 452 |
+
vae_encoder=vae_encoder,
|
| 453 |
+
vae_decoder=vae_decoder,
|
| 454 |
+
text_encoder=text_encoder,
|
| 455 |
+
tokenizer=tokenizer,
|
| 456 |
+
unet=unet,
|
| 457 |
+
scheduler=scheduler,
|
| 458 |
+
safety_checker=safety_checker,
|
| 459 |
+
feature_extractor=feature_extractor,
|
| 460 |
+
requires_safety_checker=requires_safety_checker,
|
| 461 |
+
)
|
| 462 |
+
self.__init__additional__()
|
| 463 |
+
|
| 464 |
+
else:
|
| 465 |
+
|
| 466 |
+
def __init__(
|
| 467 |
+
self,
|
| 468 |
+
vae_encoder: OnnxRuntimeModel,
|
| 469 |
+
vae_decoder: OnnxRuntimeModel,
|
| 470 |
+
text_encoder: OnnxRuntimeModel,
|
| 471 |
+
tokenizer: CLIPTokenizer,
|
| 472 |
+
unet: OnnxRuntimeModel,
|
| 473 |
+
scheduler: SchedulerMixin,
|
| 474 |
+
safety_checker: OnnxRuntimeModel,
|
| 475 |
+
feature_extractor: CLIPImageProcessor,
|
| 476 |
+
):
|
| 477 |
+
super().__init__(
|
| 478 |
+
vae_encoder=vae_encoder,
|
| 479 |
+
vae_decoder=vae_decoder,
|
| 480 |
+
text_encoder=text_encoder,
|
| 481 |
+
tokenizer=tokenizer,
|
| 482 |
+
unet=unet,
|
| 483 |
+
scheduler=scheduler,
|
| 484 |
+
safety_checker=safety_checker,
|
| 485 |
+
feature_extractor=feature_extractor,
|
| 486 |
+
)
|
| 487 |
+
self.__init__additional__()
|
| 488 |
+
|
| 489 |
+
def __init__additional__(self):
|
| 490 |
+
self.unet.config.in_channels = 4
|
| 491 |
+
self.vae_scale_factor = 8
|
| 492 |
+
|
| 493 |
+
def _encode_prompt(
|
| 494 |
+
self,
|
| 495 |
+
prompt,
|
| 496 |
+
num_images_per_prompt,
|
| 497 |
+
do_classifier_free_guidance,
|
| 498 |
+
negative_prompt,
|
| 499 |
+
max_embeddings_multiples,
|
| 500 |
+
):
|
| 501 |
+
r"""
|
| 502 |
+
Encodes the prompt into text encoder hidden states.
|
| 503 |
+
|
| 504 |
+
Args:
|
| 505 |
+
prompt (`str` or `list(int)`):
|
| 506 |
+
prompt to be encoded
|
| 507 |
+
num_images_per_prompt (`int`):
|
| 508 |
+
number of images that should be generated per prompt
|
| 509 |
+
do_classifier_free_guidance (`bool`):
|
| 510 |
+
whether to use classifier free guidance or not
|
| 511 |
+
negative_prompt (`str` or `List[str]`):
|
| 512 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 513 |
+
if `guidance_scale` is less than `1`).
|
| 514 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 515 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 516 |
+
"""
|
| 517 |
+
batch_size = len(prompt) if isinstance(prompt, list) else 1
|
| 518 |
+
|
| 519 |
+
if negative_prompt is None:
|
| 520 |
+
negative_prompt = [""] * batch_size
|
| 521 |
+
elif isinstance(negative_prompt, str):
|
| 522 |
+
negative_prompt = [negative_prompt] * batch_size
|
| 523 |
+
if batch_size != len(negative_prompt):
|
| 524 |
+
raise ValueError(
|
| 525 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 526 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 527 |
+
" the batch size of `prompt`."
|
| 528 |
+
)
|
| 529 |
+
|
| 530 |
+
text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
|
| 531 |
+
pipe=self,
|
| 532 |
+
prompt=prompt,
|
| 533 |
+
uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
|
| 534 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 535 |
+
)
|
| 536 |
+
|
| 537 |
+
text_embeddings = text_embeddings.repeat(num_images_per_prompt, 0)
|
| 538 |
+
if do_classifier_free_guidance:
|
| 539 |
+
uncond_embeddings = uncond_embeddings.repeat(num_images_per_prompt, 0)
|
| 540 |
+
text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])
|
| 541 |
+
|
| 542 |
+
return text_embeddings
|
| 543 |
+
|
| 544 |
+
def check_inputs(self, prompt, height, width, strength, callback_steps):
|
| 545 |
+
if not isinstance(prompt, str) and not isinstance(prompt, list):
|
| 546 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 547 |
+
|
| 548 |
+
if strength < 0 or strength > 1:
|
| 549 |
+
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
|
| 550 |
+
|
| 551 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 552 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 553 |
+
|
| 554 |
+
if (callback_steps is None) or (
|
| 555 |
+
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
|
| 556 |
+
):
|
| 557 |
+
raise ValueError(
|
| 558 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 559 |
+
f" {type(callback_steps)}."
|
| 560 |
+
)
|
| 561 |
+
|
| 562 |
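# Illustrative example: for img2img/inpaint, strength=0.8 with num_inference_steps=50 and steps_offset=0
# keeps the last 40 scheduler timesteps (t_start = 10); text2img always uses the full schedule.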
+
def get_timesteps(self, num_inference_steps, strength, is_text2img):
|
| 563 |
+
if is_text2img:
|
| 564 |
+
return self.scheduler.timesteps, num_inference_steps
|
| 565 |
+
else:
|
| 566 |
+
# get the original timestep using init_timestep
|
| 567 |
+
offset = self.scheduler.config.get("steps_offset", 0)
|
| 568 |
+
init_timestep = int(num_inference_steps * strength) + offset
|
| 569 |
+
init_timestep = min(init_timestep, num_inference_steps)
|
| 570 |
+
|
| 571 |
+
t_start = max(num_inference_steps - init_timestep + offset, 0)
|
| 572 |
+
timesteps = self.scheduler.timesteps[t_start:]
|
| 573 |
+
return timesteps, num_inference_steps - t_start
|
| 574 |
+
|
| 575 |
+
def run_safety_checker(self, image):
|
| 576 |
+
if self.safety_checker is not None:
|
| 577 |
+
safety_checker_input = self.feature_extractor(
|
| 578 |
+
self.numpy_to_pil(image), return_tensors="np"
|
| 579 |
+
).pixel_values.astype(image.dtype)
|
| 580 |
+
# The safety_checker raises an error when called directly with batch size > 1, so run it one image at a time
|
| 581 |
+
images, has_nsfw_concept = [], []
|
| 582 |
+
for i in range(image.shape[0]):
|
| 583 |
+
image_i, has_nsfw_concept_i = self.safety_checker(
|
| 584 |
+
clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
|
| 585 |
+
)
|
| 586 |
+
images.append(image_i)
|
| 587 |
+
has_nsfw_concept.append(has_nsfw_concept_i[0])
|
| 588 |
+
image = np.concatenate(images)
|
| 589 |
+
else:
|
| 590 |
+
has_nsfw_concept = None
|
| 591 |
+
return image, has_nsfw_concept
|
| 592 |
+
|
| 593 |
+
def decode_latents(self, latents):
|
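# 0.18215 is the latent scaling factor of the Stable Diffusion VAE; undo it before decoding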
| 594 |
+
latents = 1 / 0.18215 * latents
|
| 595 |
+
# image = self.vae_decoder(latent_sample=latents)[0]
|
| 596 |
+
# the half-precision VAE decoder can produce incorrect results when batch size > 1, so decode latents one at a time
|
| 597 |
+
image = np.concatenate(
|
| 598 |
+
[self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
|
| 599 |
+
)
|
| 600 |
+
image = np.clip(image / 2 + 0.5, 0, 1)
|
| 601 |
+
image = image.transpose((0, 2, 3, 1))
|
| 602 |
+
return image
|
| 603 |
+
|
| 604 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 605 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 606 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 607 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 608 |
+
# and should be between [0, 1]
|
| 609 |
+
|
| 610 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 611 |
+
extra_step_kwargs = {}
|
| 612 |
+
if accepts_eta:
|
| 613 |
+
extra_step_kwargs["eta"] = eta
|
| 614 |
+
|
| 615 |
+
# check if the scheduler accepts generator
|
| 616 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 617 |
+
if accepts_generator:
|
| 618 |
+
extra_step_kwargs["generator"] = generator
|
| 619 |
+
return extra_step_kwargs
|
| 620 |
+
|
| 621 |
+
def prepare_latents(self, image, timestep, batch_size, height, width, dtype, generator, latents=None):
|
| 622 |
+
if image is None:
|
| 623 |
+
shape = (
|
| 624 |
+
batch_size,
|
| 625 |
+
self.unet.config.in_channels,
|
| 626 |
+
height // self.vae_scale_factor,
|
| 627 |
+
width // self.vae_scale_factor,
|
| 628 |
+
)
|
| 629 |
+
|
| 630 |
+
if latents is None:
|
| 631 |
+
latents = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
|
| 632 |
+
else:
|
| 633 |
+
if latents.shape != shape:
|
| 634 |
+
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
|
| 635 |
+
|
| 636 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 637 |
+
latents = (torch.from_numpy(latents) * self.scheduler.init_noise_sigma).numpy()
|
| 638 |
+
return latents, None, None
|
| 639 |
+
else:
|
| 640 |
+
init_latents = self.vae_encoder(sample=image)[0]
|
| 641 |
+
init_latents = 0.18215 * init_latents
|
| 642 |
+
init_latents = np.concatenate([init_latents] * batch_size, axis=0)
|
| 643 |
+
init_latents_orig = init_latents
|
| 644 |
+
shape = init_latents.shape
|
| 645 |
+
|
| 646 |
+
# add noise to latents using the timesteps
|
| 647 |
+
noise = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
|
| 648 |
+
latents = self.scheduler.add_noise(
|
| 649 |
+
torch.from_numpy(init_latents), torch.from_numpy(noise), timestep
|
| 650 |
+
).numpy()
|
| 651 |
+
return latents, init_latents_orig, noise
|
| 652 |
+
|
| 653 |
+
@torch.no_grad()
|
| 654 |
+
def __call__(
|
| 655 |
+
self,
|
| 656 |
+
prompt: Union[str, List[str]],
|
| 657 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 658 |
+
image: Union[np.ndarray, PIL.Image.Image] = None,
|
| 659 |
+
mask_image: Union[np.ndarray, PIL.Image.Image] = None,
|
| 660 |
+
height: int = 512,
|
| 661 |
+
width: int = 512,
|
| 662 |
+
num_inference_steps: int = 50,
|
| 663 |
+
guidance_scale: float = 7.5,
|
| 664 |
+
strength: float = 0.8,
|
| 665 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 666 |
+
eta: float = 0.0,
|
| 667 |
+
generator: Optional[torch.Generator] = None,
|
| 668 |
+
latents: Optional[np.ndarray] = None,
|
| 669 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 670 |
+
output_type: Optional[str] = "pil",
|
| 671 |
+
return_dict: bool = True,
|
| 672 |
+
callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
|
| 673 |
+
is_cancelled_callback: Optional[Callable[[], bool]] = None,
|
| 674 |
+
callback_steps: int = 1,
|
| 675 |
+
**kwargs,
|
| 676 |
+
):
|
| 677 |
+
r"""
|
| 678 |
+
Function invoked when calling the pipeline for generation.
|
| 679 |
+
|
| 680 |
+
Args:
|
| 681 |
+
prompt (`str` or `List[str]`):
|
| 682 |
+
The prompt or prompts to guide the image generation.
|
| 683 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 684 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 685 |
+
if `guidance_scale` is less than `1`).
|
| 686 |
+
image (`np.ndarray` or `PIL.Image.Image`):
|
| 687 |
+
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
| 688 |
+
process.
|
| 689 |
+
mask_image (`np.ndarray` or `PIL.Image.Image`):
|
| 690 |
+
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
| 691 |
+
replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
|
| 692 |
+
PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
|
| 693 |
+
contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
| 694 |
+
height (`int`, *optional*, defaults to 512):
|
| 695 |
+
The height in pixels of the generated image.
|
| 696 |
+
width (`int`, *optional*, defaults to 512):
|
| 697 |
+
The width in pixels of the generated image.
|
| 698 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 699 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 700 |
+
expense of slower inference.
|
| 701 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 702 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 703 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 704 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 705 |
+
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 706 |
+
usually at the expense of lower image quality.
|
| 707 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 708 |
+
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
|
| 709 |
+
`image` will be used as a starting point, adding more noise to it the larger the `strength`. The
|
| 710 |
+
number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
|
| 711 |
+
noise will be maximum and the denoising process will run for the full number of iterations specified in
|
| 712 |
+
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
|
| 713 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 714 |
+
The number of images to generate per prompt.
|
| 715 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 716 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 717 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 718 |
+
generator (`torch.Generator`, *optional*):
|
| 719 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 720 |
+
deterministic.
|
| 721 |
+
latents (`np.ndarray`, *optional*):
|
| 722 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 723 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 724 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 725 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 726 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 727 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 728 |
+
The output format of the generated image. Choose between
|
| 729 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 730 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 731 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 732 |
+
plain tuple.
|
| 733 |
+
callback (`Callable`, *optional*):
|
| 734 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 735 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
|
| 736 |
+
is_cancelled_callback (`Callable`, *optional*):
|
| 737 |
+
A function that will be called every `callback_steps` steps during inference. If the function returns
|
| 738 |
+
`True`, the inference will be cancelled.
|
| 739 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 740 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 741 |
+
called at every step.
|
| 742 |
+
|
| 743 |
+
Returns:
|
| 744 |
+
`None` if cancelled by `is_cancelled_callback`,
|
| 745 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 746 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 747 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 748 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 749 |
+
(nsfw) content, according to the `safety_checker`.
|
| 750 |
+
"""
|
| 751 |
+
# 0. Default height and width to unet
|
| 752 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 753 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 754 |
+
|
| 755 |
+
# 1. Check inputs. Raise error if not correct
|
| 756 |
+
self.check_inputs(prompt, height, width, strength, callback_steps)
|
| 757 |
+
|
| 758 |
+
# 2. Define call parameters
|
| 759 |
+
batch_size = 1 if isinstance(prompt, str) else len(prompt)
|
| 760 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 761 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 762 |
+
# corresponds to doing no classifier free guidance.
|
| 763 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 764 |
+
|
| 765 |
+
# 3. Encode input prompt
|
| 766 |
+
text_embeddings = self._encode_prompt(
|
| 767 |
+
prompt,
|
| 768 |
+
num_images_per_prompt,
|
| 769 |
+
do_classifier_free_guidance,
|
| 770 |
+
negative_prompt,
|
| 771 |
+
max_embeddings_multiples,
|
| 772 |
+
)
|
| 773 |
+
dtype = text_embeddings.dtype
|
| 774 |
+
|
| 775 |
+
# 4. Preprocess image and mask
|
| 776 |
+
if isinstance(image, PIL.Image.Image):
|
| 777 |
+
image = preprocess_image(image)
|
| 778 |
+
if image is not None:
|
| 779 |
+
image = image.astype(dtype)
|
| 780 |
+
if isinstance(mask_image, PIL.Image.Image):
|
| 781 |
+
mask_image = preprocess_mask(mask_image, self.vae_scale_factor)
|
| 782 |
+
if mask_image is not None:
|
| 783 |
+
mask = mask_image.astype(dtype)
|
| 784 |
+
mask = np.concatenate([mask] * batch_size * num_images_per_prompt)
|
| 785 |
+
else:
|
| 786 |
+
mask = None
|
| 787 |
+
|
| 788 |
+
# 5. set timesteps
|
| 789 |
+
self.scheduler.set_timesteps(num_inference_steps)
|
| 790 |
+
timestep_dtype = next(
|
| 791 |
+
(input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
|
| 792 |
+
)
|
| 793 |
+
timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]
|
| 794 |
+
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, image is None)
|
| 795 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 796 |
+
|
| 797 |
+
# 6. Prepare latent variables
|
| 798 |
+
latents, init_latents_orig, noise = self.prepare_latents(
|
| 799 |
+
image,
|
| 800 |
+
latent_timestep,
|
| 801 |
+
batch_size * num_images_per_prompt,
|
| 802 |
+
height,
|
| 803 |
+
width,
|
| 804 |
+
dtype,
|
| 805 |
+
generator,
|
| 806 |
+
latents,
|
| 807 |
+
)
|
| 808 |
+
|
| 809 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 810 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 811 |
+
|
| 812 |
+
# 8. Denoising loop
|
| 813 |
+
for i, t in enumerate(self.progress_bar(timesteps)):
|
| 814 |
+
# expand the latents if we are doing classifier free guidance
|
| 815 |
+
latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
|
| 816 |
+
latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t)
|
| 817 |
+
latent_model_input = latent_model_input.numpy()
|
| 818 |
+
|
| 819 |
+
# predict the noise residual
|
| 820 |
+
noise_pred = self.unet(
|
| 821 |
+
sample=latent_model_input,
|
| 822 |
+
timestep=np.array([t], dtype=timestep_dtype),
|
| 823 |
+
encoder_hidden_states=text_embeddings,
|
| 824 |
+
)
|
| 825 |
+
noise_pred = noise_pred[0]
|
| 826 |
+
|
| 827 |
+
# perform guidance
|
| 828 |
+
if do_classifier_free_guidance:
|
| 829 |
+
noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
|
| 830 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 831 |
+
|
| 832 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 833 |
+
scheduler_output = self.scheduler.step(
|
| 834 |
+
torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
|
| 835 |
+
)
|
| 836 |
+
latents = scheduler_output.prev_sample.numpy()
|
| 837 |
+
|
| 838 |
+
if mask is not None:
|
| 839 |
+
# masking
|
| 840 |
+
init_latents_proper = self.scheduler.add_noise(
|
| 841 |
+
torch.from_numpy(init_latents_orig),
|
| 842 |
+
torch.from_numpy(noise),
|
| 843 |
+
t,
|
| 844 |
+
).numpy()
|
| 845 |
+
latents = (init_latents_proper * mask) + (latents * (1 - mask))
|
| 846 |
+
|
| 847 |
+
# call the callback, if provided
|
| 848 |
+
if i % callback_steps == 0:
|
| 849 |
+
if callback is not None:
|
| 850 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 851 |
+
callback(step_idx, t, latents)
|
| 852 |
+
if is_cancelled_callback is not None and is_cancelled_callback():
|
| 853 |
+
return None
|
| 854 |
+
|
| 855 |
+
# 9. Post-processing
|
| 856 |
+
image = self.decode_latents(latents)
|
| 857 |
+
|
| 858 |
+
# 10. Run safety checker
|
| 859 |
+
image, has_nsfw_concept = self.run_safety_checker(image)
|
| 860 |
+
|
| 861 |
+
# 11. Convert to PIL
|
| 862 |
+
if output_type == "pil":
|
| 863 |
+
image = self.numpy_to_pil(image)
|
| 864 |
+
|
| 865 |
+
if not return_dict:
|
| 866 |
+
return image, has_nsfw_concept
|
| 867 |
+
|
| 868 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
| 869 |
+
|
| 870 |
+
def text2img(
|
| 871 |
+
self,
|
| 872 |
+
prompt: Union[str, List[str]],
|
| 873 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 874 |
+
height: int = 512,
|
| 875 |
+
width: int = 512,
|
| 876 |
+
num_inference_steps: int = 50,
|
| 877 |
+
guidance_scale: float = 7.5,
|
| 878 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 879 |
+
eta: float = 0.0,
|
| 880 |
+
generator: Optional[torch.Generator] = None,
|
| 881 |
+
latents: Optional[np.ndarray] = None,
|
| 882 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 883 |
+
output_type: Optional[str] = "pil",
|
| 884 |
+
return_dict: bool = True,
|
| 885 |
+
callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
|
| 886 |
+
callback_steps: int = 1,
|
| 887 |
+
**kwargs,
|
| 888 |
+
):
|
| 889 |
+
r"""
|
| 890 |
+
Function for text-to-image generation.
|
| 891 |
+
Args:
|
| 892 |
+
prompt (`str` or `List[str]`):
|
| 893 |
+
The prompt or prompts to guide the image generation.
|
| 894 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 895 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 896 |
+
if `guidance_scale` is less than `1`).
|
| 897 |
+
height (`int`, *optional*, defaults to 512):
|
| 898 |
+
The height in pixels of the generated image.
|
| 899 |
+
width (`int`, *optional*, defaults to 512):
|
| 900 |
+
The width in pixels of the generated image.
|
| 901 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 902 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 903 |
+
expense of slower inference.
|
| 904 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 905 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 906 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 907 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 908 |
+
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 909 |
+
usually at the expense of lower image quality.
|
| 910 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 911 |
+
The number of images to generate per prompt.
|
| 912 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 913 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 914 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 915 |
+
generator (`torch.Generator`, *optional*):
|
| 916 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 917 |
+
deterministic.
|
| 918 |
+
latents (`np.ndarray`, *optional*):
|
| 919 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 920 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 921 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 922 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 923 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 924 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 925 |
+
The output format of the generated image. Choose between
|
| 926 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 927 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 928 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 929 |
+
plain tuple.
|
| 930 |
+
callback (`Callable`, *optional*):
|
| 931 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 932 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
|
| 933 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 934 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 935 |
+
called at every step.
|
| 936 |
+
Returns:
|
| 937 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 938 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 939 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 940 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 941 |
+
(nsfw) content, according to the `safety_checker`.
|
| 942 |
+
"""
|
| 943 |
+
return self.__call__(
|
| 944 |
+
prompt=prompt,
|
| 945 |
+
negative_prompt=negative_prompt,
|
| 946 |
+
height=height,
|
| 947 |
+
width=width,
|
| 948 |
+
num_inference_steps=num_inference_steps,
|
| 949 |
+
guidance_scale=guidance_scale,
|
| 950 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 951 |
+
eta=eta,
|
| 952 |
+
generator=generator,
|
| 953 |
+
latents=latents,
|
| 954 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 955 |
+
output_type=output_type,
|
| 956 |
+
return_dict=return_dict,
|
| 957 |
+
callback=callback,
|
| 958 |
+
callback_steps=callback_steps,
|
| 959 |
+
**kwargs,
|
| 960 |
+
)
|
| 961 |
+
|
| 962 |
+
def img2img(
|
| 963 |
+
self,
|
| 964 |
+
image: Union[np.ndarray, PIL.Image.Image],
|
| 965 |
+
prompt: Union[str, List[str]],
|
| 966 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 967 |
+
strength: float = 0.8,
|
| 968 |
+
num_inference_steps: Optional[int] = 50,
|
| 969 |
+
guidance_scale: Optional[float] = 7.5,
|
| 970 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 971 |
+
eta: Optional[float] = 0.0,
|
| 972 |
+
generator: Optional[torch.Generator] = None,
|
| 973 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 974 |
+
output_type: Optional[str] = "pil",
|
| 975 |
+
return_dict: bool = True,
|
| 976 |
+
callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
|
| 977 |
+
callback_steps: int = 1,
|
| 978 |
+
**kwargs,
|
| 979 |
+
):
|
| 980 |
+
r"""
|
| 981 |
+
Function for image-to-image generation.
|
| 982 |
+
Args:
|
| 983 |
+
image (`np.ndarray` or `PIL.Image.Image`):
|
| 984 |
+
`Image`, or ndarray representing an image batch, that will be used as the starting point for the
|
| 985 |
+
process.
|
| 986 |
+
prompt (`str` or `List[str]`):
|
| 987 |
+
The prompt or prompts to guide the image generation.
|
| 988 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 989 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 990 |
+
if `guidance_scale` is less than `1`).
|
| 991 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 992 |
+
Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
|
| 993 |
+
`image` will be used as a starting point, adding more noise to it the larger the `strength`. The
|
| 994 |
+
number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
|
| 995 |
+
noise will be maximum and the denoising process will run for the full number of iterations specified in
|
| 996 |
+
`num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
|
| 997 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 998 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 999 |
+
expense of slower inference. This parameter will be modulated by `strength`.
|
| 1000 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 1001 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 1002 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1003 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 1004 |
+
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 1005 |
+
usually at the expense of lower image quality.
|
| 1006 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1007 |
+
The number of images to generate per prompt.
|
| 1008 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1009 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 1010 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1011 |
+
generator (`torch.Generator`, *optional*):
|
| 1012 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 1013 |
+
deterministic.
|
| 1014 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 1015 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 1016 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1017 |
+
The output format of the generated image. Choose between
|
| 1018 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1019 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1020 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1021 |
+
plain tuple.
|
| 1022 |
+
callback (`Callable`, *optional*):
|
| 1023 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 1024 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
|
| 1025 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 1026 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 1027 |
+
called at every step.
|
| 1028 |
+
Returns:
|
| 1029 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 1030 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 1031 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 1032 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 1033 |
+
(nsfw) content, according to the `safety_checker`.
|
| 1034 |
+
"""
|
| 1035 |
+
return self.__call__(
|
| 1036 |
+
prompt=prompt,
|
| 1037 |
+
negative_prompt=negative_prompt,
|
| 1038 |
+
image=image,
|
| 1039 |
+
num_inference_steps=num_inference_steps,
|
| 1040 |
+
guidance_scale=guidance_scale,
|
| 1041 |
+
strength=strength,
|
| 1042 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1043 |
+
eta=eta,
|
| 1044 |
+
generator=generator,
|
| 1045 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 1046 |
+
output_type=output_type,
|
| 1047 |
+
return_dict=return_dict,
|
| 1048 |
+
callback=callback,
|
| 1049 |
+
callback_steps=callback_steps,
|
| 1050 |
+
**kwargs,
|
| 1051 |
+
)
|
| 1052 |
+
|
| 1053 |
+
def inpaint(
|
| 1054 |
+
self,
|
| 1055 |
+
image: Union[np.ndarray, PIL.Image.Image],
|
| 1056 |
+
mask_image: Union[np.ndarray, PIL.Image.Image],
|
| 1057 |
+
prompt: Union[str, List[str]],
|
| 1058 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1059 |
+
strength: float = 0.8,
|
| 1060 |
+
num_inference_steps: Optional[int] = 50,
|
| 1061 |
+
guidance_scale: Optional[float] = 7.5,
|
| 1062 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1063 |
+
eta: Optional[float] = 0.0,
|
| 1064 |
+
generator: Optional[torch.Generator] = None,
|
| 1065 |
+
max_embeddings_multiples: Optional[int] = 3,
|
| 1066 |
+
output_type: Optional[str] = "pil",
|
| 1067 |
+
return_dict: bool = True,
|
| 1068 |
+
callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
|
| 1069 |
+
callback_steps: int = 1,
|
| 1070 |
+
**kwargs,
|
| 1071 |
+
):
|
| 1072 |
+
r"""
|
| 1073 |
+
Function for inpaint.
|
| 1074 |
+
Args:
|
| 1075 |
+
image (`np.ndarray` or `PIL.Image.Image`):
|
| 1076 |
+
`Image`, or tensor representing an image batch, that will be used as the starting point for the
|
| 1077 |
+
process. This is the image whose masked region will be inpainted.
|
| 1078 |
+
mask_image (`np.ndarray` or `PIL.Image.Image`):
|
| 1079 |
+
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
| 1080 |
+
replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
|
| 1081 |
+
PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
|
| 1082 |
+
contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
| 1083 |
+
prompt (`str` or `List[str]`):
|
| 1084 |
+
The prompt or prompts to guide the image generation.
|
| 1085 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1086 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 1087 |
+
if `guidance_scale` is less than `1`).
|
| 1088 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 1089 |
+
Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
|
| 1090 |
+
is 1, the denoising process will be run on the masked area for the full number of iterations specified
|
| 1091 |
+
in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
|
| 1092 |
+
noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
|
| 1093 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1094 |
+
The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
|
| 1095 |
+
the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
|
| 1096 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 1097 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 1098 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1099 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 1100 |
+
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 1101 |
+
usually at the expense of lower image quality.
|
| 1102 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1103 |
+
The number of images to generate per prompt.
|
| 1104 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1105 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 1106 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1107 |
+
generator (`torch.Generator`, *optional*):
|
| 1108 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 1109 |
+
deterministic.
|
| 1110 |
+
max_embeddings_multiples (`int`, *optional*, defaults to `3`):
|
| 1111 |
+
The max multiple length of prompt embeddings compared to the max output length of text encoder.
|
| 1112 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1113 |
+
The output format of the generated image. Choose between
|
| 1114 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1115 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1116 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1117 |
+
plain tuple.
|
| 1118 |
+
callback (`Callable`, *optional*):
|
| 1119 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 1120 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
|
| 1121 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 1122 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 1123 |
+
called at every step.
|
| 1124 |
+
Returns:
|
| 1125 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 1126 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 1127 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 1128 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 1129 |
+
(nsfw) content, according to the `safety_checker`.
|
| 1130 |
+
"""
|
| 1131 |
+
return self.__call__(
|
| 1132 |
+
prompt=prompt,
|
| 1133 |
+
negative_prompt=negative_prompt,
|
| 1134 |
+
image=image,
|
| 1135 |
+
mask_image=mask_image,
|
| 1136 |
+
num_inference_steps=num_inference_steps,
|
| 1137 |
+
guidance_scale=guidance_scale,
|
| 1138 |
+
strength=strength,
|
| 1139 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1140 |
+
eta=eta,
|
| 1141 |
+
generator=generator,
|
| 1142 |
+
max_embeddings_multiples=max_embeddings_multiples,
|
| 1143 |
+
output_type=output_type,
|
| 1144 |
+
return_dict=return_dict,
|
| 1145 |
+
callback=callback,
|
| 1146 |
+
callback_steps=callback_steps,
|
| 1147 |
+
**kwargs,
|
| 1148 |
+
)
|
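A minimal usage sketch of the inpainting entry point documented above, assuming the file is loaded as the `lpw_stable_diffusion` community pipeline via `custom_pipeline`; the checkpoint id, image paths, and prompt below are illustrative placeholders, not part of the diff:

```python
import torch
from PIL import Image

from diffusers import DiffusionPipeline

# Load the community pipeline by name; the checkpoint id below is a placeholder.
pipe = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

init_image = Image.open("photo.png").convert("RGB")  # placeholder input image
mask_image = Image.open("mask.png").convert("L")     # white = region to inpaint

result = pipe.inpaint(
    prompt="a (bouquet of roses:1.3) on the table",  # weighted long prompt
    negative_prompt="blurry, low quality",
    image=init_image,
    mask_image=mask_image,
    strength=0.75,
    num_inference_steps=50,
    guidance_scale=7.5,
    max_embeddings_multiples=3,
)
result.images[0].save("inpainted.png")
```

With `return_dict=True` (the default) the call returns a `StableDiffusionPipelineOutput`, so the generated images are available under `result.images`.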
v0.36.0/lpw_stable_diffusion_xl.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
v0.36.0/magic_mix.py
ADDED
|
@@ -0,0 +1,152 @@
|
| 1 |
+
from typing import Union
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from PIL import Image
|
| 5 |
+
from torchvision import transforms as tfms
|
| 6 |
+
from tqdm.auto import tqdm
|
| 7 |
+
from transformers import CLIPTextModel, CLIPTokenizer
|
| 8 |
+
|
| 9 |
+
from diffusers import (
|
| 10 |
+
AutoencoderKL,
|
| 11 |
+
DDIMScheduler,
|
| 12 |
+
DiffusionPipeline,
|
| 13 |
+
LMSDiscreteScheduler,
|
| 14 |
+
PNDMScheduler,
|
| 15 |
+
UNet2DConditionModel,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class MagicMixPipeline(DiffusionPipeline):
|
| 20 |
+
def __init__(
|
| 21 |
+
self,
|
| 22 |
+
vae: AutoencoderKL,
|
| 23 |
+
text_encoder: CLIPTextModel,
|
| 24 |
+
tokenizer: CLIPTokenizer,
|
| 25 |
+
unet: UNet2DConditionModel,
|
| 26 |
+
scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler],
|
| 27 |
+
):
|
| 28 |
+
super().__init__()
|
| 29 |
+
|
| 30 |
+
self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
|
| 31 |
+
|
| 32 |
+
# convert PIL image to latents
|
| 33 |
+
def encode(self, img):
|
| 34 |
+
with torch.no_grad():
|
| 35 |
+
latent = self.vae.encode(tfms.ToTensor()(img).unsqueeze(0).to(self.device) * 2 - 1)
|
| 36 |
+
latent = 0.18215 * latent.latent_dist.sample()
|
| 37 |
+
return latent
|
| 38 |
+
|
| 39 |
+
# convert latents to PIL image
|
| 40 |
+
def decode(self, latent):
|
| 41 |
+
latent = (1 / 0.18215) * latent
|
| 42 |
+
with torch.no_grad():
|
| 43 |
+
img = self.vae.decode(latent).sample
|
| 44 |
+
img = (img / 2 + 0.5).clamp(0, 1)
|
| 45 |
+
img = img.detach().cpu().permute(0, 2, 3, 1).numpy()
|
| 46 |
+
img = (img * 255).round().astype("uint8")
|
| 47 |
+
return Image.fromarray(img[0])
|
| 48 |
+
|
| 49 |
+
# convert prompt into text embeddings, along with the unconditional (empty-prompt) embeddings
|
| 50 |
+
def prep_text(self, prompt):
|
| 51 |
+
text_input = self.tokenizer(
|
| 52 |
+
prompt,
|
| 53 |
+
padding="max_length",
|
| 54 |
+
max_length=self.tokenizer.model_max_length,
|
| 55 |
+
truncation=True,
|
| 56 |
+
return_tensors="pt",
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
text_embedding = self.text_encoder(text_input.input_ids.to(self.device))[0]
|
| 60 |
+
|
| 61 |
+
uncond_input = self.tokenizer(
|
| 62 |
+
"",
|
| 63 |
+
padding="max_length",
|
| 64 |
+
max_length=self.tokenizer.model_max_length,
|
| 65 |
+
truncation=True,
|
| 66 |
+
return_tensors="pt",
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
uncond_embedding = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
|
| 70 |
+
|
| 71 |
+
return torch.cat([uncond_embedding, text_embedding])
|
| 72 |
+
|
| 73 |
+
def __call__(
|
| 74 |
+
self,
|
| 75 |
+
img: Image.Image,
|
| 76 |
+
prompt: str,
|
| 77 |
+
kmin: float = 0.3,
|
| 78 |
+
kmax: float = 0.6,
|
| 79 |
+
mix_factor: float = 0.5,
|
| 80 |
+
seed: int = 42,
|
| 81 |
+
steps: int = 50,
|
| 82 |
+
guidance_scale: float = 7.5,
|
| 83 |
+
) -> Image.Image:
|
| 84 |
+
tmin = steps - int(kmin * steps)
|
| 85 |
+
tmax = steps - int(kmax * steps)
|
| 86 |
+
|
| 87 |
+
text_embeddings = self.prep_text(prompt)
|
| 88 |
+
|
| 89 |
+
self.scheduler.set_timesteps(steps)
|
| 90 |
+
|
| 91 |
+
width, height = img.size
|
| 92 |
+
encoded = self.encode(img)
|
| 93 |
+
|
| 94 |
+
torch.manual_seed(seed)
|
| 95 |
+
noise = torch.randn(
|
| 96 |
+
(1, self.unet.config.in_channels, height // 8, width // 8),
|
| 97 |
+
).to(self.device)
|
| 98 |
+
|
| 99 |
+
latents = self.scheduler.add_noise(
|
| 100 |
+
encoded,
|
| 101 |
+
noise,
|
| 102 |
+
timesteps=self.scheduler.timesteps[tmax],
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
input = torch.cat([latents] * 2)
|
| 106 |
+
|
| 107 |
+
input = self.scheduler.scale_model_input(input, self.scheduler.timesteps[tmax])
|
| 108 |
+
|
| 109 |
+
with torch.no_grad():
|
| 110 |
+
pred = self.unet(
|
| 111 |
+
input,
|
| 112 |
+
self.scheduler.timesteps[tmax],
|
| 113 |
+
encoder_hidden_states=text_embeddings,
|
| 114 |
+
).sample
|
| 115 |
+
|
| 116 |
+
pred_uncond, pred_text = pred.chunk(2)
|
| 117 |
+
pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
|
| 118 |
+
|
| 119 |
+
latents = self.scheduler.step(pred, self.scheduler.timesteps[tmax], latents).prev_sample
|
| 120 |
+
|
| 121 |
+
for i, t in enumerate(tqdm(self.scheduler.timesteps)):
|
| 122 |
+
if i > tmax:
|
| 123 |
+
if i < tmin: # layout generation phase
|
| 124 |
+
orig_latents = self.scheduler.add_noise(
|
| 125 |
+
encoded,
|
| 126 |
+
noise,
|
| 127 |
+
timesteps=t,
|
| 128 |
+
)
|
| 129 |
+
|
| 130 |
+
input = (
|
| 131 |
+
(mix_factor * latents) + (1 - mix_factor) * orig_latents
|
| 132 |
+
) # interpolating between layout noise and conditionally generated noise to preserve layout semantics
|
| 133 |
+
input = torch.cat([input] * 2)
|
| 134 |
+
|
| 135 |
+
else: # content generation phase
|
| 136 |
+
input = torch.cat([latents] * 2)
|
| 137 |
+
|
| 138 |
+
input = self.scheduler.scale_model_input(input, t)
|
| 139 |
+
|
| 140 |
+
with torch.no_grad():
|
| 141 |
+
pred = self.unet(
|
| 142 |
+
input,
|
| 143 |
+
t,
|
| 144 |
+
encoder_hidden_states=text_embeddings,
|
| 145 |
+
).sample
|
| 146 |
+
|
| 147 |
+
pred_uncond, pred_text = pred.chunk(2)
|
| 148 |
+
pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
|
| 149 |
+
|
| 150 |
+
latents = self.scheduler.step(pred, t, latents).prev_sample
|
| 151 |
+
|
| 152 |
+
return self.decode(latents)
|
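A minimal usage sketch for the `MagicMixPipeline` defined above, assuming it is loaded as the `magic_mix` community pipeline; the checkpoint id and image path are placeholders. The input image's sides should be divisible by 8, since the noise is created at `height // 8` x `width // 8`:

```python
from PIL import Image

from diffusers import DDIMScheduler, DiffusionPipeline

# Placeholder checkpoint id; any SD v1-style checkpoint provides the components registered above.
model_id = "CompVis/stable-diffusion-v1-4"
pipe = DiffusionPipeline.from_pretrained(
    model_id,
    custom_pipeline="magic_mix",
    scheduler=DDIMScheduler.from_pretrained(model_id, subfolder="scheduler"),
).to("cuda")

layout_img = Image.open("layout.png").convert("RGB")  # placeholder layout image

mixed = pipe(
    layout_img,
    prompt="bed",      # semantic concept mixed into the layout
    kmin=0.3,          # kmin/kmax set tmin/tmax above, i.e. the layout-preserving window
    kmax=0.6,
    mix_factor=0.5,    # interpolation weight between layout noise and generated noise
    steps=50,
    guidance_scale=7.5,
)
mixed.save("magic_mix_result.png")  # __call__ returns a single PIL.Image.Image
```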
v0.36.0/marigold_depth_estimation.py
ADDED
|
@@ -0,0 +1,673 @@
|
| 1 |
+
# Copyright 2025 Bingxin Ke, ETH Zurich and The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# --------------------------------------------------------------------------
|
| 15 |
+
# If you find this code useful, we kindly ask you to cite our paper in your work.
|
| 16 |
+
# Please find bibtex at: https://github.com/prs-eth/Marigold#-citation
|
| 17 |
+
# More information about the method can be found at https://marigoldmonodepth.github.io
|
| 18 |
+
# --------------------------------------------------------------------------
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
import logging
|
| 22 |
+
import math
|
| 23 |
+
from typing import Dict, Union
|
| 24 |
+
|
| 25 |
+
import matplotlib
|
| 26 |
+
import numpy as np
|
| 27 |
+
import torch
|
| 28 |
+
from PIL import Image
|
| 29 |
+
from PIL.Image import Resampling
|
| 30 |
+
from scipy.optimize import minimize
|
| 31 |
+
from torch.utils.data import DataLoader, TensorDataset
|
| 32 |
+
from tqdm.auto import tqdm
|
| 33 |
+
from transformers import CLIPTextModel, CLIPTokenizer
|
| 34 |
+
|
| 35 |
+
from diffusers import (
|
| 36 |
+
AutoencoderKL,
|
| 37 |
+
DDIMScheduler,
|
| 38 |
+
DiffusionPipeline,
|
| 39 |
+
LCMScheduler,
|
| 40 |
+
UNet2DConditionModel,
|
| 41 |
+
)
|
| 42 |
+
from diffusers.utils import BaseOutput, check_min_version
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
|
| 46 |
+
check_min_version("0.36.0")
|
| 47 |
+
|
| 48 |
+
class MarigoldDepthOutput(BaseOutput):
|
| 49 |
+
"""
|
| 50 |
+
Output class for Marigold monocular depth prediction pipeline.
|
| 51 |
+
|
| 52 |
+
Args:
|
| 53 |
+
depth_np (`np.ndarray`):
|
| 54 |
+
Predicted depth map, with depth values in the range of [0, 1].
|
| 55 |
+
depth_colored (`None` or `PIL.Image.Image`):
|
| 56 |
+
Colorized depth map, with the shape of [3, H, W] and values in [0, 1].
|
| 57 |
+
uncertainty (`None` or `np.ndarray`):
|
| 58 |
+
Uncalibrated uncertainty (MAD, median absolute deviation) coming from ensembling.
|
| 59 |
+
"""
|
| 60 |
+
|
| 61 |
+
depth_np: np.ndarray
|
| 62 |
+
depth_colored: Union[None, Image.Image]
|
| 63 |
+
uncertainty: Union[None, np.ndarray]
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def get_pil_resample_method(method_str: str) -> Resampling:
|
| 67 |
+
resample_method_dic = {
|
| 68 |
+
"bilinear": Resampling.BILINEAR,
|
| 69 |
+
"bicubic": Resampling.BICUBIC,
|
| 70 |
+
"nearest": Resampling.NEAREST,
|
| 71 |
+
}
|
| 72 |
+
resample_method = resample_method_dic.get(method_str, None)
|
| 73 |
+
if resample_method is None:
|
| 74 |
+
raise ValueError(f"Unknown resampling method: {resample_method}")
|
| 75 |
+
else:
|
| 76 |
+
return resample_method
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class MarigoldPipeline(DiffusionPipeline):
|
| 80 |
+
"""
|
| 81 |
+
Pipeline for monocular depth estimation using Marigold: https://marigoldmonodepth.github.io.
|
| 82 |
+
|
| 83 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 84 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 85 |
+
|
| 86 |
+
Args:
|
| 87 |
+
unet (`UNet2DConditionModel`):
|
| 88 |
+
Conditional U-Net to denoise the depth latent, conditioned on image latent.
|
| 89 |
+
vae (`AutoencoderKL`):
|
| 90 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images and depth maps
|
| 91 |
+
to and from latent representations.
|
| 92 |
+
scheduler (`DDIMScheduler`):
|
| 93 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents.
|
| 94 |
+
text_encoder (`CLIPTextModel`):
|
| 95 |
+
Text-encoder, for empty text embedding.
|
| 96 |
+
tokenizer (`CLIPTokenizer`):
|
| 97 |
+
CLIP tokenizer.
|
| 98 |
+
"""
|
| 99 |
+
|
| 100 |
+
rgb_latent_scale_factor = 0.18215
|
| 101 |
+
depth_latent_scale_factor = 0.18215
|
| 102 |
+
|
| 103 |
+
def __init__(
|
| 104 |
+
self,
|
| 105 |
+
unet: UNet2DConditionModel,
|
| 106 |
+
vae: AutoencoderKL,
|
| 107 |
+
scheduler: DDIMScheduler,
|
| 108 |
+
text_encoder: CLIPTextModel,
|
| 109 |
+
tokenizer: CLIPTokenizer,
|
| 110 |
+
):
|
| 111 |
+
super().__init__()
|
| 112 |
+
|
| 113 |
+
self.register_modules(
|
| 114 |
+
unet=unet,
|
| 115 |
+
vae=vae,
|
| 116 |
+
scheduler=scheduler,
|
| 117 |
+
text_encoder=text_encoder,
|
| 118 |
+
tokenizer=tokenizer,
|
| 119 |
+
)
|
| 120 |
+
|
| 121 |
+
self.empty_text_embed = None
|
| 122 |
+
|
| 123 |
+
@torch.no_grad()
|
| 124 |
+
def __call__(
|
| 125 |
+
self,
|
| 126 |
+
input_image: Image,
|
| 127 |
+
denoising_steps: int = 10,
|
| 128 |
+
ensemble_size: int = 10,
|
| 129 |
+
processing_res: int = 768,
|
| 130 |
+
match_input_res: bool = True,
|
| 131 |
+
resample_method: str = "bilinear",
|
| 132 |
+
batch_size: int = 0,
|
| 133 |
+
seed: Union[int, None] = None,
|
| 134 |
+
color_map: str = "Spectral",
|
| 135 |
+
show_progress_bar: bool = True,
|
| 136 |
+
ensemble_kwargs: Dict = None,
|
| 137 |
+
) -> MarigoldDepthOutput:
|
| 138 |
+
"""
|
| 139 |
+
Function invoked when calling the pipeline.
|
| 140 |
+
|
| 141 |
+
Args:
|
| 142 |
+
input_image (`Image`):
|
| 143 |
+
Input RGB (or gray-scale) image.
|
| 144 |
+
processing_res (`int`, *optional*, defaults to `768`):
|
| 145 |
+
Maximum resolution of processing.
|
| 146 |
+
If set to 0: will not resize at all.
|
| 147 |
+
match_input_res (`bool`, *optional*, defaults to `True`):
|
| 148 |
+
Resize depth prediction to match input resolution.
|
| 149 |
+
Only valid if `processing_res` > 0.
|
| 150 |
+
resample_method: (`str`, *optional*, defaults to `bilinear`):
|
| 151 |
+
Resampling method used to resize images and depth predictions. This can be one of `bilinear`, `bicubic` or `nearest`, defaults to: `bilinear`.
|
| 152 |
+
denoising_steps (`int`, *optional*, defaults to `10`):
|
| 153 |
+
Number of diffusion denoising steps (DDIM) during inference.
|
| 154 |
+
ensemble_size (`int`, *optional*, defaults to `10`):
|
| 155 |
+
Number of predictions to be ensembled.
|
| 156 |
+
batch_size (`int`, *optional*, defaults to `0`):
|
| 157 |
+
Inference batch size, no bigger than `ensemble_size`.
|
| 158 |
+
If set to 0, the script will automatically decide the proper batch size.
|
| 159 |
+
seed (`int`, *optional*, defaults to `None`):
|
| 160 |
+
Reproducibility seed.
|
| 161 |
+
show_progress_bar (`bool`, *optional*, defaults to `True`):
|
| 162 |
+
Display a progress bar of diffusion denoising.
|
| 163 |
+
color_map (`str`, *optional*, defaults to `"Spectral"`, pass `None` to skip colorized depth map generation):
|
| 164 |
+
Colormap used to colorize the depth map.
|
| 165 |
+
ensemble_kwargs (`dict`, *optional*, defaults to `None`):
|
| 166 |
+
Arguments for detailed ensembling settings.
|
| 167 |
+
Returns:
|
| 168 |
+
`MarigoldDepthOutput`: Output class for Marigold monocular depth prediction pipeline, including:
|
| 169 |
+
- **depth_np** (`np.ndarray`) Predicted depth map, with depth values in the range of [0, 1]
|
| 170 |
+
- **depth_colored** (`PIL.Image.Image`) Colorized depth map, with the shape of [3, H, W] and values in [0, 1], None if `color_map` is `None`
|
| 171 |
+
- **uncertainty** (`None` or `np.ndarray`) Uncalibrated uncertainty (MAD, median absolute deviation)
|
| 172 |
+
coming from ensembling. None if `ensemble_size = 1`
|
| 173 |
+
"""
|
| 174 |
+
|
| 175 |
+
device = self.device
|
| 176 |
+
input_size = input_image.size
|
| 177 |
+
|
| 178 |
+
if not match_input_res:
|
| 179 |
+
assert processing_res is not None, "Value error: `processing_res` must be set when `match_input_res` is False"
|
| 180 |
+
assert processing_res >= 0
|
| 181 |
+
assert ensemble_size >= 1
|
| 182 |
+
|
| 183 |
+
# Check if denoising step is reasonable
|
| 184 |
+
self._check_inference_step(denoising_steps)
|
| 185 |
+
|
| 186 |
+
resample_method: Resampling = get_pil_resample_method(resample_method)
|
| 187 |
+
|
| 188 |
+
# ----------------- Image Preprocess -----------------
|
| 189 |
+
# Resize image
|
| 190 |
+
if processing_res > 0:
|
| 191 |
+
input_image = self.resize_max_res(
|
| 192 |
+
input_image,
|
| 193 |
+
max_edge_resolution=processing_res,
|
| 194 |
+
resample_method=resample_method,
|
| 195 |
+
)
|
| 196 |
+
# Convert the image to RGB: 1. remove the alpha channel, 2. convert grayscale (B&W) to 3-channel
|
| 197 |
+
input_image = input_image.convert("RGB")
|
| 198 |
+
image = np.asarray(input_image)
|
| 199 |
+
|
| 200 |
+
# Normalize rgb values
|
| 201 |
+
rgb = np.transpose(image, (2, 0, 1)) # [H, W, rgb] -> [rgb, H, W]
|
| 202 |
+
rgb_norm = rgb / 255.0 * 2.0 - 1.0 # [0, 255] -> [-1, 1]
|
| 203 |
+
rgb_norm = torch.from_numpy(rgb_norm).to(self.dtype)
|
| 204 |
+
rgb_norm = rgb_norm.to(device)
|
| 205 |
+
assert rgb_norm.min() >= -1.0 and rgb_norm.max() <= 1.0
|
| 206 |
+
|
| 207 |
+
# ----------------- Predicting depth -----------------
|
| 208 |
+
# Batch repeated input image
|
| 209 |
+
duplicated_rgb = torch.stack([rgb_norm] * ensemble_size)
|
| 210 |
+
single_rgb_dataset = TensorDataset(duplicated_rgb)
|
| 211 |
+
if batch_size > 0:
|
| 212 |
+
_bs = batch_size
|
| 213 |
+
else:
|
| 214 |
+
_bs = self._find_batch_size(
|
| 215 |
+
ensemble_size=ensemble_size,
|
| 216 |
+
input_res=max(rgb_norm.shape[1:]),
|
| 217 |
+
dtype=self.dtype,
|
| 218 |
+
)
|
| 219 |
+
|
| 220 |
+
single_rgb_loader = DataLoader(single_rgb_dataset, batch_size=_bs, shuffle=False)
|
| 221 |
+
|
| 222 |
+
# Predict depth maps (batched)
|
| 223 |
+
depth_pred_ls = []
|
| 224 |
+
if show_progress_bar:
|
| 225 |
+
iterable = tqdm(single_rgb_loader, desc=" " * 2 + "Inference batches", leave=False)
|
| 226 |
+
else:
|
| 227 |
+
iterable = single_rgb_loader
|
| 228 |
+
for batch in iterable:
|
| 229 |
+
(batched_img,) = batch
|
| 230 |
+
depth_pred_raw = self.single_infer(
|
| 231 |
+
rgb_in=batched_img,
|
| 232 |
+
num_inference_steps=denoising_steps,
|
| 233 |
+
show_pbar=show_progress_bar,
|
| 234 |
+
seed=seed,
|
| 235 |
+
)
|
| 236 |
+
depth_pred_ls.append(depth_pred_raw.detach())
|
| 237 |
+
depth_preds = torch.concat(depth_pred_ls, dim=0).squeeze()
|
| 238 |
+
torch.cuda.empty_cache() # clear vram cache for ensembling
|
| 239 |
+
|
| 240 |
+
# ----------------- Test-time ensembling -----------------
|
| 241 |
+
if ensemble_size > 1:
|
| 242 |
+
depth_pred, pred_uncert = self.ensemble_depths(depth_preds, **(ensemble_kwargs or {}))
|
| 243 |
+
else:
|
| 244 |
+
depth_pred = depth_preds
|
| 245 |
+
pred_uncert = None
|
| 246 |
+
|
| 247 |
+
# ----------------- Post processing -----------------
|
| 248 |
+
# Scale prediction to [0, 1]
|
| 249 |
+
min_d = torch.min(depth_pred)
|
| 250 |
+
max_d = torch.max(depth_pred)
|
| 251 |
+
depth_pred = (depth_pred - min_d) / (max_d - min_d)
|
| 252 |
+
|
| 253 |
+
# Convert to numpy
|
| 254 |
+
depth_pred = depth_pred.cpu().numpy().astype(np.float32)
|
| 255 |
+
|
| 256 |
+
# Resize back to original resolution
|
| 257 |
+
if match_input_res:
|
| 258 |
+
pred_img = Image.fromarray(depth_pred)
|
| 259 |
+
pred_img = pred_img.resize(input_size, resample=resample_method)
|
| 260 |
+
depth_pred = np.asarray(pred_img)
|
| 261 |
+
|
| 262 |
+
# Clip output range
|
| 263 |
+
depth_pred = depth_pred.clip(0, 1)
|
| 264 |
+
|
| 265 |
+
# Colorize
|
| 266 |
+
if color_map is not None:
|
| 267 |
+
depth_colored = self.colorize_depth_maps(
|
| 268 |
+
depth_pred, 0, 1, cmap=color_map
|
| 269 |
+
).squeeze() # [3, H, W], value in (0, 1)
|
| 270 |
+
depth_colored = (depth_colored * 255).astype(np.uint8)
|
| 271 |
+
depth_colored_hwc = self.chw2hwc(depth_colored)
|
| 272 |
+
depth_colored_img = Image.fromarray(depth_colored_hwc)
|
| 273 |
+
else:
|
| 274 |
+
depth_colored_img = None
|
| 275 |
+
|
| 276 |
+
return MarigoldDepthOutput(
|
| 277 |
+
depth_np=depth_pred,
|
| 278 |
+
depth_colored=depth_colored_img,
|
| 279 |
+
uncertainty=pred_uncert,
|
| 280 |
+
)
|
| 281 |
+
|
| 282 |
+
def _check_inference_step(self, n_step: int):
|
| 283 |
+
"""
|
| 284 |
+
Check if denoising step is reasonable
|
| 285 |
+
Args:
|
| 286 |
+
n_step (`int`): denoising steps
|
| 287 |
+
"""
|
| 288 |
+
assert n_step >= 1
|
| 289 |
+
|
| 290 |
+
if isinstance(self.scheduler, DDIMScheduler):
|
| 291 |
+
if n_step < 10:
|
| 292 |
+
logging.warning(
|
| 293 |
+
f"Too few denoising steps: {n_step}. Recommended to use the LCM checkpoint for few-step inference."
|
| 294 |
+
)
|
| 295 |
+
elif isinstance(self.scheduler, LCMScheduler):
|
| 296 |
+
if not 1 <= n_step <= 4:
|
| 297 |
+
logging.warning(f"Non-optimal setting of denoising steps: {n_step}. Recommended setting is 1-4 steps.")
|
| 298 |
+
else:
|
| 299 |
+
raise RuntimeError(f"Unsupported scheduler type: {type(self.scheduler)}")
|
| 300 |
+
|
| 301 |
+
def _encode_empty_text(self):
|
| 302 |
+
"""
|
| 303 |
+
Encode text embedding for empty prompt.
|
| 304 |
+
"""
|
| 305 |
+
prompt = ""
|
| 306 |
+
text_inputs = self.tokenizer(
|
| 307 |
+
prompt,
|
| 308 |
+
padding="do_not_pad",
|
| 309 |
+
max_length=self.tokenizer.model_max_length,
|
| 310 |
+
truncation=True,
|
| 311 |
+
return_tensors="pt",
|
| 312 |
+
)
|
| 313 |
+
text_input_ids = text_inputs.input_ids.to(self.text_encoder.device)
|
| 314 |
+
self.empty_text_embed = self.text_encoder(text_input_ids)[0].to(self.dtype)
|
| 315 |
+
|
| 316 |
+
@torch.no_grad()
|
| 317 |
+
def single_infer(
|
| 318 |
+
self,
|
| 319 |
+
rgb_in: torch.Tensor,
|
| 320 |
+
num_inference_steps: int,
|
| 321 |
+
seed: Union[int, None],
|
| 322 |
+
show_pbar: bool,
|
| 323 |
+
) -> torch.Tensor:
|
| 324 |
+
"""
|
| 325 |
+
Perform an individual depth prediction without ensembling.
|
| 326 |
+
|
| 327 |
+
Args:
|
| 328 |
+
rgb_in (`torch.Tensor`):
|
| 329 |
+
Input RGB image.
|
| 330 |
+
num_inference_steps (`int`):
|
| 331 |
+
Number of diffusion denoising steps (DDIM) during inference.
|
| 332 |
+
show_pbar (`bool`):
|
| 333 |
+
Display a progress bar of diffusion denoising.
|
| 334 |
+
Returns:
|
| 335 |
+
`torch.Tensor`: Predicted depth map.
|
| 336 |
+
"""
|
| 337 |
+
device = rgb_in.device
|
| 338 |
+
|
| 339 |
+
# Set timesteps
|
| 340 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 341 |
+
timesteps = self.scheduler.timesteps # [T]
|
| 342 |
+
|
| 343 |
+
# Encode image
|
| 344 |
+
rgb_latent = self.encode_rgb(rgb_in)
|
| 345 |
+
|
| 346 |
+
# Initial depth map (noise)
|
| 347 |
+
if seed is None:
|
| 348 |
+
rand_num_generator = None
|
| 349 |
+
else:
|
| 350 |
+
rand_num_generator = torch.Generator(device=device)
|
| 351 |
+
rand_num_generator.manual_seed(seed)
|
| 352 |
+
depth_latent = torch.randn(
|
| 353 |
+
rgb_latent.shape,
|
| 354 |
+
device=device,
|
| 355 |
+
dtype=self.dtype,
|
| 356 |
+
generator=rand_num_generator,
|
| 357 |
+
) # [B, 4, h, w]
|
| 358 |
+
|
| 359 |
+
# Batched empty text embedding
|
| 360 |
+
if self.empty_text_embed is None:
|
| 361 |
+
self._encode_empty_text()
|
| 362 |
+
batch_empty_text_embed = self.empty_text_embed.repeat((rgb_latent.shape[0], 1, 1)) # [B, 2, 1024]
|
| 363 |
+
|
| 364 |
+
# Denoising loop
|
| 365 |
+
if show_pbar:
|
| 366 |
+
iterable = tqdm(
|
| 367 |
+
enumerate(timesteps),
|
| 368 |
+
total=len(timesteps),
|
| 369 |
+
leave=False,
|
| 370 |
+
desc=" " * 4 + "Diffusion denoising",
|
| 371 |
+
)
|
| 372 |
+
else:
|
| 373 |
+
iterable = enumerate(timesteps)
|
| 374 |
+
|
| 375 |
+
for i, t in iterable:
|
| 376 |
+
unet_input = torch.cat([rgb_latent, depth_latent], dim=1) # this order is important
|
| 377 |
+
|
| 378 |
+
# predict the noise residual
|
| 379 |
+
noise_pred = self.unet(unet_input, t, encoder_hidden_states=batch_empty_text_embed).sample # [B, 4, h, w]
|
| 380 |
+
|
| 381 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 382 |
+
depth_latent = self.scheduler.step(noise_pred, t, depth_latent, generator=rand_num_generator).prev_sample
|
| 383 |
+
|
| 384 |
+
depth = self.decode_depth(depth_latent)
|
| 385 |
+
|
| 386 |
+
# clip prediction
|
| 387 |
+
depth = torch.clip(depth, -1.0, 1.0)
|
| 388 |
+
# shift to [0, 1]
|
| 389 |
+
depth = (depth + 1.0) / 2.0
|
| 390 |
+
|
| 391 |
+
return depth
|
| 392 |
+
|
| 393 |
+
def encode_rgb(self, rgb_in: torch.Tensor) -> torch.Tensor:
|
| 394 |
+
"""
|
| 395 |
+
Encode RGB image into latent.
|
| 396 |
+
|
| 397 |
+
Args:
|
| 398 |
+
rgb_in (`torch.Tensor`):
|
| 399 |
+
Input RGB image to be encoded.
|
| 400 |
+
|
| 401 |
+
Returns:
|
| 402 |
+
`torch.Tensor`: Image latent.
|
| 403 |
+
"""
|
| 404 |
+
# encode
|
| 405 |
+
h = self.vae.encoder(rgb_in)
|
| 406 |
+
moments = self.vae.quant_conv(h)
|
| 407 |
+
mean, logvar = torch.chunk(moments, 2, dim=1)
|
| 408 |
+
# scale latent
|
| 409 |
+
rgb_latent = mean * self.rgb_latent_scale_factor
|
| 410 |
+
return rgb_latent
|
| 411 |
+
|
| 412 |
+
def decode_depth(self, depth_latent: torch.Tensor) -> torch.Tensor:
|
| 413 |
+
"""
|
| 414 |
+
Decode depth latent into depth map.
|
| 415 |
+
|
| 416 |
+
Args:
|
| 417 |
+
depth_latent (`torch.Tensor`):
|
| 418 |
+
Depth latent to be decoded.
|
| 419 |
+
|
| 420 |
+
Returns:
|
| 421 |
+
`torch.Tensor`: Decoded depth map.
|
| 422 |
+
"""
|
| 423 |
+
# scale latent
|
| 424 |
+
depth_latent = depth_latent / self.depth_latent_scale_factor
|
| 425 |
+
# decode
|
| 426 |
+
z = self.vae.post_quant_conv(depth_latent)
|
| 427 |
+
stacked = self.vae.decoder(z)
|
| 428 |
+
# mean of output channels
|
| 429 |
+
depth_mean = stacked.mean(dim=1, keepdim=True)
|
| 430 |
+
return depth_mean
|
| 431 |
+
|
| 432 |
+
@staticmethod
|
| 433 |
+
def resize_max_res(img: Image.Image, max_edge_resolution: int, resample_method=Resampling.BILINEAR) -> Image.Image:
|
| 434 |
+
"""
|
| 435 |
+
Resize image to limit maximum edge length while keeping aspect ratio.
|
| 436 |
+
|
| 437 |
+
Args:
|
| 438 |
+
img (`Image.Image`):
|
| 439 |
+
Image to be resized.
|
| 440 |
+
max_edge_resolution (`int`):
|
| 441 |
+
Maximum edge length (pixel).
|
| 442 |
+
resample_method (`PIL.Image.Resampling`):
|
| 443 |
+
Resampling method used to resize images.
|
| 444 |
+
|
| 445 |
+
Returns:
|
| 446 |
+
`Image.Image`: Resized image.
|
| 447 |
+
"""
|
| 448 |
+
original_width, original_height = img.size
|
| 449 |
+
downscale_factor = min(max_edge_resolution / original_width, max_edge_resolution / original_height)
|
| 450 |
+
|
| 451 |
+
new_width = int(original_width * downscale_factor)
|
| 452 |
+
new_height = int(original_height * downscale_factor)
|
| 453 |
+
|
| 454 |
+
resized_img = img.resize((new_width, new_height), resample=resample_method)
|
| 455 |
+
return resized_img
|
| 456 |
+
|
| 457 |
+
@staticmethod
|
| 458 |
+
def colorize_depth_maps(depth_map, min_depth, max_depth, cmap="Spectral", valid_mask=None):
|
| 459 |
+
"""
|
| 460 |
+
Colorize depth maps.
|
| 461 |
+
"""
|
| 462 |
+
assert len(depth_map.shape) >= 2, "Invalid dimension"
|
| 463 |
+
|
| 464 |
+
if isinstance(depth_map, torch.Tensor):
|
| 465 |
+
depth = depth_map.detach().clone().squeeze().numpy()
|
| 466 |
+
elif isinstance(depth_map, np.ndarray):
|
| 467 |
+
depth = depth_map.copy().squeeze()
|
| 468 |
+
# reshape to [ (B,) H, W ]
|
| 469 |
+
if depth.ndim < 3:
|
| 470 |
+
depth = depth[np.newaxis, :, :]
|
| 471 |
+
|
| 472 |
+
# colorize
|
| 473 |
+
cm = matplotlib.colormaps[cmap]
|
| 474 |
+
depth = ((depth - min_depth) / (max_depth - min_depth)).clip(0, 1)
|
| 475 |
+
img_colored_np = cm(depth, bytes=False)[:, :, :, 0:3] # value from 0 to 1
|
| 476 |
+
img_colored_np = np.rollaxis(img_colored_np, 3, 1)
|
| 477 |
+
|
| 478 |
+
if valid_mask is not None:
|
| 479 |
+
if isinstance(depth_map, torch.Tensor):
|
| 480 |
+
valid_mask = valid_mask.detach().numpy()
|
| 481 |
+
valid_mask = valid_mask.squeeze() # [H, W] or [B, H, W]
|
| 482 |
+
if valid_mask.ndim < 3:
|
| 483 |
+
valid_mask = valid_mask[np.newaxis, np.newaxis, :, :]
|
| 484 |
+
else:
|
| 485 |
+
valid_mask = valid_mask[:, np.newaxis, :, :]
|
| 486 |
+
valid_mask = np.repeat(valid_mask, 3, axis=1)
|
| 487 |
+
img_colored_np[~valid_mask] = 0
|
| 488 |
+
|
| 489 |
+
if isinstance(depth_map, torch.Tensor):
|
| 490 |
+
img_colored = torch.from_numpy(img_colored_np).float()
|
| 491 |
+
elif isinstance(depth_map, np.ndarray):
|
| 492 |
+
img_colored = img_colored_np
|
| 493 |
+
|
| 494 |
+
return img_colored
|
| 495 |
+
|
| 496 |
+
@staticmethod
|
| 497 |
+
def chw2hwc(chw):
|
| 498 |
+
assert 3 == len(chw.shape)
|
| 499 |
+
if isinstance(chw, torch.Tensor):
|
| 500 |
+
hwc = torch.permute(chw, (1, 2, 0))
|
| 501 |
+
elif isinstance(chw, np.ndarray):
|
| 502 |
+
hwc = np.moveaxis(chw, 0, -1)
|
| 503 |
+
return hwc
|
| 504 |
+
|
| 505 |
+
@staticmethod
|
| 506 |
+
def _find_batch_size(ensemble_size: int, input_res: int, dtype: torch.dtype) -> int:
|
| 507 |
+
"""
|
| 508 |
+
Automatically search for suitable operating batch size.
|
| 509 |
+
|
| 510 |
+
Args:
|
| 511 |
+
ensemble_size (`int`):
|
| 512 |
+
Number of predictions to be ensembled.
|
| 513 |
+
input_res (`int`):
|
| 514 |
+
Operating resolution of the input image.
|
| 515 |
+
|
| 516 |
+
Returns:
|
| 517 |
+
`int`: Operating batch size.
|
| 518 |
+
"""
|
| 519 |
+
# Search table for suggested max. inference batch size
|
| 520 |
+
bs_search_table = [
|
| 521 |
+
# tested on A100-PCIE-80GB
|
| 522 |
+
{"res": 768, "total_vram": 79, "bs": 35, "dtype": torch.float32},
|
| 523 |
+
{"res": 1024, "total_vram": 79, "bs": 20, "dtype": torch.float32},
|
| 524 |
+
# tested on A100-PCIE-40GB
|
| 525 |
+
{"res": 768, "total_vram": 39, "bs": 15, "dtype": torch.float32},
|
| 526 |
+
{"res": 1024, "total_vram": 39, "bs": 8, "dtype": torch.float32},
|
| 527 |
+
{"res": 768, "total_vram": 39, "bs": 30, "dtype": torch.float16},
|
| 528 |
+
{"res": 1024, "total_vram": 39, "bs": 15, "dtype": torch.float16},
|
| 529 |
+
# tested on RTX3090, RTX4090
|
| 530 |
+
{"res": 512, "total_vram": 23, "bs": 20, "dtype": torch.float32},
|
| 531 |
+
{"res": 768, "total_vram": 23, "bs": 7, "dtype": torch.float32},
|
| 532 |
+
{"res": 1024, "total_vram": 23, "bs": 3, "dtype": torch.float32},
|
| 533 |
+
{"res": 512, "total_vram": 23, "bs": 40, "dtype": torch.float16},
|
| 534 |
+
{"res": 768, "total_vram": 23, "bs": 18, "dtype": torch.float16},
|
| 535 |
+
{"res": 1024, "total_vram": 23, "bs": 10, "dtype": torch.float16},
|
| 536 |
+
# tested on GTX1080Ti
|
| 537 |
+
{"res": 512, "total_vram": 10, "bs": 5, "dtype": torch.float32},
|
| 538 |
+
{"res": 768, "total_vram": 10, "bs": 2, "dtype": torch.float32},
|
| 539 |
+
{"res": 512, "total_vram": 10, "bs": 10, "dtype": torch.float16},
|
| 540 |
+
{"res": 768, "total_vram": 10, "bs": 5, "dtype": torch.float16},
|
| 541 |
+
{"res": 1024, "total_vram": 10, "bs": 3, "dtype": torch.float16},
|
| 542 |
+
]
|
| 543 |
+
|
| 544 |
+
if not torch.cuda.is_available():
|
| 545 |
+
return 1
|
| 546 |
+
|
| 547 |
+
total_vram = torch.cuda.mem_get_info()[1] / 1024.0**3
|
| 548 |
+
filtered_bs_search_table = [s for s in bs_search_table if s["dtype"] == dtype]
|
| 549 |
+
for settings in sorted(
|
| 550 |
+
filtered_bs_search_table,
|
| 551 |
+
key=lambda k: (k["res"], -k["total_vram"]),
|
| 552 |
+
):
|
| 553 |
+
if input_res <= settings["res"] and total_vram >= settings["total_vram"]:
|
| 554 |
+
bs = settings["bs"]
|
| 555 |
+
if bs > ensemble_size:
|
| 556 |
+
bs = ensemble_size
|
| 557 |
+
elif bs > math.ceil(ensemble_size / 2) and bs < ensemble_size:
|
| 558 |
+
bs = math.ceil(ensemble_size / 2)
|
| 559 |
+
return bs
|
| 560 |
+
|
| 561 |
+
return 1
|
| 562 |
+
|
| 563 |
+
@staticmethod
|
| 564 |
+
def ensemble_depths(
|
| 565 |
+
input_images: torch.Tensor,
|
| 566 |
+
regularizer_strength: float = 0.02,
|
| 567 |
+
max_iter: int = 2,
|
| 568 |
+
tol: float = 1e-3,
|
| 569 |
+
reduction: str = "median",
|
| 570 |
+
max_res: int = None,
|
| 571 |
+
):
|
| 572 |
+
"""
|
| 573 |
+
To ensemble multiple affine-invariant depth images (up to scale and shift),
|
| 574 |
+
by jointly estimating the per-image scale and shift that align them.
|
| 575 |
+
"""
|
| 576 |
+
|
| 577 |
+
def inter_distances(tensors: torch.Tensor):
|
| 578 |
+
"""
|
| 579 |
+
Calculate the pairwise distances between all pairs of depth maps.
|
| 580 |
+
"""
|
| 581 |
+
distances = []
|
| 582 |
+
for i, j in torch.combinations(torch.arange(tensors.shape[0])):
|
| 583 |
+
arr1 = tensors[i : i + 1]
|
| 584 |
+
arr2 = tensors[j : j + 1]
|
| 585 |
+
distances.append(arr1 - arr2)
|
| 586 |
+
dist = torch.concatenate(distances, dim=0)
|
| 587 |
+
return dist
|
| 588 |
+
|
| 589 |
+
device = input_images.device
|
| 590 |
+
dtype = input_images.dtype
|
| 591 |
+
np_dtype = np.float32
|
| 592 |
+
|
| 593 |
+
original_input = input_images.clone()
|
| 594 |
+
n_img = input_images.shape[0]
|
| 595 |
+
ori_shape = input_images.shape
|
| 596 |
+
|
| 597 |
+
if max_res is not None:
|
| 598 |
+
scale_factor = torch.min(max_res / torch.tensor(ori_shape[-2:]))
|
| 599 |
+
if scale_factor < 1:
|
| 600 |
+
downscaler = torch.nn.Upsample(scale_factor=float(scale_factor), mode="nearest")
|
| 601 |
+
input_images = downscaler(input_images.unsqueeze(1)).squeeze(1)  # keep a tensor; Upsample needs a channel dim
|
| 602 |
+
|
| 603 |
+
# init guess
|
| 604 |
+
_min = np.min(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)
|
| 605 |
+
_max = np.max(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)
|
| 606 |
+
s_init = 1.0 / (_max - _min).reshape((-1, 1, 1))
|
| 607 |
+
t_init = (-1 * s_init.flatten() * _min.flatten()).reshape((-1, 1, 1))
|
| 608 |
+
x = np.concatenate([s_init, t_init]).reshape(-1).astype(np_dtype)
|
| 609 |
+
|
| 610 |
+
input_images = input_images.to(device)
|
| 611 |
+
|
| 612 |
+
# objective function
|
| 613 |
+
def closure(x):
|
| 614 |
+
l = len(x)
|
| 615 |
+
s = x[: int(l / 2)]
|
| 616 |
+
t = x[int(l / 2) :]
|
| 617 |
+
s = torch.from_numpy(s).to(dtype=dtype).to(device)
|
| 618 |
+
t = torch.from_numpy(t).to(dtype=dtype).to(device)
|
| 619 |
+
|
| 620 |
+
transformed_arrays = input_images * s.view((-1, 1, 1)) + t.view((-1, 1, 1))
|
| 621 |
+
dists = inter_distances(transformed_arrays)
|
| 622 |
+
sqrt_dist = torch.sqrt(torch.mean(dists**2))
|
| 623 |
+
|
| 624 |
+
if "mean" == reduction:
|
| 625 |
+
pred = torch.mean(transformed_arrays, dim=0)
|
| 626 |
+
elif "median" == reduction:
|
| 627 |
+
pred = torch.median(transformed_arrays, dim=0).values
|
| 628 |
+
else:
|
| 629 |
+
raise ValueError
|
| 630 |
+
|
| 631 |
+
near_err = torch.sqrt((0 - torch.min(pred)) ** 2)
|
| 632 |
+
far_err = torch.sqrt((1 - torch.max(pred)) ** 2)
|
| 633 |
+
|
| 634 |
+
err = sqrt_dist + (near_err + far_err) * regularizer_strength
|
| 635 |
+
err = err.detach().cpu().numpy().astype(np_dtype)
|
| 636 |
+
return err
|
| 637 |
+
|
| 638 |
+
res = minimize(
|
| 639 |
+
closure,
|
| 640 |
+
x,
|
| 641 |
+
method="BFGS",
|
| 642 |
+
tol=tol,
|
| 643 |
+
options={"maxiter": max_iter, "disp": False},
|
| 644 |
+
)
|
| 645 |
+
x = res.x
|
| 646 |
+
l = len(x)
|
| 647 |
+
s = x[: int(l / 2)]
|
| 648 |
+
t = x[int(l / 2) :]
|
| 649 |
+
|
| 650 |
+
# Prediction
|
| 651 |
+
s = torch.from_numpy(s).to(dtype=dtype).to(device)
|
| 652 |
+
t = torch.from_numpy(t).to(dtype=dtype).to(device)
|
| 653 |
+
transformed_arrays = original_input * s.view(-1, 1, 1) + t.view(-1, 1, 1)
|
| 654 |
+
if "mean" == reduction:
|
| 655 |
+
aligned_images = torch.mean(transformed_arrays, dim=0)
|
| 656 |
+
std = torch.std(transformed_arrays, dim=0)
|
| 657 |
+
uncertainty = std
|
| 658 |
+
elif "median" == reduction:
|
| 659 |
+
aligned_images = torch.median(transformed_arrays, dim=0).values
|
| 660 |
+
# MAD (median absolute deviation) as uncertainty indicator
|
| 661 |
+
abs_dev = torch.abs(transformed_arrays - aligned_images)
|
| 662 |
+
mad = torch.median(abs_dev, dim=0).values
|
| 663 |
+
uncertainty = mad
|
| 664 |
+
else:
|
| 665 |
+
raise ValueError(f"Unknown reduction method: {reduction}")
|
| 666 |
+
|
| 667 |
+
# Scale and shift to [0, 1]
|
| 668 |
+
_min = torch.min(aligned_images)
|
| 669 |
+
_max = torch.max(aligned_images)
|
| 670 |
+
aligned_images = (aligned_images - _min) / (_max - _min)
|
| 671 |
+
uncertainty /= _max - _min
|
| 672 |
+
|
| 673 |
+
return aligned_images, uncertainty
|
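A minimal usage sketch for the `MarigoldPipeline` above, assuming it is loaded as the `marigold_depth_estimation` community pipeline; the checkpoint id and file names are assumptions, so substitute the Marigold weights you actually use:

```python
import torch
from PIL import Image

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "prs-eth/marigold-v1-0",                   # assumed Marigold checkpoint id
    custom_pipeline="marigold_depth_estimation",
    torch_dtype=torch.float16,
).to("cuda")

image = Image.open("scene.jpg")                # placeholder input photo

output = pipe(
    image,
    denoising_steps=10,    # DDIM steps per single prediction
    ensemble_size=10,      # predictions aligned and merged by ensemble_depths
    processing_res=768,    # longest edge used during inference
    match_input_res=True,  # resize the prediction back to the input size
    color_map="Spectral",
)

depth = output.depth_np                         # float32 array, values in [0, 1]
output.depth_colored.save("depth_colored.png")  # colorized visualization
if output.uncertainty is not None:              # MAD across the ensemble
    print(float(output.uncertainty.mean()))
```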
v0.36.0/masked_stable_diffusion_img2img.py
ADDED
|
@@ -0,0 +1,262 @@
|
| 1 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import PIL.Image
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from diffusers import StableDiffusionImg2ImgPipeline
|
| 8 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class MaskedStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
|
| 12 |
+
debug_save = False
|
| 13 |
+
|
| 14 |
+
@torch.no_grad()
|
| 15 |
+
def __call__(
|
| 16 |
+
self,
|
| 17 |
+
prompt: Union[str, List[str]] = None,
|
| 18 |
+
image: Union[
|
| 19 |
+
torch.Tensor,
|
| 20 |
+
PIL.Image.Image,
|
| 21 |
+
np.ndarray,
|
| 22 |
+
List[torch.Tensor],
|
| 23 |
+
List[PIL.Image.Image],
|
| 24 |
+
List[np.ndarray],
|
| 25 |
+
] = None,
|
| 26 |
+
strength: float = 0.8,
|
| 27 |
+
num_inference_steps: Optional[int] = 50,
|
| 28 |
+
guidance_scale: Optional[float] = 7.5,
|
| 29 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 30 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 31 |
+
eta: Optional[float] = 0.0,
|
| 32 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 33 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 34 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 35 |
+
output_type: Optional[str] = "pil",
|
| 36 |
+
return_dict: bool = True,
|
| 37 |
+
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
|
| 38 |
+
callback_steps: int = 1,
|
| 39 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 40 |
+
mask: Union[
|
| 41 |
+
torch.Tensor,
|
| 42 |
+
PIL.Image.Image,
|
| 43 |
+
np.ndarray,
|
| 44 |
+
List[torch.Tensor],
|
| 45 |
+
List[PIL.Image.Image],
|
| 46 |
+
List[np.ndarray],
|
| 47 |
+
] = None,
|
| 48 |
+
):
|
| 49 |
+
r"""
|
| 50 |
+
The call function to the pipeline for generation.
|
| 51 |
+
|
| 52 |
+
Args:
|
| 53 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 54 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 55 |
+
image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
|
| 56 |
+
`Image` or tensor representing an image batch to be used as the starting point. Can also accept image
|
| 57 |
+
latents as `image`, but if passing latents directly it is not encoded again.
|
| 58 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 59 |
+
Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
|
| 60 |
+
starting point and more noise is added the higher the `strength`. The number of denoising steps depends
|
| 61 |
+
on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
|
| 62 |
+
process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
|
| 63 |
+
essentially ignores `image`.
|
| 64 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 65 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 66 |
+
expense of slower inference. This parameter is modulated by `strength`.
|
| 67 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 68 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 69 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 70 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 71 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 72 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 73 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 74 |
+
The number of images to generate per prompt.
|
| 75 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 76 |
+
Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies
|
| 77 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 78 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 79 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 80 |
+
generation deterministic.
|
| 81 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 82 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 83 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 84 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 85 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 86 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 87 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 88 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 89 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 90 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 91 |
+
plain tuple.
|
| 92 |
+
callback (`Callable`, *optional*):
|
| 93 |
+
A function that calls every `callback_steps` steps during inference. The function is called with the
|
| 94 |
+
following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
|
| 95 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 96 |
+
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
| 97 |
+
every step.
|
| 98 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 99 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 100 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 101 |
+
mask (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`, *optional*):
|
| 102 |
+
A mask with non-zero elements for the area to be inpainted. If not specified, no mask is applied.
|
| 103 |
+
Examples:
|
| 104 |
+
|
| 105 |
+
Returns:
|
| 106 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 107 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
|
| 108 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images and the
|
| 109 |
+
second element is a list of `bool`s indicating whether the corresponding generated image contains
|
| 110 |
+
"not-safe-for-work" (nsfw) content.
|
| 111 |
+
"""
|
| 112 |
+
# code adapted from parent class StableDiffusionImg2ImgPipeline
|
| 113 |
+
|
| 114 |
+
# 0. Check inputs. Raise error if not correct
|
| 115 |
+
self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
|
| 116 |
+
|
| 117 |
+
# 1. Define call parameters
|
| 118 |
+
if prompt is not None and isinstance(prompt, str):
|
| 119 |
+
batch_size = 1
|
| 120 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 121 |
+
batch_size = len(prompt)
|
| 122 |
+
else:
|
| 123 |
+
batch_size = prompt_embeds.shape[0]
|
| 124 |
+
device = self._execution_device
|
| 125 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 126 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 127 |
+
# corresponds to doing no classifier free guidance.
|
| 128 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 129 |
+
|
| 130 |
+
# 2. Encode input prompt
|
| 131 |
+
text_encoder_lora_scale = (
|
| 132 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 133 |
+
)
|
| 134 |
+
prompt_embeds = self._encode_prompt(
|
| 135 |
+
prompt,
|
| 136 |
+
device,
|
| 137 |
+
num_images_per_prompt,
|
| 138 |
+
do_classifier_free_guidance,
|
| 139 |
+
negative_prompt,
|
| 140 |
+
prompt_embeds=prompt_embeds,
|
| 141 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 142 |
+
lora_scale=text_encoder_lora_scale,
|
| 143 |
+
)
|
| 144 |
+
|
| 145 |
+
# 3. Preprocess image
|
| 146 |
+
image = self.image_processor.preprocess(image)
|
| 147 |
+
|
| 148 |
+
# 4. set timesteps
|
| 149 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 150 |
+
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
|
| 151 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 152 |
+
|
| 153 |
+
# 5. Prepare latent variables
|
| 154 |
+
# it is sampled from the latent distribution of the VAE
|
| 155 |
+
latents = self.prepare_latents(
|
| 156 |
+
image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
# mean of the latent distribution
|
| 160 |
+
init_latents = [
|
| 161 |
+
self.vae.encode(image.to(device=device, dtype=prompt_embeds.dtype)[i : i + 1]).latent_dist.mean
|
| 162 |
+
for i in range(batch_size)
|
| 163 |
+
]
|
| 164 |
+
init_latents = torch.cat(init_latents, dim=0)
|
| 165 |
+
|
| 166 |
+
# 6. create latent mask
|
| 167 |
+
latent_mask = self._make_latent_mask(latents, mask)
|
| 168 |
+
|
| 169 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 170 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 171 |
+
|
| 172 |
+
# 8. Denoising loop
|
| 173 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 174 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 175 |
+
for i, t in enumerate(timesteps):
|
| 176 |
+
# expand the latents if we are doing classifier free guidance
|
| 177 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 178 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 179 |
+
|
| 180 |
+
# predict the noise residual
|
| 181 |
+
noise_pred = self.unet(
|
| 182 |
+
latent_model_input,
|
| 183 |
+
t,
|
| 184 |
+
encoder_hidden_states=prompt_embeds,
|
| 185 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 186 |
+
return_dict=False,
|
| 187 |
+
)[0]
|
| 188 |
+
|
| 189 |
+
# perform guidance
|
| 190 |
+
if do_classifier_free_guidance:
|
| 191 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 192 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 193 |
+
|
| 194 |
+
if latent_mask is not None:
|
| 195 |
+
latents = torch.lerp(init_latents * self.vae.config.scaling_factor, latents, latent_mask)
|
| 196 |
+
noise_pred = torch.lerp(torch.zeros_like(noise_pred), noise_pred, latent_mask)
|
| 197 |
+
|
| 198 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 199 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 200 |
+
|
| 201 |
+
# call the callback, if provided
|
| 202 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 203 |
+
progress_bar.update()
|
| 204 |
+
if callback is not None and i % callback_steps == 0:
|
| 205 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 206 |
+
callback(step_idx, t, latents)
|
| 207 |
+
|
| 208 |
+
if not output_type == "latent":
|
| 209 |
+
scaled = latents / self.vae.config.scaling_factor
|
| 210 |
+
if latent_mask is not None:
|
| 211 |
+
# scaled = latents / self.vae.config.scaling_factor * latent_mask + init_latents * (1 - latent_mask)
|
| 212 |
+
scaled = torch.lerp(init_latents, scaled, latent_mask)
|
| 213 |
+
image = self.vae.decode(scaled, return_dict=False)[0]
|
| 214 |
+
if self.debug_save:
|
| 215 |
+
image_gen = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 216 |
+
image_gen = self.image_processor.postprocess(image_gen, output_type=output_type, do_denormalize=[True])
|
| 217 |
+
image_gen[0].save("from_latent.png")
|
| 218 |
+
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
| 219 |
+
else:
|
| 220 |
+
image = latents
|
| 221 |
+
has_nsfw_concept = None
|
| 222 |
+
|
| 223 |
+
if has_nsfw_concept is None:
|
| 224 |
+
do_denormalize = [True] * image.shape[0]
|
| 225 |
+
else:
|
| 226 |
+
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
|
| 227 |
+
|
| 228 |
+
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
|
| 229 |
+
|
| 230 |
+
# Offload last model to CPU
|
| 231 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 232 |
+
self.final_offload_hook.offload()
|
| 233 |
+
|
| 234 |
+
if not return_dict:
|
| 235 |
+
return (image, has_nsfw_concept)
|
| 236 |
+
|
| 237 |
+
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
| 238 |
+
|
| 239 |
+
def _make_latent_mask(self, latents, mask):
|
| 240 |
+
if mask is not None:
|
| 241 |
+
latent_mask = []
|
| 242 |
+
if not isinstance(mask, list):
|
| 243 |
+
tmp_mask = [mask]
|
| 244 |
+
else:
|
| 245 |
+
tmp_mask = mask
|
| 246 |
+
_, l_channels, l_height, l_width = latents.shape
|
| 247 |
+
for m in tmp_mask:
|
| 248 |
+
if not isinstance(m, PIL.Image.Image):
|
| 249 |
+
if len(m.shape) == 2:
|
| 250 |
+
m = m[..., np.newaxis]
|
| 251 |
+
if m.max() > 1:
|
| 252 |
+
m = m / 255.0
|
| 253 |
+
m = self.image_processor.numpy_to_pil(m)[0]
|
| 254 |
+
if m.mode != "L":
|
| 255 |
+
m = m.convert("L")
|
| 256 |
+
resized = self.image_processor.resize(m, l_height, l_width)
|
| 257 |
+
if self.debug_save:
|
| 258 |
+
resized.save("latent_mask.png")
|
| 259 |
+
latent_mask.append(np.repeat(np.array(resized)[np.newaxis, :, :], l_channels, axis=0))
|
| 260 |
+
latent_mask = torch.as_tensor(np.stack(latent_mask)).to(latents)
|
| 261 |
+
latent_mask = latent_mask / latent_mask.max()
|
| 262 |
+
return latent_mask
|
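A minimal usage sketch for the `MaskedStableDiffusionImg2ImgPipeline` above, assuming it is loaded as the `masked_stable_diffusion_img2img` community pipeline; the checkpoint id and file names are placeholders:

```python
import numpy as np
import torch
from PIL import Image

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",        # placeholder checkpoint
    custom_pipeline="masked_stable_diffusion_img2img",
    torch_dtype=torch.float16,
).to("cuda")

image = Image.open("room.png").convert("RGB")              # placeholder inputs
mask = np.array(Image.open("mask.png").convert("L"))       # non-zero = area to regenerate

result = pipe(
    prompt="a red sofa",
    image=image,
    mask=mask,             # blended into the latents at every step via torch.lerp above
    strength=0.75,
    num_inference_steps=50,
    guidance_scale=7.5,
)
result.images[0].save("masked_img2img.png")
```

Because the unmasked region is re-injected from `init_latents` at every denoising step (and again before decoding), the area outside the mask stays close to the original image rather than being pasted back in pixel space.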
v0.36.0/masked_stable_diffusion_xl_img2img.py
ADDED
|
@@ -0,0 +1,682 @@
|
| 1 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
from PIL import Image, ImageFilter
|
| 6 |
+
|
| 7 |
+
from diffusers.image_processor import PipelineImageInput
|
| 8 |
+
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
|
| 9 |
+
from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img import (
|
| 10 |
+
StableDiffusionXLImg2ImgPipeline,
|
| 11 |
+
rescale_noise_cfg,
|
| 12 |
+
retrieve_latents,
|
| 13 |
+
retrieve_timesteps,
|
| 14 |
+
)
|
| 15 |
+
from diffusers.utils import (
|
| 16 |
+
deprecate,
|
| 17 |
+
is_torch_xla_available,
|
| 18 |
+
logging,
|
| 19 |
+
)
|
| 20 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
if is_torch_xla_available():
|
| 24 |
+
import torch_xla.core.xla_model as xm
|
| 25 |
+
|
| 26 |
+
XLA_AVAILABLE = True
|
| 27 |
+
else:
|
| 28 |
+
XLA_AVAILABLE = False
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class MaskedStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
|
| 35 |
+
debug_save = 0
|
| 36 |
+
|
| 37 |
+
@torch.no_grad()
|
| 38 |
+
def __call__(
|
| 39 |
+
self,
|
| 40 |
+
prompt: Union[str, List[str]] = None,
|
| 41 |
+
prompt_2: Optional[Union[str, List[str]]] = None,
|
| 42 |
+
image: PipelineImageInput = None,
|
| 43 |
+
original_image: PipelineImageInput = None,
|
| 44 |
+
strength: float = 0.3,
|
| 45 |
+
num_inference_steps: Optional[int] = 50,
|
| 46 |
+
timesteps: List[int] = None,
|
| 47 |
+
denoising_start: Optional[float] = None,
|
| 48 |
+
denoising_end: Optional[float] = None,
|
| 49 |
+
guidance_scale: Optional[float] = 5.0,
|
| 50 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 51 |
+
negative_prompt_2: Optional[Union[str, List[str]]] = None,
|
| 52 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 53 |
+
eta: Optional[float] = 0.0,
|
| 54 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 55 |
+
latents: Optional[torch.FloatTensor] = None,
|
| 56 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 57 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 58 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 59 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 60 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 61 |
+
ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
|
| 62 |
+
output_type: Optional[str] = "pil",
|
| 63 |
+
return_dict: bool = True,
|
| 64 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 65 |
+
guidance_rescale: float = 0.0,
|
| 66 |
+
original_size: Tuple[int, int] = None,
|
| 67 |
+
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 68 |
+
target_size: Tuple[int, int] = None,
|
| 69 |
+
negative_original_size: Optional[Tuple[int, int]] = None,
|
| 70 |
+
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 71 |
+
negative_target_size: Optional[Tuple[int, int]] = None,
|
| 72 |
+
aesthetic_score: float = 6.0,
|
| 73 |
+
negative_aesthetic_score: float = 2.5,
|
| 74 |
+
clip_skip: Optional[int] = None,
|
| 75 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 76 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 77 |
+
mask: Union[
|
| 78 |
+
torch.FloatTensor,
|
| 79 |
+
Image.Image,
|
| 80 |
+
np.ndarray,
|
| 81 |
+
List[torch.FloatTensor],
|
| 82 |
+
List[Image.Image],
|
| 83 |
+
List[np.ndarray],
|
| 84 |
+
] = None,
|
| 85 |
+
blur=24,
|
| 86 |
+
blur_compose=4,
|
| 87 |
+
sample_mode="sample",
|
| 88 |
+
**kwargs,
|
| 89 |
+
):
|
| 90 |
+
r"""
|
| 91 |
+
The call function to the pipeline for generation.
|
| 92 |
+
|
| 93 |
+
Args:
|
| 94 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 95 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 96 |
+
image (`PipelineImageInput`):
|
| 97 |
+
`Image` or tensor representing an image batch to be used as the starting point. This image may already have the edit painted over it; in that case the mask is derived from its difference with `original_image`.
|
| 98 |
+
original_image (`PipelineImageInput`, *optional*):
|
| 99 |
+
`Image` or tensor representing the unedited original image; it is used to derive the mask (where it differs from `image`) and for blending with the result.
|
| 100 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 101 |
+
Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
|
| 102 |
+
starting point and more noise is added the higher the `strength`. The number of denoising steps depends
|
| 103 |
+
on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
|
| 104 |
+
process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
|
| 105 |
+
essentially ignores `image`.
|
| 106 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 107 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 108 |
+
expense of slower inference. This parameter is modulated by `strength`.
|
| 109 |
+
guidance_scale (`float`, *optional*, defaults to 5.0):
|
| 110 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 111 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 112 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 113 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 114 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 115 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 116 |
+
The number of images to generate per prompt.
|
| 117 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 118 |
+
Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies
|
| 119 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 120 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 121 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 122 |
+
generation deterministic.
|
| 123 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 124 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 125 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 126 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 127 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 128 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 129 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 130 |
+
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
|
| 131 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 132 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 133 |
+
plain tuple.
|
| 134 |
+
callback (`Callable`, *optional*):
|
| 135 |
+
A function called every `callback_steps` steps during inference (deprecated; use `callback_on_step_end`). The function is called with the
|
| 136 |
+
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
| 137 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 138 |
+
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
| 139 |
+
every step.
|
| 140 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 141 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 142 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 143 |
+
blur (`int`, *optional*):
|
| 144 |
+
Gaussian blur radius applied to the mask before it is converted to a latent-space mask (defaults to 24).
|
| 145 |
+
blur_compose (`int`, *optional*):
|
| 146 |
+
Gaussian blur radius applied to the mask used to composite the original image with the generated result (defaults to 4).
|
| 147 |
+
mask (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`, *optional*):
|
| 148 |
+
A mask with non-zero elements for the area to be inpainted. If not specified, no mask is applied.
|
| 149 |
+
sample_mode (`str`, *optional*):
|
| 150 |
+
controls latent initialisation for the inpainted area; one of `sample`, `argmax` or `random`
|
| 151 |
+
Examples:
|
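Illustrative usage (the checkpoint id and file names are assumptions, not requirements of this pipeline):

```py
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from PIL import Image

>>> pipe = DiffusionPipeline.from_pretrained(
...     "stabilityai/stable-diffusion-xl-base-1.0",
...     custom_pipeline="masked_stable_diffusion_xl_img2img",
...     torch_dtype=torch.float16,
... ).to("cuda")
>>> original = Image.open("original.png").convert("RGB")
>>> painted = Image.open("painted_over.png").convert("RGB")  # original with the edit painted on top
>>> result = pipe(
...     prompt="a red sofa", image=painted, original_image=original, strength=0.75
... ).images[0]
```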
| 152 |
+
|
| 153 |
+
Returns:
|
| 154 |
+
[`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
|
| 155 |
+
If `return_dict` is `True`, [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] is returned,
|
| 156 |
+
otherwise a `tuple` is returned where the first element is a list with the generated images.
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
"""
|
| 160 |
+
# code adapted from parent class StableDiffusionXLImg2ImgPipeline
|
| 161 |
+
callback = kwargs.pop("callback", None)
|
| 162 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 163 |
+
|
| 164 |
+
if callback is not None:
|
| 165 |
+
deprecate(
|
| 166 |
+
"callback",
|
| 167 |
+
"1.0.0",
|
| 168 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
|
| 169 |
+
)
|
| 170 |
+
if callback_steps is not None:
|
| 171 |
+
deprecate(
|
| 172 |
+
"callback_steps",
|
| 173 |
+
"1.0.0",
|
| 174 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
|
| 175 |
+
)
|
| 176 |
+
|
| 177 |
+
# 0. Check inputs. Raise error if not correct
|
| 178 |
+
self.check_inputs(
|
| 179 |
+
prompt,
|
| 180 |
+
prompt_2,
|
| 181 |
+
strength,
|
| 182 |
+
num_inference_steps,
|
| 183 |
+
callback_steps,
|
| 184 |
+
negative_prompt,
|
| 185 |
+
negative_prompt_2,
|
| 186 |
+
prompt_embeds,
|
| 187 |
+
negative_prompt_embeds,
|
| 188 |
+
ip_adapter_image,
|
| 189 |
+
ip_adapter_image_embeds,
|
| 190 |
+
callback_on_step_end_tensor_inputs,
|
| 191 |
+
)
|
| 192 |
+
|
| 193 |
+
self._guidance_scale = guidance_scale
|
| 194 |
+
self._guidance_rescale = guidance_rescale
|
| 195 |
+
self._clip_skip = clip_skip
|
| 196 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 197 |
+
self._denoising_end = denoising_end
|
| 198 |
+
self._denoising_start = denoising_start
|
| 199 |
+
self._interrupt = False
|
| 200 |
+
|
| 201 |
+
# 1. Define call parameters
|
| 202 |
+
# mask is computed from difference between image and original_image
|
| 203 |
+
if image is not None:
|
| 204 |
+
neq = np.any(np.array(original_image) != np.array(image), axis=-1)
|
| 205 |
+
mask = neq.astype(np.uint8) * 255
|
| 206 |
+
else:
|
| 207 |
+
assert mask is not None
|
| 208 |
+
|
| 209 |
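# Illustrative derivation of the automatic mask above (file names are assumptions):
#   original = np.array(Image.open("original.png").convert("RGB"))
#   painted = np.array(Image.open("painted_over.png").convert("RGB"))
#   mask = np.any(original != painted, axis=-1).astype(np.uint8) * 255   # HxW, values 0 or 255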
+
if not isinstance(mask, Image.Image):
|
| 210 |
+
pil_mask = Image.fromarray(mask)
|
| 211 |
+
if pil_mask.mode != "L":
|
| 212 |
+
pil_mask = pil_mask.convert("L")
|
| 213 |
+
mask_blur = self.blur_mask(pil_mask, blur)
|
| 214 |
+
mask_compose = self.blur_mask(pil_mask, blur_compose)
|
| 215 |
+
if original_image is None:
|
| 216 |
+
original_image = image
|
| 217 |
+
if prompt is not None and isinstance(prompt, str):
|
| 218 |
+
batch_size = 1
|
| 219 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 220 |
+
batch_size = len(prompt)
|
| 221 |
+
else:
|
| 222 |
+
batch_size = prompt_embeds.shape[0]
|
| 223 |
+
|
| 224 |
+
device = self._execution_device
|
| 225 |
+
|
| 226 |
+
# 2. Encode input prompt
|
| 227 |
+
text_encoder_lora_scale = (
|
| 228 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 229 |
+
)
|
| 230 |
+
(
|
| 231 |
+
prompt_embeds,
|
| 232 |
+
negative_prompt_embeds,
|
| 233 |
+
pooled_prompt_embeds,
|
| 234 |
+
negative_pooled_prompt_embeds,
|
| 235 |
+
) = self.encode_prompt(
|
| 236 |
+
prompt=prompt,
|
| 237 |
+
prompt_2=prompt_2,
|
| 238 |
+
device=device,
|
| 239 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 240 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 241 |
+
negative_prompt=negative_prompt,
|
| 242 |
+
negative_prompt_2=negative_prompt_2,
|
| 243 |
+
prompt_embeds=prompt_embeds,
|
| 244 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 245 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 246 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 247 |
+
lora_scale=text_encoder_lora_scale,
|
| 248 |
+
clip_skip=self.clip_skip,
|
| 249 |
+
)
|
| 250 |
+
|
| 251 |
+
# 3. Preprocess image
|
| 252 |
+
input_image = image if image is not None else original_image
|
| 253 |
+
image = self.image_processor.preprocess(input_image)
|
| 254 |
+
original_image = self.image_processor.preprocess(original_image)
|
| 255 |
+
|
| 256 |
+
# 4. set timesteps
|
| 257 |
+
def denoising_value_valid(dnv):
|
| 258 |
+
return isinstance(dnv, float) and 0 < dnv < 1
|
| 259 |
+
|
| 260 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
| 261 |
+
timesteps, num_inference_steps = self.get_timesteps(
|
| 262 |
+
num_inference_steps,
|
| 263 |
+
strength,
|
| 264 |
+
device,
|
| 265 |
+
denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None,
|
| 266 |
+
)
|
| 267 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 268 |
+
|
| 269 |
+
add_noise = True if self.denoising_start is None else False
|
| 270 |
+
|
| 271 |
+
# 5. Prepare latent variables
|
| 272 |
+
# It is sampled from the latent distribution of the VAE
|
| 273 |
+
# that's what we repaint
|
| 274 |
+
latents = self.prepare_latents(
|
| 275 |
+
image,
|
| 276 |
+
latent_timestep,
|
| 277 |
+
batch_size,
|
| 278 |
+
num_images_per_prompt,
|
| 279 |
+
prompt_embeds.dtype,
|
| 280 |
+
device,
|
| 281 |
+
generator,
|
| 282 |
+
add_noise,
|
| 283 |
+
sample_mode=sample_mode,
|
| 284 |
+
)
|
| 285 |
+
|
| 286 |
+
# mean of the latent distribution
|
| 287 |
+
# it is multiplied by self.vae.config.scaling_factor
|
| 288 |
+
non_paint_latents = self.prepare_latents(
|
| 289 |
+
original_image,
|
| 290 |
+
latent_timestep,
|
| 291 |
+
batch_size,
|
| 292 |
+
num_images_per_prompt,
|
| 293 |
+
prompt_embeds.dtype,
|
| 294 |
+
device,
|
| 295 |
+
generator,
|
| 296 |
+
add_noise=False,
|
| 297 |
+
sample_mode="argmax",
|
| 298 |
+
)
|
| 299 |
+
|
| 300 |
+
if self.debug_save:
|
| 301 |
+
init_img_from_latents = self.latents_to_img(non_paint_latents)
|
| 302 |
+
init_img_from_latents[0].save("non_paint_latents.png")
|
| 303 |
+
# 6. create latent mask
|
| 304 |
+
latent_mask = self._make_latent_mask(latents, mask)
|
| 305 |
+
|
| 306 |
+
# 7. Prepare extra step kwargs.
|
| 307 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 308 |
+
|
| 309 |
+
height, width = latents.shape[-2:]
|
| 310 |
+
height = height * self.vae_scale_factor
|
| 311 |
+
width = width * self.vae_scale_factor
|
| 312 |
+
|
| 313 |
+
original_size = original_size or (height, width)
|
| 314 |
+
target_size = target_size or (height, width)
|
| 315 |
+
|
| 316 |
+
# 8. Prepare added time ids & embeddings
|
| 317 |
+
if negative_original_size is None:
|
| 318 |
+
negative_original_size = original_size
|
| 319 |
+
if negative_target_size is None:
|
| 320 |
+
negative_target_size = target_size
|
| 321 |
+
|
| 322 |
+
add_text_embeds = pooled_prompt_embeds
|
| 323 |
+
if self.text_encoder_2 is None:
|
| 324 |
+
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
|
| 325 |
+
else:
|
| 326 |
+
text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
|
| 327 |
+
|
| 328 |
+
add_time_ids, add_neg_time_ids = self._get_add_time_ids(
|
| 329 |
+
original_size,
|
| 330 |
+
crops_coords_top_left,
|
| 331 |
+
target_size,
|
| 332 |
+
aesthetic_score,
|
| 333 |
+
negative_aesthetic_score,
|
| 334 |
+
negative_original_size,
|
| 335 |
+
negative_crops_coords_top_left,
|
| 336 |
+
negative_target_size,
|
| 337 |
+
dtype=prompt_embeds.dtype,
|
| 338 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 339 |
+
)
|
| 340 |
+
add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
|
| 341 |
+
|
| 342 |
+
if self.do_classifier_free_guidance:
|
| 343 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 344 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 345 |
+
add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
|
| 346 |
+
add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
|
| 347 |
+
|
| 348 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 349 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 350 |
+
add_time_ids = add_time_ids.to(device)
|
| 351 |
+
|
| 352 |
+
if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
|
| 353 |
+
image_embeds = self.prepare_ip_adapter_image_embeds(
|
| 354 |
+
ip_adapter_image,
|
| 355 |
+
ip_adapter_image_embeds,
|
| 356 |
+
device,
|
| 357 |
+
batch_size * num_images_per_prompt,
|
| 358 |
+
self.do_classifier_free_guidance,
|
| 359 |
+
)
|
| 360 |
+
|
| 361 |
+
# 10. Denoising loop
|
| 362 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 363 |
+
|
| 364 |
+
# 10.1 Apply denoising_end
|
| 365 |
+
if (
|
| 366 |
+
self.denoising_end is not None
|
| 367 |
+
and self.denoising_start is not None
|
| 368 |
+
and denoising_value_valid(self.denoising_end)
|
| 369 |
+
and denoising_value_valid(self.denoising_start)
|
| 370 |
+
and self.denoising_start >= self.denoising_end
|
| 371 |
+
):
|
| 372 |
+
raise ValueError(
|
| 373 |
+
f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: "
|
| 374 |
+
+ f" {self.denoising_end} when using type float."
|
| 375 |
+
)
|
| 376 |
+
elif self.denoising_end is not None and denoising_value_valid(self.denoising_end):
|
| 377 |
+
discrete_timestep_cutoff = int(
|
| 378 |
+
round(
|
| 379 |
+
self.scheduler.config.num_train_timesteps
|
| 380 |
+
- (self.denoising_end * self.scheduler.config.num_train_timesteps)
|
| 381 |
+
)
|
| 382 |
+
)
|
| 383 |
+
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
|
| 384 |
+
timesteps = timesteps[:num_inference_steps]
|
| 385 |
+
|
| 386 |
+
# 10.2 Optionally get Guidance Scale Embedding
|
| 387 |
+
timestep_cond = None
|
| 388 |
+
if self.unet.config.time_cond_proj_dim is not None:
|
| 389 |
+
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
|
| 390 |
+
timestep_cond = self.get_guidance_scale_embedding(
|
| 391 |
+
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
|
| 392 |
+
).to(device=device, dtype=latents.dtype)
|
| 393 |
+
|
| 394 |
+
self._num_timesteps = len(timesteps)
|
| 395 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 396 |
+
for i, t in enumerate(timesteps):
|
| 397 |
+
if self.interrupt:
|
| 398 |
+
continue
|
| 399 |
+
|
| 400 |
+
shape = non_paint_latents.shape
|
| 401 |
+
noise = randn_tensor(shape, generator=generator, device=device, dtype=latents.dtype)
|
| 402 |
+
# noisy latent code of input image at current step
|
| 403 |
+
orig_latents_t = non_paint_latents
|
| 404 |
+
orig_latents_t = self.scheduler.add_noise(non_paint_latents, noise, t.unsqueeze(0))
|
| 405 |
+
|
| 406 |
+
# latents = orig_latents_t * (1 - latent_mask) + latents * latent_mask
|
| 407 |
+
latents = torch.lerp(orig_latents_t, latents, latent_mask)
|
| 408 |
+
|
| 409 |
+
if self.debug_save:
|
| 410 |
+
img1 = self.latents_to_img(latents)
|
| 411 |
+
t_str = str(t.int().item())
|
| 412 |
+
for _ in range(3 - len(t_str)):  # use "_" so the outer denoising-loop index i is not shadowed
|
| 413 |
+
t_str = "0" + t_str
|
| 414 |
+
img1[0].save(f"step{t_str}.png")
|
| 415 |
+
|
| 416 |
+
# expand the latents if we are doing classifier free guidance
|
| 417 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 418 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 419 |
+
|
| 420 |
+
# predict the noise residual
|
| 421 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
|
| 422 |
+
if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
|
| 423 |
+
added_cond_kwargs["image_embeds"] = image_embeds
|
| 424 |
+
|
| 425 |
+
noise_pred = self.unet(
|
| 426 |
+
latent_model_input,
|
| 427 |
+
t,
|
| 428 |
+
encoder_hidden_states=prompt_embeds,
|
| 429 |
+
timestep_cond=timestep_cond,
|
| 430 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 431 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 432 |
+
return_dict=False,
|
| 433 |
+
)[0]
|
| 434 |
+
|
| 435 |
+
# perform guidance
|
| 436 |
+
if self.do_classifier_free_guidance:
|
| 437 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 438 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 439 |
+
|
| 440 |
+
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
|
| 441 |
+
# Based on 3.4. in https://huggingface.co/papers/2305.08891
|
| 442 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
|
| 443 |
+
|
| 444 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 445 |
+
latents_dtype = latents.dtype
|
| 446 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 447 |
+
|
| 448 |
+
if latents.dtype != latents_dtype:
|
| 449 |
+
if torch.backends.mps.is_available():
|
| 450 |
+
# some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
|
| 451 |
+
latents = latents.to(latents_dtype)
|
| 452 |
+
|
| 453 |
+
if callback_on_step_end is not None:
|
| 454 |
+
callback_kwargs = {}
|
| 455 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 456 |
+
callback_kwargs[k] = locals()[k]
|
| 457 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 458 |
+
|
| 459 |
+
latents = callback_outputs.pop("latents", latents)
|
| 460 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 461 |
+
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
| 462 |
+
add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
|
| 463 |
+
negative_pooled_prompt_embeds = callback_outputs.pop(
|
| 464 |
+
"negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
|
| 465 |
+
)
|
| 466 |
+
add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
|
| 467 |
+
add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids)
|
| 468 |
+
|
| 469 |
+
# call the callback, if provided
|
| 470 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 471 |
+
progress_bar.update()
|
| 472 |
+
if callback is not None and i % callback_steps == 0:
|
| 473 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 474 |
+
callback(step_idx, t, latents)
|
| 475 |
+
|
| 476 |
+
if XLA_AVAILABLE:
|
| 477 |
+
xm.mark_step()
|
| 478 |
+
|
| 479 |
+
if not output_type == "latent":
|
| 480 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 481 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 482 |
+
|
| 483 |
+
if needs_upcasting:
|
| 484 |
+
self.upcast_vae()
|
| 485 |
+
elif latents.dtype != self.vae.dtype:
|
| 486 |
+
if torch.backends.mps.is_available():
|
| 487 |
+
# some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
|
| 488 |
+
self.vae = self.vae.to(latents.dtype)
|
| 489 |
+
|
| 490 |
+
if self.debug_save:
|
| 491 |
+
image_gen = self.latents_to_img(latents)
|
| 492 |
+
image_gen[0].save("from_latent.png")
|
| 493 |
+
|
| 494 |
+
if latent_mask is not None:
|
| 495 |
+
# interpolate with latent mask
|
| 496 |
+
latents = torch.lerp(non_paint_latents, latents, latent_mask)
|
| 497 |
+
|
| 498 |
+
latents = self.denormalize(latents)
|
| 499 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 500 |
+
m = mask_compose.permute(2, 0, 1).unsqueeze(0).to(image)
|
| 501 |
+
img_compose = m * image + (1 - m) * original_image.to(image)
|
| 502 |
+
image = img_compose
|
| 503 |
+
# cast back to fp16 if needed
|
| 504 |
+
if needs_upcasting:
|
| 505 |
+
self.vae.to(dtype=torch.float16)
|
| 506 |
+
else:
|
| 507 |
+
image = latents
|
| 508 |
+
|
| 509 |
+
# apply watermark if available
|
| 510 |
+
if self.watermark is not None:
|
| 511 |
+
image = self.watermark.apply_watermark(image)
|
| 512 |
+
|
| 513 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 514 |
+
|
| 515 |
+
# Offload all models
|
| 516 |
+
self.maybe_free_model_hooks()
|
| 517 |
+
|
| 518 |
+
if not return_dict:
|
| 519 |
+
return (image,)
|
| 520 |
+
|
| 521 |
+
return StableDiffusionXLPipelineOutput(images=image)
|
| 522 |
+
|
| 523 |
+
def _make_latent_mask(self, latents, mask):
|
| 524 |
+
if mask is not None:
|
| 525 |
+
latent_mask = []
|
| 526 |
+
if not isinstance(mask, list):
|
| 527 |
+
tmp_mask = [mask]
|
| 528 |
+
else:
|
| 529 |
+
tmp_mask = mask
|
| 530 |
+
_, l_channels, l_height, l_width = latents.shape
|
| 531 |
+
for m in tmp_mask:
|
| 532 |
+
if not isinstance(m, Image.Image):
|
| 533 |
+
if len(m.shape) == 2:
|
| 534 |
+
m = m[..., np.newaxis]
|
| 535 |
+
if m.max() > 1:
|
| 536 |
+
m = m / 255.0
|
| 537 |
+
m = self.image_processor.numpy_to_pil(m)[0]
|
| 538 |
+
if m.mode != "L":
|
| 539 |
+
m = m.convert("L")
|
| 540 |
+
resized = self.image_processor.resize(m, l_height, l_width)
|
| 541 |
+
if self.debug_save:
|
| 542 |
+
resized.save("latent_mask.png")
|
| 543 |
+
latent_mask.append(np.repeat(np.array(resized)[np.newaxis, :, :], l_channels, axis=0))
|
| 544 |
+
latent_mask = torch.as_tensor(np.stack(latent_mask)).to(latents)
|
| 545 |
+
latent_mask = latent_mask / max(latent_mask.max(), 1)
|
| 546 |
+
return latent_mask
|
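# Shape bookkeeping (illustrative): for a 1024x1024 input with the SDXL VAE
# (vae_scale_factor == 8, 4 latent channels) the latents are (1, 4, 128, 128), so each
# mask is resized to 128x128, repeated over the 4 channels and normalised to [0, 1]:
#   mask_arr = np.array(resized)                          # (128, 128)
#   channel_mask = np.repeat(mask_arr[None], 4, axis=0)   # (4, 128, 128)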
| 547 |
+
|
| 548 |
+
def prepare_latents(
|
| 549 |
+
self,
|
| 550 |
+
image,
|
| 551 |
+
timestep,
|
| 552 |
+
batch_size,
|
| 553 |
+
num_images_per_prompt,
|
| 554 |
+
dtype,
|
| 555 |
+
device,
|
| 556 |
+
generator=None,
|
| 557 |
+
add_noise=True,
|
| 558 |
+
sample_mode: str = "sample",
|
| 559 |
+
):
|
| 560 |
+
if not isinstance(image, (torch.Tensor, Image.Image, list)):
|
| 561 |
+
raise ValueError(
|
| 562 |
+
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
|
| 563 |
+
)
|
| 564 |
+
|
| 565 |
+
# Offload text encoder if `enable_model_cpu_offload` was enabled
|
| 566 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 567 |
+
self.text_encoder_2.to("cpu")
|
| 568 |
+
torch.cuda.empty_cache()
|
| 569 |
+
|
| 570 |
+
image = image.to(device=device, dtype=dtype)
|
| 571 |
+
|
| 572 |
+
batch_size = batch_size * num_images_per_prompt
|
| 573 |
+
|
| 574 |
+
if image.shape[1] == 4:
|
| 575 |
+
init_latents = image
|
| 576 |
+
elif sample_mode == "random":
|
| 577 |
+
height, width = image.shape[-2:]
|
| 578 |
+
num_channels_latents = self.unet.config.in_channels
|
| 579 |
+
latents = self.random_latents(
|
| 580 |
+
batch_size,
|
| 581 |
+
num_channels_latents,
|
| 582 |
+
height,
|
| 583 |
+
width,
|
| 584 |
+
dtype,
|
| 585 |
+
device,
|
| 586 |
+
generator,
|
| 587 |
+
)
|
| 588 |
+
return self.vae.config.scaling_factor * latents
|
| 589 |
+
else:
|
| 590 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 591 |
+
if self.vae.config.force_upcast:
|
| 592 |
+
image = image.float()
|
| 593 |
+
self.vae.to(dtype=torch.float32)
|
| 594 |
+
|
| 595 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 596 |
+
raise ValueError(
|
| 597 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 598 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 599 |
+
)
|
| 600 |
+
|
| 601 |
+
elif isinstance(generator, list):
|
| 602 |
+
init_latents = [
|
| 603 |
+
retrieve_latents(
|
| 604 |
+
self.vae.encode(image[i : i + 1]), generator=generator[i], sample_mode=sample_mode
|
| 605 |
+
)
|
| 606 |
+
for i in range(batch_size)
|
| 607 |
+
]
|
| 608 |
+
init_latents = torch.cat(init_latents, dim=0)
|
| 609 |
+
else:
|
| 610 |
+
init_latents = retrieve_latents(self.vae.encode(image), generator=generator, sample_mode=sample_mode)
|
| 611 |
+
|
| 612 |
+
if self.vae.config.force_upcast:
|
| 613 |
+
self.vae.to(dtype)
|
| 614 |
+
|
| 615 |
+
init_latents = init_latents.to(dtype)
|
| 616 |
+
init_latents = self.vae.config.scaling_factor * init_latents
|
| 617 |
+
|
| 618 |
+
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
|
| 619 |
+
# expand init_latents for batch_size
|
| 620 |
+
additional_image_per_prompt = batch_size // init_latents.shape[0]
|
| 621 |
+
init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
|
| 622 |
+
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
|
| 623 |
+
raise ValueError(
|
| 624 |
+
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
|
| 625 |
+
)
|
| 626 |
+
else:
|
| 627 |
+
init_latents = torch.cat([init_latents], dim=0)
|
| 628 |
+
|
| 629 |
+
if add_noise:
|
| 630 |
+
shape = init_latents.shape
|
| 631 |
+
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 632 |
+
# get latents
|
| 633 |
+
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
| 634 |
+
|
| 635 |
+
latents = init_latents
|
| 636 |
+
|
| 637 |
+
return latents
|
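# Rough re-statement of what `retrieve_latents` does with `sample_mode` (simplified,
# for reference only; see diffusers' implementation for the exact behaviour):
#   def retrieve_latents_sketch(encoder_output, generator=None, sample_mode="sample"):
#       if sample_mode == "sample":
#           return encoder_output.latent_dist.sample(generator)  # stochastic draw (repainted area)
#       if sample_mode == "argmax":
#           return encoder_output.latent_dist.mode()  # distribution mean (preserved area)
#       raise ValueError(f"unknown sample_mode: {sample_mode}")
# The extra "random" mode handled above bypasses the VAE entirely via `random_latents`.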
| 638 |
+
|
| 639 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 640 |
+
def random_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 641 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 642 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 643 |
+
raise ValueError(
|
| 644 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 645 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 646 |
+
)
|
| 647 |
+
|
| 648 |
+
if latents is None:
|
| 649 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 650 |
+
else:
|
| 651 |
+
latents = latents.to(device)
|
| 652 |
+
|
| 653 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 654 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 655 |
+
return latents
|
| 656 |
+
|
| 657 |
+
def denormalize(self, latents):
|
| 658 |
+
# unscale/denormalize the latents
|
| 659 |
+
# denormalize with the mean and std if available and not None
|
| 660 |
+
has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
|
| 661 |
+
has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
|
| 662 |
+
if has_latents_mean and has_latents_std:
|
| 663 |
+
latents_mean = (
|
| 664 |
+
torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
|
| 665 |
+
)
|
| 666 |
+
latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
|
| 667 |
+
latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
|
| 668 |
+
else:
|
| 669 |
+
latents = latents / self.vae.config.scaling_factor
|
| 670 |
+
|
| 671 |
+
return latents
|
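# This undoes the scaling applied at encode time:
#   encode: z = (x - latents_mean) * scaling_factor / latents_std   (when mean/std are configured)
#   decode: x = z * latents_std / scaling_factor + latents_mean
# and reduces to z = x * scaling_factor, x = z / scaling_factor otherwise.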
| 672 |
+
|
| 673 |
+
def latents_to_img(self, latents):
|
| 674 |
+
l1 = self.denormalize(latents)
|
| 675 |
+
img1 = self.vae.decode(l1, return_dict=False)[0]
|
| 676 |
+
img1 = self.image_processor.postprocess(img1, output_type="pil", do_denormalize=[True])
|
| 677 |
+
return img1
|
| 678 |
+
|
| 679 |
+
def blur_mask(self, pil_mask, blur):
|
| 680 |
+
mask_blur = pil_mask.filter(ImageFilter.GaussianBlur(radius=blur))
|
| 681 |
+
mask_blur = np.array(mask_blur)
|
| 682 |
+
return torch.from_numpy(np.tile(mask_blur / mask_blur.max(), (3, 1, 1)).transpose(1, 2, 0))
|
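# Composition summary (restating the code above, not adding behaviour): `blur` softens the
# mask used to build the latent mask (what gets repainted in latent space), while
# `blur_compose` softens the mask used for the final pixel-space blend:
#   output = mask_compose * decoded_image + (1 - mask_compose) * original_image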
v0.36.0/matryoshka.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
v0.36.0/mixture_canvas.py
ADDED
|
@@ -0,0 +1,501 @@
|
| 1 |
+
import re
|
| 2 |
+
from copy import deepcopy
|
| 3 |
+
from dataclasses import asdict, dataclass
|
| 4 |
+
from enum import Enum
|
| 5 |
+
from typing import List, Optional, Union
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
from numpy import exp, pi, sqrt
|
| 10 |
+
from torchvision.transforms.functional import resize
|
| 11 |
+
from tqdm.auto import tqdm
|
| 12 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
|
| 13 |
+
|
| 14 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 15 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 16 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
|
| 17 |
+
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def preprocess_image(image):
|
| 21 |
+
from PIL import Image
|
| 22 |
+
|
| 23 |
+
"""Preprocess an input image
|
| 24 |
+
|
| 25 |
+
Same as
|
| 26 |
+
https://github.com/huggingface/diffusers/blob/1138d63b519e37f0ce04e027b9f4a3261d27c628/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L44
|
| 27 |
+
"""
|
| 28 |
+
w, h = image.size
|
| 29 |
+
w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
|
| 30 |
+
image = image.resize((w, h), resample=Image.LANCZOS)
|
| 31 |
+
image = np.array(image).astype(np.float32) / 255.0
|
| 32 |
+
image = image[None].transpose(0, 3, 1, 2)
|
| 33 |
+
image = torch.from_numpy(image)
|
| 34 |
+
return 2.0 * image - 1.0
|
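# e.g. a 517x389 input is resized to 512x384 (the nearest lower multiples of 32) and
# mapped from [0, 1] to [-1, 1], the range expected by the VAE encoder.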
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@dataclass
|
| 38 |
+
class CanvasRegion:
|
| 39 |
+
"""Class defining a rectangular region in the canvas"""
|
| 40 |
+
|
| 41 |
+
row_init: int # Region starting row in pixel space (included)
|
| 42 |
+
row_end: int # Region end row in pixel space (not included)
|
| 43 |
+
col_init: int # Region starting column in pixel space (included)
|
| 44 |
+
col_end: int # Region end column in pixel space (not included)
|
| 45 |
+
region_seed: int = None # Seed for random operations in this region
|
| 46 |
+
noise_eps: float = 0.0 # Deviation of a zero-mean gaussian noise to be applied over the latents in this region. Useful for slightly "rerolling" latents
|
| 47 |
+
|
| 48 |
+
def __post_init__(self):
|
| 49 |
+
# Initialize arguments if not specified
|
| 50 |
+
if self.region_seed is None:
|
| 51 |
+
self.region_seed = np.random.randint(9999999999)
|
| 52 |
+
# Check coordinates are non-negative
|
| 53 |
+
for coord in [self.row_init, self.row_end, self.col_init, self.col_end]:
|
| 54 |
+
if coord < 0:
|
| 55 |
+
raise ValueError(
|
| 56 |
+
f"A CanvasRegion must be defined with non-negative indices, found ({self.row_init}, {self.row_end}, {self.col_init}, {self.col_end})"
|
| 57 |
+
)
|
| 58 |
+
# Check coordinates are divisible by 8, else we end up with nasty rounding error when mapping to latent space
|
| 59 |
+
for coord in [self.row_init, self.row_end, self.col_init, self.col_end]:
|
| 60 |
+
if coord // 8 != coord / 8:
|
| 61 |
+
raise ValueError(
|
| 62 |
+
f"A CanvasRegion must be defined with locations divisible by 8, found ({self.row_init}-{self.row_end}, {self.col_init}-{self.col_end})"
|
| 63 |
+
)
|
| 64 |
+
# Check noise eps is non-negative
|
| 65 |
+
if self.noise_eps < 0:
|
| 66 |
+
raise ValueError(f"A CanvasRegion must be defined noises eps non-negative, found {self.noise_eps}")
|
| 67 |
+
# Compute coordinates for this region in latent space
|
| 68 |
+
self.latent_row_init = self.row_init // 8
|
| 69 |
+
self.latent_row_end = self.row_end // 8
|
| 70 |
+
self.latent_col_init = self.col_init // 8
|
| 71 |
+
self.latent_col_end = self.col_end // 8
|
| 72 |
+
|
| 73 |
+
@property
|
| 74 |
+
def width(self):
|
| 75 |
+
return self.col_end - self.col_init
|
| 76 |
+
|
| 77 |
+
@property
|
| 78 |
+
def height(self):
|
| 79 |
+
return self.row_end - self.row_init
|
| 80 |
+
|
| 81 |
+
def get_region_generator(self, device="cpu"):
|
| 82 |
+
"""Creates a torch.Generator based on the random seed of this region"""
|
| 83 |
+
# Initialize region generator
|
| 84 |
+
return torch.Generator(device).manual_seed(self.region_seed)
|
| 85 |
+
|
| 86 |
+
@property
|
| 87 |
+
def __dict__(self):
|
| 88 |
+
return asdict(self)
|
| 89 |
+
|
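# Worked example (illustrative): CanvasRegion(row_init=0, row_end=640, col_init=0, col_end=512)
# spans a 640x512 pixel area and maps to latent rows 0:80 and columns 0:64 (pixel // 8).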
| 90 |
+
|
| 91 |
+
class MaskModes(Enum):
|
| 92 |
+
"""Modes in which the influence of diffuser is masked"""
|
| 93 |
+
|
| 94 |
+
CONSTANT = "constant"
|
| 95 |
+
GAUSSIAN = "gaussian"
|
| 96 |
+
QUARTIC = "quartic" # See https://en.wikipedia.org/wiki/Kernel_(statistics)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@dataclass
|
| 100 |
+
class DiffusionRegion(CanvasRegion):
|
| 101 |
+
"""Abstract class defining a region where some class of diffusion process is acting"""
|
| 102 |
+
|
| 103 |
+
pass
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
@dataclass
|
| 107 |
+
class Text2ImageRegion(DiffusionRegion):
|
| 108 |
+
"""Class defining a region where a text guided diffusion process is acting"""
|
| 109 |
+
|
| 110 |
+
prompt: str = "" # Text prompt guiding the diffuser in this region
|
| 111 |
+
guidance_scale: float = 7.5 # Guidance scale of the diffuser in this region. If None, randomize
|
| 112 |
+
mask_type: MaskModes = MaskModes.GAUSSIAN.value # Kind of weight mask applied to this region
|
| 113 |
+
mask_weight: float = 1.0 # Global weights multiplier of the mask
|
| 114 |
+
tokenized_prompt = None # Tokenized prompt
|
| 115 |
+
encoded_prompt = None # Encoded prompt
|
| 116 |
+
|
| 117 |
+
def __post_init__(self):
|
| 118 |
+
super().__post_init__()
|
| 119 |
+
# Mask weight cannot be negative
|
| 120 |
+
if self.mask_weight < 0:
|
| 121 |
+
raise ValueError(
|
| 122 |
+
f"A Text2ImageRegion must be defined with non-negative mask weight, found {self.mask_weight}"
|
| 123 |
+
)
|
| 124 |
+
# Mask type must be an actual known mask
|
| 125 |
+
if self.mask_type not in [e.value for e in MaskModes]:
|
| 126 |
+
raise ValueError(
|
| 127 |
+
f"A Text2ImageRegion was defined with mask {self.mask_type}, which is not an accepted mask ({[e.value for e in MaskModes]})"
|
| 128 |
+
)
|
| 129 |
+
# Randomize arguments if given as None
|
| 130 |
+
if self.guidance_scale is None:
|
| 131 |
+
self.guidance_scale = np.random.randint(5, 30)
|
| 132 |
+
# Clean prompt
|
| 133 |
+
self.prompt = re.sub(" +", " ", self.prompt).replace("\n", " ")
|
| 134 |
+
|
| 135 |
+
def tokenize_prompt(self, tokenizer):
|
| 136 |
+
"""Tokenizes the prompt for this diffusion region using a given tokenizer"""
|
| 137 |
+
self.tokenized_prompt = tokenizer(
|
| 138 |
+
self.prompt,
|
| 139 |
+
padding="max_length",
|
| 140 |
+
max_length=tokenizer.model_max_length,
|
| 141 |
+
truncation=True,
|
| 142 |
+
return_tensors="pt",
|
| 143 |
+
)
|
| 144 |
+
|
| 145 |
+
def encode_prompt(self, text_encoder, device):
|
| 146 |
+
"""Encodes the previously tokenized prompt for this diffusion region using a given encoder"""
|
| 147 |
+
assert self.tokenized_prompt is not None, ValueError(
|
| 148 |
+
"Prompt in diffusion region must be tokenized before encoding"
|
| 149 |
+
)
|
| 150 |
+
self.encoded_prompt = text_encoder(self.tokenized_prompt.input_ids.to(device))[0]
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
@dataclass
|
| 154 |
+
class Image2ImageRegion(DiffusionRegion):
|
| 155 |
+
"""Class defining a region where an image guided diffusion process is acting"""
|
| 156 |
+
|
| 157 |
+
reference_image: torch.Tensor = None
|
| 158 |
+
strength: float = 0.8 # Strength of the image
|
| 159 |
+
|
| 160 |
+
def __post_init__(self):
|
| 161 |
+
super().__post_init__()
|
| 162 |
+
if self.reference_image is None:
|
| 163 |
+
raise ValueError("Must provide a reference image when creating an Image2ImageRegion")
|
| 164 |
+
if self.strength < 0 or self.strength > 1:
|
| 165 |
+
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {self.strength}")
|
| 166 |
+
# Rescale image to region shape
|
| 167 |
+
self.reference_image = resize(self.reference_image, size=[self.height, self.width])
|
| 168 |
+
|
| 169 |
+
def encode_reference_image(self, encoder, device, generator, cpu_vae=False):
|
| 170 |
+
"""Encodes the reference image for this Image2Image region into the latent space"""
|
| 171 |
+
# Place encoder in CPU or not following the parameter cpu_vae
|
| 172 |
+
if cpu_vae:
|
| 173 |
+
# Note: here we use the distribution mean instead of sampling, to avoid also having to move the generator to the CPU, which is troublesome
|
| 174 |
+
self.reference_latents = encoder.cpu().encode(self.reference_image).latent_dist.mean.to(device)
|
| 175 |
+
else:
|
| 176 |
+
self.reference_latents = encoder.encode(self.reference_image.to(device)).latent_dist.sample(
|
| 177 |
+
generator=generator
|
| 178 |
+
)
|
| 179 |
+
self.reference_latents = 0.18215 * self.reference_latents
|
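# 0.18215 is the Stable Diffusion 1.x VAE scaling factor (i.e. vae.config.scaling_factor);
# it is applied manually here because the raw latent_dist output is unscaled.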
| 180 |
+
|
| 181 |
+
@property
|
| 182 |
+
def __dict__(self):
|
| 183 |
+
# This class requires special casting to dict because of the reference_image tensor; otherwise it cannot be cast to JSON
|
| 184 |
+
|
| 185 |
+
# Get all basic fields from parent class
|
| 186 |
+
super_fields = {key: getattr(self, key) for key in DiffusionRegion.__dataclass_fields__.keys()}
|
| 187 |
+
# Pack other fields
|
| 188 |
+
return {**super_fields, "reference_image": self.reference_image.cpu().tolist(), "strength": self.strength}
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
class RerollModes(Enum):
|
| 192 |
+
"""Modes in which the reroll regions operate"""
|
| 193 |
+
|
| 194 |
+
RESET = "reset" # Completely reset the random noise in the region
|
| 195 |
+
EPSILON = "epsilon" # Alter slightly the latents in the region
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
@dataclass
|
| 199 |
+
class RerollRegion(CanvasRegion):
|
| 200 |
+
"""Class defining a rectangular canvas region in which initial latent noise will be rerolled"""
|
| 201 |
+
|
| 202 |
+
reroll_mode: RerollModes = RerollModes.RESET.value
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
@dataclass
|
| 206 |
+
class MaskWeightsBuilder:
|
| 207 |
+
"""Auxiliary class to compute a tensor of weights for a given diffusion region"""
|
| 208 |
+
|
| 209 |
+
latent_space_dim: int # Size of the U-net latent space
|
| 210 |
+
nbatch: int = 1 # Batch size in the U-net
|
| 211 |
+
|
| 212 |
+
def compute_mask_weights(self, region: DiffusionRegion) -> torch.tensor:
|
| 213 |
+
"""Computes a tensor of weights for a given diffusion region"""
|
| 214 |
+
MASK_BUILDERS = {
|
| 215 |
+
MaskModes.CONSTANT.value: self._constant_weights,
|
| 216 |
+
MaskModes.GAUSSIAN.value: self._gaussian_weights,
|
| 217 |
+
MaskModes.QUARTIC.value: self._quartic_weights,
|
| 218 |
+
}
|
| 219 |
+
return MASK_BUILDERS[region.mask_type](region)
|
| 220 |
+
|
| 221 |
+
def _constant_weights(self, region: DiffusionRegion) -> torch.tensor:
|
| 222 |
+
"""Computes a tensor of constant for a given diffusion region"""
|
| 223 |
+
latent_width = region.latent_col_end - region.latent_col_init
|
| 224 |
+
latent_height = region.latent_row_end - region.latent_row_init
|
| 225 |
+
return torch.ones(self.nbatch, self.latent_space_dim, latent_height, latent_width) * region.mask_weight
|
| 226 |
+
|
| 227 |
+
def _gaussian_weights(self, region: DiffusionRegion) -> torch.tensor:
|
| 228 |
+
"""Generates a gaussian mask of weights for tile contributions"""
|
| 229 |
+
latent_width = region.latent_col_end - region.latent_col_init
|
| 230 |
+
latent_height = region.latent_row_end - region.latent_row_init
|
| 231 |
+
|
| 232 |
+
var = 0.01
|
| 233 |
+
midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1
|
| 234 |
+
x_probs = [
|
| 235 |
+
exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
|
| 236 |
+
for x in range(latent_width)
|
| 237 |
+
]
|
| 238 |
+
midpoint = (latent_height - 1) / 2
|
| 239 |
+
y_probs = [
|
| 240 |
+
exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
|
| 241 |
+
for y in range(latent_height)
|
| 242 |
+
]
|
| 243 |
+
|
| 244 |
+
weights = np.outer(y_probs, x_probs) * region.mask_weight
|
| 245 |
+
return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1))
|
| 246 |
+
|
| 247 |
+
def _quartic_weights(self, region: DiffusionRegion) -> torch.tensor:
|
| 248 |
+
"""Generates a quartic mask of weights for tile contributions
|
| 249 |
+
|
| 250 |
+
The quartic kernel has bounded support over the diffusion region, and a smooth decay to the region limits.
|
| 251 |
+
"""
|
| 252 |
+
quartic_constant = 15.0 / 16.0
|
| 253 |
+
|
| 254 |
+
support = (np.array(range(region.latent_col_init, region.latent_col_end)) - region.latent_col_init) / (
|
| 255 |
+
region.latent_col_end - region.latent_col_init - 1
|
| 256 |
+
) * 1.99 - (1.99 / 2.0)
|
| 257 |
+
x_probs = quartic_constant * np.square(1 - np.square(support))
|
| 258 |
+
support = (np.array(range(region.latent_row_init, region.latent_row_end)) - region.latent_row_init) / (
|
| 259 |
+
region.latent_row_end - region.latent_row_init - 1
|
| 260 |
+
) * 1.99 - (1.99 / 2.0)
|
| 261 |
+
y_probs = quartic_constant * np.square(1 - np.square(support))
|
| 262 |
+
|
| 263 |
+
weights = np.outer(y_probs, x_probs) * region.mask_weight
|
| 264 |
+
return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1))
|
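# Both the gaussian and quartic builders return a (nbatch, latent_space_dim, height, width)
# weight tensor that peaks at the region centre and decays towards its borders; where regions
# overlap, the per-region noise predictions are later combined as a weighted average
# (sum of weighted predictions divided by the summed weights).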
| 265 |
+
|
| 266 |
+
|
| 267 |
+
class StableDiffusionCanvasPipeline(DiffusionPipeline, StableDiffusionMixin):
|
| 268 |
+
"""Stable Diffusion pipeline that mixes several diffusers in the same canvas"""
|
| 269 |
+
|
| 270 |
+
def __init__(
|
| 271 |
+
self,
|
| 272 |
+
vae: AutoencoderKL,
|
| 273 |
+
text_encoder: CLIPTextModel,
|
| 274 |
+
tokenizer: CLIPTokenizer,
|
| 275 |
+
unet: UNet2DConditionModel,
|
| 276 |
+
scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
|
| 277 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 278 |
+
feature_extractor: CLIPImageProcessor,
|
| 279 |
+
):
|
| 280 |
+
super().__init__()
|
| 281 |
+
self.register_modules(
|
| 282 |
+
vae=vae,
|
| 283 |
+
text_encoder=text_encoder,
|
| 284 |
+
tokenizer=tokenizer,
|
| 285 |
+
unet=unet,
|
| 286 |
+
scheduler=scheduler,
|
| 287 |
+
safety_checker=safety_checker,
|
| 288 |
+
feature_extractor=feature_extractor,
|
| 289 |
+
)
|
| 290 |
+
|
| 291 |
+
def decode_latents(self, latents, cpu_vae=False):
|
| 292 |
+
"""Decodes a given array of latents into pixel space"""
|
| 293 |
+
# scale and decode the image latents with vae
|
| 294 |
+
if cpu_vae:
|
| 295 |
+
lat = deepcopy(latents).cpu()
|
| 296 |
+
vae = deepcopy(self.vae).cpu()
|
| 297 |
+
else:
|
| 298 |
+
lat = latents
|
| 299 |
+
vae = self.vae
|
| 300 |
+
|
| 301 |
+
lat = 1 / 0.18215 * lat
|
| 302 |
+
image = vae.decode(lat).sample
|
| 303 |
+
|
| 304 |
+
image = (image / 2 + 0.5).clamp(0, 1)
|
| 305 |
+
image = image.cpu().permute(0, 2, 3, 1).numpy()
|
| 306 |
+
|
| 307 |
+
return self.numpy_to_pil(image)
|
| 308 |
+
|
| 309 |
+
def get_latest_timestep_img2img(self, num_inference_steps, strength):
|
| 310 |
+
"""Finds the latest timesteps where an img2img strength does not impose latents anymore"""
|
| 311 |
+
# get the original timestep using init_timestep
|
| 312 |
+
offset = self.scheduler.config.get("steps_offset", 0)
|
| 313 |
+
init_timestep = int(num_inference_steps * (1 - strength)) + offset
|
| 314 |
+
init_timestep = min(init_timestep, num_inference_steps)
|
| 315 |
+
|
| 316 |
+
t_start = min(max(num_inference_steps - init_timestep + offset, 0), num_inference_steps - 1)
|
| 317 |
+
latest_timestep = self.scheduler.timesteps[t_start]
|
| 318 |
+
|
| 319 |
+
return latest_timestep
|
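# Worked example: with num_inference_steps=50, strength=0.8 and steps_offset=0,
# init_timestep = int(50 * 0.2) = 10 and t_start = 40, so the function returns
# self.scheduler.timesteps[40].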
| 320 |
+
|
| 321 |
+
    @torch.no_grad()
    def __call__(
        self,
        canvas_height: int,
        canvas_width: int,
        regions: List[DiffusionRegion],
        num_inference_steps: Optional[int] = 50,
        seed: Optional[int] = 12345,
        reroll_regions: Optional[List[RerollRegion]] = None,
        cpu_vae: Optional[bool] = False,
        decode_steps: Optional[bool] = False,
    ):
        if reroll_regions is None:
            reroll_regions = []
        batch_size = 1

        if decode_steps:
            steps_images = []

        # Prepare scheduler
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        # Split diffusion regions by their kind
        text2image_regions = [region for region in regions if isinstance(region, Text2ImageRegion)]
        image2image_regions = [region for region in regions if isinstance(region, Image2ImageRegion)]

        # Prepare text embeddings
        for region in text2image_regions:
            region.tokenize_prompt(self.tokenizer)
            region.encode_prompt(self.text_encoder, self.device)

        # Create original noisy latents using the timesteps
        latents_shape = (batch_size, self.unet.config.in_channels, canvas_height // 8, canvas_width // 8)
        generator = torch.Generator(self.device).manual_seed(seed)
        init_noise = torch.randn(latents_shape, generator=generator, device=self.device)

        # Reset latents in seed reroll regions, if requested
        for region in reroll_regions:
            if region.reroll_mode == RerollModes.RESET.value:
                region_shape = (
                    latents_shape[0],
                    latents_shape[1],
                    region.latent_row_end - region.latent_row_init,
                    region.latent_col_end - region.latent_col_init,
                )
                init_noise[
                    :,
                    :,
                    region.latent_row_init : region.latent_row_end,
                    region.latent_col_init : region.latent_col_end,
                ] = torch.randn(region_shape, generator=region.get_region_generator(self.device), device=self.device)

        # Apply epsilon noise to regions: first diffusion regions, then reroll regions
        all_eps_rerolls = regions + [r for r in reroll_regions if r.reroll_mode == RerollModes.EPSILON.value]
        for region in all_eps_rerolls:
            if region.noise_eps > 0:
                region_noise = init_noise[
                    :,
                    :,
                    region.latent_row_init : region.latent_row_end,
                    region.latent_col_init : region.latent_col_end,
                ]
                eps_noise = (
                    torch.randn(
                        region_noise.shape, generator=region.get_region_generator(self.device), device=self.device
                    )
                    * region.noise_eps
                )
                init_noise[
                    :,
                    :,
                    region.latent_row_init : region.latent_row_end,
                    region.latent_col_init : region.latent_col_end,
                ] += eps_noise

        # scale the initial noise by the standard deviation required by the scheduler
        latents = init_noise * self.scheduler.init_noise_sigma

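        # Note (added commentary, not in the original implementation): the two reroll modes
        # above behave differently. RESET re-draws the noise for a region from its own seed,
        # so the region is decoupled from the global canvas seed, while EPSILON keeps the
        # shared noise and only perturbs it by `noise_eps`; a small value nudges the region
        # towards a different sample without changing its overall layout.
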
        # Get unconditional embeddings for classifier free guidance in text2image regions
        for region in text2image_regions:
            max_length = region.tokenized_prompt.input_ids.shape[-1]
            uncond_input = self.tokenizer(
                [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            region.encoded_prompt = torch.cat([uncond_embeddings, region.encoded_prompt])

        # Prepare image latents
        for region in image2image_regions:
            region.encode_reference_image(self.vae, device=self.device, generator=generator)

        # Prepare mask of weights for each region
        mask_builder = MaskWeightsBuilder(latent_space_dim=self.unet.config.in_channels, nbatch=batch_size)
        mask_weights = [mask_builder.compute_mask_weights(region).to(self.device) for region in text2image_regions]

        # Diffusion timesteps
        for i, t in tqdm(enumerate(self.scheduler.timesteps)):
            # Diffuse each region
            noise_preds_regions = []

            # text2image regions
            for region in text2image_regions:
                region_latents = latents[
                    :,
                    :,
                    region.latent_row_init : region.latent_row_end,
                    region.latent_col_init : region.latent_col_end,
                ]
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([region_latents] * 2)
                # scale model input following scheduler rules
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=region.encoded_prompt)["sample"]
                # perform guidance
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred_region = noise_pred_uncond + region.guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_preds_regions.append(noise_pred_region)

            # Merge noise predictions for all tiles
            noise_pred = torch.zeros(latents.shape, device=self.device)
            contributors = torch.zeros(latents.shape, device=self.device)
            # Add each tile contribution to overall latents
            for region, noise_pred_region, mask_weights_region in zip(
                text2image_regions, noise_preds_regions, mask_weights
            ):
                noise_pred[
                    :,
                    :,
                    region.latent_row_init : region.latent_row_end,
                    region.latent_col_init : region.latent_col_end,
                ] += noise_pred_region * mask_weights_region
                contributors[
                    :,
                    :,
                    region.latent_row_init : region.latent_row_end,
                    region.latent_col_init : region.latent_col_end,
                ] += mask_weights_region
            # Average overlapping areas with more than 1 contributor
            noise_pred /= contributors
            noise_pred = torch.nan_to_num(
                noise_pred
            )  # Replace NaNs by zeros: NaN can appear if a position is not covered by any DiffusionRegion

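            # Added commentary (not in the original code): at every latent position the merged
            # prediction is a weighted average, noise_pred = sum_i(w_i * eps_i) / sum_i(w_i),
            # where w_i are the per-region mask weights. Positions covered by a single region
            # reduce to that region's prediction; uncovered positions produce 0/0 = NaN, which
            # the nan_to_num call above maps back to zero.
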
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

            # Image2Image regions: override latents generated by the scheduler
            for region in image2image_regions:
                influence_step = self.get_latest_timestep_img2img(num_inference_steps, region.strength)
                # Only override in the timesteps before the last influence step of the image (given by its strength)
                if t > influence_step:
                    timestep = t.repeat(batch_size)
                    region_init_noise = init_noise[
                        :,
                        :,
                        region.latent_row_init : region.latent_row_end,
                        region.latent_col_init : region.latent_col_end,
                    ]
                    region_latents = self.scheduler.add_noise(region.reference_latents, region_init_noise, timestep)
                    latents[
                        :,
                        :,
                        region.latent_row_init : region.latent_row_end,
                        region.latent_col_init : region.latent_col_end,
                    ] = region_latents

            if decode_steps:
                steps_images.append(self.decode_latents(latents, cpu_vae))

        # scale and decode the image latents with vae
        image = self.decode_latents(latents, cpu_vae)

        output = {"images": image}
        if decode_steps:
            output = {**output, "steps_images": steps_images}
        return output
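A minimal usage sketch for the canvas pipeline above (added for illustration; it assumes the `Text2ImageRegion` constructor defined earlier in this file takes the region bounds in pixel space followed by `guidance_scale` and `prompt`, so check the class definitions before copying):

```py
>>> from diffusers import DiffusionPipeline, LMSDiscreteScheduler

>>> scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
>>> pipeline = DiffusionPipeline.from_pretrained(
...     "CompVis/stable-diffusion-v1-4", scheduler=scheduler, custom_pipeline="mixture_canvas"
... ).to("cuda")

>>> # Two overlapping text regions on a 640x1024 canvas; the overlap is blended automatically.
>>> output = pipeline(
...     canvas_height=640,
...     canvas_width=1024,
...     regions=[
...         Text2ImageRegion(0, 640, 0, 640, guidance_scale=8, prompt="a watercolor forest"),
...         Text2ImageRegion(0, 640, 384, 1024, guidance_scale=8, prompt="a watercolor lake"),
...     ],
...     num_inference_steps=50,
...     seed=1234,
... )
>>> image = output["images"][0]
```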
v0.36.0/mixture_tiling.py ADDED
@@ -0,0 +1,405 @@
import inspect
from copy import deepcopy
from enum import Enum
from typing import List, Optional, Tuple, Union

import torch
from tqdm.auto import tqdm

from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


try:
    from ligo.segments import segment
    from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
except ImportError:
    raise ImportError("Please install transformers and ligo-segments to use the mixture pipeline")

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
    ```py
    >>> from diffusers import LMSDiscreteScheduler, DiffusionPipeline

    >>> scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
    >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler, custom_pipeline="mixture_tiling")
    >>> pipeline.to("cuda")

    >>> image = pipeline(
    >>>     prompt=[[
    >>>         "A charming house in the countryside, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
    >>>         "A dirt road in the countryside crossing pastures, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
    >>>         "An old and rusty giant robot lying on a dirt road, by jakub rozalski, dark sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece"
    >>>     ]],
    >>>     tile_height=640,
    >>>     tile_width=640,
    >>>     tile_row_overlap=0,
    >>>     tile_col_overlap=256,
    >>>     guidance_scale=8,
    >>>     seed=7178915308,
    >>>     num_inference_steps=50,
    >>> )["images"][0]
    ```
"""


def _tile2pixel_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
    """Given a tile row and column numbers returns the range of pixels affected by that tile in the overall image

    Returns a tuple with:
        - Starting coordinates of rows in pixel space
        - Ending coordinates of rows in pixel space
        - Starting coordinates of columns in pixel space
        - Ending coordinates of columns in pixel space
    """
    px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap)
    px_row_end = px_row_init + tile_height
    px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap)
    px_col_end = px_col_init + tile_width
    return px_row_init, px_row_end, px_col_init, px_col_end


def _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end):
    """Translates coordinates in pixel space to coordinates in latent space"""
    return px_row_init // 8, px_row_end // 8, px_col_init // 8, px_col_end // 8


def _tile2latent_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
    """Given a tile row and column numbers returns the range of latents affected by that tile in the overall image

    Returns a tuple with:
        - Starting coordinates of rows in latent space
        - Ending coordinates of rows in latent space
        - Starting coordinates of columns in latent space
        - Ending coordinates of columns in latent space
    """
    px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
        tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
    )
    return _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end)


def _tile2latent_exclusive_indices(
    tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, rows, columns
):
    """Given a tile row and column numbers returns the range of latents affected only by that tile in the overall image

    Returns a tuple with:
        - Starting coordinates of rows in latent space
        - Ending coordinates of rows in latent space
        - Starting coordinates of columns in latent space
        - Ending coordinates of columns in latent space
    """
    row_init, row_end, col_init, col_end = _tile2latent_indices(
        tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
    )
    row_segment = segment(row_init, row_end)
    col_segment = segment(col_init, col_end)
    # Iterate over the rest of tiles, clipping the region for the current tile
    for row in range(rows):
        for column in range(columns):
            if row != tile_row and column != tile_col:
                clip_row_init, clip_row_end, clip_col_init, clip_col_end = _tile2latent_indices(
                    row, column, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                )
                row_segment = row_segment - segment(clip_row_init, clip_row_end)
                col_segment = col_segment - segment(clip_col_init, clip_col_end)
    # return row_init, row_end, col_init, col_end
    return row_segment[0], row_segment[1], col_segment[0], col_segment[1]

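# Worked example (added for clarity, not part of the original file): with tile_width=640,
# tile_height=640, tile_row_overlap=0 and tile_col_overlap=256, the tile in row 0, column 1
# maps to pixel columns 384..1024 (since 1 * (640 - 256) = 384) and pixel rows 0..640, which
# _pixel2latent_indices turns into latent columns 48..128 and latent rows 0..80.
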
class StableDiffusionExtrasMixin:
    """Mixin providing additional convenience method to Stable Diffusion pipelines"""

    def decode_latents(self, latents, cpu_vae=False):
        """Decodes a given array of latents into pixel space"""
        # scale and decode the image latents with vae
        if cpu_vae:
            lat = deepcopy(latents).cpu()
            vae = deepcopy(self.vae).cpu()
        else:
            lat = latents
            vae = self.vae

        lat = 1 / 0.18215 * lat
        image = vae.decode(lat).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        return self.numpy_to_pil(image)


class StableDiffusionTilingPipeline(DiffusionPipeline, StableDiffusionExtrasMixin):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    class SeedTilesMode(Enum):
        """Modes in which the latents of a particular tile can be re-seeded"""

        FULL = "full"
        EXCLUSIVE = "exclusive"

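    # Added note (not in the original file): the output canvas grows with the prompt grid as
    #   height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
    #   width  = tile_width  + (grid_cols - 1) * (tile_width  - tile_col_overlap)
    # so the 1x3 grid in the example docstring above (640x640 tiles, 256 px column overlap)
    # produces a 640x1408 image.
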
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[List[str]]],
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        eta: Optional[float] = 0.0,
        seed: Optional[int] = None,
        tile_height: Optional[int] = 512,
        tile_width: Optional[int] = 512,
        tile_row_overlap: Optional[int] = 256,
        tile_col_overlap: Optional[int] = 256,
        guidance_scale_tiles: Optional[List[List[float]]] = None,
        seed_tiles: Optional[List[List[int]]] = None,
        seed_tiles_mode: Optional[Union[str, List[List[str]]]] = "full",
        seed_reroll_regions: Optional[List[Tuple[int, int, int, int, int]]] = None,
        cpu_vae: Optional[bool] = False,
    ):
        r"""
        Function to run the diffusion pipeline with tiling support.

        Args:
            prompt: either a single string (no tiling) or a list of lists with all the prompts to use (one list for each row of tiles). This will also define the tiling structure.
            num_inference_steps: number of diffusion steps.
            guidance_scale: classifier-free guidance.
            seed: general random seed to initialize latents.
            tile_height: height in pixels of each grid tile.
            tile_width: width in pixels of each grid tile.
            tile_row_overlap: number of overlap pixels between tiles in consecutive rows.
            tile_col_overlap: number of overlap pixels between tiles in consecutive columns.
            guidance_scale_tiles: specific weights for classifier-free guidance in each tile. If None, the value provided in guidance_scale will be used.
            seed_tiles: specific seeds for the initialization latents in each tile. These will override the latents generated for the whole canvas using the standard seed parameter.
            seed_tiles_mode: either "full" or "exclusive". If "full", all the latents affected by the tile will be overridden. If "exclusive", only the latents that are affected exclusively by this tile (and no other tiles) will be overridden.
            seed_reroll_regions: a list of tuples in the form (start row, end row, start column, end column, seed) defining regions in pixel space for which the latents will be overridden using the given seed. Takes priority over seed_tiles.
            cpu_vae: the decoder from latent space to pixel space can require too much GPU RAM for large images. If you find out of memory errors at the end of the generation process, try setting this parameter to True to run the decoder in CPU. Slower, but should run without memory issues.

        Examples:

        Returns:
            A PIL image with the generated image.

        """
        if not isinstance(prompt, list) or not all(isinstance(row, list) for row in prompt):
            raise ValueError(f"`prompt` has to be a list of lists but is {type(prompt)}")
        grid_rows = len(prompt)
        grid_cols = len(prompt[0])
        if not all(len(row) == grid_cols for row in prompt):
            raise ValueError("All prompt rows must have the same number of prompt columns")
        if not isinstance(seed_tiles_mode, str) and (
            not isinstance(seed_tiles_mode, list) or not all(isinstance(row, list) for row in seed_tiles_mode)
        ):
            raise ValueError(f"`seed_tiles_mode` has to be a string or list of lists but is {type(seed_tiles_mode)}")
        if isinstance(seed_tiles_mode, str):
            seed_tiles_mode = [[seed_tiles_mode for _ in range(len(row))] for row in prompt]

        modes = [mode.value for mode in self.SeedTilesMode]
        if any(mode not in modes for row in seed_tiles_mode for mode in row):
            raise ValueError(f"Seed tiles mode must be one of {modes}")
        if seed_reroll_regions is None:
            seed_reroll_regions = []
        batch_size = 1

        # create original noisy latents using the timesteps
        height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
        width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        generator = torch.Generator("cuda").manual_seed(seed)
        latents = torch.randn(latents_shape, generator=generator, device=self.device)

        # overwrite latents for specific tiles if provided
        if seed_tiles is not None:
            for row in range(grid_rows):
                for col in range(grid_cols):
                    if (seed_tile := seed_tiles[row][col]) is not None:
                        mode = seed_tiles_mode[row][col]
                        if mode == self.SeedTilesMode.FULL.value:
                            row_init, row_end, col_init, col_end = _tile2latent_indices(
                                row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                            )
                        else:
                            row_init, row_end, col_init, col_end = _tile2latent_exclusive_indices(
                                row,
                                col,
                                tile_width,
                                tile_height,
                                tile_row_overlap,
                                tile_col_overlap,
                                grid_rows,
                                grid_cols,
                            )
                        tile_generator = torch.Generator("cuda").manual_seed(seed_tile)
                        tile_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
                        latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
                            tile_shape, generator=tile_generator, device=self.device
                        )

        # overwrite again for seed reroll regions
        for row_init, row_end, col_init, col_end, seed_reroll in seed_reroll_regions:
            row_init, row_end, col_init, col_end = _pixel2latent_indices(
                row_init, row_end, col_init, col_end
            )  # to latent space coordinates
            reroll_generator = torch.Generator("cuda").manual_seed(seed_reroll)
            region_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
            latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
                region_shape, generator=reroll_generator, device=self.device
            )

        # Prepare scheduler
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents * self.scheduler.sigmas[0]

        # get prompts text embeddings
        text_input = [
            [
                self.tokenizer(
                    col,
                    padding="max_length",
                    max_length=self.tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                for col in row
            ]
            for row in prompt
        ]
        text_embeddings = [[self.text_encoder(col.input_ids.to(self.device))[0] for col in row] for row in text_input]

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0  # TODO: also active if any tile has guidance scale
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            for i in range(grid_rows):
                for j in range(grid_cols):
                    max_length = text_input[i][j].input_ids.shape[-1]
                    uncond_input = self.tokenizer(
                        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
                    )
                    uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

                    # For classifier free guidance, we need to do two forward passes.
                    # Here we concatenate the unconditional and text embeddings into a single batch
                    # to avoid doing two forward passes
                    text_embeddings[i][j] = torch.cat([uncond_embeddings, text_embeddings[i][j]])

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # Mask for tile weights strength
        tile_weights = self._gaussian_weights(tile_width, tile_height, batch_size)

        # Diffusion timesteps
        for i, t in tqdm(enumerate(self.scheduler.timesteps)):
            # Diffuse each tile
            noise_preds = []
            for row in range(grid_rows):
                noise_preds_row = []
                for col in range(grid_cols):
                    px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
                        row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                    )
                    tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end]
                    # expand the latents if we are doing classifier free guidance
                    latent_model_input = torch.cat([tile_latents] * 2) if do_classifier_free_guidance else tile_latents
                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                    # predict the noise residual
                    noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings[row][col])[
                        "sample"
                    ]
                    # perform guidance
                    if do_classifier_free_guidance:
                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                        guidance = (
                            guidance_scale
                            if guidance_scale_tiles is None or guidance_scale_tiles[row][col] is None
                            else guidance_scale_tiles[row][col]
                        )
                        noise_pred_tile = noise_pred_uncond + guidance * (noise_pred_text - noise_pred_uncond)
                        noise_preds_row.append(noise_pred_tile)
                noise_preds.append(noise_preds_row)
            # Stitch noise predictions for all tiles
            noise_pred = torch.zeros(latents.shape, device=self.device)
            contributors = torch.zeros(latents.shape, device=self.device)
            # Add each tile contribution to overall latents
            for row in range(grid_rows):
                for col in range(grid_cols):
                    px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
                        row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                    )
                    noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += (
                        noise_preds[row][col] * tile_weights
                    )
                    contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights
            # Average overlapping areas with more than 1 contributor
            noise_pred /= contributors

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        # scale and decode the image latents with vae
        image = self.decode_latents(latents, cpu_vae)

        return {"images": image}

    def _gaussian_weights(self, tile_width, tile_height, nbatches):
        """Generates a gaussian mask of weights for tile contributions"""
        import numpy as np
        from numpy import exp, pi, sqrt

        latent_width = tile_width // 8
        latent_height = tile_height // 8

        var = 0.01
        midpoint = (latent_width - 1) / 2  # -1 because index goes from 0 to latent_width - 1
        x_probs = [
            exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
            for x in range(latent_width)
        ]
        midpoint = latent_height / 2
        y_probs = [
            exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
            for y in range(latent_height)
        ]

        weights = np.outer(y_probs, x_probs)
        return torch.tile(torch.tensor(weights, device=self.device), (nbatches, self.unet.config.in_channels, 1, 1))
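A short sketch of the per-tile seeding options exposed by `StableDiffusionTilingPipeline.__call__` above (added for illustration; it assumes a `pipeline` loaded with `custom_pipeline="mixture_tiling"` as in the example docstring, and the grid layout and parameter values are arbitrary):

```py
>>> # 2x2 grid of 512x512 tiles with 256 px overlaps -> a 768x768 image
>>> image = pipeline(
...     prompt=[["a snowy mountain", "a pine forest"], ["a frozen lake", "a wooden cabin"]],
...     tile_height=512,
...     tile_width=512,
...     tile_row_overlap=256,
...     tile_col_overlap=256,
...     seed=1000,
...     seed_tiles=[[None, None], [None, 9999]],  # re-seed only the bottom-right tile
...     seed_tiles_mode="exclusive",              # keep the shared noise in overlap areas
...     num_inference_steps=50,
... )["images"][0]
```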
v0.36.0/mixture_tiling_sdxl.py ADDED
@@ -0,0 +1,1219 @@
# Copyright 2025 The DEVAIEXP Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from transformers import (
    CLIPTextModel,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
)

from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import (
    FromSingleFileMixin,
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
from diffusers.schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler
from diffusers.utils import (
    USE_PEFT_BACKEND,
    deprecate,
    is_invisible_watermark_available,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
    scale_lora_layers,
    unscale_lora_layers,
)
from diffusers.utils.torch_utils import randn_tensor


try:
    from ligo.segments import segment
except ImportError:
    raise ImportError("Please install transformers and ligo-segments to use the mixture pipeline")

if is_invisible_watermark_available():
    from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
    ```py
    >>> import torch
    >>> from diffusers import StableDiffusionXLPipeline

    >>> pipe = StableDiffusionXLPipeline.from_pretrained(
    ...     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
    ... )
    >>> pipe = pipe.to("cuda")

    >>> prompt = "a photo of an astronaut riding a horse on mars"
    >>> image = pipe(prompt).images[0]
    ```
"""

def _tile2pixel_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
    """Given a tile row and column numbers returns the range of pixels affected by that tile in the overall image

    Returns a tuple with:
        - Starting coordinates of rows in pixel space
        - Ending coordinates of rows in pixel space
        - Starting coordinates of columns in pixel space
        - Ending coordinates of columns in pixel space
    """
    px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap)
    px_row_end = px_row_init + tile_height
    px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap)
    px_col_end = px_col_init + tile_width
    return px_row_init, px_row_end, px_col_init, px_col_end


def _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end):
    """Translates coordinates in pixel space to coordinates in latent space"""
    return px_row_init // 8, px_row_end // 8, px_col_init // 8, px_col_end // 8


def _tile2latent_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
    """Given a tile row and column numbers returns the range of latents affected by that tile in the overall image

    Returns a tuple with:
        - Starting coordinates of rows in latent space
        - Ending coordinates of rows in latent space
        - Starting coordinates of columns in latent space
        - Ending coordinates of columns in latent space
    """
    px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
        tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
    )
    return _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end)


def _tile2latent_exclusive_indices(
    tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, rows, columns
):
    """Given a tile row and column numbers returns the range of latents affected only by that tile in the overall image

    Returns a tuple with:
        - Starting coordinates of rows in latent space
        - Ending coordinates of rows in latent space
        - Starting coordinates of columns in latent space
        - Ending coordinates of columns in latent space
    """
    row_init, row_end, col_init, col_end = _tile2latent_indices(
        tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
    )
    row_segment = segment(row_init, row_end)
    col_segment = segment(col_init, col_end)
    # Iterate over the rest of tiles, clipping the region for the current tile
    for row in range(rows):
        for column in range(columns):
            if row != tile_row and column != tile_col:
                clip_row_init, clip_row_end, clip_col_init, clip_col_end = _tile2latent_indices(
                    row, column, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                )
                row_segment = row_segment - segment(clip_row_init, clip_row_end)
                col_segment = col_segment - segment(clip_col_init, clip_col_end)
    # return row_init, row_end, col_init, col_end
    return row_segment[0], row_segment[1], col_segment[0], col_segment[1]


def _get_crops_coords_list(num_rows, num_cols, output_width):
    """
    Generates a list of lists of `crops_coords_top_left` tuples for focusing on
    different horizontal parts of an image, and repeats this list for the specified
    number of rows in the output structure.

    This function calculates `crops_coords_top_left` tuples to create horizontal
    focus variations (like left, center, right focus) based on `output_width`
    and `num_cols` (which represents the number of horizontal focus points/columns).
    It then repeats the *list* of these horizontal focus tuples `num_rows` times to
    create the final list of lists output structure.

    Args:
        num_rows (int): The desired number of rows in the output list of lists.
            This determines how many times the list of horizontal
            focus variations will be repeated.
        num_cols (int): The number of horizontal focus points (columns) to generate.
            This determines how many horizontal focus variations are
            created based on dividing the `output_width`.
        output_width (int): The desired width of the output image.

    Returns:
        list[list[tuple[int, int]]]: A list of lists of tuples. Each inner list
            contains `num_cols` tuples of `(ctop, cleft)`,
            representing horizontal focus points. The outer list
            contains `num_rows` such inner lists.
    """
    crops_coords_list = []
    if num_cols <= 0:
        crops_coords_list = []
    elif num_cols == 1:
        crops_coords_list = [(0, 0)]
    else:
        section_width = output_width / num_cols
        for i in range(num_cols):
            cleft = int(round(i * section_width))
            crops_coords_list.append((0, cleft))

    result_list = []
    for _ in range(num_rows):
        result_list.append(list(crops_coords_list))

    return result_list

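# Worked example (added for clarity, not part of the original file):
# _get_crops_coords_list(num_rows=2, num_cols=3, output_width=1920) computes
# section_width = 640 and therefore returns
# [[(0, 0), (0, 640), (0, 1280)], [(0, 0), (0, 640), (0, 1280)]],
# i.e. one left/center/right crop coordinate per tile column, repeated for each tile row.
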
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    r"""
    Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on
    Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
    Flawed](https://huggingface.co/papers/2305.08891).

    Args:
        noise_cfg (`torch.Tensor`):
            The predicted noise tensor for the guided diffusion process.
        noise_pred_text (`torch.Tensor`):
            The predicted noise tensor for the text-guided diffusion process.
        guidance_rescale (`float`, *optional*, defaults to 0.0):
            A rescale factor applied to the noise predictions.

    Returns:
        noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor.
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """

    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps

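# Added note (not part of the original file): `rescale_noise_cfg` interpolates between the raw
# CFG output and a variance-matched version of it,
#   noise = r * (noise_cfg * std_text / std_cfg) + (1 - r) * noise_cfg, with r = guidance_rescale,
# so r = 0.0 is a no-op and r = 1.0 fully matches the text-branch standard deviation.
# `retrieve_timesteps` is typically called as
#   timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device)
# with either custom `timesteps` or custom `sigmas` passed instead of `num_inference_steps`, never both.
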
class StableDiffusionXLTilingPipeline(
    DiffusionPipeline,
    StableDiffusionMixin,
    FromSingleFileMixin,
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion XL.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    The pipeline also inherits the following loading methods:
        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
        - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
        - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion XL uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        text_encoder_2 ([`CLIPTextModelWithProjection`]):
            Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
            specifically the
            [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
            variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        tokenizer_2 (`CLIPTokenizer`):
            Second Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
            Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
            `stabilityai/stable-diffusion-xl-base-1-0`.
        add_watermarker (`bool`, *optional*):
            Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
            watermark output images. If not defined, it will default to True if the package is installed, otherwise no
            watermarker will be used.
    """

    model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
    _optional_components = [
        "tokenizer",
        "tokenizer_2",
        "text_encoder",
        "text_encoder_2",
    ]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        text_encoder_2: CLIPTextModelWithProjection,
        tokenizer: CLIPTokenizer,
        tokenizer_2: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        force_zeros_for_empty_prompt: bool = True,
        add_watermarker: Optional[bool] = None,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            text_encoder_2=text_encoder_2,
            tokenizer=tokenizer,
            tokenizer_2=tokenizer_2,
            unet=unet,
            scheduler=scheduler,
        )
        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)

        self.default_sample_size = (
            self.unet.config.sample_size
            if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size")
            else 128
        )

        add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()

        if add_watermarker:
            self.watermark = StableDiffusionXLWatermarker()
        else:
            self.watermark = None

    class SeedTilesMode(Enum):
        """Modes in which the latents of a particular tile can be re-seeded"""

        FULL = "full"
        EXCLUSIVE = "exclusive"

    def encode_prompt(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        pooled_prompt_embeds: Optional[torch.Tensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
| 444 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 445 |
+
"""
|
| 446 |
+
device = device or self._execution_device
|
| 447 |
+
|
| 448 |
+
# set lora scale so that monkey patched LoRA
|
| 449 |
+
# function of text encoder can correctly access it
|
| 450 |
+
if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
|
| 451 |
+
self._lora_scale = lora_scale
|
| 452 |
+
|
| 453 |
+
# dynamically adjust the LoRA scale
|
| 454 |
+
if self.text_encoder is not None:
|
| 455 |
+
if not USE_PEFT_BACKEND:
|
| 456 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 457 |
+
else:
|
| 458 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 459 |
+
|
| 460 |
+
if self.text_encoder_2 is not None:
|
| 461 |
+
if not USE_PEFT_BACKEND:
|
| 462 |
+
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
|
| 463 |
+
else:
|
| 464 |
+
scale_lora_layers(self.text_encoder_2, lora_scale)
|
| 465 |
+
|
| 466 |
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
| 467 |
+
|
| 468 |
+
if prompt is not None:
|
| 469 |
+
batch_size = len(prompt)
|
| 470 |
+
else:
|
| 471 |
+
batch_size = prompt_embeds.shape[0]
|
| 472 |
+
|
| 473 |
+
# Define tokenizers and text encoders
|
| 474 |
+
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
|
| 475 |
+
text_encoders = (
|
| 476 |
+
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
|
| 477 |
+
)
|
| 478 |
+
|
| 479 |
+
if prompt_embeds is None:
|
| 480 |
+
prompt_2 = prompt_2 or prompt
|
| 481 |
+
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
|
| 482 |
+
|
| 483 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 484 |
+
prompt_embeds_list = []
|
| 485 |
+
prompts = [prompt, prompt_2]
|
| 486 |
+
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
|
| 487 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 488 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
| 489 |
+
|
| 490 |
+
text_inputs = tokenizer(
|
| 491 |
+
prompt,
|
| 492 |
+
padding="max_length",
|
| 493 |
+
max_length=tokenizer.model_max_length,
|
| 494 |
+
truncation=True,
|
| 495 |
+
return_tensors="pt",
|
| 496 |
+
)
|
| 497 |
+
|
| 498 |
+
text_input_ids = text_inputs.input_ids
|
| 499 |
+
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 500 |
+
|
| 501 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 502 |
+
text_input_ids, untruncated_ids
|
| 503 |
+
):
|
| 504 |
+
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
|
| 505 |
+
logger.warning(
|
| 506 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 507 |
+
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
| 508 |
+
)
|
| 509 |
+
|
| 510 |
+
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
|
| 511 |
+
|
| 512 |
+
# We are always only interested in the pooled output of the final text encoder
|
| 513 |
+
if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2:
|
| 514 |
+
pooled_prompt_embeds = prompt_embeds[0]
|
| 515 |
+
|
| 516 |
+
if clip_skip is None:
|
| 517 |
+
prompt_embeds = prompt_embeds.hidden_states[-2]
|
| 518 |
+
else:
|
| 519 |
+
# "2" because SDXL always indexes from the penultimate layer.
|
| 520 |
+
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
|
| 521 |
+
|
| 522 |
+
prompt_embeds_list.append(prompt_embeds)
|
| 523 |
+
|
| 524 |
+
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
| 525 |
+
|
| 526 |
+
# get unconditional embeddings for classifier free guidance
|
| 527 |
+
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
| 528 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
| 529 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
| 530 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
| 531 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 532 |
+
negative_prompt = negative_prompt or ""
|
| 533 |
+
negative_prompt_2 = negative_prompt_2 or negative_prompt
|
| 534 |
+
|
| 535 |
+
# normalize str to list
|
| 536 |
+
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
|
| 537 |
+
negative_prompt_2 = (
|
| 538 |
+
batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
|
| 539 |
+
)
|
| 540 |
+
|
| 541 |
+
uncond_tokens: List[str]
|
| 542 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
| 543 |
+
raise TypeError(
|
| 544 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 545 |
+
f" {type(prompt)}."
|
| 546 |
+
)
|
| 547 |
+
elif batch_size != len(negative_prompt):
|
| 548 |
+
raise ValueError(
|
| 549 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 550 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 551 |
+
" the batch size of `prompt`."
|
| 552 |
+
)
|
| 553 |
+
else:
|
| 554 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
| 555 |
+
|
| 556 |
+
negative_prompt_embeds_list = []
|
| 557 |
+
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
|
| 558 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 559 |
+
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
|
| 560 |
+
|
| 561 |
+
max_length = prompt_embeds.shape[1]
|
| 562 |
+
uncond_input = tokenizer(
|
| 563 |
+
negative_prompt,
|
| 564 |
+
padding="max_length",
|
| 565 |
+
max_length=max_length,
|
| 566 |
+
truncation=True,
|
| 567 |
+
return_tensors="pt",
|
| 568 |
+
)
|
| 569 |
+
|
| 570 |
+
negative_prompt_embeds = text_encoder(
|
| 571 |
+
uncond_input.input_ids.to(device),
|
| 572 |
+
output_hidden_states=True,
|
| 573 |
+
)
|
| 574 |
+
|
| 575 |
+
# We are always only interested in the pooled output of the final text encoder
|
| 576 |
+
if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2:
|
| 577 |
+
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
|
| 578 |
+
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
|
| 579 |
+
|
| 580 |
+
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
| 581 |
+
|
| 582 |
+
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
|
| 583 |
+
|
| 584 |
+
if self.text_encoder_2 is not None:
|
| 585 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 586 |
+
else:
|
| 587 |
+
prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
|
| 588 |
+
|
| 589 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 590 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 591 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 592 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 593 |
+
|
| 594 |
+
if do_classifier_free_guidance:
|
| 595 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 596 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 597 |
+
|
| 598 |
+
if self.text_encoder_2 is not None:
|
| 599 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 600 |
+
else:
|
| 601 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
|
| 602 |
+
|
| 603 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 604 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 605 |
+
|
| 606 |
+
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 607 |
+
bs_embed * num_images_per_prompt, -1
|
| 608 |
+
)
|
| 609 |
+
if do_classifier_free_guidance:
|
| 610 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 611 |
+
bs_embed * num_images_per_prompt, -1
|
| 612 |
+
)
|
| 613 |
+
|
| 614 |
+
if self.text_encoder is not None:
|
| 615 |
+
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 616 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 617 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 618 |
+
|
| 619 |
+
if self.text_encoder_2 is not None:
|
| 620 |
+
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 621 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 622 |
+
unscale_lora_layers(self.text_encoder_2, lora_scale)
|
| 623 |
+
|
| 624 |
+
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 625 |
+
|
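For reference, `encode_prompt` above concatenates the penultimate hidden states of both text encoders along the feature axis and keeps the pooled output of the second encoder. A dummy-tensor shape sketch follows (no models loaded); the 768/1280 widths are the usual SDXL encoder sizes and are assumptions here, not values read from a config.

```py
# Shape sketch only (dummy tensors, no models): SDXL concatenates the two text
# encoders' penultimate hidden states along the feature axis; the pooled vector
# comes from the second encoder. The 768/1280 widths are assumed typical values.
import torch

batch, seq_len = 1, 77
hidden_clip_l = torch.randn(batch, seq_len, 768)   # from text_encoder (CLIP ViT-L)
hidden_clip_g = torch.randn(batch, seq_len, 1280)  # from text_encoder_2 (OpenCLIP bigG)
pooled = torch.randn(batch, 1280)                  # pooled output of the final encoder

prompt_embeds = torch.concat([hidden_clip_l, hidden_clip_g], dim=-1)
print(prompt_embeds.shape)  # torch.Size([1, 77, 2048])
print(pooled.shape)         # torch.Size([1, 1280])
```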
| 626 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 627 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 628 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 629 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 630 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 631 |
+
# and should be between [0, 1]
|
| 632 |
+
|
| 633 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 634 |
+
extra_step_kwargs = {}
|
| 635 |
+
if accepts_eta:
|
| 636 |
+
extra_step_kwargs["eta"] = eta
|
| 637 |
+
|
| 638 |
+
# check if the scheduler accepts generator
|
| 639 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 640 |
+
if accepts_generator:
|
| 641 |
+
extra_step_kwargs["generator"] = generator
|
| 642 |
+
return extra_step_kwargs
|
| 643 |
+
|
| 644 |
+
def check_inputs(self, prompt, height, width, grid_cols, seed_tiles_mode, tiles_mode):
|
| 645 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 646 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 647 |
+
|
| 648 |
+
if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 649 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 650 |
+
|
| 651 |
+
if not isinstance(prompt, list) or not all(isinstance(row, list) for row in prompt):
|
| 652 |
+
raise ValueError(f"`prompt` has to be a list of lists but is {type(prompt)}")
|
| 653 |
+
|
| 654 |
+
if not all(len(row) == grid_cols for row in prompt):
|
| 655 |
+
raise ValueError("All prompt rows must have the same number of prompt columns")
|
| 656 |
+
|
| 657 |
+
if not isinstance(seed_tiles_mode, str) and (
|
| 658 |
+
not isinstance(seed_tiles_mode, list) or not all(isinstance(row, list) for row in seed_tiles_mode)
|
| 659 |
+
):
|
| 660 |
+
raise ValueError(f"`seed_tiles_mode` has to be a string or list of lists but is {type(prompt)}")
|
| 661 |
+
|
| 662 |
+
if any(mode not in tiles_mode for row in seed_tiles_mode for mode in row):
|
| 663 |
+
raise ValueError(f"Seed tiles mode must be one of {tiles_mode}")
|
| 664 |
+
|
| 665 |
+
def _get_add_time_ids(
|
| 666 |
+
self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
|
| 667 |
+
):
|
| 668 |
+
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
| 669 |
+
|
| 670 |
+
passed_add_embed_dim = (
|
| 671 |
+
self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
|
| 672 |
+
)
|
| 673 |
+
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
|
| 674 |
+
|
| 675 |
+
if expected_add_embed_dim != passed_add_embed_dim:
|
| 676 |
+
raise ValueError(
|
| 677 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
|
| 678 |
+
)
|
| 679 |
+
|
| 680 |
+
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
|
| 681 |
+
return add_time_ids
|
| 682 |
+
|
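The micro-conditioning vector built by `_get_add_time_ids` above is simply `original_size + crop coordinates + target_size` flattened into one row. A standalone sketch of that construction, without the UNet dimensionality check, is shown below.

```py
# Standalone sketch of the SDXL micro-conditioning vector built above (the in-pipeline
# version additionally validates its length against the UNet's add_embedding layer).
import torch

original_size = (1024, 1024)
crops_coords_top_left = (0, 0)
target_size = (1024, 1024)

add_time_ids = list(original_size + crops_coords_top_left + target_size)
add_time_ids = torch.tensor([add_time_ids], dtype=torch.float16)
print(add_time_ids)  # tensor([[1024., 1024., 0., 0., 1024., 1024.]], dtype=torch.float16)
```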
| 683 |
+
def _gaussian_weights(self, tile_width, tile_height, nbatches, device, dtype):
|
| 684 |
+
"""Generates a gaussian mask of weights for tile contributions"""
|
| 685 |
+
import numpy as np
|
| 686 |
+
from numpy import exp, pi, sqrt
|
| 687 |
+
|
| 688 |
+
latent_width = tile_width // 8
|
| 689 |
+
latent_height = tile_height // 8
|
| 690 |
+
|
| 691 |
+
var = 0.01
|
| 692 |
+
midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1
|
| 693 |
+
x_probs = [
|
| 694 |
+
exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
|
| 695 |
+
for x in range(latent_width)
|
| 696 |
+
]
|
| 697 |
+
midpoint = latent_height / 2
|
| 698 |
+
y_probs = [
|
| 699 |
+
exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
|
| 700 |
+
for y in range(latent_height)
|
| 701 |
+
]
|
| 702 |
+
|
| 703 |
+
weights_np = np.outer(y_probs, x_probs)
|
| 704 |
+
weights_torch = torch.tensor(weights_np, device=device)
|
| 705 |
+
weights_torch = weights_torch.to(dtype)
|
| 706 |
+
return torch.tile(weights_torch, (nbatches, self.unet.config.in_channels, 1, 1))
|
| 707 |
+
|
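A quick CPU check of the mask produced above follows, with the `self.unet.config.in_channels` lookup replaced by a hard-coded 4 (an assumption for the sketch). The weights peak near the tile centre and decay toward the edges, which is what lets overlapping tiles blend smoothly when their noise predictions are averaged.

```py
# Quick CPU check of the gaussian tile-weight mask; in_channels=4 is assumed here
# instead of reading it from the UNet config.
import numpy as np
import torch
from numpy import exp, pi, sqrt

tile_width, tile_height, nbatches, in_channels = 1024, 1024, 1, 4
latent_width, latent_height = tile_width // 8, tile_height // 8

var = 0.01
midpoint = (latent_width - 1) / 2
x_probs = [
    exp(-(x - midpoint) ** 2 / (latent_width**2) / (2 * var)) / sqrt(2 * pi * var)
    for x in range(latent_width)
]
midpoint = latent_height / 2
y_probs = [
    exp(-(y - midpoint) ** 2 / (latent_height**2) / (2 * var)) / sqrt(2 * pi * var)
    for y in range(latent_height)
]

weights = torch.tile(torch.tensor(np.outer(y_probs, x_probs)), (nbatches, in_channels, 1, 1))
print(weights.shape)  # torch.Size([1, 4, 128, 128])
print(divmod(int(weights[0, 0].argmax()), latent_width))  # (row, col) near the tile centre
```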
| 708 |
+
def upcast_vae(self):
|
| 709 |
+
deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
|
| 710 |
+
self.vae.to(dtype=torch.float32)
|
| 711 |
+
|
| 712 |
+
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
|
| 713 |
+
def get_guidance_scale_embedding(
|
| 714 |
+
self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
|
| 715 |
+
) -> torch.Tensor:
|
| 716 |
+
"""
|
| 717 |
+
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
|
| 718 |
+
|
| 719 |
+
Args:
|
| 720 |
+
w (`torch.Tensor`):
|
| 721 |
+
Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
|
| 722 |
+
embedding_dim (`int`, *optional*, defaults to 512):
|
| 723 |
+
Dimension of the embeddings to generate.
|
| 724 |
+
dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
|
| 725 |
+
Data type of the generated embeddings.
|
| 726 |
+
|
| 727 |
+
Returns:
|
| 728 |
+
`torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
|
| 729 |
+
"""
|
| 730 |
+
assert len(w.shape) == 1
|
| 731 |
+
w = w * 1000.0
|
| 732 |
+
|
| 733 |
+
half_dim = embedding_dim // 2
|
| 734 |
+
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
|
| 735 |
+
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
|
| 736 |
+
emb = w.to(dtype)[:, None] * emb[None, :]
|
| 737 |
+
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
| 738 |
+
if embedding_dim % 2 == 1: # zero pad
|
| 739 |
+
emb = torch.nn.functional.pad(emb, (0, 1))
|
| 740 |
+
assert emb.shape == (w.shape[0], embedding_dim)
|
| 741 |
+
return emb
|
| 742 |
+
|
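A standalone sketch of the same sinusoidal embedding for a single guidance weight is shown below; the pipeline only feeds this to the UNet when `time_cond_proj_dim` is configured (typically LCM-style distilled UNets).

```py
# Standalone sketch of the sinusoidal guidance-scale embedding computed above,
# for a single guidance weight w = 7.5 and a 512-dimensional embedding.
import torch

w = torch.tensor([7.5]) * 1000.0
embedding_dim = 512
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = w[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
print(emb.shape)  # torch.Size([1, 512])
```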
| 743 |
+
@property
|
| 744 |
+
def guidance_scale(self):
|
| 745 |
+
return self._guidance_scale
|
| 746 |
+
|
| 747 |
+
@property
|
| 748 |
+
def clip_skip(self):
|
| 749 |
+
return self._clip_skip
|
| 750 |
+
|
| 751 |
+
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
|
| 752 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 753 |
+
# corresponds to doing no classifier free guidance.
|
| 754 |
+
@property
|
| 755 |
+
def do_classifier_free_guidance(self):
|
| 756 |
+
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
|
| 757 |
+
|
| 758 |
+
@property
|
| 759 |
+
def cross_attention_kwargs(self):
|
| 760 |
+
return self._cross_attention_kwargs
|
| 761 |
+
|
| 762 |
+
@property
|
| 763 |
+
def num_timesteps(self):
|
| 764 |
+
return self._num_timesteps
|
| 765 |
+
|
| 766 |
+
@property
|
| 767 |
+
def interrupt(self):
|
| 768 |
+
return self._interrupt
|
| 769 |
+
|
| 770 |
+
@torch.no_grad()
|
| 771 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 772 |
+
def __call__(
|
| 773 |
+
self,
|
| 774 |
+
prompt: Union[str, List[str]] = None,
|
| 775 |
+
height: Optional[int] = None,
|
| 776 |
+
width: Optional[int] = None,
|
| 777 |
+
num_inference_steps: int = 50,
|
| 778 |
+
guidance_scale: float = 5.0,
|
| 779 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 780 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 781 |
+
eta: float = 0.0,
|
| 782 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 783 |
+
output_type: Optional[str] = "pil",
|
| 784 |
+
return_dict: bool = True,
|
| 785 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 786 |
+
original_size: Optional[Tuple[int, int]] = None,
|
| 787 |
+
crops_coords_top_left: Optional[List[List[Tuple[int, int]]]] = None,
|
| 788 |
+
target_size: Optional[Tuple[int, int]] = None,
|
| 789 |
+
negative_original_size: Optional[Tuple[int, int]] = None,
|
| 790 |
+
negative_crops_coords_top_left: Optional[List[List[Tuple[int, int]]]] = None,
|
| 791 |
+
negative_target_size: Optional[Tuple[int, int]] = None,
|
| 792 |
+
clip_skip: Optional[int] = None,
|
| 793 |
+
tile_height: Optional[int] = 1024,
|
| 794 |
+
tile_width: Optional[int] = 1024,
|
| 795 |
+
tile_row_overlap: Optional[int] = 128,
|
| 796 |
+
tile_col_overlap: Optional[int] = 128,
|
| 797 |
+
guidance_scale_tiles: Optional[List[List[float]]] = None,
|
| 798 |
+
seed_tiles: Optional[List[List[int]]] = None,
|
| 799 |
+
seed_tiles_mode: Optional[Union[str, List[List[str]]]] = "full",
|
| 800 |
+
seed_reroll_regions: Optional[List[Tuple[int, int, int, int, int]]] = None,
|
| 801 |
+
**kwargs,
|
| 802 |
+
):
|
| 803 |
+
r"""
|
| 804 |
+
Function invoked when calling the pipeline for generation.
|
| 805 |
+
|
| 806 |
+
Args:
|
| 807 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 808 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
|
| 809 |
+
instead.
|
| 810 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 811 |
+
The height in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 812 |
+
Anything below 512 pixels won't work well for
|
| 813 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 814 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 815 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 816 |
+
The width in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 817 |
+
Anything below 512 pixels won't work well for
|
| 818 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 819 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 820 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 821 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 822 |
+
expense of slower inference.
|
| 823 |
+
guidance_scale (`float`, *optional*, defaults to 5.0):
|
| 824 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 825 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 826 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 827 |
+
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
|
| 828 |
+
usually at the expense of lower image quality.
|
| 829 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 830 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 831 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 832 |
+
less than `1`).
|
| 833 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 834 |
+
The number of images to generate per prompt.
|
| 835 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 836 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 837 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 838 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 839 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 840 |
+
to make generation deterministic.
|
| 841 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 842 |
+
The output format of the generated image. Choose between
|
| 843 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 844 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 845 |
+
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
|
| 846 |
+
of a plain tuple.
|
| 847 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 848 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 849 |
+
`self.processor` in
|
| 850 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 851 |
+
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 852 |
+
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
| 853 |
+
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
|
| 854 |
+
explained in section 2.2 of
|
| 855 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 856 |
+
crops_coords_top_left (`List[List[Tuple[int, int]]]`, *optional*, defaults to (0, 0)):
|
| 857 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 858 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 859 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 860 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 861 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 862 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 863 |
+
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
|
| 864 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 865 |
+
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 866 |
+
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
|
| 867 |
+
micro-conditioning as explained in section 2.2 of
|
| 868 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 869 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 870 |
+
negative_crops_coords_top_left (`List[List[Tuple[int, int]]]`, *optional*, defaults to (0, 0)):
|
| 871 |
+
To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
|
| 872 |
+
micro-conditioning as explained in section 2.2 of
|
| 873 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 874 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 875 |
+
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 876 |
+
To negatively condition the generation process based on a target image resolution. It should be the same
|
| 877 |
+
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 878 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 879 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 880 |
+
tile_height (`int`, *optional*, defaults to 1024):
|
| 881 |
+
Height of each grid tile in pixels.
|
| 882 |
+
tile_width (`int`, *optional*, defaults to 1024):
|
| 883 |
+
Width of each grid tile in pixels.
|
| 884 |
+
tile_row_overlap (`int`, *optional*, defaults to 128):
|
| 885 |
+
Number of overlapping pixels between tiles in consecutive rows.
|
| 886 |
+
tile_col_overlap (`int`, *optional*, defaults to 128):
|
| 887 |
+
Number of overlapping pixels between tiles in consecutive columns.
|
| 888 |
+
guidance_scale_tiles (`List[List[float]]`, *optional*):
|
| 889 |
+
Specific weights for classifier-free guidance in each tile. If `None`, the value provided in `guidance_scale` will be used.
|
| 890 |
+
seed_tiles (`List[List[int]]`, *optional*):
|
| 891 |
+
Specific seeds for the initialization latents in each tile. These will override the latents generated for the whole canvas using the standard `generator` parameter.
|
| 892 |
+
seed_tiles_mode (`Union[str, List[List[str]]]`, *optional*, defaults to `"full"`):
|
| 893 |
+
Mode for seeding tiles, can be `"full"` or `"exclusive"`. If `"full"`, all the latents affected by the tile will be overridden. If `"exclusive"`, only the latents that are exclusively affected by this tile (and no other tiles) will be overridden.
|
| 894 |
+
seed_reroll_regions (`List[Tuple[int, int, int, int, int]]`, *optional*):
|
| 895 |
+
A list of tuples in the form of `(start_row, end_row, start_column, end_column, seed)` defining regions in pixel space for which the latents will be overridden using the given seed. Takes priority over `seed_tiles`.
|
| 896 |
+
**kwargs (`Dict[str, Any]`, *optional*):
|
| 897 |
+
Additional optional keyword arguments to be passed to the `unet.__call__` and `scheduler.step` functions.
|
| 898 |
+
|
| 899 |
+
Examples:
|
| 900 |
+
|
| 901 |
+
Returns:
|
| 902 |
+
[`~pipelines.stable_diffusion_xl.StableDiffusionXLTilingPipelineOutput`] or `tuple`:
|
| 903 |
+
[`~pipelines.stable_diffusion_xl.StableDiffusionXLTilingPipelineOutput`] if `return_dict` is True, otherwise a
|
| 904 |
+
`tuple`. When returning a tuple, the first element is a list with the generated images.
|
| 905 |
+
"""
|
| 906 |
+
|
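A hedged usage sketch for this `__call__` follows: it loads the file as a community pipeline and renders a 1x2 grid of prompts into a single wide image. The `custom_pipeline="mixture_tiling_sdxl"` identifier and the base checkpoint are assumptions to adapt to your setup, not values asserted by this file.

```py
# Usage sketch (assumptions: custom_pipeline id and base checkpoint); renders a
# 1-row x 2-column prompt grid into one stitched image.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    custom_pipeline="mixture_tiling_sdxl",
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    prompt=[["a snowy mountain range", "a pine forest at the foot of the mountains"]],
    tile_height=1024,
    tile_width=1024,
    tile_row_overlap=128,
    tile_col_overlap=256,
    guidance_scale=7.0,
    num_inference_steps=30,
    generator=torch.Generator("cuda").manual_seed(0),
).images[0]
image.save("tiled_panorama.png")
```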
| 907 |
+
# 0. Default height and width to unet
|
| 908 |
+
height = height or self.default_sample_size * self.vae_scale_factor
|
| 909 |
+
width = width or self.default_sample_size * self.vae_scale_factor
|
| 910 |
+
|
| 911 |
+
original_size = original_size or (height, width)
|
| 912 |
+
target_size = target_size or (height, width)
|
| 913 |
+
negative_original_size = negative_original_size or (height, width)
|
| 914 |
+
negative_target_size = negative_target_size or (height, width)
|
| 915 |
+
|
| 916 |
+
self._guidance_scale = guidance_scale
|
| 917 |
+
self._clip_skip = clip_skip
|
| 918 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 919 |
+
self._interrupt = False
|
| 920 |
+
|
| 921 |
+
grid_rows = len(prompt)
|
| 922 |
+
grid_cols = len(prompt[0])
|
| 923 |
+
|
| 924 |
+
tiles_mode = [mode.value for mode in self.SeedTilesMode]
|
| 925 |
+
|
| 926 |
+
if isinstance(seed_tiles_mode, str):
|
| 927 |
+
seed_tiles_mode = [[seed_tiles_mode for _ in range(len(row))] for row in prompt]
|
| 928 |
+
|
| 929 |
+
# 1. Check inputs. Raise error if not correct
|
| 930 |
+
self.check_inputs(
|
| 931 |
+
prompt,
|
| 932 |
+
height,
|
| 933 |
+
width,
|
| 934 |
+
grid_cols,
|
| 935 |
+
seed_tiles_mode,
|
| 936 |
+
tiles_mode,
|
| 937 |
+
)
|
| 938 |
+
|
| 939 |
+
if seed_reroll_regions is None:
|
| 940 |
+
seed_reroll_regions = []
|
| 941 |
+
|
| 942 |
+
batch_size = 1
|
| 943 |
+
|
| 944 |
+
device = self._execution_device
|
| 945 |
+
|
| 946 |
+
# update crops coords list
|
| 947 |
+
crops_coords_top_left = _get_crops_coords_list(grid_rows, grid_cols, tile_width)
|
| 948 |
+
if negative_original_size is not None and negative_target_size is not None:
|
| 949 |
+
negative_crops_coords_top_left = _get_crops_coords_list(grid_rows, grid_cols, tile_width)
|
| 950 |
+
|
| 951 |
+
# update height and width tile size and tile overlap size
|
| 952 |
+
height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
|
| 953 |
+
width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
|
| 954 |
+
|
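A worked instance of the two formulas above: every extra row or column adds one tile minus its overlap to the canvas size.

```py
# Worked example of the canvas-size formulas above for a 2x3 grid of 1024px tiles
# with 128px overlaps in both directions.
tile_height, tile_width = 1024, 1024
tile_row_overlap, tile_col_overlap = 128, 128
grid_rows, grid_cols = 2, 3

height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
print(height, width)  # 1920 2816
```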
| 955 |
+
# 3. Encode input prompt
|
| 956 |
+
lora_scale = (
|
| 957 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 958 |
+
)
|
| 959 |
+
text_embeddings = [
|
| 960 |
+
[
|
| 961 |
+
self.encode_prompt(
|
| 962 |
+
prompt=col,
|
| 963 |
+
device=device,
|
| 964 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 965 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 966 |
+
negative_prompt=negative_prompt,
|
| 967 |
+
prompt_embeds=None,
|
| 968 |
+
negative_prompt_embeds=None,
|
| 969 |
+
pooled_prompt_embeds=None,
|
| 970 |
+
negative_pooled_prompt_embeds=None,
|
| 971 |
+
lora_scale=lora_scale,
|
| 972 |
+
clip_skip=self.clip_skip,
|
| 973 |
+
)
|
| 974 |
+
for col in row
|
| 975 |
+
]
|
| 976 |
+
for row in prompt
|
| 977 |
+
]
|
| 978 |
+
|
| 979 |
+
# 3. Prepare latents
|
| 980 |
+
latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
|
| 981 |
+
dtype = text_embeddings[0][0][0].dtype
|
| 982 |
+
latents = randn_tensor(latents_shape, generator=generator, device=device, dtype=dtype)
|
| 983 |
+
|
| 984 |
+
# 3.1 overwrite latents for specific tiles if provided
|
| 985 |
+
if seed_tiles is not None:
|
| 986 |
+
for row in range(grid_rows):
|
| 987 |
+
for col in range(grid_cols):
|
| 988 |
+
if (seed_tile := seed_tiles[row][col]) is not None:
|
| 989 |
+
mode = seed_tiles_mode[row][col]
|
| 990 |
+
if mode == self.SeedTilesMode.FULL.value:
|
| 991 |
+
row_init, row_end, col_init, col_end = _tile2latent_indices(
|
| 992 |
+
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
| 993 |
+
)
|
| 994 |
+
else:
|
| 995 |
+
row_init, row_end, col_init, col_end = _tile2latent_exclusive_indices(
|
| 996 |
+
row,
|
| 997 |
+
col,
|
| 998 |
+
tile_width,
|
| 999 |
+
tile_height,
|
| 1000 |
+
tile_row_overlap,
|
| 1001 |
+
tile_col_overlap,
|
| 1002 |
+
grid_rows,
|
| 1003 |
+
grid_cols,
|
| 1004 |
+
)
|
| 1005 |
+
tile_generator = torch.Generator(device).manual_seed(seed_tile)
|
| 1006 |
+
tile_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
|
| 1007 |
+
latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
|
| 1008 |
+
tile_shape, generator=tile_generator, device=device
|
| 1009 |
+
)
|
| 1010 |
+
|
| 1011 |
+
# 3.2 overwrite again for seed reroll regions
|
| 1012 |
+
for row_init, row_end, col_init, col_end, seed_reroll in seed_reroll_regions:
|
| 1013 |
+
row_init, row_end, col_init, col_end = _pixel2latent_indices(
|
| 1014 |
+
row_init, row_end, col_init, col_end
|
| 1015 |
+
) # to latent space coordinates
|
| 1016 |
+
reroll_generator = torch.Generator(device).manual_seed(seed_reroll)
|
| 1017 |
+
region_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
|
| 1018 |
+
latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
|
| 1019 |
+
region_shape, generator=reroll_generator, device=device
|
| 1020 |
+
)
|
| 1021 |
+
|
| 1022 |
+
# 4. Prepare timesteps
|
| 1023 |
+
accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
|
| 1024 |
+
extra_set_kwargs = {}
|
| 1025 |
+
if accepts_offset:
|
| 1026 |
+
extra_set_kwargs["offset"] = 1
|
| 1027 |
+
timesteps, num_inference_steps = retrieve_timesteps(
|
| 1028 |
+
self.scheduler, num_inference_steps, device, None, None, **extra_set_kwargs
|
| 1029 |
+
)
|
| 1030 |
+
|
| 1031 |
+
# if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
|
| 1032 |
+
if isinstance(self.scheduler, LMSDiscreteScheduler):
|
| 1033 |
+
latents = latents * self.scheduler.sigmas[0]
|
| 1034 |
+
|
| 1035 |
+
# 5. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1036 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1037 |
+
|
| 1038 |
+
# 6. Prepare added time ids & embeddings
|
| 1039 |
+
# text_embeddings order: prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 1040 |
+
embeddings_and_added_time = []
|
| 1041 |
+
for row in range(grid_rows):
|
| 1042 |
+
addition_embed_type_row = []
|
| 1043 |
+
for col in range(grid_cols):
|
| 1044 |
+
# extract generated values
|
| 1045 |
+
prompt_embeds = text_embeddings[row][col][0]
|
| 1046 |
+
negative_prompt_embeds = text_embeddings[row][col][1]
|
| 1047 |
+
pooled_prompt_embeds = text_embeddings[row][col][2]
|
| 1048 |
+
negative_pooled_prompt_embeds = text_embeddings[row][col][3]
|
| 1049 |
+
|
| 1050 |
+
add_text_embeds = pooled_prompt_embeds
|
| 1051 |
+
if self.text_encoder_2 is None:
|
| 1052 |
+
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
|
| 1053 |
+
else:
|
| 1054 |
+
text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
|
| 1055 |
+
add_time_ids = self._get_add_time_ids(
|
| 1056 |
+
original_size,
|
| 1057 |
+
crops_coords_top_left[row][col],
|
| 1058 |
+
target_size,
|
| 1059 |
+
dtype=prompt_embeds.dtype,
|
| 1060 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1061 |
+
)
|
| 1062 |
+
if negative_original_size is not None and negative_target_size is not None:
|
| 1063 |
+
negative_add_time_ids = self._get_add_time_ids(
|
| 1064 |
+
negative_original_size,
|
| 1065 |
+
negative_crops_coords_top_left[row][col],
|
| 1066 |
+
negative_target_size,
|
| 1067 |
+
dtype=prompt_embeds.dtype,
|
| 1068 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1069 |
+
)
|
| 1070 |
+
else:
|
| 1071 |
+
negative_add_time_ids = add_time_ids
|
| 1072 |
+
|
| 1073 |
+
if self.do_classifier_free_guidance:
|
| 1074 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1075 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 1076 |
+
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
|
| 1077 |
+
|
| 1078 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 1079 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 1080 |
+
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 1081 |
+
addition_embed_type_row.append((prompt_embeds, add_text_embeds, add_time_ids))
|
| 1082 |
+
embeddings_and_added_time.append(addition_embed_type_row)
|
| 1083 |
+
|
| 1084 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 1085 |
+
|
| 1086 |
+
# 7. Mask for tile weights strength
|
| 1087 |
+
tile_weights = self._gaussian_weights(tile_width, tile_height, batch_size, device, torch.float32)
|
| 1088 |
+
|
| 1089 |
+
# 8. Denoising loop
|
| 1090 |
+
self._num_timesteps = len(timesteps)
|
| 1091 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1092 |
+
for i, t in enumerate(timesteps):
|
| 1093 |
+
# Diffuse each tile
|
| 1094 |
+
noise_preds = []
|
| 1095 |
+
for row in range(grid_rows):
|
| 1096 |
+
noise_preds_row = []
|
| 1097 |
+
for col in range(grid_cols):
|
| 1098 |
+
if self.interrupt:
|
| 1099 |
+
continue
|
| 1100 |
+
px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
|
| 1101 |
+
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
| 1102 |
+
)
|
| 1103 |
+
tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end]
|
| 1104 |
+
# expand the latents if we are doing classifier free guidance
|
| 1105 |
+
latent_model_input = (
|
| 1106 |
+
torch.cat([tile_latents] * 2) if self.do_classifier_free_guidance else tile_latents
|
| 1107 |
+
)
|
| 1108 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1109 |
+
|
| 1110 |
+
# predict the noise residual
|
| 1111 |
+
added_cond_kwargs = {
|
| 1112 |
+
"text_embeds": embeddings_and_added_time[row][col][1],
|
| 1113 |
+
"time_ids": embeddings_and_added_time[row][col][2],
|
| 1114 |
+
}
|
| 1115 |
+
with torch.amp.autocast(device.type, dtype=dtype, enabled=dtype != self.unet.dtype):
|
| 1116 |
+
noise_pred = self.unet(
|
| 1117 |
+
latent_model_input,
|
| 1118 |
+
t,
|
| 1119 |
+
encoder_hidden_states=embeddings_and_added_time[row][col][0],
|
| 1120 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 1121 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1122 |
+
return_dict=False,
|
| 1123 |
+
)[0]
|
| 1124 |
+
|
| 1125 |
+
# perform guidance
|
| 1126 |
+
if self.do_classifier_free_guidance:
|
| 1127 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1128 |
+
guidance = (
|
| 1129 |
+
guidance_scale
|
| 1130 |
+
if guidance_scale_tiles is None or guidance_scale_tiles[row][col] is None
|
| 1131 |
+
else guidance_scale_tiles[row][col]
|
| 1132 |
+
)
|
| 1133 |
+
noise_pred_tile = noise_pred_uncond + guidance * (noise_pred_text - noise_pred_uncond)
|
| 1134 |
+
noise_preds_row.append(noise_pred_tile)
|
| 1135 |
+
noise_preds.append(noise_preds_row)
|
| 1136 |
+
|
| 1137 |
+
# Stitch noise predictions for all tiles
|
| 1138 |
+
noise_pred = torch.zeros(latents.shape, device=device)
|
| 1139 |
+
contributors = torch.zeros(latents.shape, device=device)
|
| 1140 |
+
|
| 1141 |
+
# Add each tile contribution to overall latents
|
| 1142 |
+
for row in range(grid_rows):
|
| 1143 |
+
for col in range(grid_cols):
|
| 1144 |
+
px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
|
| 1145 |
+
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
|
| 1146 |
+
)
|
| 1147 |
+
noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += (
|
| 1148 |
+
noise_preds[row][col] * tile_weights
|
| 1149 |
+
)
|
| 1150 |
+
contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights
|
| 1151 |
+
|
| 1152 |
+
# Average overlapping areas with more than 1 contributor
|
| 1153 |
+
noise_pred /= contributors
|
| 1154 |
+
noise_pred = noise_pred.to(dtype)
|
| 1155 |
+
|
| 1156 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1157 |
+
latents_dtype = latents.dtype
|
| 1158 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 1159 |
+
if latents.dtype != latents_dtype:
|
| 1160 |
+
if torch.backends.mps.is_available():
|
| 1161 |
+
# some platforms (e.g. Apple MPS) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
|
| 1162 |
+
latents = latents.to(latents_dtype)
|
| 1163 |
+
|
| 1164 |
+
# update progress bar
|
| 1165 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1166 |
+
progress_bar.update()
|
| 1167 |
+
|
| 1168 |
+
if XLA_AVAILABLE:
|
| 1169 |
+
xm.mark_step()
|
| 1170 |
+
|
| 1171 |
+
if not output_type == "latent":
|
| 1172 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1173 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1174 |
+
|
| 1175 |
+
if needs_upcasting:
|
| 1176 |
+
self.upcast_vae()
|
| 1177 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1178 |
+
elif latents.dtype != self.vae.dtype:
|
| 1179 |
+
if torch.backends.mps.is_available():
|
| 1180 |
+
# some platforms (e.g. Apple MPS) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
|
| 1181 |
+
self.vae = self.vae.to(latents.dtype)
|
| 1182 |
+
|
| 1183 |
+
# unscale/denormalize the latents
|
| 1184 |
+
# denormalize with the mean and std if available and not None
|
| 1185 |
+
has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
|
| 1186 |
+
has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
|
| 1187 |
+
if has_latents_mean and has_latents_std:
|
| 1188 |
+
latents_mean = (
|
| 1189 |
+
torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
|
| 1190 |
+
)
|
| 1191 |
+
latents_std = (
|
| 1192 |
+
torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
|
| 1193 |
+
)
|
| 1194 |
+
latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
|
| 1195 |
+
else:
|
| 1196 |
+
latents = latents / self.vae.config.scaling_factor
|
| 1197 |
+
|
| 1198 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 1199 |
+
|
| 1200 |
+
# cast back to fp16 if needed
|
| 1201 |
+
if needs_upcasting:
|
| 1202 |
+
self.vae.to(dtype=torch.float16)
|
| 1203 |
+
else:
|
| 1204 |
+
image = latents
|
| 1205 |
+
|
| 1206 |
+
if not output_type == "latent":
|
| 1207 |
+
# apply watermark if available
|
| 1208 |
+
if self.watermark is not None:
|
| 1209 |
+
image = self.watermark.apply_watermark(image)
|
| 1210 |
+
|
| 1211 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1212 |
+
|
| 1213 |
+
# Offload all models
|
| 1214 |
+
self.maybe_free_model_hooks()
|
| 1215 |
+
|
| 1216 |
+
if not return_dict:
|
| 1217 |
+
return (image,)
|
| 1218 |
+
|
| 1219 |
+
return StableDiffusionXLPipelineOutput(images=image)
|
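The denoising loop above computes one noise prediction per tile, scales it by the gaussian mask, accumulates it into a canvas-sized tensor, and divides by the summed mask so that overlapping regions become weighted averages. A self-contained sketch of that blending step with dummy tensors is shown below (uniform weights stand in for the gaussian mask).

```py
# Self-contained sketch (dummy tensors) of the overlap-blending step in the denoising
# loop: per-tile predictions are weighted, summed into the canvas, and normalized by
# the accumulated weights so overlapping regions average smoothly.
import torch

latent_h, latent_w, tile, overlap = 224, 224, 128, 32  # toy latent-space sizes
stride = tile - overlap
canvas = torch.zeros(1, 4, latent_h, latent_w)
contributors = torch.zeros_like(canvas)
weights = torch.ones(1, 4, tile, tile)  # the pipeline uses a gaussian mask instead

for row_init in range(0, latent_h - tile + 1, stride):
    for col_init in range(0, latent_w - tile + 1, stride):
        tile_pred = torch.randn(1, 4, tile, tile)  # stand-in for a UNet noise prediction
        canvas[:, :, row_init : row_init + tile, col_init : col_init + tile] += tile_pred * weights
        contributors[:, :, row_init : row_init + tile, col_init : col_init + tile] += weights

blended = canvas / contributors  # overlapping areas become weighted averages
print(blended.shape)  # torch.Size([1, 4, 224, 224])
```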
v0.36.0/mod_controlnet_tile_sr_sdxl.py
ADDED
|
@@ -0,0 +1,1845 @@
|
# Copyright 2025 The DEVAIEXP Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from transformers import (
    CLIPTextModel,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
)

from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import (
    FromSingleFileMixin,
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models import (
    AutoencoderKL,
    ControlNetModel,
    ControlNetUnionModel,
    MultiControlNetModel,
    UNet2DConditionModel,
)
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
from diffusers.schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler
from diffusers.utils import (
    USE_PEFT_BACKEND,
    deprecate,
    logging,
    replace_example_docstring,
    scale_lora_layers,
    unscale_lora_layers,
)
from diffusers.utils.import_utils import is_invisible_watermark_available
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor


if is_invisible_watermark_available():
    from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker

from diffusers.utils import is_torch_xla_available


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        import torch
        from diffusers import DiffusionPipeline, ControlNetUnionModel, AutoencoderKL, UniPCMultistepScheduler
        from diffusers.utils import load_image
        from PIL import Image

        device = "cuda"

        # Initialize the models and pipeline
        controlnet = ControlNetUnionModel.from_pretrained(
            "brad-twinkl/controlnet-union-sdxl-1.0-promax", torch_dtype=torch.float16
        ).to(device=device)
        vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16).to(device=device)

        model_id = "SG161222/RealVisXL_V5.0"
        pipe = StableDiffusionXLControlNetTileSRPipeline.from_pretrained(
            model_id, controlnet=controlnet, vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16"
        ).to(device)

        pipe.enable_model_cpu_offload()  # << Enable this if you have limited VRAM
        pipe.enable_vae_tiling()  # << Enable this if you have limited VRAM
        pipe.enable_vae_slicing()  # << Enable this if you have limited VRAM

        # Set selected scheduler
        pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

        # Load image
        control_image = load_image("https://huggingface.co/datasets/DEVAIEXP/assets/resolve/main/1.jpg")
        original_height = control_image.height
        original_width = control_image.width
        print(f"Current resolution: H:{original_height} x W:{original_width}")

        # Pre-upscale image for tiling
        resolution = 4096
        tile_gaussian_sigma = 0.3
        max_tile_size = 1024  # or 1280

        current_size = max(control_image.size)
        scale_factor = max(2, resolution / current_size)
        new_size = (int(control_image.width * scale_factor), int(control_image.height * scale_factor))
        image = control_image.resize(new_size, Image.LANCZOS)

        # Update target height and width
        target_height = image.height
        target_width = image.width
        print(f"Target resolution: H:{target_height} x W:{target_width}")

        # Calculate overlap size
        normal_tile_overlap, border_tile_overlap = pipe.calculate_overlap(target_width, target_height)

        # Set other params
        tile_weighting_method = pipe.TileWeightingMethod.COSINE.value
        guidance_scale = 4
        num_inference_steps = 35
        denoising_strength = 0.65
        controlnet_strength = 1.0
        prompt = "high-quality, noise-free edges, high quality, 4k, hd, 8k"
        negative_prompt = "blurry, pixelated, noisy, low resolution, artifacts, poor details"

        # Image generation
        control_image = pipe(
            image=image,
            control_image=control_image,
            control_mode=[6],
            controlnet_conditioning_scale=float(controlnet_strength),
            prompt=prompt,
            negative_prompt=negative_prompt,
            normal_tile_overlap=normal_tile_overlap,
            border_tile_overlap=border_tile_overlap,
            height=target_height,
            width=target_width,
            original_size=(original_width, original_height),
            target_size=(target_width, target_height),
            guidance_scale=guidance_scale,
            strength=float(denoising_strength),
            tile_weighting_method=tile_weighting_method,
            max_tile_size=max_tile_size,
            tile_gaussian_sigma=float(tile_gaussian_sigma),
            num_inference_steps=num_inference_steps,
        )["images"][0]
        ```
"""


# This function was copied and adapted from https://huggingface.co/spaces/gokaygokay/TileUpscalerV2, licensed under Apache 2.0.
|
| 161 |
+
def _adaptive_tile_size(image_size, base_tile_size=512, max_tile_size=1280):
|
| 162 |
+
"""
|
| 163 |
+
Calculate the adaptive tile size based on the image dimensions, ensuring the tile
|
| 164 |
+
respects the aspect ratio and stays within the specified size limits.
|
| 165 |
+
"""
|
| 166 |
+
width, height = image_size
|
| 167 |
+
aspect_ratio = width / height
|
| 168 |
+
|
| 169 |
+
if aspect_ratio > 1:
|
| 170 |
+
# Landscape orientation
|
| 171 |
+
tile_width = min(width, max_tile_size)
|
| 172 |
+
tile_height = min(int(tile_width / aspect_ratio), max_tile_size)
|
| 173 |
+
else:
|
| 174 |
+
# Portrait or square orientation
|
| 175 |
+
tile_height = min(height, max_tile_size)
|
| 176 |
+
tile_width = min(int(tile_height * aspect_ratio), max_tile_size)
|
| 177 |
+
|
| 178 |
+
# Ensure the tile size is not smaller than the base_tile_size
|
| 179 |
+
tile_width = max(tile_width, base_tile_size)
|
| 180 |
+
tile_height = max(tile_height, base_tile_size)
|
| 181 |
+
|
| 182 |
+
return tile_width, tile_height
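# Illustrative example of the helper above (values assumed, not from the original docs):
# for a 4096x3072 landscape image with the default base/max tile sizes,
# _adaptive_tile_size((4096, 3072)) keeps the aspect ratio and clamps to the maximum,
# returning (1280, 960).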
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
# Copied and adapted from https://github.com/huggingface/diffusers/blob/main/examples/community/mixture_tiling.py
|
| 186 |
+
def _tile2pixel_indices(
|
| 187 |
+
tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, image_width, image_height
|
| 188 |
+
):
|
| 189 |
+
"""Given a tile row and column numbers returns the range of pixels affected by that tiles in the overall image
|
| 190 |
+
|
| 191 |
+
Returns a tuple with:
|
| 192 |
+
- Starting coordinates of rows in pixel space
|
| 193 |
+
- Ending coordinates of rows in pixel space
|
| 194 |
+
- Starting coordinates of columns in pixel space
|
| 195 |
+
- Ending coordinates of columns in pixel space
|
| 196 |
+
"""
|
| 197 |
+
# Calculate initial indices
|
| 198 |
+
px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap)
|
| 199 |
+
px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap)
|
| 200 |
+
|
| 201 |
+
# Calculate end indices
|
| 202 |
+
px_row_end = px_row_init + tile_height
|
| 203 |
+
px_col_end = px_col_init + tile_width
|
| 204 |
+
|
| 205 |
+
# Ensure the last tile does not exceed the image dimensions
|
| 206 |
+
px_row_end = min(px_row_end, image_height)
|
| 207 |
+
px_col_end = min(px_col_end, image_width)
|
| 208 |
+
|
| 209 |
+
return px_row_init, px_row_end, px_col_init, px_col_end
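# Worked example (illustrative, assumed values): with tile_row=1, tile_col=0, a
# 1024x1024 tile, 128 px overlaps and a 4096x4096 image, the second row of tiles
# starts at pixel row 1 * (1024 - 128) = 896, so this returns (896, 1920, 0, 1024).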
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
# Copied and adapted from https://github.com/huggingface/diffusers/blob/main/examples/community/mixture_tiling.py
|
| 213 |
+
def _tile2latent_indices(
|
| 214 |
+
tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, image_width, image_height
|
| 215 |
+
):
|
| 216 |
+
"""Given a tile row and column numbers returns the range of latents affected by that tiles in the overall image
|
| 217 |
+
|
| 218 |
+
Returns a tuple with:
|
| 219 |
+
- Starting coordinates of rows in latent space
|
| 220 |
+
- Ending coordinates of rows in latent space
|
| 221 |
+
- Starting coordinates of columns in latent space
|
| 222 |
+
- Ending coordinates of columns in latent space
|
| 223 |
+
"""
|
| 224 |
+
# Get pixel indices
|
| 225 |
+
px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
|
| 226 |
+
tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, image_width, image_height
|
| 227 |
+
)
|
| 228 |
+
|
| 229 |
+
# Convert to latent space
|
| 230 |
+
latent_row_init = px_row_init // 8
|
| 231 |
+
latent_row_end = px_row_end // 8
|
| 232 |
+
latent_col_init = px_col_init // 8
|
| 233 |
+
latent_col_end = px_col_end // 8
|
| 234 |
+
latent_height = image_height // 8
|
| 235 |
+
latent_width = image_width // 8
|
| 236 |
+
|
| 237 |
+
# Ensure the last tile does not exceed the latent dimensions
|
| 238 |
+
latent_row_end = min(latent_row_end, latent_height)
|
| 239 |
+
latent_col_end = min(latent_col_end, latent_width)
|
| 240 |
+
|
| 241 |
+
return latent_row_init, latent_row_end, latent_col_init, latent_col_end
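# Note: latent indices are the pixel indices divided by the VAE downsampling factor
# of 8, so the illustrative pixel range (896, 1920, 0, 1024) above maps to the
# latent range (112, 240, 0, 128) for the same tile.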
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
|
| 245 |
+
def retrieve_latents(
|
| 246 |
+
encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
|
| 247 |
+
):
|
| 248 |
+
if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
|
| 249 |
+
return encoder_output.latent_dist.sample(generator)
|
| 250 |
+
elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
|
| 251 |
+
return encoder_output.latent_dist.mode()
|
| 252 |
+
elif hasattr(encoder_output, "latents"):
|
| 253 |
+
return encoder_output.latents
|
| 254 |
+
else:
|
| 255 |
+
raise AttributeError("Could not access latents of provided encoder_output")
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
class StableDiffusionXLControlNetTileSRPipeline(
|
| 259 |
+
DiffusionPipeline,
|
| 260 |
+
StableDiffusionMixin,
|
| 261 |
+
TextualInversionLoaderMixin,
|
| 262 |
+
StableDiffusionXLLoraLoaderMixin,
|
| 263 |
+
FromSingleFileMixin,
|
| 264 |
+
):
|
| 265 |
+
r"""
|
| 266 |
+
Pipeline for image-to-image generation using Stable Diffusion XL with ControlNet guidance.
|
| 267 |
+
|
| 268 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 269 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 270 |
+
|
| 271 |
+
The pipeline also inherits the following loading methods:
|
| 272 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 273 |
+
- [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 274 |
+
- [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 275 |
+
|
| 276 |
+
Args:
|
| 277 |
+
vae ([`AutoencoderKL`]):
|
| 278 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 279 |
+
text_encoder ([`CLIPTextModel`]):
|
| 280 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 281 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 282 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 283 |
+
        text_encoder_2 ([`CLIPTextModelWithProjection`]):
|
| 284 |
+
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
|
| 285 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
|
| 286 |
+
specifically the
|
| 287 |
+
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
|
| 288 |
+
variant.
|
| 289 |
+
tokenizer (`CLIPTokenizer`):
|
| 290 |
+
Tokenizer of class
|
| 291 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 292 |
+
tokenizer_2 (`CLIPTokenizer`):
|
| 293 |
+
Second Tokenizer of class
|
| 294 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 295 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 296 |
+
controlnet ([`ControlNetUnionModel`]):
|
| 297 |
+
Provides additional conditioning to the unet during the denoising process.
|
| 298 |
+
scheduler ([`SchedulerMixin`]):
|
| 299 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 300 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 301 |
+
requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
|
| 302 |
+
Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the
|
| 303 |
+
config of `stabilityai/stable-diffusion-xl-refiner-1-0`.
|
| 304 |
+
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
|
| 305 |
+
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
|
| 306 |
+
`stabilityai/stable-diffusion-xl-base-1-0`.
|
| 307 |
+
add_watermarker (`bool`, *optional*):
|
| 308 |
+
Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
|
| 309 |
+
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
|
| 310 |
+
watermarker will be used.
|
| 311 |
+
"""
|
| 312 |
+
|
| 313 |
+
model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
|
| 314 |
+
_optional_components = [
|
| 315 |
+
"tokenizer",
|
| 316 |
+
"tokenizer_2",
|
| 317 |
+
"text_encoder",
|
| 318 |
+
"text_encoder_2",
|
| 319 |
+
]
|
| 320 |
+
|
| 321 |
+
def __init__(
|
| 322 |
+
self,
|
| 323 |
+
vae: AutoencoderKL,
|
| 324 |
+
text_encoder: CLIPTextModel,
|
| 325 |
+
text_encoder_2: CLIPTextModelWithProjection,
|
| 326 |
+
tokenizer: CLIPTokenizer,
|
| 327 |
+
tokenizer_2: CLIPTokenizer,
|
| 328 |
+
unet: UNet2DConditionModel,
|
| 329 |
+
controlnet: ControlNetUnionModel,
|
| 330 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 331 |
+
requires_aesthetics_score: bool = False,
|
| 332 |
+
force_zeros_for_empty_prompt: bool = True,
|
| 333 |
+
add_watermarker: Optional[bool] = None,
|
| 334 |
+
):
|
| 335 |
+
super().__init__()
|
| 336 |
+
|
| 337 |
+
if not isinstance(controlnet, ControlNetUnionModel):
|
| 338 |
+
raise ValueError("Expected `controlnet` to be of type `ControlNetUnionModel`.")
|
| 339 |
+
|
| 340 |
+
self.register_modules(
|
| 341 |
+
vae=vae,
|
| 342 |
+
text_encoder=text_encoder,
|
| 343 |
+
text_encoder_2=text_encoder_2,
|
| 344 |
+
tokenizer=tokenizer,
|
| 345 |
+
tokenizer_2=tokenizer_2,
|
| 346 |
+
unet=unet,
|
| 347 |
+
controlnet=controlnet,
|
| 348 |
+
scheduler=scheduler,
|
| 349 |
+
)
|
| 350 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 351 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
|
| 352 |
+
self.control_image_processor = VaeImageProcessor(
|
| 353 |
+
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
| 354 |
+
)
|
| 355 |
+
self.mask_processor = VaeImageProcessor(
|
| 356 |
+
vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
|
| 357 |
+
)
|
| 358 |
+
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
|
| 359 |
+
|
| 360 |
+
if add_watermarker:
|
| 361 |
+
self.watermark = StableDiffusionXLWatermarker()
|
| 362 |
+
else:
|
| 363 |
+
self.watermark = None
|
| 364 |
+
|
| 365 |
+
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 366 |
+
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
|
| 367 |
+
|
| 368 |
+
def calculate_overlap(self, width, height, base_overlap=128):
|
| 369 |
+
"""
|
| 370 |
+
Calculates dynamic overlap based on the image's aspect ratio.
|
| 371 |
+
|
| 372 |
+
Args:
|
| 373 |
+
width (int): Width of the image in pixels.
|
| 374 |
+
height (int): Height of the image in pixels.
|
| 375 |
+
base_overlap (int, optional): Base overlap value in pixels. Defaults to 128.
|
| 376 |
+
|
| 377 |
+
Returns:
|
| 378 |
+
tuple: A tuple containing:
|
| 379 |
+
- row_overlap (int): Overlap between tiles in consecutive rows.
|
| 380 |
+
- col_overlap (int): Overlap between tiles in consecutive columns.
|
| 381 |
+
"""
|
| 382 |
+
ratio = height / width
|
| 383 |
+
if ratio < 1: # Image is wider than tall
|
| 384 |
+
return base_overlap // 2, base_overlap
|
| 385 |
+
else: # Image is taller than wide
|
| 386 |
+
return base_overlap, base_overlap * 2
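    # Illustrative example (assumed values): for a 6144x4096 (width x height) image the
    # ratio is 4096 / 6144 < 1, so with the default base_overlap=128 this returns
    # (row_overlap=64, col_overlap=128); a portrait or square image would get (128, 256).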
|
| 387 |
+
|
| 388 |
+
class TileWeightingMethod(Enum):
|
| 389 |
+
"""Mode in which the tile weights will be generated"""
|
| 390 |
+
|
| 391 |
+
COSINE = "Cosine"
|
| 392 |
+
GAUSSIAN = "Gaussian"
|
| 393 |
+
|
| 394 |
+
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
|
| 395 |
+
def encode_prompt(
|
| 396 |
+
self,
|
| 397 |
+
prompt: str,
|
| 398 |
+
prompt_2: Optional[str] = None,
|
| 399 |
+
device: Optional[torch.device] = None,
|
| 400 |
+
num_images_per_prompt: int = 1,
|
| 401 |
+
do_classifier_free_guidance: bool = True,
|
| 402 |
+
negative_prompt: Optional[str] = None,
|
| 403 |
+
negative_prompt_2: Optional[str] = None,
|
| 404 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 405 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 406 |
+
pooled_prompt_embeds: Optional[torch.Tensor] = None,
|
| 407 |
+
negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
|
| 408 |
+
lora_scale: Optional[float] = None,
|
| 409 |
+
clip_skip: Optional[int] = None,
|
| 410 |
+
):
|
| 411 |
+
r"""
|
| 412 |
+
Encodes the prompt into text encoder hidden states.
|
| 413 |
+
|
| 414 |
+
Args:
|
| 415 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 416 |
+
prompt to be encoded
|
| 417 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 418 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 419 |
+
used in both text-encoders
|
| 420 |
+
device: (`torch.device`):
|
| 421 |
+
torch device
|
| 422 |
+
num_images_per_prompt (`int`):
|
| 423 |
+
number of images that should be generated per prompt
|
| 424 |
+
do_classifier_free_guidance (`bool`):
|
| 425 |
+
whether to use classifier free guidance or not
|
| 426 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 427 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 428 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 429 |
+
less than `1`).
|
| 430 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 431 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 432 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 433 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 434 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 435 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 436 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 437 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 438 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 439 |
+
argument.
|
| 440 |
+
pooled_prompt_embeds (`torch.Tensor`, *optional*):
|
| 441 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 442 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 443 |
+
negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
|
| 444 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 445 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 446 |
+
input argument.
|
| 447 |
+
lora_scale (`float`, *optional*):
|
| 448 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 449 |
+
clip_skip (`int`, *optional*):
|
| 450 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 451 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 452 |
+
"""
|
| 453 |
+
device = device or self._execution_device
|
| 454 |
+
|
| 455 |
+
# set lora scale so that monkey patched LoRA
|
| 456 |
+
# function of text encoder can correctly access it
|
| 457 |
+
if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
|
| 458 |
+
self._lora_scale = lora_scale
|
| 459 |
+
|
| 460 |
+
# dynamically adjust the LoRA scale
|
| 461 |
+
if self.text_encoder is not None:
|
| 462 |
+
if not USE_PEFT_BACKEND:
|
| 463 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 464 |
+
else:
|
| 465 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 466 |
+
|
| 467 |
+
if self.text_encoder_2 is not None:
|
| 468 |
+
if not USE_PEFT_BACKEND:
|
| 469 |
+
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
|
| 470 |
+
else:
|
| 471 |
+
scale_lora_layers(self.text_encoder_2, lora_scale)
|
| 472 |
+
|
| 473 |
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
| 474 |
+
|
| 475 |
+
if prompt is not None:
|
| 476 |
+
batch_size = len(prompt)
|
| 477 |
+
else:
|
| 478 |
+
batch_size = prompt_embeds.shape[0]
|
| 479 |
+
|
| 480 |
+
# Define tokenizers and text encoders
|
| 481 |
+
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
|
| 482 |
+
text_encoders = (
|
| 483 |
+
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
|
| 484 |
+
)
|
| 485 |
+
dtype = text_encoders[0].dtype
|
| 486 |
+
if prompt_embeds is None:
|
| 487 |
+
prompt_2 = prompt_2 or prompt
|
| 488 |
+
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
|
| 489 |
+
|
| 490 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 491 |
+
prompt_embeds_list = []
|
| 492 |
+
prompts = [prompt, prompt_2]
|
| 493 |
+
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
|
| 494 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 495 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
| 496 |
+
|
| 497 |
+
text_inputs = tokenizer(
|
| 498 |
+
prompt,
|
| 499 |
+
padding="max_length",
|
| 500 |
+
max_length=tokenizer.model_max_length,
|
| 501 |
+
truncation=True,
|
| 502 |
+
return_tensors="pt",
|
| 503 |
+
)
|
| 504 |
+
|
| 505 |
+
text_input_ids = text_inputs.input_ids
|
| 506 |
+
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 507 |
+
|
| 508 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 509 |
+
text_input_ids, untruncated_ids
|
| 510 |
+
):
|
| 511 |
+
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
|
| 512 |
+
logger.warning(
|
| 513 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 514 |
+
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
| 515 |
+
)
|
| 516 |
+
text_encoder.to(dtype)
|
| 517 |
+
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
|
| 518 |
+
|
| 519 |
+
# We are only ALWAYS interested in the pooled output of the final text encoder
|
| 520 |
+
if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2:
|
| 521 |
+
pooled_prompt_embeds = prompt_embeds[0]
|
| 522 |
+
|
| 523 |
+
if clip_skip is None:
|
| 524 |
+
prompt_embeds = prompt_embeds.hidden_states[-2]
|
| 525 |
+
else:
|
| 526 |
+
# "2" because SDXL always indexes from the penultimate layer.
|
| 527 |
+
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
|
| 528 |
+
|
| 529 |
+
prompt_embeds_list.append(prompt_embeds)
|
| 530 |
+
|
| 531 |
+
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
| 532 |
+
|
| 533 |
+
# get unconditional embeddings for classifier free guidance
|
| 534 |
+
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
| 535 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
| 536 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
| 537 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
| 538 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 539 |
+
negative_prompt = negative_prompt or ""
|
| 540 |
+
negative_prompt_2 = negative_prompt_2 or negative_prompt
|
| 541 |
+
|
| 542 |
+
# normalize str to list
|
| 543 |
+
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
|
| 544 |
+
negative_prompt_2 = (
|
| 545 |
+
batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
|
| 546 |
+
)
|
| 547 |
+
|
| 548 |
+
uncond_tokens: List[str]
|
| 549 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
| 550 |
+
raise TypeError(
|
| 551 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 552 |
+
f" {type(prompt)}."
|
| 553 |
+
)
|
| 554 |
+
elif batch_size != len(negative_prompt):
|
| 555 |
+
raise ValueError(
|
| 556 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 557 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 558 |
+
" the batch size of `prompt`."
|
| 559 |
+
)
|
| 560 |
+
else:
|
| 561 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
| 562 |
+
|
| 563 |
+
negative_prompt_embeds_list = []
|
| 564 |
+
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
|
| 565 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 566 |
+
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
|
| 567 |
+
|
| 568 |
+
max_length = prompt_embeds.shape[1]
|
| 569 |
+
uncond_input = tokenizer(
|
| 570 |
+
negative_prompt,
|
| 571 |
+
padding="max_length",
|
| 572 |
+
max_length=max_length,
|
| 573 |
+
truncation=True,
|
| 574 |
+
return_tensors="pt",
|
| 575 |
+
)
|
| 576 |
+
|
| 577 |
+
negative_prompt_embeds = text_encoder(
|
| 578 |
+
uncond_input.input_ids.to(device),
|
| 579 |
+
output_hidden_states=True,
|
| 580 |
+
)
|
| 581 |
+
|
| 582 |
+
# We are only ALWAYS interested in the pooled output of the final text encoder
|
| 583 |
+
if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2:
|
| 584 |
+
negative_pooled_prompt_embeds = negative_prompt_embeds[0]
|
| 585 |
+
negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
|
| 586 |
+
|
| 587 |
+
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
| 588 |
+
|
| 589 |
+
negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
|
| 590 |
+
|
| 591 |
+
if self.text_encoder_2 is not None:
|
| 592 |
+
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 593 |
+
else:
|
| 594 |
+
prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
|
| 595 |
+
|
| 596 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 597 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 598 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 599 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 600 |
+
|
| 601 |
+
if do_classifier_free_guidance:
|
| 602 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 603 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 604 |
+
|
| 605 |
+
if self.text_encoder_2 is not None:
|
| 606 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
|
| 607 |
+
else:
|
| 608 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
|
| 609 |
+
|
| 610 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 611 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 612 |
+
|
| 613 |
+
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 614 |
+
bs_embed * num_images_per_prompt, -1
|
| 615 |
+
)
|
| 616 |
+
if do_classifier_free_guidance:
|
| 617 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 618 |
+
bs_embed * num_images_per_prompt, -1
|
| 619 |
+
)
|
| 620 |
+
|
| 621 |
+
if self.text_encoder is not None:
|
| 622 |
+
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 623 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 624 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 625 |
+
|
| 626 |
+
if self.text_encoder_2 is not None:
|
| 627 |
+
if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 628 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 629 |
+
unscale_lora_layers(self.text_encoder_2, lora_scale)
|
| 630 |
+
|
| 631 |
+
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 632 |
+
|
| 633 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 634 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 635 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 636 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 637 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 638 |
+
# and should be between [0, 1]
|
| 639 |
+
|
| 640 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 641 |
+
extra_step_kwargs = {}
|
| 642 |
+
if accepts_eta:
|
| 643 |
+
extra_step_kwargs["eta"] = eta
|
| 644 |
+
|
| 645 |
+
# check if the scheduler accepts generator
|
| 646 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 647 |
+
if accepts_generator:
|
| 648 |
+
extra_step_kwargs["generator"] = generator
|
| 649 |
+
return extra_step_kwargs
|
| 650 |
+
|
| 651 |
+
def check_inputs(
|
| 652 |
+
self,
|
| 653 |
+
prompt,
|
| 654 |
+
height,
|
| 655 |
+
width,
|
| 656 |
+
image,
|
| 657 |
+
strength,
|
| 658 |
+
num_inference_steps,
|
| 659 |
+
normal_tile_overlap,
|
| 660 |
+
border_tile_overlap,
|
| 661 |
+
max_tile_size,
|
| 662 |
+
tile_gaussian_sigma,
|
| 663 |
+
tile_weighting_method,
|
| 664 |
+
controlnet_conditioning_scale=1.0,
|
| 665 |
+
control_guidance_start=0.0,
|
| 666 |
+
control_guidance_end=1.0,
|
| 667 |
+
):
|
| 668 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 669 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 670 |
+
|
| 671 |
+
if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 672 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 673 |
+
|
| 674 |
+
if strength < 0 or strength > 1:
|
| 675 |
+
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
|
| 676 |
+
if num_inference_steps is None:
|
| 677 |
+
raise ValueError("`num_inference_steps` cannot be None.")
|
| 678 |
+
elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
|
| 679 |
+
raise ValueError(
|
| 680 |
+
f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
|
| 681 |
+
f" {type(num_inference_steps)}."
|
| 682 |
+
)
|
| 683 |
+
if normal_tile_overlap is None:
|
| 684 |
+
raise ValueError("`normal_tile_overlap` cannot be None.")
|
| 685 |
+
elif not isinstance(normal_tile_overlap, int) or normal_tile_overlap < 64:
|
| 686 |
+
raise ValueError(
|
| 687 |
+
f"`normal_tile_overlap` has to be greater than 64 but is {normal_tile_overlap} of type"
|
| 688 |
+
f" {type(normal_tile_overlap)}."
|
| 689 |
+
)
|
| 690 |
+
if border_tile_overlap is None:
|
| 691 |
+
raise ValueError("`border_tile_overlap` cannot be None.")
|
| 692 |
+
elif not isinstance(border_tile_overlap, int) or border_tile_overlap < 128:
|
| 693 |
+
raise ValueError(
|
| 694 |
+
f"`border_tile_overlap` has to be greater than 128 but is {border_tile_overlap} of type"
|
| 695 |
+
f" {type(border_tile_overlap)}."
|
| 696 |
+
)
|
| 697 |
+
if max_tile_size is None:
|
| 698 |
+
raise ValueError("`max_tile_size` cannot be None.")
|
| 699 |
+
elif not isinstance(max_tile_size, int) or max_tile_size not in (1024, 1280):
|
| 700 |
+
raise ValueError(
|
| 701 |
+
f"`max_tile_size` has to be in 1024 or 1280 but is {max_tile_size} of type {type(max_tile_size)}."
|
| 702 |
+
)
|
| 703 |
+
if tile_gaussian_sigma is None:
|
| 704 |
+
raise ValueError("`tile_gaussian_sigma` cannot be None.")
|
| 705 |
+
elif not isinstance(tile_gaussian_sigma, float) or tile_gaussian_sigma <= 0:
|
| 706 |
+
raise ValueError(
|
| 707 |
+
f"`tile_gaussian_sigma` has to be a positive float but is {tile_gaussian_sigma} of type"
|
| 708 |
+
f" {type(tile_gaussian_sigma)}."
|
| 709 |
+
)
|
| 710 |
+
if tile_weighting_method is None:
|
| 711 |
+
raise ValueError("`tile_weighting_method` cannot be None.")
|
| 712 |
+
elif not isinstance(tile_weighting_method, str) or tile_weighting_method not in [
|
| 713 |
+
t.value for t in self.TileWeightingMethod
|
| 714 |
+
]:
|
| 715 |
+
raise ValueError(
|
| 716 |
+
f"`tile_weighting_method` has to be a string in ({[t.value for t in self.TileWeightingMethod]}) but is {tile_weighting_method} of type"
|
| 717 |
+
f" {type(tile_weighting_method)}."
|
| 718 |
+
)
|
| 719 |
+
|
| 720 |
+
# Check `image`
|
| 721 |
+
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
|
| 722 |
+
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
|
| 723 |
+
)
|
| 724 |
+
if (
|
| 725 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 726 |
+
or is_compiled
|
| 727 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 728 |
+
):
|
| 729 |
+
self.check_image(image, prompt)
|
| 730 |
+
elif (
|
| 731 |
+
isinstance(self.controlnet, ControlNetUnionModel)
|
| 732 |
+
or is_compiled
|
| 733 |
+
and isinstance(self.controlnet._orig_mod, ControlNetUnionModel)
|
| 734 |
+
):
|
| 735 |
+
self.check_image(image, prompt)
|
| 736 |
+
else:
|
| 737 |
+
assert False
|
| 738 |
+
|
| 739 |
+
# Check `controlnet_conditioning_scale`
|
| 740 |
+
if (
|
| 741 |
+
isinstance(self.controlnet, ControlNetUnionModel)
|
| 742 |
+
or is_compiled
|
| 743 |
+
and isinstance(self.controlnet._orig_mod, ControlNetUnionModel)
|
| 744 |
+
) or (
|
| 745 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 746 |
+
or is_compiled
|
| 747 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 748 |
+
):
|
| 749 |
+
if not isinstance(controlnet_conditioning_scale, float):
|
| 750 |
+
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
| 751 |
+
elif (
|
| 752 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 753 |
+
or is_compiled
|
| 754 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 755 |
+
):
|
| 756 |
+
if isinstance(controlnet_conditioning_scale, list):
|
| 757 |
+
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
|
| 758 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 759 |
+
elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
|
| 760 |
+
self.controlnet.nets
|
| 761 |
+
):
|
| 762 |
+
raise ValueError(
|
| 763 |
+
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
|
| 764 |
+
" the same length as the number of controlnets"
|
| 765 |
+
)
|
| 766 |
+
else:
|
| 767 |
+
assert False
|
| 768 |
+
|
| 769 |
+
if not isinstance(control_guidance_start, (tuple, list)):
|
| 770 |
+
control_guidance_start = [control_guidance_start]
|
| 771 |
+
|
| 772 |
+
if not isinstance(control_guidance_end, (tuple, list)):
|
| 773 |
+
control_guidance_end = [control_guidance_end]
|
| 774 |
+
|
| 775 |
+
if len(control_guidance_start) != len(control_guidance_end):
|
| 776 |
+
raise ValueError(
|
| 777 |
+
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
|
| 778 |
+
)
|
| 779 |
+
|
| 780 |
+
for start, end in zip(control_guidance_start, control_guidance_end):
|
| 781 |
+
if start >= end:
|
| 782 |
+
raise ValueError(
|
| 783 |
+
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
|
| 784 |
+
)
|
| 785 |
+
if start < 0.0:
|
| 786 |
+
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
|
| 787 |
+
if end > 1.0:
|
| 788 |
+
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
|
| 789 |
+
|
| 790 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image
|
| 791 |
+
def check_image(self, image, prompt):
|
| 792 |
+
image_is_pil = isinstance(image, Image.Image)
|
| 793 |
+
image_is_tensor = isinstance(image, torch.Tensor)
|
| 794 |
+
image_is_np = isinstance(image, np.ndarray)
|
| 795 |
+
image_is_pil_list = isinstance(image, list) and isinstance(image[0], Image.Image)
|
| 796 |
+
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
| 797 |
+
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
|
| 798 |
+
|
| 799 |
+
if (
|
| 800 |
+
not image_is_pil
|
| 801 |
+
and not image_is_tensor
|
| 802 |
+
and not image_is_np
|
| 803 |
+
and not image_is_pil_list
|
| 804 |
+
and not image_is_tensor_list
|
| 805 |
+
and not image_is_np_list
|
| 806 |
+
):
|
| 807 |
+
raise TypeError(
|
| 808 |
+
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
|
| 809 |
+
)
|
| 810 |
+
|
| 811 |
+
if image_is_pil:
|
| 812 |
+
image_batch_size = 1
|
| 813 |
+
else:
|
| 814 |
+
image_batch_size = len(image)
|
| 815 |
+
|
| 816 |
+
if prompt is not None and isinstance(prompt, str):
|
| 817 |
+
prompt_batch_size = 1
|
| 818 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 819 |
+
prompt_batch_size = len(prompt)
|
| 820 |
+
|
| 821 |
+
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
| 822 |
+
raise ValueError(
|
| 823 |
+
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
| 824 |
+
)
|
| 825 |
+
|
| 826 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image
|
| 827 |
+
def prepare_control_image(
|
| 828 |
+
self,
|
| 829 |
+
image,
|
| 830 |
+
width,
|
| 831 |
+
height,
|
| 832 |
+
batch_size,
|
| 833 |
+
num_images_per_prompt,
|
| 834 |
+
device,
|
| 835 |
+
dtype,
|
| 836 |
+
do_classifier_free_guidance=False,
|
| 837 |
+
guess_mode=False,
|
| 838 |
+
):
|
| 839 |
+
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
| 840 |
+
image_batch_size = image.shape[0]
|
| 841 |
+
|
| 842 |
+
if image_batch_size == 1:
|
| 843 |
+
repeat_by = batch_size
|
| 844 |
+
else:
|
| 845 |
+
# image batch size is the same as prompt batch size
|
| 846 |
+
repeat_by = num_images_per_prompt
|
| 847 |
+
|
| 848 |
+
image = image.repeat_interleave(repeat_by, dim=0)
|
| 849 |
+
|
| 850 |
+
image = image.to(device=device, dtype=dtype)
|
| 851 |
+
|
| 852 |
+
if do_classifier_free_guidance and not guess_mode:
|
| 853 |
+
image = torch.cat([image] * 2)
|
| 854 |
+
|
| 855 |
+
return image
|
| 856 |
+
|
| 857 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
|
| 858 |
+
def get_timesteps(self, num_inference_steps, strength):
|
| 859 |
+
# get the original timestep using init_timestep
|
| 860 |
+
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
|
| 861 |
+
|
| 862 |
+
t_start = max(num_inference_steps - init_timestep, 0)
|
| 863 |
+
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
|
| 864 |
+
if hasattr(self.scheduler, "set_begin_index"):
|
| 865 |
+
self.scheduler.set_begin_index(t_start * self.scheduler.order)
|
| 866 |
+
|
| 867 |
+
return timesteps, num_inference_steps - t_start
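    # Illustrative example: with num_inference_steps=35 and strength=0.65 (as in the
    # usage example above), init_timestep = int(35 * 0.65) = 22 and t_start = 13, so the
    # noised latents are denoised over the final 22 steps of the schedule (assuming a
    # first-order scheduler, i.e. scheduler.order == 1).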
|
| 868 |
+
|
| 869 |
+
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents
|
| 870 |
+
def prepare_latents(
|
| 871 |
+
self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
|
| 872 |
+
):
|
| 873 |
+
if not isinstance(image, (torch.Tensor, Image.Image, list)):
|
| 874 |
+
raise ValueError(
|
| 875 |
+
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
|
| 876 |
+
)
|
| 877 |
+
|
| 878 |
+
latents_mean = latents_std = None
|
| 879 |
+
if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None:
|
| 880 |
+
latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1)
|
| 881 |
+
if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None:
|
| 882 |
+
latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1)
|
| 883 |
+
|
| 884 |
+
# Offload text encoder if `enable_model_cpu_offload` was enabled
|
| 885 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 886 |
+
self.text_encoder_2.to("cpu")
|
| 887 |
+
torch.cuda.empty_cache()
|
| 888 |
+
|
| 889 |
+
image = image.to(device=device, dtype=dtype)
|
| 890 |
+
|
| 891 |
+
batch_size = batch_size * num_images_per_prompt
|
| 892 |
+
|
| 893 |
+
if image.shape[1] == 4:
|
| 894 |
+
init_latents = image
|
| 895 |
+
|
| 896 |
+
else:
|
| 897 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 898 |
+
if self.vae.config.force_upcast:
|
| 899 |
+
image = image.float()
|
| 900 |
+
self.vae.to(dtype=torch.float32)
|
| 901 |
+
|
| 902 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 903 |
+
raise ValueError(
|
| 904 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 905 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 906 |
+
)
|
| 907 |
+
|
| 908 |
+
elif isinstance(generator, list):
|
| 909 |
+
if image.shape[0] < batch_size and batch_size % image.shape[0] == 0:
|
| 910 |
+
image = torch.cat([image] * (batch_size // image.shape[0]), dim=0)
|
| 911 |
+
elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0:
|
| 912 |
+
raise ValueError(
|
| 913 |
+
f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} "
|
| 914 |
+
)
|
| 915 |
+
|
| 916 |
+
init_latents = [
|
| 917 |
+
retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
|
| 918 |
+
for i in range(batch_size)
|
| 919 |
+
]
|
| 920 |
+
init_latents = torch.cat(init_latents, dim=0)
|
| 921 |
+
else:
|
| 922 |
+
init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
|
| 923 |
+
|
| 924 |
+
if self.vae.config.force_upcast:
|
| 925 |
+
self.vae.to(dtype)
|
| 926 |
+
|
| 927 |
+
init_latents = init_latents.to(dtype)
|
| 928 |
+
if latents_mean is not None and latents_std is not None:
|
| 929 |
+
latents_mean = latents_mean.to(device=device, dtype=dtype)
|
| 930 |
+
latents_std = latents_std.to(device=device, dtype=dtype)
|
| 931 |
+
init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std
|
| 932 |
+
else:
|
| 933 |
+
init_latents = self.vae.config.scaling_factor * init_latents
|
| 934 |
+
|
| 935 |
+
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
|
| 936 |
+
# expand init_latents for batch_size
|
| 937 |
+
additional_image_per_prompt = batch_size // init_latents.shape[0]
|
| 938 |
+
init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
|
| 939 |
+
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
|
| 940 |
+
raise ValueError(
|
| 941 |
+
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
|
| 942 |
+
)
|
| 943 |
+
else:
|
| 944 |
+
init_latents = torch.cat([init_latents], dim=0)
|
| 945 |
+
|
| 946 |
+
if add_noise:
|
| 947 |
+
shape = init_latents.shape
|
| 948 |
+
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 949 |
+
# get latents
|
| 950 |
+
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
| 951 |
+
|
| 952 |
+
latents = init_latents
|
| 953 |
+
|
| 954 |
+
return latents
|
| 955 |
+
|
| 956 |
+
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
|
| 957 |
+
def _get_add_time_ids(
|
| 958 |
+
self,
|
| 959 |
+
original_size,
|
| 960 |
+
crops_coords_top_left,
|
| 961 |
+
target_size,
|
| 962 |
+
aesthetic_score,
|
| 963 |
+
negative_aesthetic_score,
|
| 964 |
+
negative_original_size,
|
| 965 |
+
negative_crops_coords_top_left,
|
| 966 |
+
negative_target_size,
|
| 967 |
+
dtype,
|
| 968 |
+
text_encoder_projection_dim=None,
|
| 969 |
+
):
|
| 970 |
+
if self.config.requires_aesthetics_score:
|
| 971 |
+
add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
|
| 972 |
+
add_neg_time_ids = list(
|
| 973 |
+
negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
|
| 974 |
+
)
|
| 975 |
+
else:
|
| 976 |
+
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
| 977 |
+
add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)
|
| 978 |
+
|
| 979 |
+
passed_add_embed_dim = (
|
| 980 |
+
self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
|
| 981 |
+
)
|
| 982 |
+
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
|
| 983 |
+
|
| 984 |
+
if (
|
| 985 |
+
expected_add_embed_dim > passed_add_embed_dim
|
| 986 |
+
and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
|
| 987 |
+
):
|
| 988 |
+
raise ValueError(
|
| 989 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
|
| 990 |
+
)
|
| 991 |
+
elif (
|
| 992 |
+
expected_add_embed_dim < passed_add_embed_dim
|
| 993 |
+
and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
|
| 994 |
+
):
|
| 995 |
+
raise ValueError(
|
| 996 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
|
| 997 |
+
)
|
| 998 |
+
elif expected_add_embed_dim != passed_add_embed_dim:
|
| 999 |
+
raise ValueError(
|
| 1000 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
|
| 1001 |
+
)
|
| 1002 |
+
|
| 1003 |
+
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
|
| 1004 |
+
add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
|
| 1005 |
+
|
| 1006 |
+
return add_time_ids, add_neg_time_ids
|
| 1007 |
+
|
| 1008 |
+
def _generate_cosine_weights(self, tile_width, tile_height, nbatches, device, dtype):
|
| 1009 |
+
"""
|
| 1010 |
+
Generates cosine weights as a PyTorch tensor for blending tiles.
|
| 1011 |
+
|
| 1012 |
+
Args:
|
| 1013 |
+
tile_width (int): Width of the tile in pixels.
|
| 1014 |
+
tile_height (int): Height of the tile in pixels.
|
| 1015 |
+
nbatches (int): Number of batches.
|
| 1016 |
+
device (torch.device): Device where the tensor will be allocated (e.g., 'cuda' or 'cpu').
|
| 1017 |
+
dtype (torch.dtype): Data type of the tensor (e.g., torch.float32).
|
| 1018 |
+
|
| 1019 |
+
Returns:
|
| 1020 |
+
torch.Tensor: A tensor containing cosine weights for blending tiles, expanded to match batch and channel dimensions.
|
| 1021 |
+
"""
|
| 1022 |
+
# Convert tile dimensions to latent space
|
| 1023 |
+
latent_width = tile_width // 8
|
| 1024 |
+
latent_height = tile_height // 8
|
| 1025 |
+
|
| 1026 |
+
# Generate x and y coordinates in latent space
|
| 1027 |
+
x = np.arange(0, latent_width)
|
| 1028 |
+
y = np.arange(0, latent_height)
|
| 1029 |
+
|
| 1030 |
+
# Calculate midpoints
|
| 1031 |
+
midpoint_x = (latent_width - 1) / 2
|
| 1032 |
+
midpoint_y = (latent_height - 1) / 2
|
| 1033 |
+
|
| 1034 |
+
# Compute cosine probabilities for x and y
|
| 1035 |
+
x_probs = np.cos(np.pi * (x - midpoint_x) / latent_width)
|
| 1036 |
+
y_probs = np.cos(np.pi * (y - midpoint_y) / latent_height)
|
| 1037 |
+
|
| 1038 |
+
# Create a 2D weight matrix using the outer product
|
| 1039 |
+
weights_np = np.outer(y_probs, x_probs)
|
| 1040 |
+
|
| 1041 |
+
# Convert to a PyTorch tensor with the correct device and dtype
|
| 1042 |
+
weights_torch = torch.tensor(weights_np, device=device, dtype=dtype)
|
| 1043 |
+
|
| 1044 |
+
# Expand for batch and channel dimensions
|
| 1045 |
+
tile_weights_expanded = torch.tile(weights_torch, (nbatches, self.unet.config.in_channels, 1, 1))
|
| 1046 |
+
|
| 1047 |
+
return tile_weights_expanded
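    # The resulting weights are ~1.0 at the tile centre and fall towards ~0 at the tile
    # borders following cos(pi * (x - midpoint) / latent_size) per axis, so overlapping
    # regions of neighbouring tiles blend smoothly when the weighted latents are
    # accumulated and normalised.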
|
| 1048 |
+
|
| 1049 |
+
def _generate_gaussian_weights(self, tile_width, tile_height, nbatches, device, dtype, sigma=0.05):
|
| 1050 |
+
"""
|
| 1051 |
+
Generates Gaussian weights as a PyTorch tensor for blending tiles in latent space.
|
| 1052 |
+
|
| 1053 |
+
Args:
|
| 1054 |
+
tile_width (int): Width of the tile in pixels.
|
| 1055 |
+
tile_height (int): Height of the tile in pixels.
|
| 1056 |
+
nbatches (int): Number of batches.
|
| 1057 |
+
device (torch.device): Device where the tensor will be allocated (e.g., 'cuda' or 'cpu').
|
| 1058 |
+
dtype (torch.dtype): Data type of the tensor (e.g., torch.float32).
|
| 1059 |
+
sigma (float, optional): Standard deviation of the Gaussian distribution. Controls the smoothness of the weights. Defaults to 0.05.
|
| 1060 |
+
|
| 1061 |
+
Returns:
|
| 1062 |
+
torch.Tensor: A tensor containing Gaussian weights for blending tiles, expanded to match batch and channel dimensions.
|
| 1063 |
+
"""
|
| 1064 |
+
# Convert tile dimensions to latent space
|
| 1065 |
+
latent_width = tile_width // 8
|
| 1066 |
+
latent_height = tile_height // 8
|
| 1067 |
+
|
| 1068 |
+
# Generate Gaussian weights in latent space
|
| 1069 |
+
x = np.linspace(-1, 1, latent_width)
|
| 1070 |
+
y = np.linspace(-1, 1, latent_height)
|
| 1071 |
+
xx, yy = np.meshgrid(x, y)
|
| 1072 |
+
gaussian_weight = np.exp(-(xx**2 + yy**2) / (2 * sigma**2))
|
| 1073 |
+
|
| 1074 |
+
# Convert to a PyTorch tensor with the correct device and dtype
|
| 1075 |
+
weights_torch = torch.tensor(gaussian_weight, device=device, dtype=dtype)
|
| 1076 |
+
|
| 1077 |
+
# Expand for batch and channel dimensions
|
| 1078 |
+
weights_expanded = weights_torch.unsqueeze(0).unsqueeze(0) # Add batch and channel dimensions
|
| 1079 |
+
weights_expanded = weights_expanded.expand(nbatches, -1, -1, -1) # Expand to the number of batches
|
| 1080 |
+
|
| 1081 |
+
return weights_expanded
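    # Unlike the cosine weights above, the Gaussian falloff is controlled by `sigma`
    # (the usage example sets tile_gaussian_sigma=0.3): smaller values concentrate the
    # weight at the tile centre, larger values spread it towards the borders. The tensor
    # stays single-channel and is broadcast over the latent channels.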
|
| 1082 |
+
|
| 1083 |
+
    def _get_num_tiles(self, height, width, tile_height, tile_width, normal_tile_overlap, border_tile_overlap):
        """
        Calculates the number of tiles needed to cover an image, choosing the appropriate formula based on the
        ratio between the image size and the tile size.

        This function automatically selects between two formulas:
        1. A universal formula for typical cases (image-to-tile ratio <= 6:1).
        2. A specialized formula with border tile overlap for larger or atypical cases (image-to-tile ratio > 6:1).

        Args:
            height (int): Height of the image in pixels.
            width (int): Width of the image in pixels.
            tile_height (int): Height of each tile in pixels.
            tile_width (int): Width of each tile in pixels.
            normal_tile_overlap (int): Overlap between tiles in pixels for normal (non-border) tiles.
            border_tile_overlap (int): Overlap between tiles in pixels for border tiles.

        Returns:
            tuple: A tuple containing:
                - grid_rows (int): Number of rows in the tile grid.
                - grid_cols (int): Number of columns in the tile grid.

        Notes:
            - The function uses the universal formula (without border_tile_overlap) for typical cases where the
              image-to-tile ratio is 6:1 or smaller.
            - For larger or atypical cases (image-to-tile ratio > 6:1), it uses a specialized formula that includes
              border_tile_overlap to ensure complete coverage of the image, especially at the edges.
        """
        # Calculate the ratio between the image size and the tile size
        height_ratio = height / tile_height
        width_ratio = width / tile_width

        # If the ratio is greater than 6:1, use the formula with border_tile_overlap
        if height_ratio > 6 or width_ratio > 6:
            grid_rows = int(np.ceil((height - border_tile_overlap) / (tile_height - normal_tile_overlap))) + 1
            grid_cols = int(np.ceil((width - border_tile_overlap) / (tile_width - normal_tile_overlap))) + 1
        else:
            # Otherwise, use the universal formula
            grid_rows = int(np.ceil((height - normal_tile_overlap) / (tile_height - normal_tile_overlap)))
            grid_cols = int(np.ceil((width - normal_tile_overlap) / (tile_width - normal_tile_overlap)))

        return grid_rows, grid_cols

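# --- Illustrative sketch (not part of the pipeline) ---------------------------------
# A quick check of the grid formula above on hypothetical numbers: a 2048x2048 target
# with 1024x1024 tiles and a 64-pixel overlap has an image-to-tile ratio of 2:1, so the
# universal branch applies:
#   rows = ceil((2048 - 64) / (1024 - 64)) = ceil(1984 / 960) = ceil(2.07) = 3
# The standalone helper below just repeats that arithmetic for experimentation.
import math


def num_tiles_universal(size, tile_size, overlap):
    # Universal formula used when the image-to-tile ratio is <= 6:1.
    return math.ceil((size - overlap) / (tile_size - overlap))


assert num_tiles_universal(2048, 1024, 64) == 3
assert num_tiles_universal(1024, 1024, 64) == 1
# -------------------------------------------------------------------------------------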
    def prepare_tiles(
        self,
        grid_rows,
        grid_cols,
        tile_weighting_method,
        tile_width,
        tile_height,
        normal_tile_overlap,
        border_tile_overlap,
        width,
        height,
        tile_sigma,
        batch_size,
        device,
        dtype,
    ):
        """
        Processes image tiles by dynamically adjusting overlap and calculating Gaussian or cosine weights.

        Args:
            grid_rows (int): Number of rows in the tile grid.
            grid_cols (int): Number of columns in the tile grid.
            tile_weighting_method (str): Method for weighting tiles. Options: "Gaussian" or "Cosine".
            tile_width (int): Width of each tile in pixels.
            tile_height (int): Height of each tile in pixels.
            normal_tile_overlap (int): Overlap between tiles in pixels for normal tiles.
            border_tile_overlap (int): Overlap between tiles in pixels for border tiles.
            width (int): Width of the image in pixels.
            height (int): Height of the image in pixels.
            tile_sigma (float): Sigma parameter for Gaussian weighting.
            batch_size (int): Batch size for weight tiles.
            device (torch.device): Device where tensors will be allocated (e.g., 'cuda' or 'cpu').
            dtype (torch.dtype): Data type of the tensors (e.g., torch.float32).

        Returns:
            tuple: A tuple containing:
                - tile_weights (np.ndarray): Array of weights for each tile.
                - tile_row_overlaps (np.ndarray): Array of row overlaps for each tile.
                - tile_col_overlaps (np.ndarray): Array of column overlaps for each tile.
        """

        # Create arrays to store dynamic overlaps and weights
        tile_row_overlaps = np.full((grid_rows, grid_cols), normal_tile_overlap)
        tile_col_overlaps = np.full((grid_rows, grid_cols), normal_tile_overlap)
        tile_weights = np.empty((grid_rows, grid_cols), dtype=object)  # Stores Gaussian or cosine weights

        # Iterate over tiles to adjust overlap and calculate weights
        for row in range(grid_rows):
            for col in range(grid_cols):
                # Calculate the size of the current tile
                px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
                    row, col, tile_width, tile_height, normal_tile_overlap, normal_tile_overlap, width, height
                )
                current_tile_width = px_col_end - px_col_init
                current_tile_height = px_row_end - px_row_init
                sigma = tile_sigma

                # Adjust overlap for smaller tiles
                if current_tile_width < tile_width:
                    px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
                        row, col, tile_width, tile_height, border_tile_overlap, border_tile_overlap, width, height
                    )
                    current_tile_width = px_col_end - px_col_init
                    tile_col_overlaps[row, col] = border_tile_overlap
                    sigma = tile_sigma * 1.2
                if current_tile_height < tile_height:
                    px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
                        row, col, tile_width, tile_height, border_tile_overlap, border_tile_overlap, width, height
                    )
                    current_tile_height = px_row_end - px_row_init
                    tile_row_overlaps[row, col] = border_tile_overlap
                    sigma = tile_sigma * 1.2

                # Calculate weights for the current tile
                if tile_weighting_method == self.TileWeightingMethod.COSINE.value:
                    tile_weights[row, col] = self._generate_cosine_weights(
                        tile_width=current_tile_width,
                        tile_height=current_tile_height,
                        nbatches=batch_size,
                        device=device,
                        dtype=torch.float32,
                    )
                else:
                    tile_weights[row, col] = self._generate_gaussian_weights(
                        tile_width=current_tile_width,
                        tile_height=current_tile_height,
                        nbatches=batch_size,
                        device=device,
                        dtype=dtype,
                        sigma=sigma,
                    )

        return tile_weights, tile_row_overlaps, tile_col_overlaps

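# --- Illustrative sketch (not part of the pipeline) ---------------------------------
# `prepare_tiles` picks either cosine or Gaussian per-tile weights. The 1-D profiles
# below show the qualitative difference: a cosine-style window falls to zero at the tile
# border, while the Gaussian stays strictly positive and its width is set by sigma. The
# squared-cosine form and the sigma value here are illustration assumptions, not the
# exact implementation of `_generate_cosine_weights`.
import numpy as np

n = 9
x = np.linspace(-1, 1, n)
cosine_profile = np.cos(np.pi * x / 2) ** 2           # assumed squared-cosine window
gaussian_profile = np.exp(-(x**2) / (2 * 0.3**2))     # same form as the Gaussian weights above

print(np.round(cosine_profile, 2))    # approximately [0, 0.15, 0.5, 0.85, 1, 0.85, 0.5, 0.15, 0]
print(np.round(gaussian_profile, 2))  # approximately [0, 0.04, 0.25, 0.71, 1, 0.71, 0.25, 0.04, 0]
# -------------------------------------------------------------------------------------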
    def upcast_vae(self):
        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
        self.vae.to(dtype=torch.float32)

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def clip_skip(self):
        return self._clip_skip

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

@torch.no_grad()
|
| 1252 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 1253 |
+
def __call__(
|
| 1254 |
+
self,
|
| 1255 |
+
prompt: Union[str, List[str]] = None,
|
| 1256 |
+
image: PipelineImageInput = None,
|
| 1257 |
+
control_image: PipelineImageInput = None,
|
| 1258 |
+
height: Optional[int] = None,
|
| 1259 |
+
width: Optional[int] = None,
|
| 1260 |
+
strength: float = 0.9999,
|
| 1261 |
+
num_inference_steps: int = 50,
|
| 1262 |
+
guidance_scale: float = 5.0,
|
| 1263 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1264 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1265 |
+
eta: float = 0.0,
|
| 1266 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 1267 |
+
latents: Optional[torch.Tensor] = None,
|
| 1268 |
+
output_type: Optional[str] = "pil",
|
| 1269 |
+
return_dict: bool = True,
|
| 1270 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 1271 |
+
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
|
| 1272 |
+
guess_mode: bool = False,
|
| 1273 |
+
control_guidance_start: Union[float, List[float]] = 0.0,
|
| 1274 |
+
control_guidance_end: Union[float, List[float]] = 1.0,
|
| 1275 |
+
control_mode: Optional[Union[int, List[int]]] = None,
|
| 1276 |
+
original_size: Tuple[int, int] = None,
|
| 1277 |
+
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 1278 |
+
target_size: Tuple[int, int] = None,
|
| 1279 |
+
negative_original_size: Optional[Tuple[int, int]] = None,
|
| 1280 |
+
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 1281 |
+
negative_target_size: Optional[Tuple[int, int]] = None,
|
| 1282 |
+
aesthetic_score: float = 6.0,
|
| 1283 |
+
negative_aesthetic_score: float = 2.5,
|
| 1284 |
+
clip_skip: Optional[int] = None,
|
| 1285 |
+
normal_tile_overlap: int = 64,
|
| 1286 |
+
border_tile_overlap: int = 128,
|
| 1287 |
+
max_tile_size: int = 1024,
|
| 1288 |
+
tile_gaussian_sigma: float = 0.05,
|
| 1289 |
+
tile_weighting_method: str = "Cosine",
|
| 1290 |
+
**kwargs,
|
| 1291 |
+
):
|
| 1292 |
+
r"""
|
| 1293 |
+
Function invoked when calling the pipeline for generation.
|
| 1294 |
+
|
| 1295 |
+
Args:
|
| 1296 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 1297 |
+
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
|
| 1298 |
+
image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`, *optional*):
|
| 1299 |
+
The initial image to be used as the starting point for the image generation process. Can also accept
|
| 1300 |
+
image latents as `image`, if passing latents directly, they will not be encoded again.
|
| 1301 |
+
control_image (`PipelineImageInput`, *optional*):
|
| 1302 |
+
The ControlNet input condition. ControlNet uses this input condition to generate guidance for Unet.
|
| 1303 |
+
If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also
|
| 1304 |
+
be accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height
|
| 1305 |
+
and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
|
| 1306 |
+
init, images must be passed as a list such that each element of the list can be correctly batched for
|
| 1307 |
+
input to a single ControlNet.
|
| 1308 |
+
height (`int`, *optional*):
|
| 1309 |
+
The height in pixels of the generated image. If not provided, defaults to the height of `control_image`.
|
| 1310 |
+
width (`int`, *optional*):
|
| 1311 |
+
The width in pixels of the generated image. If not provided, defaults to the width of `control_image`.
|
| 1312 |
+
strength (`float`, *optional*, defaults to 0.9999):
|
| 1313 |
+
Indicates the extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
|
| 1314 |
+
starting point, and more noise is added the higher the `strength`. The number of denoising steps depends
|
| 1315 |
+
on the amount of noise initially added. When `strength` is 1, added noise is maximum, and the denoising
|
| 1316 |
+
process runs for the full number of iterations specified in `num_inference_steps`.
|
| 1317 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1318 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 1319 |
+
expense of slower inference.
|
| 1320 |
+
guidance_scale (`float`, *optional*, defaults to 5.0):
|
| 1321 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 1322 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://huggingface.co/papers/2205.11487).
|
| 1323 |
+
Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages generating
|
| 1324 |
+
images closely linked to the text `prompt`, usually at the expense of lower image quality.
|
| 1325 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1326 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 1327 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 1328 |
+
less than `1`).
|
| 1329 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1330 |
+
The number of images to generate per prompt.
|
| 1331 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1332 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 1333 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1334 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 1335 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 1336 |
+
to make generation deterministic.
|
| 1337 |
+
latents (`torch.Tensor`, *optional*):
|
| 1338 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 1339 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 1340 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 1341 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1342 |
+
The output format of the generated image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/):
|
| 1343 |
+
`PIL.Image.Image` or `np.array`.
|
| 1344 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1345 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1346 |
+
plain tuple.
|
| 1347 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 1348 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 1349 |
+
`self.processor` in
|
| 1350 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 1351 |
+
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 1352 |
+
The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
|
| 1353 |
+
to the residual in the original UNet. If multiple ControlNets are specified in init, you can set the
|
| 1354 |
+
corresponding scale as a list.
|
| 1355 |
+
guess_mode (`bool`, *optional*, defaults to `False`):
|
| 1356 |
+
In this mode, the ControlNet encoder will try to recognize the content of the input image even if
|
| 1357 |
+
you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.
|
| 1358 |
+
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
|
| 1359 |
+
The percentage of total steps at which the ControlNet starts applying.
|
| 1360 |
+
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 1361 |
+
The percentage of total steps at which the ControlNet stops applying.
|
| 1362 |
+
control_mode (`int` or `List[int]`, *optional*):
|
| 1363 |
+
The mode of ControlNet guidance. Can be used to specify different behaviors for multiple ControlNets.
|
| 1364 |
+
original_size (`Tuple[int, int]`, *optional*):
|
| 1365 |
+
If `original_size` is not the same as `target_size`, the image will appear to be down- or upsampled.
|
| 1366 |
+
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning.
|
| 1367 |
+
crops_coords_top_left (`Tuple[int, int]`, *optional*, defaults to (0, 0)):
|
| 1368 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 1369 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 1370 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning.
|
| 1371 |
+
target_size (`Tuple[int, int]`, *optional*):
|
| 1372 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 1373 |
+
not specified, it will default to `(height, width)`. Part of SDXL's micro-conditioning.
|
| 1374 |
+
negative_original_size (`Tuple[int, int]`, *optional*):
|
| 1375 |
+
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
|
| 1376 |
+
micro-conditioning.
|
| 1377 |
+
negative_crops_coords_top_left (`Tuple[int, int]`, *optional*, defaults to (0, 0)):
|
| 1378 |
+
To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
|
| 1379 |
+
micro-conditioning.
|
| 1380 |
+
negative_target_size (`Tuple[int, int]`, *optional*):
|
| 1381 |
+
To negatively condition the generation process based on a target image resolution. It should be the same
|
| 1382 |
+
as the `target_size` for most cases. Part of SDXL's micro-conditioning.
|
| 1383 |
+
aesthetic_score (`float`, *optional*, defaults to 6.0):
|
| 1384 |
+
Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
|
| 1385 |
+
Part of SDXL's micro-conditioning.
|
| 1386 |
+
negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
|
| 1387 |
+
Used to simulate an aesthetic score of the generated image by influencing the negative text condition.
|
| 1388 |
+
Part of SDXL's micro-conditioning.
|
| 1389 |
+
clip_skip (`int`, *optional*):
|
| 1390 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 1391 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 1392 |
+
normal_tile_overlap (`int`, *optional*, defaults to 64):
|
| 1393 |
+
Number of overlapping pixels between tiles in consecutive rows.
|
| 1394 |
+
border_tile_overlap (`int`, *optional*, defaults to 128):
|
| 1395 |
+
Number of overlapping pixels between tiles at the borders.
|
| 1396 |
+
max_tile_size (`int`, *optional*, defaults to 1024):
|
| 1397 |
+
Maximum size of a tile in pixels.
|
| 1398 |
+
tile_gaussian_sigma (`float`, *optional*, defaults to 0.05):
|
| 1399 |
+
Sigma parameter for Gaussian weighting of tiles.
|
| 1400 |
+
tile_weighting_method (`str`, *optional*, defaults to "Cosine"):
|
| 1401 |
+
Method for weighting tiles. Options: "Cosine" or "Gaussian".
|
| 1402 |
+
|
| 1403 |
+
Examples:
|
| 1404 |
+
|
| 1405 |
+
Returns:
|
| 1406 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 1407 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`
|
| 1408 |
+
containing the output images.
|
| 1409 |
+
"""
|
| 1410 |
+
|
| 1411 |
+
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
| 1412 |
+
|
| 1413 |
+
# align format for control guidance
|
| 1414 |
+
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
| 1415 |
+
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
| 1416 |
+
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
| 1417 |
+
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
| 1418 |
+
|
| 1419 |
+
if not isinstance(control_image, list):
|
| 1420 |
+
control_image = [control_image]
|
| 1421 |
+
else:
|
| 1422 |
+
control_image = control_image.copy()
|
| 1423 |
+
|
| 1424 |
+
if control_mode is None or isinstance(control_mode, list) and len(control_mode) == 0:
|
| 1425 |
+
raise ValueError("The value for `control_mode` is expected!")
|
| 1426 |
+
|
| 1427 |
+
if not isinstance(control_mode, list):
|
| 1428 |
+
control_mode = [control_mode]
|
| 1429 |
+
|
| 1430 |
+
if len(control_image) != len(control_mode):
|
| 1431 |
+
raise ValueError("Expected len(control_image) == len(control_mode)")
|
| 1432 |
+
|
| 1433 |
+
num_control_type = controlnet.config.num_control_type
|
| 1434 |
+
|
| 1435 |
+
# 0. Set internal use parameters
|
| 1436 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 1437 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 1438 |
+
original_size = original_size or (height, width)
|
| 1439 |
+
target_size = target_size or (height, width)
|
| 1440 |
+
negative_original_size = negative_original_size or original_size
|
| 1441 |
+
negative_target_size = negative_target_size or target_size
|
| 1442 |
+
control_type = [0 for _ in range(num_control_type)]
|
| 1443 |
+
control_type = torch.Tensor(control_type)
|
| 1444 |
+
self._guidance_scale = guidance_scale
|
| 1445 |
+
self._clip_skip = clip_skip
|
| 1446 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 1447 |
+
self._interrupt = False
|
| 1448 |
+
batch_size = 1
|
| 1449 |
+
device = self._execution_device
|
| 1450 |
+
global_pool_conditions = controlnet.config.global_pool_conditions
|
| 1451 |
+
guess_mode = guess_mode or global_pool_conditions
|
| 1452 |
+
|
| 1453 |
+
# 1. Check inputs
|
| 1454 |
+
for _image, control_idx in zip(control_image, control_mode):
|
| 1455 |
+
control_type[control_idx] = 1
|
| 1456 |
+
self.check_inputs(
|
| 1457 |
+
prompt,
|
| 1458 |
+
height,
|
| 1459 |
+
width,
|
| 1460 |
+
_image,
|
| 1461 |
+
strength,
|
| 1462 |
+
num_inference_steps,
|
| 1463 |
+
normal_tile_overlap,
|
| 1464 |
+
border_tile_overlap,
|
| 1465 |
+
max_tile_size,
|
| 1466 |
+
tile_gaussian_sigma,
|
| 1467 |
+
tile_weighting_method,
|
| 1468 |
+
controlnet_conditioning_scale,
|
| 1469 |
+
control_guidance_start,
|
| 1470 |
+
control_guidance_end,
|
| 1471 |
+
)
|
| 1472 |
+
|
| 1473 |
+
# 2 Get tile width and tile height size
|
| 1474 |
+
tile_width, tile_height = _adaptive_tile_size((width, height), max_tile_size=max_tile_size)
|
| 1475 |
+
|
| 1476 |
+
# 2.1 Calculate the number of tiles needed
|
| 1477 |
+
grid_rows, grid_cols = self._get_num_tiles(
|
| 1478 |
+
height, width, tile_height, tile_width, normal_tile_overlap, border_tile_overlap
|
| 1479 |
+
)
|
| 1480 |
+
|
| 1481 |
+
# 2.2 Expand prompt to number of tiles
|
| 1482 |
+
if not isinstance(prompt, list):
|
| 1483 |
+
prompt = [[prompt] * grid_cols] * grid_rows
|
| 1484 |
+
|
| 1485 |
+
# 2.3 Update height and width tile size by tile size and tile overlap size
|
| 1486 |
+
width = (grid_cols - 1) * (tile_width - normal_tile_overlap) + min(
|
| 1487 |
+
tile_width, width - (grid_cols - 1) * (tile_width - normal_tile_overlap)
|
| 1488 |
+
)
|
| 1489 |
+
height = (grid_rows - 1) * (tile_height - normal_tile_overlap) + min(
|
| 1490 |
+
tile_height, height - (grid_rows - 1) * (tile_height - normal_tile_overlap)
|
| 1491 |
+
)
|
| 1492 |
+
|
| 1493 |
+
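# --- Illustrative sketch (not part of the pipeline) ---------------------------------
# Step 2.3 snaps the requested size to what the tile grid can actually cover. With the
# hypothetical numbers width=2500, tile_width=1024, normal_tile_overlap=64 (stride 960)
# and grid_cols=3, the covered width is 2*960 + min(1024, 2500 - 2*960) = 2500, so the
# requested size is kept; anything beyond (grid_cols - 1)*stride + tile_width is cropped.
def snapped_size(size, tile_size, overlap, grid_n):
    stride = tile_size - overlap
    return (grid_n - 1) * stride + min(tile_size, size - (grid_n - 1) * stride)


assert snapped_size(2500, 1024, 64, 3) == 2500
assert snapped_size(3000, 1024, 64, 3) == 2944  # 2*960 + 1024: the grid cannot reach 3000
# -------------------------------------------------------------------------------------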
# 3. Encode input prompt
|
| 1494 |
+
text_encoder_lora_scale = (
|
| 1495 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 1496 |
+
)
|
| 1497 |
+
text_embeddings = [
|
| 1498 |
+
[
|
| 1499 |
+
self.encode_prompt(
|
| 1500 |
+
prompt=col,
|
| 1501 |
+
device=device,
|
| 1502 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1503 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 1504 |
+
negative_prompt=negative_prompt,
|
| 1505 |
+
prompt_embeds=None,
|
| 1506 |
+
negative_prompt_embeds=None,
|
| 1507 |
+
pooled_prompt_embeds=None,
|
| 1508 |
+
negative_pooled_prompt_embeds=None,
|
| 1509 |
+
lora_scale=text_encoder_lora_scale,
|
| 1510 |
+
clip_skip=self.clip_skip,
|
| 1511 |
+
)
|
| 1512 |
+
for col in row
|
| 1513 |
+
]
|
| 1514 |
+
for row in prompt
|
| 1515 |
+
]
|
| 1516 |
+
|
| 1517 |
+
# 4. Prepare latent image
|
| 1518 |
+
image_tensor = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
| 1519 |
+
|
| 1520 |
+
# 4.1 Prepare controlnet_conditioning_image
|
| 1521 |
+
control_image = self.prepare_control_image(
|
| 1522 |
+
image=image,
|
| 1523 |
+
width=width,
|
| 1524 |
+
height=height,
|
| 1525 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1526 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1527 |
+
device=device,
|
| 1528 |
+
dtype=controlnet.dtype,
|
| 1529 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 1530 |
+
guess_mode=guess_mode,
|
| 1531 |
+
)
|
| 1532 |
+
control_type = (
|
| 1533 |
+
control_type.reshape(1, -1)
|
| 1534 |
+
.to(device, dtype=controlnet.dtype)
|
| 1535 |
+
.repeat(batch_size * num_images_per_prompt * 2, 1)
|
| 1536 |
+
)
|
| 1537 |
+
|
| 1538 |
+
# 5. Prepare timesteps
|
| 1539 |
+
accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
|
| 1540 |
+
extra_set_kwargs = {}
|
| 1541 |
+
if accepts_offset:
|
| 1542 |
+
extra_set_kwargs["offset"] = 1
|
| 1543 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device, **extra_set_kwargs)
|
| 1544 |
+
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength)
|
| 1545 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
|
| 1546 |
+
self._num_timesteps = len(timesteps)
|
| 1547 |
+
|
| 1548 |
+
# 6. Prepare latent variables
|
| 1549 |
+
dtype = text_embeddings[0][0][0].dtype
|
| 1550 |
+
if latents is None:
|
| 1551 |
+
latents = self.prepare_latents(
|
| 1552 |
+
image_tensor,
|
| 1553 |
+
latent_timestep,
|
| 1554 |
+
batch_size,
|
| 1555 |
+
num_images_per_prompt,
|
| 1556 |
+
dtype,
|
| 1557 |
+
device,
|
| 1558 |
+
generator,
|
| 1559 |
+
True,
|
| 1560 |
+
)
|
| 1561 |
+
|
| 1562 |
+
# if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
|
| 1563 |
+
if isinstance(self.scheduler, LMSDiscreteScheduler):
|
| 1564 |
+
latents = latents * self.scheduler.sigmas[0]
|
| 1565 |
+
|
| 1566 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1567 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1568 |
+
|
| 1569 |
+
# 8. Create tensor stating which controlnets to keep
|
| 1570 |
+
controlnet_keep = []
|
| 1571 |
+
for i in range(len(timesteps)):
|
| 1572 |
+
controlnet_keep.append(
|
| 1573 |
+
1.0
|
| 1574 |
+
- float(i / len(timesteps) < control_guidance_start or (i + 1) / len(timesteps) > control_guidance_end)
|
| 1575 |
+
)
|
| 1576 |
+
|
| 1577 |
+
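# --- Illustrative sketch (not part of the pipeline) ---------------------------------
# `controlnet_keep[i]` is 1.0 only while step i falls inside the
# [control_guidance_start, control_guidance_end] window, so the ControlNet residuals are
# effectively disabled outside that range. A hypothetical example with 10 steps,
# start=0.0 and end=0.5:
keep = [1.0 - float(i / 10 < 0.0 or (i + 1) / 10 > 0.5) for i in range(10)]
assert keep == [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
# -------------------------------------------------------------------------------------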
# 8.1 Prepare added time ids & embeddings
|
| 1578 |
+
# text_embeddings order: prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 1579 |
+
embeddings_and_added_time = []
|
| 1580 |
+
crops_coords_top_left = negative_crops_coords_top_left = (tile_width, tile_height)
|
| 1581 |
+
for row in range(grid_rows):
|
| 1582 |
+
addition_embed_type_row = []
|
| 1583 |
+
for col in range(grid_cols):
|
| 1584 |
+
# extract generated values
|
| 1585 |
+
prompt_embeds = text_embeddings[row][col][0]
|
| 1586 |
+
negative_prompt_embeds = text_embeddings[row][col][1]
|
| 1587 |
+
pooled_prompt_embeds = text_embeddings[row][col][2]
|
| 1588 |
+
negative_pooled_prompt_embeds = text_embeddings[row][col][3]
|
| 1589 |
+
|
| 1590 |
+
if negative_original_size is None:
|
| 1591 |
+
negative_original_size = original_size
|
| 1592 |
+
if negative_target_size is None:
|
| 1593 |
+
negative_target_size = target_size
|
| 1594 |
+
add_text_embeds = pooled_prompt_embeds
|
| 1595 |
+
|
| 1596 |
+
if self.text_encoder_2 is None:
|
| 1597 |
+
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
|
| 1598 |
+
else:
|
| 1599 |
+
text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
|
| 1600 |
+
|
| 1601 |
+
add_time_ids, add_neg_time_ids = self._get_add_time_ids(
|
| 1602 |
+
original_size,
|
| 1603 |
+
crops_coords_top_left,
|
| 1604 |
+
target_size,
|
| 1605 |
+
aesthetic_score,
|
| 1606 |
+
negative_aesthetic_score,
|
| 1607 |
+
negative_original_size,
|
| 1608 |
+
negative_crops_coords_top_left,
|
| 1609 |
+
negative_target_size,
|
| 1610 |
+
dtype=prompt_embeds.dtype,
|
| 1611 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1612 |
+
)
|
| 1613 |
+
add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
|
| 1614 |
+
|
| 1615 |
+
if self.do_classifier_free_guidance:
|
| 1616 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1617 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 1618 |
+
add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
|
| 1619 |
+
add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
|
| 1620 |
+
|
| 1621 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 1622 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 1623 |
+
add_time_ids = add_time_ids.to(device)
|
| 1624 |
+
addition_embed_type_row.append((prompt_embeds, add_text_embeds, add_time_ids))
|
| 1625 |
+
|
| 1626 |
+
embeddings_and_added_time.append(addition_embed_type_row)
|
| 1627 |
+
|
| 1628 |
+
# 9. Prepare tiles weights and latent overlaps size to denoising process
|
| 1629 |
+
tile_weights, tile_row_overlaps, tile_col_overlaps = self.prepare_tiles(
|
| 1630 |
+
grid_rows,
|
| 1631 |
+
grid_cols,
|
| 1632 |
+
tile_weighting_method,
|
| 1633 |
+
tile_width,
|
| 1634 |
+
tile_height,
|
| 1635 |
+
normal_tile_overlap,
|
| 1636 |
+
border_tile_overlap,
|
| 1637 |
+
width,
|
| 1638 |
+
height,
|
| 1639 |
+
tile_gaussian_sigma,
|
| 1640 |
+
batch_size,
|
| 1641 |
+
device,
|
| 1642 |
+
dtype,
|
| 1643 |
+
)
|
| 1644 |
+
|
| 1645 |
+
# 10. Denoising loop
|
| 1646 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 1647 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1648 |
+
for i, t in enumerate(timesteps):
|
| 1649 |
+
# Diffuse each tile
|
| 1650 |
+
noise_preds = []
|
| 1651 |
+
for row in range(grid_rows):
|
| 1652 |
+
noise_preds_row = []
|
| 1653 |
+
for col in range(grid_cols):
|
| 1654 |
+
if self.interrupt:
|
| 1655 |
+
continue
|
| 1656 |
+
tile_row_overlap = tile_row_overlaps[row, col]
|
| 1657 |
+
tile_col_overlap = tile_col_overlaps[row, col]
|
| 1658 |
+
|
| 1659 |
+
px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
|
| 1660 |
+
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, width, height
|
| 1661 |
+
)
|
| 1662 |
+
|
| 1663 |
+
tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end]
|
| 1664 |
+
|
| 1665 |
+
# expand the latents if we are doing classifier free guidance
|
| 1666 |
+
latent_model_input = (
|
| 1667 |
+
torch.cat([tile_latents] * 2)
|
| 1668 |
+
if self.do_classifier_free_guidance
|
| 1669 |
+
else tile_latents # 1, 4, ...
|
| 1670 |
+
)
|
| 1671 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1672 |
+
|
| 1673 |
+
# predict the noise residual
|
| 1674 |
+
added_cond_kwargs = {
|
| 1675 |
+
"text_embeds": embeddings_and_added_time[row][col][1],
|
| 1676 |
+
"time_ids": embeddings_and_added_time[row][col][2],
|
| 1677 |
+
}
|
| 1678 |
+
|
| 1679 |
+
# controlnet(s) inference
|
| 1680 |
+
if guess_mode and self.do_classifier_free_guidance:
|
| 1681 |
+
# Infer ControlNet only for the conditional batch.
|
| 1682 |
+
control_model_input = tile_latents
|
| 1683 |
+
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
| 1684 |
+
controlnet_prompt_embeds = embeddings_and_added_time[row][col][0].chunk(2)[1]
|
| 1685 |
+
controlnet_added_cond_kwargs = {
|
| 1686 |
+
"text_embeds": embeddings_and_added_time[row][col][1].chunk(2)[1],
|
| 1687 |
+
"time_ids": embeddings_and_added_time[row][col][2].chunk(2)[1],
|
| 1688 |
+
}
|
| 1689 |
+
else:
|
| 1690 |
+
control_model_input = latent_model_input
|
| 1691 |
+
controlnet_prompt_embeds = embeddings_and_added_time[row][col][0]
|
| 1692 |
+
controlnet_added_cond_kwargs = added_cond_kwargs
|
| 1693 |
+
|
| 1694 |
+
if isinstance(controlnet_keep[i], list):
|
| 1695 |
+
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
| 1696 |
+
else:
|
| 1697 |
+
controlnet_cond_scale = controlnet_conditioning_scale
|
| 1698 |
+
if isinstance(controlnet_cond_scale, list):
|
| 1699 |
+
controlnet_cond_scale = controlnet_cond_scale[0]
|
| 1700 |
+
cond_scale = controlnet_cond_scale * controlnet_keep[i]
|
| 1701 |
+
|
| 1702 |
+
px_row_init_pixel, px_row_end_pixel, px_col_init_pixel, px_col_end_pixel = _tile2pixel_indices(
|
| 1703 |
+
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, width, height
|
| 1704 |
+
)
|
| 1705 |
+
|
| 1706 |
+
tile_control_image = control_image[
|
| 1707 |
+
:, :, px_row_init_pixel:px_row_end_pixel, px_col_init_pixel:px_col_end_pixel
|
| 1708 |
+
]
|
| 1709 |
+
|
| 1710 |
+
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
| 1711 |
+
control_model_input,
|
| 1712 |
+
t,
|
| 1713 |
+
encoder_hidden_states=controlnet_prompt_embeds,
|
| 1714 |
+
controlnet_cond=[tile_control_image],
|
| 1715 |
+
control_type=control_type,
|
| 1716 |
+
control_type_idx=control_mode,
|
| 1717 |
+
conditioning_scale=cond_scale,
|
| 1718 |
+
guess_mode=guess_mode,
|
| 1719 |
+
added_cond_kwargs=controlnet_added_cond_kwargs,
|
| 1720 |
+
return_dict=False,
|
| 1721 |
+
)
|
| 1722 |
+
|
| 1723 |
+
if guess_mode and self.do_classifier_free_guidance:
|
| 1724 |
+
# Inferred ControlNet only for the conditional batch.
|
| 1725 |
+
# To apply the output of ControlNet to both the unconditional and conditional batches,
|
| 1726 |
+
# add 0 to the unconditional batch to keep it unchanged.
|
| 1727 |
+
down_block_res_samples = [
|
| 1728 |
+
torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples
|
| 1729 |
+
]
|
| 1730 |
+
mid_block_res_sample = torch.cat(
|
| 1731 |
+
[torch.zeros_like(mid_block_res_sample), mid_block_res_sample]
|
| 1732 |
+
)
|
| 1733 |
+
|
| 1734 |
+
# predict the noise residual
|
| 1735 |
+
with torch.amp.autocast(device.type, dtype=dtype, enabled=dtype != self.unet.dtype):
|
| 1736 |
+
noise_pred = self.unet(
|
| 1737 |
+
latent_model_input,
|
| 1738 |
+
t,
|
| 1739 |
+
encoder_hidden_states=embeddings_and_added_time[row][col][0],
|
| 1740 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 1741 |
+
down_block_additional_residuals=down_block_res_samples,
|
| 1742 |
+
mid_block_additional_residual=mid_block_res_sample,
|
| 1743 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1744 |
+
return_dict=False,
|
| 1745 |
+
)[0]
|
| 1746 |
+
|
| 1747 |
+
# perform guidance
|
| 1748 |
+
if self.do_classifier_free_guidance:
|
| 1749 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1750 |
+
noise_pred_tile = noise_pred_uncond + guidance_scale * (
|
| 1751 |
+
noise_pred_text - noise_pred_uncond
|
| 1752 |
+
)
|
| 1753 |
+
noise_preds_row.append(noise_pred_tile)
|
| 1754 |
+
noise_preds.append(noise_preds_row)
|
| 1755 |
+
|
| 1756 |
+
# Stitch noise predictions for all tiles
|
| 1757 |
+
noise_pred = torch.zeros(latents.shape, device=device)
|
| 1758 |
+
contributors = torch.zeros(latents.shape, device=device)
|
| 1759 |
+
|
| 1760 |
+
# Add each tile contribution to overall latents
|
| 1761 |
+
for row in range(grid_rows):
|
| 1762 |
+
for col in range(grid_cols):
|
| 1763 |
+
tile_row_overlap = tile_row_overlaps[row, col]
|
| 1764 |
+
tile_col_overlap = tile_col_overlaps[row, col]
|
| 1765 |
+
px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
|
| 1766 |
+
row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, width, height
|
| 1767 |
+
)
|
| 1768 |
+
tile_weights_resized = tile_weights[row, col]
|
| 1769 |
+
|
| 1770 |
+
noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += (
|
| 1771 |
+
noise_preds[row][col] * tile_weights_resized
|
| 1772 |
+
)
|
| 1773 |
+
contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights_resized
|
| 1774 |
+
|
| 1775 |
+
# Average overlapping areas with more than 1 contributor
|
| 1776 |
+
noise_pred /= contributors
|
| 1777 |
+
noise_pred = noise_pred.to(dtype)
|
| 1778 |
+
|
| 1779 |
+
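# --- Illustrative sketch (not part of the pipeline) ---------------------------------
# The stitching step above is a weighted average: each tile's noise prediction is
# multiplied by its weight map, accumulated on a full-size canvas, and divided by the
# summed weights wherever tiles overlap. A tiny 1-D version on dummy data, with two
# "tiles" of length 6 overlapping by 2 positions on a canvas of length 10:
import torch

weight = torch.tensor([0.2, 0.6, 1.0, 1.0, 0.6, 0.2])
pred_a = torch.full((6,), 2.0)          # hypothetical prediction of tile A
pred_b = torch.full((6,), 4.0)          # hypothetical prediction of tile B

canvas = torch.zeros(10)
contrib = torch.zeros(10)
canvas[0:6] += pred_a * weight
contrib[0:6] += weight
canvas[4:10] += pred_b * weight
contrib[4:10] += weight
stitched = canvas / contrib

assert torch.allclose(stitched[:4], torch.full((4,), 2.0))    # only tile A contributes
assert torch.allclose(stitched[6:], torch.full((4,), 4.0))    # only tile B contributes
assert 2.0 < stitched[4] < 4.0 and 2.0 < stitched[5] < 4.0    # blended in the overlap
# -------------------------------------------------------------------------------------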
# compute the previous noisy sample x_t -> x_t-1
|
| 1780 |
+
latents_dtype = latents.dtype
|
| 1781 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 1782 |
+
if latents.dtype != latents_dtype:
|
| 1783 |
+
if torch.backends.mps.is_available():
|
| 1784 |
+
# some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
|
| 1785 |
+
latents = latents.to(latents_dtype)
|
| 1786 |
+
|
| 1787 |
+
# update progress bar
|
| 1788 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1789 |
+
progress_bar.update()
|
| 1790 |
+
|
| 1791 |
+
if XLA_AVAILABLE:
|
| 1792 |
+
xm.mark_step()
|
| 1793 |
+
|
| 1794 |
+
# If we do sequential model offloading, let's offload unet and controlnet
|
| 1795 |
+
# manually for max memory savings
|
| 1796 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 1797 |
+
self.unet.to("cpu")
|
| 1798 |
+
self.controlnet.to("cpu")
|
| 1799 |
+
torch.cuda.empty_cache()
|
| 1800 |
+
|
| 1801 |
+
if not output_type == "latent":
|
| 1802 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1803 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1804 |
+
|
| 1805 |
+
if needs_upcasting:
|
| 1806 |
+
self.upcast_vae()
|
| 1807 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1808 |
+
|
| 1809 |
+
# unscale/denormalize the latents
|
| 1810 |
+
# denormalize with the mean and std if available and not None
|
| 1811 |
+
has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
|
| 1812 |
+
has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
|
| 1813 |
+
if has_latents_mean and has_latents_std:
|
| 1814 |
+
latents_mean = (
|
| 1815 |
+
torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
|
| 1816 |
+
)
|
| 1817 |
+
latents_std = (
|
| 1818 |
+
torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
|
| 1819 |
+
)
|
| 1820 |
+
latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
|
| 1821 |
+
else:
|
| 1822 |
+
latents = latents / self.vae.config.scaling_factor
|
| 1823 |
+
|
| 1824 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 1825 |
+
|
| 1826 |
+
# cast back to fp16 if needed
|
| 1827 |
+
if needs_upcasting:
|
| 1828 |
+
self.vae.to(dtype=torch.float16)
|
| 1829 |
+
|
| 1830 |
+
# apply watermark if available
|
| 1831 |
+
if self.watermark is not None:
|
| 1832 |
+
image = self.watermark.apply_watermark(image)
|
| 1833 |
+
|
| 1834 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1835 |
+
else:
|
| 1836 |
+
image = latents
|
| 1837 |
+
|
| 1838 |
+
# Offload all models
|
| 1839 |
+
self.maybe_free_model_hooks()
|
| 1840 |
+
|
| 1841 |
+
result = StableDiffusionXLPipelineOutput(images=image)
|
| 1842 |
+
if not return_dict:
|
| 1843 |
+
return (image,)
|
| 1844 |
+
|
| 1845 |
+
return result
|
v0.36.0/multilingual_stable_diffusion.py
ADDED
|
@@ -0,0 +1,410 @@
| 1 |
+
import inspect
|
| 2 |
+
from typing import Callable, List, Optional, Union
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
from transformers import (
|
| 6 |
+
CLIPImageProcessor,
|
| 7 |
+
CLIPTextModel,
|
| 8 |
+
CLIPTokenizer,
|
| 9 |
+
MBart50TokenizerFast,
|
| 10 |
+
MBartForConditionalGeneration,
|
| 11 |
+
pipeline,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
from diffusers.configuration_utils import FrozenDict
|
| 15 |
+
from diffusers.models import AutoencoderKL, UNet2DConditionModel
|
| 16 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 17 |
+
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
|
| 18 |
+
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
|
| 19 |
+
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
|
| 20 |
+
from diffusers.utils import deprecate, logging
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def detect_language(pipe, prompt, batch_size):
    """helper function to detect language(s) of prompt"""

    if batch_size == 1:
        preds = pipe(prompt, top_k=1, truncation=True, max_length=128)
        return preds[0]["label"]
    else:
        detected_languages = []
        for p in prompt:
            preds = pipe(p, top_k=1, truncation=True, max_length=128)
            detected_languages.append(preds[0]["label"])

        return detected_languages


def translate_prompt(prompt, translation_tokenizer, translation_model, device):
    """helper function to translate prompt to English"""

    encoded_prompt = translation_tokenizer(prompt, return_tensors="pt").to(device)
    generated_tokens = translation_model.generate(**encoded_prompt, max_new_tokens=1000)
    en_trans = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)

    return en_trans[0]


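# --- Illustrative sketch (not part of the pipeline) ---------------------------------
# The two helpers above are meant to be chained: detect the prompt language first and
# translate only when it is not English. The snippet below shows that flow; the model
# names are assumptions chosen for illustration and the checkpoints are large downloads,
# so treat this purely as a usage sketch.
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration, pipeline

detector = pipeline("text-classification", model="papluca/xlm-roberta-base-language-detection")
trans_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
trans_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")

prompt = "Une peinture d'un écureuil mangeant une noisette"
language = detect_language(detector, prompt, batch_size=1)
if language != "en":
    prompt = translate_prompt(prompt, trans_tokenizer, trans_model, device="cpu")
print(prompt)  # an English rendering of the French prompt
# -------------------------------------------------------------------------------------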
class MultilingualStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
|
| 52 |
+
r"""
|
| 53 |
+
Pipeline for text-to-image generation using Stable Diffusion in different languages.
|
| 54 |
+
|
| 55 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 56 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 57 |
+
|
| 58 |
+
Args:
|
| 59 |
+
detection_pipeline ([`pipeline`]):
|
| 60 |
+
Transformers pipeline to detect prompt's language.
|
| 61 |
+
translation_model ([`MBartForConditionalGeneration`]):
|
| 62 |
+
Model to translate prompt to English, if necessary. Please refer to the
|
| 63 |
+
[model card](https://huggingface.co/docs/transformers/model_doc/mbart) for details.
|
| 64 |
+
translation_tokenizer ([`MBart50TokenizerFast`]):
|
| 65 |
+
Tokenizer of the translation model.
|
| 66 |
+
vae ([`AutoencoderKL`]):
|
| 67 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 68 |
+
text_encoder ([`CLIPTextModel`]):
|
| 69 |
+
Frozen text-encoder. Stable Diffusion uses the text portion of
|
| 70 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
| 71 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
| 72 |
+
tokenizer (`CLIPTokenizer`):
|
| 73 |
+
Tokenizer of class
|
| 74 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
| 75 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 76 |
+
scheduler ([`SchedulerMixin`]):
|
| 77 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 78 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 79 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 80 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 81 |
+
Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
|
| 82 |
+
feature_extractor ([`CLIPImageProcessor`]):
|
| 83 |
+
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
|
| 84 |
+
"""
|
| 85 |
+
|
| 86 |
+
def __init__(
|
| 87 |
+
self,
|
| 88 |
+
detection_pipeline: pipeline,
|
| 89 |
+
translation_model: MBartForConditionalGeneration,
|
| 90 |
+
translation_tokenizer: MBart50TokenizerFast,
|
| 91 |
+
vae: AutoencoderKL,
|
| 92 |
+
text_encoder: CLIPTextModel,
|
| 93 |
+
tokenizer: CLIPTokenizer,
|
| 94 |
+
unet: UNet2DConditionModel,
|
| 95 |
+
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
|
| 96 |
+
safety_checker: StableDiffusionSafetyChecker,
|
| 97 |
+
feature_extractor: CLIPImageProcessor,
|
| 98 |
+
):
|
| 99 |
+
super().__init__()
|
| 100 |
+
|
| 101 |
+
if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
|
| 102 |
+
deprecation_message = (
|
| 103 |
+
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
|
| 104 |
+
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
|
| 105 |
+
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
|
| 106 |
+
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
|
| 107 |
+
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
|
| 108 |
+
" file"
|
| 109 |
+
)
|
| 110 |
+
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
|
| 111 |
+
new_config = dict(scheduler.config)
|
| 112 |
+
new_config["steps_offset"] = 1
|
| 113 |
+
scheduler._internal_dict = FrozenDict(new_config)
|
| 114 |
+
|
| 115 |
+
if safety_checker is None:
|
| 116 |
+
logger.warning(
|
| 117 |
+
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
|
| 118 |
+
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
|
| 119 |
+
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
|
| 120 |
+
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
|
| 121 |
+
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
|
| 122 |
+
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
self.register_modules(
|
| 126 |
+
detection_pipeline=detection_pipeline,
|
| 127 |
+
translation_model=translation_model,
|
| 128 |
+
translation_tokenizer=translation_tokenizer,
|
| 129 |
+
vae=vae,
|
| 130 |
+
text_encoder=text_encoder,
|
| 131 |
+
tokenizer=tokenizer,
|
| 132 |
+
unet=unet,
|
| 133 |
+
scheduler=scheduler,
|
| 134 |
+
safety_checker=safety_checker,
|
| 135 |
+
feature_extractor=feature_extractor,
|
| 136 |
+
)
|
| 137 |
+
|
| 138 |
+
@torch.no_grad()
|
| 139 |
+
def __call__(
|
| 140 |
+
self,
|
| 141 |
+
prompt: Union[str, List[str]],
|
| 142 |
+
height: int = 512,
|
| 143 |
+
width: int = 512,
|
| 144 |
+
num_inference_steps: int = 50,
|
| 145 |
+
guidance_scale: float = 7.5,
|
| 146 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 147 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 148 |
+
eta: float = 0.0,
|
| 149 |
+
generator: Optional[torch.Generator] = None,
|
| 150 |
+
latents: Optional[torch.Tensor] = None,
|
| 151 |
+
output_type: Optional[str] = "pil",
|
| 152 |
+
return_dict: bool = True,
|
| 153 |
+
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
|
| 154 |
+
callback_steps: int = 1,
|
| 155 |
+
**kwargs,
|
| 156 |
+
):
|
| 157 |
+
r"""
|
| 158 |
+
Function invoked when calling the pipeline for generation.
|
| 159 |
+
|
| 160 |
+
Args:
|
| 161 |
+
prompt (`str` or `List[str]`):
|
| 162 |
+
The prompt or prompts to guide the image generation. Can be in different languages.
|
| 163 |
+
height (`int`, *optional*, defaults to 512):
|
| 164 |
+
The height in pixels of the generated image.
|
| 165 |
+
width (`int`, *optional*, defaults to 512):
|
| 166 |
+
The width in pixels of the generated image.
|
| 167 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 168 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 169 |
+
expense of slower inference.
|
| 170 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 171 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 172 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 173 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 174 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 175 |
+
usually at the expense of lower image quality.
|
| 176 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 177 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 178 |
+
if `guidance_scale` is less than `1`).
|
| 179 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 180 |
+
The number of images to generate per prompt.
|
| 181 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 182 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 183 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 184 |
+
generator (`torch.Generator`, *optional*):
|
| 185 |
+
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
|
| 186 |
+
deterministic.
|
| 187 |
+
latents (`torch.Tensor`, *optional*):
|
| 188 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 189 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 190 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 191 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 192 |
+
The output format of the generated image. Choose between
|
| 193 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 194 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 195 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 196 |
+
plain tuple.
|
| 197 |
+
callback (`Callable`, *optional*):
|
| 198 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 199 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
|
| 200 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 201 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 202 |
+
called at every step.
|
| 203 |
+
|
| 204 |
+
Returns:
|
| 205 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 206 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
|
| 207 |
+
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
| 208 |
+
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
| 209 |
+
(nsfw) content, according to the `safety_checker`.
|
| 210 |
+
"""
|
| 211 |
+
if isinstance(prompt, str):
|
| 212 |
+
batch_size = 1
|
| 213 |
+
elif isinstance(prompt, list):
|
| 214 |
+
batch_size = len(prompt)
|
| 215 |
+
else:
|
| 216 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 217 |
+
|
| 218 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 219 |
+
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # detect language and translate if necessary
        prompt_language = detect_language(self.detection_pipeline, prompt, batch_size)
        if batch_size == 1 and prompt_language != "en":
            prompt = translate_prompt(prompt, self.translation_tokenizer, self.translation_model, self.device)

        if isinstance(prompt, list):
            for index in range(batch_size):
                if prompt_language[index] != "en":
                    p = translate_prompt(
                        prompt[index], self.translation_tokenizer, self.translation_model, self.device
                    )
                    prompt[index] = p

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                # detect language and translate it if necessary
                negative_prompt_language = detect_language(self.detection_pipeline, negative_prompt, batch_size)
                if negative_prompt_language != "en":
                    negative_prompt = translate_prompt(
                        negative_prompt, self.translation_tokenizer, self.translation_model, self.device
                    )
                if isinstance(negative_prompt, str):
                    uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                # detect language and translate it if necessary
                if isinstance(negative_prompt, list):
                    negative_prompt_languages = detect_language(self.detection_pipeline, negative_prompt, batch_size)
                    for index in range(batch_size):
                        if negative_prompt_languages[index] != "en":
                            p = translate_prompt(
                                negative_prompt[index], self.translation_tokenizer, self.translation_model, self.device
                            )
                            negative_prompt[index] = p
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                step_idx = i // getattr(self.scheduler, "order", 1)
                callback(step_idx, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
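The `__call__` body above detects the prompt language and routes non-English prompts (and negative prompts) through a translation model before CLIP encoding. A minimal usage sketch follows; it assumes this file is the `multilingual_stable_diffusion` community pipeline and that the language-detection and translation checkpoints named here are reasonable stand-ins — any compatible models can be substituted.

```py
import torch
from diffusers import DiffusionPipeline
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration, pipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Language detection and translation components handed to the pipeline
# (model ids are assumptions; swap in whichever checkpoints you prefer).
detection_pipeline = pipeline(
    "text-classification",
    model="papluca/xlm-roberta-base-language-detection",
    device=0 if device == "cuda" else -1,
)
translation_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
translation_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-one-mmt").to(device)

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="multilingual_stable_diffusion",
    detection_pipeline=detection_pipeline,
    translation_model=translation_model,
    translation_tokenizer=translation_tokenizer,
    torch_dtype=torch.float16,
).to(device)

# Non-English prompts are translated to English before encoding.
image = pipe(prompt="Una casa en la playa").images[0]
```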
v0.36.0/one_step_unet.py ADDED
@@ -0,0 +1,24 @@
#!/usr/bin/env python3
import torch

from diffusers import DiffusionPipeline


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
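one_step_unet.py is a minimal "toy" pipeline: it runs a single UNet forward pass plus one scheduler step and then returns a tensor of ones with the sample's shape, which makes it useful for smoke-testing the custom-pipeline loading mechanism. A possible way to exercise it is sketched below; the checkpoint id is an assumption, and any repo that exposes compatible `unet` and `scheduler` components should work.

```py
import torch
from diffusers import DiffusionPipeline

# Load an unconditional UNet + scheduler checkpoint with the community pipeline above.
pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32",  # assumed checkpoint; any UNet2DModel + scheduler repo should do
    custom_pipeline="one_step_unet",
)

output = pipe()
print(output.shape)  # (1, channels, sample_size, sample_size), filled with ones
```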
v0.36.0/pipeline_animatediff_controlnet.py ADDED
@@ -0,0 +1,1129 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel, UNetMotionModel
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.models.unets.unet_motion_model import MotionAdapter
from diffusers.pipelines.animatediff.pipeline_output import AnimateDiffPipelineOutput
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import AutoencoderKL, ControlNetModel, MotionAdapter
        >>> from diffusers.pipelines import DiffusionPipeline
        >>> from diffusers.schedulers import DPMSolverMultistepScheduler
        >>> from PIL import Image

        >>> motion_id = "guoyww/animatediff-motion-adapter-v1-5-2"
        >>> adapter = MotionAdapter.from_pretrained(motion_id)
        >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16)
        >>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)

        >>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
        >>> pipe = DiffusionPipeline.from_pretrained(
        ...     model_id,
        ...     motion_adapter=adapter,
        ...     controlnet=controlnet,
        ...     vae=vae,
        ...     custom_pipeline="pipeline_animatediff_controlnet",
        ... ).to(device="cuda", dtype=torch.float16)
        >>> pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained(
        ...     model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1, beta_schedule="linear",
        ... )
        >>> pipe.enable_vae_slicing()

        >>> conditioning_frames = []
        >>> for i in range(1, 16 + 1):
        ...     conditioning_frames.append(Image.open(f"frame_{i}.png"))

        >>> prompt = "astronaut in space, dancing"
        >>> negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly"
        >>> result = pipe(
        ...     prompt=prompt,
        ...     negative_prompt=negative_prompt,
        ...     width=512,
        ...     height=768,
        ...     conditioning_frames=conditioning_frames,
        ...     num_inference_steps=12,
        ... )

        >>> from diffusers.utils import export_to_gif
        >>> export_to_gif(result.frames[0], "result.gif")
        ```
"""

# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid
def tensor2vid(video: torch.Tensor, processor, output_type="np"):
    batch_size, channels, num_frames, height, width = video.shape
    outputs = []
    for batch_idx in range(batch_size):
        batch_vid = video[batch_idx].permute(1, 0, 2, 3)
        batch_output = processor.postprocess(batch_vid, output_type)

        outputs.append(batch_output)

    if output_type == "np":
        outputs = np.stack(outputs)

    elif output_type == "pt":
        outputs = torch.stack(outputs)

    elif not output_type == "pil":
        raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")

    return outputs


class AnimateDiffControlNetPipeline(
    DiffusionPipeline,
    StableDiffusionMixin,
    TextualInversionLoaderMixin,
    IPAdapterMixin,
    StableDiffusionLoraLoaderMixin,
):
    r"""
    Pipeline for text-to-video generation.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    The pipeline also inherits the following loading methods:
        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
        - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer (`CLIPTokenizer`):
            A [`~transformers.CLIPTokenizer`] to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
        motion_adapter ([`MotionAdapter`]):
            A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
    """

    model_cpu_offload_seq = "text_encoder->unet->vae"
    _optional_components = ["feature_extractor", "image_encoder"]
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        motion_adapter: MotionAdapter,
        controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
        feature_extractor: Optional[CLIPImageProcessor] = None,
        image_encoder: Optional[CLIPVisionModelWithProjection] = None,
    ):
        super().__init__()
        unet = UNetMotionModel.from_unet2d(unet, motion_adapter)

        if isinstance(controlnet, (list, tuple)):
            controlnet = MultiControlNetModel(controlnet)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            motion_adapter=motion_adapter,
            controlnet=controlnet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.control_image_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
        )

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
    def encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if not USE_PEFT_BACKEND:
                adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
            else:
                scale_lora_layers(self.text_encoder, lora_scale)

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            if clip_skip is None:
                prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
                prompt_embeds = prompt_embeds[0]
            else:
                prompt_embeds = self.text_encoder(
                    text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
                )
                # Access the `hidden_states` first, that contains a tuple of
                # all the hidden states from the encoder layers. Then index into
                # the tuple to access the hidden states from the desired layer.
                prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
                # We also need to apply the final LayerNorm here to not mess with the
                # representations. The `last_hidden_states` that we typically use for
                # obtaining the final prompt representations passes through the LayerNorm
                # layer.
                prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
            # Retrieve the original scale by scaling back the LoRA layers
            unscale_lora_layers(self.text_encoder, lora_scale)

        return prompt_embeds, negative_prompt_embeds
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
        dtype = next(self.image_encoder.parameters()).dtype

        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor(image, return_tensors="pt").pixel_values

        image = image.to(device=device, dtype=dtype)
        if output_hidden_states:
            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_enc_hidden_states = self.image_encoder(
                torch.zeros_like(image), output_hidden_states=True
            ).hidden_states[-2]
            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
                num_images_per_prompt, dim=0
            )
            return image_enc_hidden_states, uncond_image_enc_hidden_states
        else:
            image_embeds = self.image_encoder(image).image_embeds
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_embeds = torch.zeros_like(image_embeds)

            return image_embeds, uncond_image_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
    def prepare_ip_adapter_image_embeds(
        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt
    ):
        if ip_adapter_image_embeds is None:
            if not isinstance(ip_adapter_image, list):
                ip_adapter_image = [ip_adapter_image]

            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
                raise ValueError(
                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
                )

            image_embeds = []
            for single_ip_adapter_image, image_proj_layer in zip(
                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
            ):
                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
                single_image_embeds, single_negative_image_embeds = self.encode_image(
                    single_ip_adapter_image, device, 1, output_hidden_state
                )
                single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
                single_negative_image_embeds = torch.stack(
                    [single_negative_image_embeds] * num_images_per_prompt, dim=0
                )

                if self.do_classifier_free_guidance:
                    single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                    single_image_embeds = single_image_embeds.to(device)

                image_embeds.append(single_image_embeds)
        else:
            image_embeds = ip_adapter_image_embeds
        return image_embeds

    # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
    def decode_latents(self, latents):
        latents = 1 / self.vae.config.scaling_factor * latents

        batch_size, channels, num_frames, height, width = latents.shape
        latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)

        image = self.vae.decode(latents).sample
        video = (
            image[None, :]
            .reshape(
                (
                    batch_size,
                    num_frames,
                    -1,
                )
                + image.shape[2:]
            )
            .permute(0, 2, 1, 3, 4)
        )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        video = video.float()
        return video

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
    def check_inputs(
        self,
        prompt,
        height,
        width,
        num_frames,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
        image=None,
        controlnet_conditioning_scale=1.0,
        control_guidance_start=0.0,
        control_guidance_end=1.0,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        # `prompt` needs more sophisticated handling when there are multiple
        # conditionings.
        if isinstance(self.controlnet, MultiControlNetModel):
            if isinstance(prompt, list):
                logger.warning(
                    f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
                    " prompts. The conditionings will be fixed across the prompts."
                )

        # Check `image`
        is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
            self.controlnet, torch._dynamo.eval_frame.OptimizedModule
        )
        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            if not isinstance(image, list):
                raise TypeError(f"For single controlnet, `image` must be of type `list` but got {type(image)}")
            if len(image) != num_frames:
                raise ValueError(f"Excepted image to have length {num_frames} but got {len(image)=}")
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if not isinstance(image, list) or not isinstance(image[0], list):
                raise TypeError(f"For multiple controlnets: `image` must be type list of lists but got {type(image)=}")
            if len(image[0]) != num_frames:
                raise ValueError(f"Expected length of image sublist as {num_frames} but got {len(image[0])=}")
            if any(len(img) != len(image[0]) for img in image):
                raise ValueError("All conditioning frame batches for multicontrolnet must be same size")
        else:
            assert False

        # Check `controlnet_conditioning_scale`
        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            if not isinstance(controlnet_conditioning_scale, float):
                raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if isinstance(controlnet_conditioning_scale, list):
                if any(isinstance(i, list) for i in controlnet_conditioning_scale):
                    raise ValueError("A single batch of multiple conditionings are supported at the moment.")
            elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
                self.controlnet.nets
            ):
                raise ValueError(
                    "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
                    " the same length as the number of controlnets"
                )
        else:
            assert False

        if not isinstance(control_guidance_start, (tuple, list)):
            control_guidance_start = [control_guidance_start]

        if not isinstance(control_guidance_end, (tuple, list)):
            control_guidance_end = [control_guidance_end]

        if len(control_guidance_start) != len(control_guidance_end):
            raise ValueError(
                f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
            )

        if isinstance(self.controlnet, MultiControlNetModel):
            if len(control_guidance_start) != len(self.controlnet.nets):
                raise ValueError(
                    f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
                )

        for start, end in zip(control_guidance_start, control_guidance_end):
            if start >= end:
                raise ValueError(
                    f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
                )
            if start < 0.0:
                raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
            if end > 1.0:
                raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
| 628 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
|
| 629 |
+
def check_image(self, image, prompt, prompt_embeds):
|
| 630 |
+
image_is_pil = isinstance(image, Image.Image)
|
| 631 |
+
image_is_tensor = isinstance(image, torch.Tensor)
|
| 632 |
+
image_is_np = isinstance(image, np.ndarray)
|
| 633 |
+
image_is_pil_list = isinstance(image, list) and isinstance(image[0], Image.Image)
|
| 634 |
+
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
| 635 |
+
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
|
| 636 |
+
|
| 637 |
+
if (
|
| 638 |
+
not image_is_pil
|
| 639 |
+
and not image_is_tensor
|
| 640 |
+
and not image_is_np
|
| 641 |
+
and not image_is_pil_list
|
| 642 |
+
and not image_is_tensor_list
|
| 643 |
+
and not image_is_np_list
|
| 644 |
+
):
|
| 645 |
+
raise TypeError(
|
| 646 |
+
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
|
| 647 |
+
)
|
| 648 |
+
|
| 649 |
+
if image_is_pil:
|
| 650 |
+
image_batch_size = 1
|
| 651 |
+
else:
|
| 652 |
+
image_batch_size = len(image)
|
| 653 |
+
|
| 654 |
+
if prompt is not None and isinstance(prompt, str):
|
| 655 |
+
prompt_batch_size = 1
|
| 656 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 657 |
+
prompt_batch_size = len(prompt)
|
| 658 |
+
elif prompt_embeds is not None:
|
| 659 |
+
prompt_batch_size = prompt_embeds.shape[0]
|
| 660 |
+
|
| 661 |
+
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
| 662 |
+
raise ValueError(
|
| 663 |
+
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
| 664 |
+
)
|
| 665 |
+
|
| 666 |
+
# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents
|
| 667 |
+
def prepare_latents(
|
| 668 |
+
self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
|
| 669 |
+
):
|
| 670 |
+
shape = (
|
| 671 |
+
batch_size,
|
| 672 |
+
num_channels_latents,
|
| 673 |
+
num_frames,
|
| 674 |
+
height // self.vae_scale_factor,
|
| 675 |
+
width // self.vae_scale_factor,
|
| 676 |
+
)
|
| 677 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 678 |
+
raise ValueError(
|
| 679 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 680 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 681 |
+
)
|
| 682 |
+
|
| 683 |
+
if latents is None:
|
| 684 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 685 |
+
else:
|
| 686 |
+
latents = latents.to(device)
|
| 687 |
+
|
| 688 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 689 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 690 |
+
return latents
|
| 691 |
+
|
| 692 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
|
| 693 |
+
def prepare_image(
|
| 694 |
+
self,
|
| 695 |
+
image,
|
| 696 |
+
width,
|
| 697 |
+
height,
|
| 698 |
+
batch_size,
|
| 699 |
+
num_images_per_prompt,
|
| 700 |
+
device,
|
| 701 |
+
dtype,
|
| 702 |
+
do_classifier_free_guidance=False,
|
| 703 |
+
guess_mode=False,
|
| 704 |
+
):
|
| 705 |
+
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
| 706 |
+
image_batch_size = image.shape[0]
|
| 707 |
+
|
| 708 |
+
if image_batch_size == 1:
|
| 709 |
+
repeat_by = batch_size
|
| 710 |
+
else:
|
| 711 |
+
# image batch size is the same as prompt batch size
|
| 712 |
+
repeat_by = num_images_per_prompt
|
| 713 |
+
|
| 714 |
+
image = image.repeat_interleave(repeat_by, dim=0)
|
| 715 |
+
|
| 716 |
+
image = image.to(device=device, dtype=dtype)
|
| 717 |
+
|
| 718 |
+
if do_classifier_free_guidance and not guess_mode:
|
| 719 |
+
image = torch.cat([image] * 2)
|
| 720 |
+
|
| 721 |
+
return image
|
| 722 |
+
|
| 723 |
+
@property
|
| 724 |
+
def guidance_scale(self):
|
| 725 |
+
return self._guidance_scale
|
| 726 |
+
|
| 727 |
+
@property
|
| 728 |
+
def clip_skip(self):
|
| 729 |
+
return self._clip_skip
|
| 730 |
+
|
| 731 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 732 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 733 |
+
# corresponds to doing no classifier free guidance.
|
| 734 |
+
@property
|
| 735 |
+
def do_classifier_free_guidance(self):
|
| 736 |
+
return self._guidance_scale > 1
|
| 737 |
+
|
| 738 |
+
@property
|
| 739 |
+
def cross_attention_kwargs(self):
|
| 740 |
+
return self._cross_attention_kwargs
|
| 741 |
+
|
| 742 |
+
@property
|
| 743 |
+
def num_timesteps(self):
|
| 744 |
+
return self._num_timesteps
|
| 745 |
+
|
| 746 |
+
@torch.no_grad()
|
| 747 |
+
def __call__(
|
| 748 |
+
self,
|
| 749 |
+
prompt: Union[str, List[str]] = None,
|
| 750 |
+
num_frames: Optional[int] = 16,
|
| 751 |
+
height: Optional[int] = None,
|
| 752 |
+
width: Optional[int] = None,
|
| 753 |
+
num_inference_steps: int = 50,
|
| 754 |
+
guidance_scale: float = 7.5,
|
| 755 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 756 |
+
num_videos_per_prompt: Optional[int] = 1,
|
| 757 |
+
eta: float = 0.0,
|
| 758 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 759 |
+
latents: Optional[torch.Tensor] = None,
|
| 760 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 761 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 762 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 763 |
+
ip_adapter_image_embeds: Optional[PipelineImageInput] = None,
|
| 764 |
+
conditioning_frames: Optional[List[PipelineImageInput]] = None,
|
| 765 |
+
output_type: Optional[str] = "pil",
|
| 766 |
+
return_dict: bool = True,
|
| 767 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 768 |
+
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
|
| 769 |
+
guess_mode: bool = False,
|
| 770 |
+
control_guidance_start: Union[float, List[float]] = 0.0,
|
| 771 |
+
control_guidance_end: Union[float, List[float]] = 1.0,
|
| 772 |
+
clip_skip: Optional[int] = None,
|
| 773 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 774 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 775 |
+
**kwargs,
|
| 776 |
+
):
|
| 777 |
+
r"""
|
| 778 |
+
The call function to the pipeline for generation.
|
| 779 |
+
|
| 780 |
+
Args:
|
| 781 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 782 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 783 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 784 |
+
The height in pixels of the generated video.
|
| 785 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 786 |
+
The width in pixels of the generated video.
|
| 787 |
+
num_frames (`int`, *optional*, defaults to 16):
|
| 788 |
+
The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds
|
| 789 |
+
amounts to 2 seconds of video.
|
| 790 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 791 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality videos at the
|
| 792 |
+
expense of slower inference.
|
| 793 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 794 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 795 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 796 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 797 |
+
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
|
| 798 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 799 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 800 |
+
Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies
|
| 801 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 802 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 803 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 804 |
+
generation deterministic.
|
| 805 |
+
latents (`torch.Tensor`, *optional*):
|
| 806 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
|
| 807 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 808 |
+
tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
|
| 809 |
+
`(batch_size, num_channel, num_frames, height, width)`.
|
| 810 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 811 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 812 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 813 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 814 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 815 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 816 |
+
ip_adapter_image (`PipelineImageInput`, *optional*):
|
| 817 |
+
Optional image input to work with IP Adapters.
|
| 818 |
+
ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
|
| 819 |
+
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters.
|
| 820 |
+
Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
|
| 821 |
+
if `do_classifier_free_guidance` is set to `True`.
|
| 822 |
+
If not provided, embeddings are computed from the `ip_adapter_image` input argument.
|
| 823 |
+
conditioning_frames (`List[PipelineImageInput]`, *optional*):
|
| 824 |
+
The ControlNet input condition to provide guidance to the `unet` for generation. If multiple ControlNets
|
| 825 |
+
are specified, images must be passed as a list such that each element of the list can be correctly
|
| 826 |
+
batched for input to a single ControlNet.
|
| 827 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 828 |
+
The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or
|
| 829 |
+
`np.array`.
|
| 830 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 831 |
+
Whether or not to return an [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] instead
|
| 832 |
+
of a plain tuple.
|
| 833 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 834 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 835 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 836 |
+
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 837 |
+
The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
|
| 838 |
+
to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
|
| 839 |
+
the corresponding scale as a list.
|
| 840 |
+
guess_mode (`bool`, *optional*, defaults to `False`):
|
| 841 |
+
The ControlNet encoder tries to recognize the content of the input image even if you remove all
|
| 842 |
+
prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
|
| 843 |
+
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
|
| 844 |
+
The percentage of total steps at which the ControlNet starts applying.
|
| 845 |
+
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 846 |
+
The percentage of total steps at which the ControlNet stops applying.
|
| 847 |
+
clip_skip (`int`, *optional*):
|
| 848 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 849 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 850 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 851 |
+
A function that is called at the end of each denoising step during inference. The function is called
|
| 852 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 853 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 854 |
+
`callback_on_step_end_tensor_inputs`.
|
| 855 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 856 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 857 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 858 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 859 |
+
|
| 860 |
+
Examples:
|
| 861 |
+
|
| 862 |
+
Returns:
|
| 863 |
+
[`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
|
| 864 |
+
If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
|
| 865 |
+
returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
|
| 866 |
+
"""
|
| 867 |
+
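# Hedged usage sketch (added comment, not in the original source): a
# `callback_on_step_end` can inspect or tweak tensors between steps, e.g.
#     def rescale_cb(pipe, step, timestep, callback_kwargs):
#         callback_kwargs["latents"] = callback_kwargs["latents"] * 1.0  # no-op placeholder
#         return callback_kwargs
#     pipe(..., callback_on_step_end=rescale_cb, callback_on_step_end_tensor_inputs=["latents"])
# The names listed in `callback_on_step_end_tensor_inputs` must appear in the
# pipeline's `_callback_tensor_inputs`.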
|
| 868 |
+
callback = kwargs.pop("callback", None)
|
| 869 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 870 |
+
|
| 871 |
+
if callback is not None:
|
| 872 |
+
deprecate(
|
| 873 |
+
"callback",
|
| 874 |
+
"1.0.0",
|
| 875 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 876 |
+
)
|
| 877 |
+
if callback_steps is not None:
|
| 878 |
+
deprecate(
|
| 879 |
+
"callback_steps",
|
| 880 |
+
"1.0.0",
|
| 881 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 882 |
+
)
|
| 883 |
+
|
| 884 |
+
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
| 885 |
+
|
| 886 |
+
# align format for control guidance
|
| 887 |
+
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
| 888 |
+
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
| 889 |
+
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
| 890 |
+
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
| 891 |
+
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
| 892 |
+
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
|
| 893 |
+
control_guidance_start, control_guidance_end = (
|
| 894 |
+
mult * [control_guidance_start],
|
| 895 |
+
mult * [control_guidance_end],
|
| 896 |
+
)
|
| 897 |
+
|
| 898 |
+
# 0. Default height and width to unet
|
| 899 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 900 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 901 |
+
|
| 902 |
+
num_videos_per_prompt = 1
|
| 903 |
+
|
| 904 |
+
# 1. Check inputs. Raise error if not correct
|
| 905 |
+
self.check_inputs(
|
| 906 |
+
prompt=prompt,
|
| 907 |
+
height=height,
|
| 908 |
+
width=width,
|
| 909 |
+
num_frames=num_frames,
|
| 910 |
+
callback_steps=callback_steps,
|
| 911 |
+
negative_prompt=negative_prompt,
|
| 912 |
+
callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
|
| 913 |
+
prompt_embeds=prompt_embeds,
|
| 914 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 915 |
+
image=conditioning_frames,
|
| 916 |
+
controlnet_conditioning_scale=controlnet_conditioning_scale,
|
| 917 |
+
control_guidance_start=control_guidance_start,
|
| 918 |
+
control_guidance_end=control_guidance_end,
|
| 919 |
+
)
|
| 920 |
+
|
| 921 |
+
self._guidance_scale = guidance_scale
|
| 922 |
+
self._clip_skip = clip_skip
|
| 923 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 924 |
+
|
| 925 |
+
# 2. Define call parameters
|
| 926 |
+
if prompt is not None and isinstance(prompt, str):
|
| 927 |
+
batch_size = 1
|
| 928 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 929 |
+
batch_size = len(prompt)
|
| 930 |
+
else:
|
| 931 |
+
batch_size = prompt_embeds.shape[0]
|
| 932 |
+
|
| 933 |
+
device = self._execution_device
|
| 934 |
+
|
| 935 |
+
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
| 936 |
+
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
| 937 |
+
|
| 938 |
+
global_pool_conditions = (
|
| 939 |
+
controlnet.config.global_pool_conditions
|
| 940 |
+
if isinstance(controlnet, ControlNetModel)
|
| 941 |
+
else controlnet.nets[0].config.global_pool_conditions
|
| 942 |
+
)
|
| 943 |
+
guess_mode = guess_mode or global_pool_conditions
|
| 944 |
+
|
| 945 |
+
# 3. Encode input prompt
|
| 946 |
+
text_encoder_lora_scale = (
|
| 947 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 948 |
+
)
|
| 949 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 950 |
+
prompt,
|
| 951 |
+
device,
|
| 952 |
+
num_videos_per_prompt,
|
| 953 |
+
self.do_classifier_free_guidance,
|
| 954 |
+
negative_prompt,
|
| 955 |
+
prompt_embeds=prompt_embeds,
|
| 956 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 957 |
+
lora_scale=text_encoder_lora_scale,
|
| 958 |
+
clip_skip=self.clip_skip,
|
| 959 |
+
)
|
| 960 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 961 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 962 |
+
# to avoid doing two forward passes
|
| 963 |
+
if self.do_classifier_free_guidance:
|
| 964 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 965 |
+
|
| 966 |
+
if ip_adapter_image is not None:
|
| 967 |
+
image_embeds = self.prepare_ip_adapter_image_embeds(
|
| 968 |
+
ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt
|
| 969 |
+
)
|
| 970 |
+
|
| 971 |
+
if isinstance(controlnet, ControlNetModel):
|
| 972 |
+
conditioning_frames = self.prepare_image(
|
| 973 |
+
image=conditioning_frames,
|
| 974 |
+
width=width,
|
| 975 |
+
height=height,
|
| 976 |
+
batch_size=batch_size * num_videos_per_prompt * num_frames,
|
| 977 |
+
num_images_per_prompt=num_videos_per_prompt,
|
| 978 |
+
device=device,
|
| 979 |
+
dtype=controlnet.dtype,
|
| 980 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 981 |
+
guess_mode=guess_mode,
|
| 982 |
+
)
|
| 983 |
+
elif isinstance(controlnet, MultiControlNetModel):
|
| 984 |
+
cond_prepared_frames = []
|
| 985 |
+
for frame_ in conditioning_frames:
|
| 986 |
+
prepared_frame = self.prepare_image(
|
| 987 |
+
image=frame_,
|
| 988 |
+
width=width,
|
| 989 |
+
height=height,
|
| 990 |
+
batch_size=batch_size * num_videos_per_prompt * num_frames,
|
| 991 |
+
num_images_per_prompt=num_videos_per_prompt,
|
| 992 |
+
device=device,
|
| 993 |
+
dtype=controlnet.dtype,
|
| 994 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 995 |
+
guess_mode=guess_mode,
|
| 996 |
+
)
|
| 997 |
+
cond_prepared_frames.append(prepared_frame)
|
| 998 |
+
conditioning_frames = cond_prepared_frames
|
| 999 |
+
else:
|
| 1000 |
+
assert False, f"Unexpected controlnet type: {type(controlnet)}"
|
| 1001 |
+
|
| 1002 |
+
# 4. Prepare timesteps
|
| 1003 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 1004 |
+
timesteps = self.scheduler.timesteps
|
| 1005 |
+
self._num_timesteps = len(timesteps)
|
| 1006 |
+
|
| 1007 |
+
# 5. Prepare latent variables
|
| 1008 |
+
num_channels_latents = self.unet.config.in_channels
|
| 1009 |
+
latents = self.prepare_latents(
|
| 1010 |
+
batch_size * num_videos_per_prompt,
|
| 1011 |
+
num_channels_latents,
|
| 1012 |
+
num_frames,
|
| 1013 |
+
height,
|
| 1014 |
+
width,
|
| 1015 |
+
prompt_embeds.dtype,
|
| 1016 |
+
device,
|
| 1017 |
+
generator,
|
| 1018 |
+
latents,
|
| 1019 |
+
)
|
| 1020 |
+
|
| 1021 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1022 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1023 |
+
|
| 1024 |
+
# 7. Add image embeds for IP-Adapter
|
| 1025 |
+
added_cond_kwargs = (
|
| 1026 |
+
{"image_embeds": image_embeds}
|
| 1027 |
+
if ip_adapter_image is not None or ip_adapter_image_embeds is not None
|
| 1028 |
+
else None
|
| 1029 |
+
)
|
| 1030 |
+
|
| 1031 |
+
# 7.1 Create tensor stating which controlnets to keep
|
| 1032 |
+
controlnet_keep = []
|
| 1033 |
+
for i in range(len(timesteps)):
|
| 1034 |
+
keeps = [
|
| 1035 |
+
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
|
| 1036 |
+
for s, e in zip(control_guidance_start, control_guidance_end)
|
| 1037 |
+
]
|
| 1038 |
+
controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
|
| 1039 |
+
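# Hedged worked example (added comment, not in the original source): with
# control_guidance_start=0.0, control_guidance_end=0.5 and 20 timesteps, the
# expression above yields keep=1.0 for steps 0-9 and keep=0.0 for steps 10-19,
# i.e. the ControlNet residuals are only applied during the first half of denoising.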
|
| 1040 |
+
# 8. Denoising loop
|
| 1041 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 1042 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1043 |
+
for i, t in enumerate(timesteps):
|
| 1044 |
+
# expand the latents if we are doing classifier free guidance
|
| 1045 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 1046 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1047 |
+
|
| 1048 |
+
if guess_mode and self.do_classifier_free_guidance:
|
| 1049 |
+
# Infer ControlNet only for the conditional batch.
|
| 1050 |
+
control_model_input = latents
|
| 1051 |
+
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
| 1052 |
+
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
|
| 1053 |
+
else:
|
| 1054 |
+
control_model_input = latent_model_input
|
| 1055 |
+
controlnet_prompt_embeds = prompt_embeds
|
| 1056 |
+
controlnet_prompt_embeds = controlnet_prompt_embeds.repeat_interleave(num_frames, dim=0)
|
| 1057 |
+
|
| 1058 |
+
if isinstance(controlnet_keep[i], list):
|
| 1059 |
+
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
| 1060 |
+
else:
|
| 1061 |
+
controlnet_cond_scale = controlnet_conditioning_scale
|
| 1062 |
+
if isinstance(controlnet_cond_scale, list):
|
| 1063 |
+
controlnet_cond_scale = controlnet_cond_scale[0]
|
| 1064 |
+
cond_scale = controlnet_cond_scale * controlnet_keep[i]
|
| 1065 |
+
|
| 1066 |
+
control_model_input = torch.transpose(control_model_input, 1, 2)
|
| 1067 |
+
control_model_input = control_model_input.reshape(
|
| 1068 |
+
(-1, control_model_input.shape[2], control_model_input.shape[3], control_model_input.shape[4])
|
| 1069 |
+
)
|
| 1070 |
+
|
| 1071 |
+
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
| 1072 |
+
control_model_input,
|
| 1073 |
+
t,
|
| 1074 |
+
encoder_hidden_states=controlnet_prompt_embeds,
|
| 1075 |
+
controlnet_cond=conditioning_frames,
|
| 1076 |
+
conditioning_scale=cond_scale,
|
| 1077 |
+
guess_mode=guess_mode,
|
| 1078 |
+
return_dict=False,
|
| 1079 |
+
)
|
| 1080 |
+
|
| 1081 |
+
# predict the noise residual
|
| 1082 |
+
noise_pred = self.unet(
|
| 1083 |
+
latent_model_input,
|
| 1084 |
+
t,
|
| 1085 |
+
encoder_hidden_states=prompt_embeds,
|
| 1086 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 1087 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1088 |
+
down_block_additional_residuals=down_block_res_samples,
|
| 1089 |
+
mid_block_additional_residual=mid_block_res_sample,
|
| 1090 |
+
).sample
|
| 1091 |
+
|
| 1092 |
+
# perform guidance
|
| 1093 |
+
if self.do_classifier_free_guidance:
|
| 1094 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1095 |
+
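# Added comment: the next line is the standard classifier-free guidance update,
# eps = eps_uncond + guidance_scale * (eps_text - eps_uncond), extrapolating away
# from the unconditional prediction toward the text-conditioned one.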
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1096 |
+
|
| 1097 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1098 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 1099 |
+
|
| 1100 |
+
if callback_on_step_end is not None:
|
| 1101 |
+
callback_kwargs = {}
|
| 1102 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 1103 |
+
callback_kwargs[k] = locals()[k]
|
| 1104 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 1105 |
+
|
| 1106 |
+
latents = callback_outputs.pop("latents", latents)
|
| 1107 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 1108 |
+
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
| 1109 |
+
|
| 1110 |
+
# call the callback, if provided
|
| 1111 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1112 |
+
progress_bar.update()
|
| 1113 |
+
if callback is not None and i % callback_steps == 0:
|
| 1114 |
+
callback(i, t, latents)
|
| 1115 |
+
|
| 1116 |
+
# 9. Post processing
|
| 1117 |
+
if output_type == "latent":
|
| 1118 |
+
video = latents
|
| 1119 |
+
else:
|
| 1120 |
+
video_tensor = self.decode_latents(latents)
|
| 1121 |
+
video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
|
| 1122 |
+
|
| 1123 |
+
# 10. Offload all models
|
| 1124 |
+
self.maybe_free_model_hooks()
|
| 1125 |
+
|
| 1126 |
+
if not return_dict:
|
| 1127 |
+
return (video,)
|
| 1128 |
+
|
| 1129 |
+
return AnimateDiffPipelineOutput(frames=video)
|
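The `__call__` above follows the usual AnimateDiff text-to-video flow with per-frame ControlNet conditioning. A minimal, hedged usage sketch follows; the checkpoint ids, the `custom_pipeline` name, and the `pose_*.png` conditioning frames are illustrative assumptions rather than something this file prescribes:

```py
import torch
from diffusers import ControlNetModel, DiffusionPipeline, MotionAdapter
from diffusers.utils import export_to_gif, load_image

# Assumed checkpoints; any SD 1.5-compatible base model / ControlNet should work similarly.
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16)

pipe = DiffusionPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE",
    motion_adapter=adapter,
    controlnet=controlnet,
    custom_pipeline="pipeline_animatediff_controlnet",
    torch_dtype=torch.float16,
).to("cuda")

# One conditioning image per generated frame (hypothetical pose maps).
conditioning_frames = [load_image(f"pose_{i}.png") for i in range(16)]

output = pipe(
    prompt="a person dancing in the rain",
    negative_prompt="low quality, blurry",
    num_frames=16,
    conditioning_frames=conditioning_frames,
    controlnet_conditioning_scale=1.0,
)
export_to_gif(output.frames[0], "animation.gif")
```

As in the pipeline code above, `conditioning_frames` is expected to provide one control image per generated frame.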
v0.36.0/pipeline_animatediff_img2video.py
ADDED
|
@@ -0,0 +1,984 @@
| 1 |
+
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
#
|
| 15 |
+
# Note:
|
| 16 |
+
# This pipeline relies on a "hack" discovered by the community that allows
|
| 17 |
+
# the generation of videos given an input image with AnimateDiff. It works
|
| 18 |
+
# by creating a copy of the image `num_frames` times and progressively adding
|
| 19 |
+
# more noise to the image based on the strength and latent interpolation method.
|
| 20 |
+
|
| 21 |
+
import inspect
|
| 22 |
+
from types import FunctionType
|
| 23 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 24 |
+
|
| 25 |
+
import numpy as np
|
| 26 |
+
import torch
|
| 27 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
|
| 28 |
+
|
| 29 |
+
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
|
| 30 |
+
from diffusers.loaders import IPAdapterMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
|
| 31 |
+
from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel
|
| 32 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 33 |
+
from diffusers.models.unet_motion_model import MotionAdapter
|
| 34 |
+
from diffusers.pipelines.animatediff.pipeline_output import AnimateDiffPipelineOutput
|
| 35 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 36 |
+
from diffusers.schedulers import (
|
| 37 |
+
DDIMScheduler,
|
| 38 |
+
DPMSolverMultistepScheduler,
|
| 39 |
+
EulerAncestralDiscreteScheduler,
|
| 40 |
+
EulerDiscreteScheduler,
|
| 41 |
+
LMSDiscreteScheduler,
|
| 42 |
+
PNDMScheduler,
|
| 43 |
+
)
|
| 44 |
+
from diffusers.utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
|
| 45 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 49 |
+
|
| 50 |
+
EXAMPLE_DOC_STRING = """
|
| 51 |
+
Examples:
|
| 52 |
+
```py
|
| 53 |
+
>>> import torch
|
| 54 |
+
>>> from diffusers import MotionAdapter, DiffusionPipeline, DDIMScheduler
|
| 55 |
+
>>> from diffusers.utils import export_to_gif, load_image
|
| 56 |
+
|
| 57 |
+
>>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
|
| 58 |
+
>>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
|
| 59 |
+
>>> pipe = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=adapter, custom_pipeline="pipeline_animatediff_img2video").to("cuda")
|
| 60 |
+
>>> pipe.scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1)
|
| 61 |
+
|
| 62 |
+
>>> image = load_image("snail.png")
|
| 63 |
+
>>> output = pipe(image=image, prompt="A snail moving on the ground", strength=0.8, latent_interpolation_method="slerp")
|
| 64 |
+
>>> frames = output.frames[0]
|
| 65 |
+
>>> export_to_gif(frames, "animation.gif")
|
| 66 |
+
```
|
| 67 |
+
"""
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def lerp(
|
| 71 |
+
v0: torch.Tensor,
|
| 72 |
+
v1: torch.Tensor,
|
| 73 |
+
t: Union[float, torch.Tensor],
|
| 74 |
+
) -> torch.Tensor:
|
| 75 |
+
r"""
|
| 76 |
+
Linear Interpolation between two tensors.
|
| 77 |
+
|
| 78 |
+
Args:
|
| 79 |
+
v0 (`torch.Tensor`): First tensor.
|
| 80 |
+
v1 (`torch.Tensor`): Second tensor.
|
| 81 |
+
t (`float` or `torch.Tensor`): Interpolation factor.
|
| 82 |
+
"""
|
| 83 |
+
t_is_float = False
|
| 84 |
+
input_device = v0.device
|
| 85 |
+
v0 = v0.cpu().numpy()
|
| 86 |
+
v1 = v1.cpu().numpy()
|
| 87 |
+
|
| 88 |
+
if isinstance(t, torch.Tensor):
|
| 89 |
+
t = t.cpu().numpy()
|
| 90 |
+
else:
|
| 91 |
+
t_is_float = True
|
| 92 |
+
t = np.array([t], dtype=v0.dtype)
|
| 93 |
+
|
| 94 |
+
t = t[..., None]
|
| 95 |
+
v0 = v0[None, ...]
|
| 96 |
+
v1 = v1[None, ...]
|
| 97 |
+
v2 = (1 - t) * v0 + t * v1
|
| 98 |
+
|
| 99 |
+
if t_is_float and v0.ndim > 1:
|
| 100 |
+
assert v2.shape[0] == 1
|
| 101 |
+
v2 = np.squeeze(v2, axis=0)
|
| 102 |
+
|
| 103 |
+
v2 = torch.from_numpy(v2).to(input_device)
|
| 104 |
+
return v2
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def slerp(
|
| 108 |
+
v0: torch.Tensor,
|
| 109 |
+
v1: torch.Tensor,
|
| 110 |
+
t: Union[float, torch.Tensor],
|
| 111 |
+
DOT_THRESHOLD: float = 0.9995,
|
| 112 |
+
) -> torch.Tensor:
|
| 113 |
+
r"""
|
| 114 |
+
Spherical Linear Interpolation between two tensors.
|
| 115 |
+
|
| 116 |
+
Args:
|
| 117 |
+
v0 (`torch.Tensor`): First tensor.
|
| 118 |
+
v1 (`torch.Tensor`): Second tensor.
|
| 119 |
+
t (`float` or `torch.Tensor`): Interpolation factor.
|
| 120 |
+
DOT_THRESHOLD (`float`):
|
| 121 |
+
Dot product threshold above which linear interpolation is used instead,
|
| 122 |
+
because the input tensors are nearly parallel.
|
| 123 |
+
"""
|
| 124 |
+
t_is_float = False
|
| 125 |
+
input_device = v0.device
|
| 126 |
+
v0 = v0.cpu().numpy()
|
| 127 |
+
v1 = v1.cpu().numpy()
|
| 128 |
+
|
| 129 |
+
if isinstance(t, torch.Tensor):
|
| 130 |
+
t = t.cpu().numpy()
|
| 131 |
+
else:
|
| 132 |
+
t_is_float = True
|
| 133 |
+
t = np.array([t], dtype=v0.dtype)
|
| 134 |
+
|
| 135 |
+
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
| 136 |
+
|
| 137 |
+
if np.abs(dot) > DOT_THRESHOLD:
|
| 138 |
+
# v0 and v1 are close to parallel, so use linear interpolation instead
|
| 139 |
+
v2 = lerp(v0, v1, t)
|
| 140 |
+
else:
|
| 141 |
+
theta_0 = np.arccos(dot)
|
| 142 |
+
sin_theta_0 = np.sin(theta_0)
|
| 143 |
+
theta_t = theta_0 * t
|
| 144 |
+
sin_theta_t = np.sin(theta_t)
|
| 145 |
+
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
| 146 |
+
s1 = sin_theta_t / sin_theta_0
|
| 147 |
+
s0 = s0[..., None]
|
| 148 |
+
s1 = s1[..., None]
|
| 149 |
+
v0 = v0[None, ...]
|
| 150 |
+
v1 = v1[None, ...]
|
| 151 |
+
v2 = s0 * v0 + s1 * v1
|
| 152 |
+
|
| 153 |
+
if t_is_float and v0.ndim > 1:
|
| 154 |
+
assert v2.shape[0] == 1
|
| 155 |
+
v2 = np.squeeze(v2, axis=0)
|
| 156 |
+
|
| 157 |
+
v2 = torch.from_numpy(v2).to(input_device)
|
| 158 |
+
return v2
|
| 159 |
+
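# --- Hedged illustration (added, not part of the original file) --------------
# Minimal sketch of how `lerp` and `slerp` above interpolate between two
# latent-like tensors; shapes and values are assumptions chosen for demonstration.
def _interpolation_demo() -> None:
    v0 = torch.randn(4, 8, 8)
    v1 = torch.randn(4, 8, 8)
    halfway_linear = lerp(v0, v1, 0.5)  # straight-line blend of v0 and v1
    halfway_spherical = slerp(v0, v1, 0.5)  # arc interpolation between the flattened tensors
    print(halfway_linear.shape, halfway_spherical.shape)  # both torch.Size([4, 8, 8])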
|
| 160 |
+
|
| 161 |
+
# Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid
|
| 162 |
+
def tensor2vid(video: torch.Tensor, processor, output_type="np"):
|
| 163 |
+
batch_size, channels, num_frames, height, width = video.shape
|
| 164 |
+
outputs = []
|
| 165 |
+
for batch_idx in range(batch_size):
|
| 166 |
+
batch_vid = video[batch_idx].permute(1, 0, 2, 3)
|
| 167 |
+
batch_output = processor.postprocess(batch_vid, output_type)
|
| 168 |
+
|
| 169 |
+
outputs.append(batch_output)
|
| 170 |
+
|
| 171 |
+
if output_type == "np":
|
| 172 |
+
outputs = np.stack(outputs)
|
| 173 |
+
|
| 174 |
+
elif output_type == "pt":
|
| 175 |
+
outputs = torch.stack(outputs)
|
| 176 |
+
|
| 177 |
+
elif not output_type == "pil":
|
| 178 |
+
raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")
|
| 179 |
+
|
| 180 |
+
return outputs
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
|
| 184 |
+
def retrieve_latents(
|
| 185 |
+
encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
|
| 186 |
+
):
|
| 187 |
+
if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
|
| 188 |
+
return encoder_output.latent_dist.sample(generator)
|
| 189 |
+
elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
|
| 190 |
+
return encoder_output.latent_dist.mode()
|
| 191 |
+
elif hasattr(encoder_output, "latents"):
|
| 192 |
+
return encoder_output.latents
|
| 193 |
+
else:
|
| 194 |
+
raise AttributeError("Could not access latents of provided encoder_output")
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
|
| 198 |
+
def retrieve_timesteps(
|
| 199 |
+
scheduler,
|
| 200 |
+
num_inference_steps: Optional[int] = None,
|
| 201 |
+
device: Optional[Union[str, torch.device]] = None,
|
| 202 |
+
timesteps: Optional[List[int]] = None,
|
| 203 |
+
**kwargs,
|
| 204 |
+
):
|
| 205 |
+
"""
|
| 206 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
| 207 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
| 208 |
+
|
| 209 |
+
Args:
|
| 210 |
+
scheduler (`SchedulerMixin`):
|
| 211 |
+
The scheduler to get timesteps from.
|
| 212 |
+
num_inference_steps (`int`):
|
| 213 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used,
|
| 214 |
+
`timesteps` must be `None`.
|
| 215 |
+
device (`str` or `torch.device`, *optional*):
|
| 216 |
+
The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
|
| 217 |
+
timesteps (`List[int]`, *optional*):
|
| 218 |
+
Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
|
| 219 |
+
timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
|
| 220 |
+
must be `None`.
|
| 221 |
+
|
| 222 |
+
Returns:
|
| 223 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
| 224 |
+
second element is the number of inference steps.
|
| 225 |
+
"""
|
| 226 |
+
if timesteps is not None:
|
| 227 |
+
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 228 |
+
if not accepts_timesteps:
|
| 229 |
+
raise ValueError(
|
| 230 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 231 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
| 232 |
+
)
|
| 233 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
| 234 |
+
timesteps = scheduler.timesteps
|
| 235 |
+
num_inference_steps = len(timesteps)
|
| 236 |
+
else:
|
| 237 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
| 238 |
+
timesteps = scheduler.timesteps
|
| 239 |
+
return timesteps, num_inference_steps
|
| 240 |
+
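# Hedged usage note (added comment, not in the original file): `retrieve_timesteps`
# supports two mutually exclusive call styles, for example:
#     timesteps, n = retrieve_timesteps(scheduler, num_inference_steps=25, device="cuda")
#     timesteps, n = retrieve_timesteps(scheduler, timesteps=[999, 749, 499, 249], device="cuda")
# The second form only works when the scheduler's `set_timesteps` accepts a
# `timesteps` argument; otherwise the ValueError above is raised.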
|
| 241 |
+
|
| 242 |
+
class AnimateDiffImgToVideoPipeline(
|
| 243 |
+
DiffusionPipeline,
|
| 244 |
+
StableDiffusionMixin,
|
| 245 |
+
TextualInversionLoaderMixin,
|
| 246 |
+
IPAdapterMixin,
|
| 247 |
+
StableDiffusionLoraLoaderMixin,
|
| 248 |
+
):
|
| 249 |
+
r"""
|
| 250 |
+
Pipeline for image-to-video generation.
|
| 251 |
+
|
| 252 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 253 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 254 |
+
|
| 255 |
+
The pipeline also inherits the following loading methods:
|
| 256 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 257 |
+
- [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 258 |
+
- [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 259 |
+
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
|
| 260 |
+
|
| 261 |
+
Args:
|
| 262 |
+
vae ([`AutoencoderKL`]):
|
| 263 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 264 |
+
text_encoder ([`CLIPTextModel`]):
|
| 265 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 266 |
+
tokenizer (`CLIPTokenizer`):
|
| 267 |
+
A [`~transformers.CLIPTokenizer`] to tokenize text.
|
| 268 |
+
unet ([`UNet2DConditionModel`]):
|
| 269 |
+
A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
|
| 270 |
+
motion_adapter ([`MotionAdapter`]):
|
| 271 |
+
A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
|
| 272 |
+
scheduler ([`SchedulerMixin`]):
|
| 273 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 274 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 275 |
+
"""
|
| 276 |
+
|
| 277 |
+
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
|
| 278 |
+
_optional_components = ["feature_extractor", "image_encoder"]
|
| 279 |
+
|
| 280 |
+
def __init__(
|
| 281 |
+
self,
|
| 282 |
+
vae: AutoencoderKL,
|
| 283 |
+
text_encoder: CLIPTextModel,
|
| 284 |
+
tokenizer: CLIPTokenizer,
|
| 285 |
+
unet: UNet2DConditionModel,
|
| 286 |
+
motion_adapter: MotionAdapter,
|
| 287 |
+
scheduler: Union[
|
| 288 |
+
DDIMScheduler,
|
| 289 |
+
PNDMScheduler,
|
| 290 |
+
LMSDiscreteScheduler,
|
| 291 |
+
EulerDiscreteScheduler,
|
| 292 |
+
EulerAncestralDiscreteScheduler,
|
| 293 |
+
DPMSolverMultistepScheduler,
|
| 294 |
+
],
|
| 295 |
+
feature_extractor: CLIPImageProcessor = None,
|
| 296 |
+
image_encoder: CLIPVisionModelWithProjection = None,
|
| 297 |
+
):
|
| 298 |
+
super().__init__()
|
| 299 |
+
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
|
| 300 |
+
|
| 301 |
+
self.register_modules(
|
| 302 |
+
vae=vae,
|
| 303 |
+
text_encoder=text_encoder,
|
| 304 |
+
tokenizer=tokenizer,
|
| 305 |
+
unet=unet,
|
| 306 |
+
motion_adapter=motion_adapter,
|
| 307 |
+
scheduler=scheduler,
|
| 308 |
+
feature_extractor=feature_extractor,
|
| 309 |
+
image_encoder=image_encoder,
|
| 310 |
+
)
|
| 311 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 312 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 313 |
+
|
| 314 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
|
| 315 |
+
def encode_prompt(
|
| 316 |
+
self,
|
| 317 |
+
prompt,
|
| 318 |
+
device,
|
| 319 |
+
num_images_per_prompt,
|
| 320 |
+
do_classifier_free_guidance,
|
| 321 |
+
negative_prompt=None,
|
| 322 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 323 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 324 |
+
lora_scale: Optional[float] = None,
|
| 325 |
+
clip_skip: Optional[int] = None,
|
| 326 |
+
):
|
| 327 |
+
r"""
|
| 328 |
+
Encodes the prompt into text encoder hidden states.
|
| 329 |
+
|
| 330 |
+
Args:
|
| 331 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 332 |
+
prompt to be encoded
|
| 333 |
+
device: (`torch.device`):
|
| 334 |
+
torch device
|
| 335 |
+
num_images_per_prompt (`int`):
|
| 336 |
+
number of images that should be generated per prompt
|
| 337 |
+
do_classifier_free_guidance (`bool`):
|
| 338 |
+
whether to use classifier free guidance or not
|
| 339 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 340 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 341 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 342 |
+
less than `1`).
|
| 343 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 344 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 345 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 346 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 347 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 348 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 349 |
+
argument.
|
| 350 |
+
lora_scale (`float`, *optional*):
|
| 351 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 352 |
+
clip_skip (`int`, *optional*):
|
| 353 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 354 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 355 |
+
"""
|
| 356 |
+
# set lora scale so that monkey patched LoRA
|
| 357 |
+
# function of text encoder can correctly access it
|
| 358 |
+
if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
|
| 359 |
+
self._lora_scale = lora_scale
|
| 360 |
+
|
| 361 |
+
# dynamically adjust the LoRA scale
|
| 362 |
+
if not USE_PEFT_BACKEND:
|
| 363 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 364 |
+
else:
|
| 365 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 366 |
+
|
| 367 |
+
if prompt is not None and isinstance(prompt, str):
|
| 368 |
+
batch_size = 1
|
| 369 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 370 |
+
batch_size = len(prompt)
|
| 371 |
+
else:
|
| 372 |
+
batch_size = prompt_embeds.shape[0]
|
| 373 |
+
|
| 374 |
+
if prompt_embeds is None:
|
| 375 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 376 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 377 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 378 |
+
|
| 379 |
+
text_inputs = self.tokenizer(
|
| 380 |
+
prompt,
|
| 381 |
+
padding="max_length",
|
| 382 |
+
max_length=self.tokenizer.model_max_length,
|
| 383 |
+
truncation=True,
|
| 384 |
+
return_tensors="pt",
|
| 385 |
+
)
|
| 386 |
+
text_input_ids = text_inputs.input_ids
|
| 387 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 388 |
+
|
| 389 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 390 |
+
text_input_ids, untruncated_ids
|
| 391 |
+
):
|
| 392 |
+
removed_text = self.tokenizer.batch_decode(
|
| 393 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 394 |
+
)
|
| 395 |
+
logger.warning(
|
| 396 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 397 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 398 |
+
)
|
| 399 |
+
|
| 400 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 401 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 402 |
+
else:
|
| 403 |
+
attention_mask = None
|
| 404 |
+
|
| 405 |
+
if clip_skip is None:
|
| 406 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 407 |
+
prompt_embeds = prompt_embeds[0]
|
| 408 |
+
else:
|
| 409 |
+
prompt_embeds = self.text_encoder(
|
| 410 |
+
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
|
| 411 |
+
)
|
| 412 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 413 |
+
# all the hidden states from the encoder layers. Then index into
|
| 414 |
+
# the tuple to access the hidden states from the desired layer.
|
| 415 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 416 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 417 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 418 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 419 |
+
# layer.
|
| 420 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 421 |
+
|
| 422 |
+
if self.text_encoder is not None:
|
| 423 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 424 |
+
elif self.unet is not None:
|
| 425 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 426 |
+
else:
|
| 427 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 428 |
+
|
| 429 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 430 |
+
|
| 431 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 432 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 433 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 434 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 435 |
+
|
| 436 |
+
# get unconditional embeddings for classifier free guidance
|
| 437 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 438 |
+
uncond_tokens: List[str]
|
| 439 |
+
if negative_prompt is None:
|
| 440 |
+
uncond_tokens = [""] * batch_size
|
| 441 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 442 |
+
raise TypeError(
|
| 443 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 444 |
+
f" {type(prompt)}."
|
| 445 |
+
)
|
| 446 |
+
elif isinstance(negative_prompt, str):
|
| 447 |
+
uncond_tokens = [negative_prompt]
|
| 448 |
+
elif batch_size != len(negative_prompt):
|
| 449 |
+
raise ValueError(
|
| 450 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 451 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 452 |
+
" the batch size of `prompt`."
|
| 453 |
+
)
|
| 454 |
+
else:
|
| 455 |
+
uncond_tokens = negative_prompt
|
| 456 |
+
|
| 457 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 458 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 459 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 460 |
+
|
| 461 |
+
max_length = prompt_embeds.shape[1]
|
| 462 |
+
uncond_input = self.tokenizer(
|
| 463 |
+
uncond_tokens,
|
| 464 |
+
padding="max_length",
|
| 465 |
+
max_length=max_length,
|
| 466 |
+
truncation=True,
|
| 467 |
+
return_tensors="pt",
|
| 468 |
+
)
|
| 469 |
+
|
| 470 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 471 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 472 |
+
else:
|
| 473 |
+
attention_mask = None
|
| 474 |
+
|
| 475 |
+
negative_prompt_embeds = self.text_encoder(
|
| 476 |
+
uncond_input.input_ids.to(device),
|
| 477 |
+
attention_mask=attention_mask,
|
| 478 |
+
)
|
| 479 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 480 |
+
|
| 481 |
+
if do_classifier_free_guidance:
|
| 482 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 483 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 484 |
+
|
| 485 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 486 |
+
|
| 487 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 488 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 489 |
+
|
| 490 |
+
if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 491 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 492 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 493 |
+
|
| 494 |
+
return prompt_embeds, negative_prompt_embeds
|
| 495 |
+
|
| 496 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
|
| 497 |
+
def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
|
| 498 |
+
dtype = next(self.image_encoder.parameters()).dtype
|
| 499 |
+
|
| 500 |
+
if not isinstance(image, torch.Tensor):
|
| 501 |
+
image = self.feature_extractor(image, return_tensors="pt").pixel_values
|
| 502 |
+
|
| 503 |
+
image = image.to(device=device, dtype=dtype)
|
| 504 |
+
if output_hidden_states:
|
| 505 |
+
image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
|
| 506 |
+
image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
|
| 507 |
+
uncond_image_enc_hidden_states = self.image_encoder(
|
| 508 |
+
torch.zeros_like(image), output_hidden_states=True
|
| 509 |
+
).hidden_states[-2]
|
| 510 |
+
uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
|
| 511 |
+
num_images_per_prompt, dim=0
|
| 512 |
+
)
|
| 513 |
+
return image_enc_hidden_states, uncond_image_enc_hidden_states
|
| 514 |
+
else:
|
| 515 |
+
image_embeds = self.image_encoder(image).image_embeds
|
| 516 |
+
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
|
| 517 |
+
uncond_image_embeds = torch.zeros_like(image_embeds)
|
| 518 |
+
|
| 519 |
+
return image_embeds, uncond_image_embeds
|
| 520 |
+
|
| 521 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
|
| 522 |
+
def prepare_ip_adapter_image_embeds(
|
| 523 |
+
self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt
|
| 524 |
+
):
|
| 525 |
+
if ip_adapter_image_embeds is None:
|
| 526 |
+
if not isinstance(ip_adapter_image, list):
|
| 527 |
+
ip_adapter_image = [ip_adapter_image]
|
| 528 |
+
|
| 529 |
+
if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
|
| 530 |
+
raise ValueError(
|
| 531 |
+
f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
|
| 532 |
+
)
|
| 533 |
+
|
| 534 |
+
image_embeds = []
|
| 535 |
+
for single_ip_adapter_image, image_proj_layer in zip(
|
| 536 |
+
ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
|
| 537 |
+
):
|
| 538 |
+
output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
|
| 539 |
+
single_image_embeds, single_negative_image_embeds = self.encode_image(
|
| 540 |
+
single_ip_adapter_image, device, 1, output_hidden_state
|
| 541 |
+
)
|
| 542 |
+
single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
|
| 543 |
+
single_negative_image_embeds = torch.stack(
|
| 544 |
+
[single_negative_image_embeds] * num_images_per_prompt, dim=0
|
| 545 |
+
)
|
| 546 |
+
|
| 547 |
+
if self.do_classifier_free_guidance:
|
| 548 |
+
single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
|
| 549 |
+
single_image_embeds = single_image_embeds.to(device)
|
| 550 |
+
|
| 551 |
+
image_embeds.append(single_image_embeds)
|
| 552 |
+
else:
|
| 553 |
+
image_embeds = ip_adapter_image_embeds
|
| 554 |
+
return image_embeds
|
| 555 |
+
|
| 556 |
+
# Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
|
| 557 |
+
def decode_latents(self, latents):
|
| 558 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 559 |
+
|
| 560 |
+
batch_size, channels, num_frames, height, width = latents.shape
|
| 561 |
+
latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
|
| 562 |
+
|
| 563 |
+
image = self.vae.decode(latents).sample
|
| 564 |
+
video = (
|
| 565 |
+
image[None, :]
|
| 566 |
+
.reshape(
|
| 567 |
+
(
|
| 568 |
+
batch_size,
|
| 569 |
+
num_frames,
|
| 570 |
+
-1,
|
| 571 |
+
)
|
| 572 |
+
+ image.shape[2:]
|
| 573 |
+
)
|
| 574 |
+
.permute(0, 2, 1, 3, 4)
|
| 575 |
+
)
|
| 576 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 577 |
+
video = video.float()
|
| 578 |
+
return video
|
| 579 |
+
|
| 580 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 581 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 582 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 583 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 584 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 585 |
+
# and should be between [0, 1]
|
| 586 |
+
|
| 587 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 588 |
+
extra_step_kwargs = {}
|
| 589 |
+
if accepts_eta:
|
| 590 |
+
extra_step_kwargs["eta"] = eta
|
| 591 |
+
|
| 592 |
+
# check if the scheduler accepts generator
|
| 593 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 594 |
+
if accepts_generator:
|
| 595 |
+
extra_step_kwargs["generator"] = generator
|
| 596 |
+
return extra_step_kwargs
|
| 597 |
+
|
| 598 |
+
def check_inputs(
|
| 599 |
+
self,
|
| 600 |
+
prompt,
|
| 601 |
+
height,
|
| 602 |
+
width,
|
| 603 |
+
callback_steps,
|
| 604 |
+
negative_prompt=None,
|
| 605 |
+
prompt_embeds=None,
|
| 606 |
+
negative_prompt_embeds=None,
|
| 607 |
+
callback_on_step_end_tensor_inputs=None,
|
| 608 |
+
latent_interpolation_method=None,
|
| 609 |
+
):
|
| 610 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 611 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 612 |
+
|
| 613 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 614 |
+
raise ValueError(
|
| 615 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 616 |
+
f" {type(callback_steps)}."
|
| 617 |
+
)
|
| 618 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 619 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 620 |
+
):
|
| 621 |
+
raise ValueError(
|
| 622 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 623 |
+
)
|
| 624 |
+
|
| 625 |
+
if prompt is not None and prompt_embeds is not None:
|
| 626 |
+
raise ValueError(
|
| 627 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 628 |
+
" only forward one of the two."
|
| 629 |
+
)
|
| 630 |
+
elif prompt is None and prompt_embeds is None:
|
| 631 |
+
raise ValueError(
|
| 632 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 633 |
+
)
|
| 634 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 635 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 636 |
+
|
| 637 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 638 |
+
raise ValueError(
|
| 639 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 640 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 641 |
+
)
|
| 642 |
+
|
| 643 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 644 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 645 |
+
raise ValueError(
|
| 646 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 647 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 648 |
+
f" {negative_prompt_embeds.shape}."
|
| 649 |
+
)
|
| 650 |
+
|
| 651 |
+
if latent_interpolation_method is not None:
|
| 652 |
+
if latent_interpolation_method not in ["lerp", "slerp"] and not isinstance(
|
| 653 |
+
latent_interpolation_method, FunctionType
|
| 654 |
+
):
|
| 655 |
+
raise ValueError(
|
| 656 |
+
"`latent_interpolation_method` must be one of `lerp`, `slerp` or a Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]"
|
| 657 |
+
)
|
| 658 |
+
|
| 659 |
+
def prepare_latents(
|
| 660 |
+
self,
|
| 661 |
+
image,
|
| 662 |
+
strength,
|
| 663 |
+
batch_size,
|
| 664 |
+
num_channels_latents,
|
| 665 |
+
num_frames,
|
| 666 |
+
height,
|
| 667 |
+
width,
|
| 668 |
+
dtype,
|
| 669 |
+
device,
|
| 670 |
+
generator,
|
| 671 |
+
latents=None,
|
| 672 |
+
latent_interpolation_method="slerp",
|
| 673 |
+
):
|
| 674 |
+
shape = (
|
| 675 |
+
batch_size,
|
| 676 |
+
num_channels_latents,
|
| 677 |
+
num_frames,
|
| 678 |
+
height // self.vae_scale_factor,
|
| 679 |
+
width // self.vae_scale_factor,
|
| 680 |
+
)
|
| 681 |
+
|
| 682 |
+
if latents is None:
|
| 683 |
+
image = image.to(device=device, dtype=dtype)
|
| 684 |
+
|
| 685 |
+
if image.shape[1] == 4:
|
| 686 |
+
latents = image
|
| 687 |
+
else:
|
| 688 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 689 |
+
if self.vae.config.force_upcast:
|
| 690 |
+
image = image.float()
|
| 691 |
+
self.vae.to(dtype=torch.float32)
|
| 692 |
+
|
| 693 |
+
if isinstance(generator, list):
|
| 694 |
+
if len(generator) != batch_size:
|
| 695 |
+
raise ValueError(
|
| 696 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 697 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 698 |
+
)
|
| 699 |
+
|
| 700 |
+
init_latents = [
|
| 701 |
+
retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
|
| 702 |
+
for i in range(batch_size)
|
| 703 |
+
]
|
| 704 |
+
init_latents = torch.cat(init_latents, dim=0)
|
| 705 |
+
else:
|
| 706 |
+
init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
|
| 707 |
+
|
| 708 |
+
if self.vae.config.force_upcast:
|
| 709 |
+
self.vae.to(dtype)
|
| 710 |
+
|
| 711 |
+
init_latents = init_latents.to(dtype)
|
| 712 |
+
init_latents = self.vae.config.scaling_factor * init_latents
|
| 713 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 714 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 715 |
+
|
| 716 |
+
if latent_interpolation_method == "lerp":
|
| 717 |
+
|
| 718 |
+
def latent_cls(v0, v1, index):
|
| 719 |
+
return lerp(v0, v1, index / num_frames * (1 - strength))
|
| 720 |
+
elif latent_interpolation_method == "slerp":
|
| 721 |
+
|
| 722 |
+
def latent_cls(v0, v1, index):
|
| 723 |
+
return slerp(v0, v1, index / num_frames * (1 - strength))
|
| 724 |
+
else:
|
| 725 |
+
latent_cls = latent_interpolation_method
|
| 726 |
+
|
| 727 |
+
for i in range(num_frames):
|
| 728 |
+
latents[:, :, i, :, :] = latent_cls(latents[:, :, i, :, :], init_latents, i)
|
| 729 |
+
else:
|
| 730 |
+
if shape != latents.shape:
|
| 731 |
+
# [B, C, F, H, W]
|
| 732 |
+
raise ValueError(f"`latents` expected to have {shape=}, but found {latents.shape=}")
|
| 733 |
+
latents = latents.to(device, dtype=dtype)
|
| 734 |
+
|
| 735 |
+
return latents
|
| 736 |
+
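# Added comment (not in the original file): in the `latents is None` branch above,
# frame i of the initial noise is blended toward the encoded image latents with
# factor t_i = i / num_frames * (1 - strength), so frame 0 starts as pure noise and
# later frames are pulled slightly toward the input image; a higher `strength`
# shrinks every t_i and lets the video deviate more from the image.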
|
| 737 |
+
@torch.no_grad()
|
| 738 |
+
def __call__(
|
| 739 |
+
self,
|
| 740 |
+
image: PipelineImageInput,
|
| 741 |
+
prompt: Optional[Union[str, List[str]]] = None,
|
| 742 |
+
height: Optional[int] = None,
|
| 743 |
+
width: Optional[int] = None,
|
| 744 |
+
num_frames: int = 16,
|
| 745 |
+
num_inference_steps: int = 50,
|
| 746 |
+
timesteps: Optional[List[int]] = None,
|
| 747 |
+
guidance_scale: float = 7.5,
|
| 748 |
+
strength: float = 0.8,
|
| 749 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 750 |
+
num_videos_per_prompt: Optional[int] = 1,
|
| 751 |
+
eta: float = 0.0,
|
| 752 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 753 |
+
latents: Optional[torch.Tensor] = None,
|
| 754 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 755 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 756 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 757 |
+
ip_adapter_image_embeds: Optional[PipelineImageInput] = None,
|
| 758 |
+
output_type: Optional[str] = "pil",
|
| 759 |
+
return_dict: bool = True,
|
| 760 |
+
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
|
| 761 |
+
callback_steps: Optional[int] = 1,
|
| 762 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 763 |
+
clip_skip: Optional[int] = None,
|
| 764 |
+
latent_interpolation_method: Union[str, Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]] = "slerp",
|
| 765 |
+
):
|
| 766 |
+
r"""
|
| 767 |
+
The call function to the pipeline for generation.
|
| 768 |
+
|
| 769 |
+
Args:
|
| 770 |
+
image (`PipelineImageInput`):
|
| 771 |
+
The input image to condition the generation on.
|
| 772 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 773 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 774 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 775 |
+
The height in pixels of the generated video.
|
| 776 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 777 |
+
The width in pixels of the generated video.
|
| 778 |
+
num_frames (`int`, *optional*, defaults to 16):
|
| 779 |
+
The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second
|
| 780 |
+
amounts to 2 seconds of video.
|
| 781 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 782 |
+
The number of denoising steps. More denoising steps usually lead to higher quality videos at the
|
| 783 |
+
expense of slower inference.
|
| 784 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 785 |
+
Higher strength leads to more differences between the original image and the generated video.
|
| 786 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 787 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 788 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 789 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 790 |
+
The prompt or prompts to guide what not to include in image generation. If not defined, you need to
|
| 791 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 792 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 793 |
+
Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies
|
| 794 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 795 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 796 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 797 |
+
generation deterministic.
|
| 798 |
+
latents (`torch.Tensor`, *optional*):
|
| 799 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
|
| 800 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 801 |
+
tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
|
| 802 |
+
`(batch_size, num_channel, num_frames, height, width)`.
|
| 803 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 804 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 805 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 806 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 807 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 808 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 809 |
+
ip_adapter_image: (`PipelineImageInput`, *optional*):
|
| 810 |
+
Optional image input to work with IP Adapters.
|
| 811 |
+
ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
|
| 812 |
+
Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the number of IP-Adapters.
|
| 813 |
+
Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
|
| 814 |
+
if `do_classifier_free_guidance` is set to `True`.
|
| 815 |
+
If not provided, embeddings are computed from the `ip_adapter_image` input argument.
|
| 816 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 817 |
+
The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or
|
| 818 |
+
`np.array`.
|
| 819 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 820 |
+
Whether or not to return a [`AnimateDiffImgToVideoPipelineOutput`] instead
|
| 821 |
+
of a plain tuple.
|
| 822 |
+
callback (`Callable`, *optional*):
|
| 823 |
+
A function that is called every `callback_steps` steps during inference. The function is called with the
|
| 824 |
+
following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
|
| 825 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 826 |
+
The frequency at which the `callback` function is called. If not specified, the callback is called at
|
| 827 |
+
every step.
|
| 828 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 829 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 830 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 831 |
+
clip_skip (`int`, *optional*):
|
| 832 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 833 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 834 |
+
latent_interpolation_method (`str` or `Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]]`, *optional*):
|
| 835 |
+
Must be one of "lerp", "slerp" or a callable that takes in a random noisy latent, an image latent and a frame index
|
| 836 |
+
as input and returns an initial latent for sampling.
|
| 837 |
+
Examples:
|
| 838 |
+
|
| 839 |
+
Returns:
|
| 840 |
+
[`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
|
| 841 |
+
If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
|
| 842 |
+
returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
|
| 843 |
+
"""
|
| 844 |
+
# 0. Default height and width to unet
|
| 845 |
+
height = height or self.unet.config.sample_size * self.vae_scale_factor
|
| 846 |
+
width = width or self.unet.config.sample_size * self.vae_scale_factor
|
| 847 |
+
|
| 848 |
+
num_videos_per_prompt = 1
|
| 849 |
+
|
| 850 |
+
# 1. Check inputs. Raise error if not correct
|
| 851 |
+
self.check_inputs(
|
| 852 |
+
prompt=prompt,
|
| 853 |
+
height=height,
|
| 854 |
+
width=width,
|
| 855 |
+
callback_steps=callback_steps,
|
| 856 |
+
negative_prompt=negative_prompt,
|
| 857 |
+
prompt_embeds=prompt_embeds,
|
| 858 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 859 |
+
latent_interpolation_method=latent_interpolation_method,
|
| 860 |
+
)
|
| 861 |
+
|
| 862 |
+
# 2. Define call parameters
|
| 863 |
+
if prompt is not None and isinstance(prompt, str):
|
| 864 |
+
batch_size = 1
|
| 865 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 866 |
+
batch_size = len(prompt)
|
| 867 |
+
else:
|
| 868 |
+
batch_size = prompt_embeds.shape[0]
|
| 869 |
+
|
| 870 |
+
device = self._execution_device
|
| 871 |
+
|
| 872 |
+
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
|
| 873 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 874 |
+
# corresponds to doing no classifier free guidance.
|
| 875 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 876 |
+
|
| 877 |
+
# 3. Encode input prompt
|
| 878 |
+
text_encoder_lora_scale = (
|
| 879 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 880 |
+
)
|
| 881 |
+
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
|
| 882 |
+
prompt,
|
| 883 |
+
device,
|
| 884 |
+
num_videos_per_prompt,
|
| 885 |
+
do_classifier_free_guidance,
|
| 886 |
+
negative_prompt,
|
| 887 |
+
prompt_embeds=prompt_embeds,
|
| 888 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 889 |
+
lora_scale=text_encoder_lora_scale,
|
| 890 |
+
clip_skip=clip_skip,
|
| 891 |
+
)
|
| 892 |
+
|
| 893 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 894 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 895 |
+
# to avoid doing two forward passes
|
| 896 |
+
if do_classifier_free_guidance:
|
| 897 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
|
| 898 |
+
|
| 899 |
+
if ip_adapter_image is not None:
|
| 900 |
+
image_embeds = self.prepare_ip_adapter_image_embeds(
|
| 901 |
+
ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt
|
| 902 |
+
)
|
| 903 |
+
|
| 904 |
+
# 4. Preprocess image
|
| 905 |
+
image = self.image_processor.preprocess(image, height=height, width=width)
|
| 906 |
+
|
| 907 |
+
# 5. Prepare timesteps
|
| 908 |
+
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
|
| 909 |
+
|
| 910 |
+
# 6. Prepare latent variables
|
| 911 |
+
num_channels_latents = self.unet.config.in_channels
|
| 912 |
+
latents = self.prepare_latents(
|
| 913 |
+
image=image,
|
| 914 |
+
strength=strength,
|
| 915 |
+
batch_size=batch_size * num_videos_per_prompt,
|
| 916 |
+
num_channels_latents=num_channels_latents,
|
| 917 |
+
num_frames=num_frames,
|
| 918 |
+
height=height,
|
| 919 |
+
width=width,
|
| 920 |
+
dtype=prompt_embeds.dtype,
|
| 921 |
+
device=device,
|
| 922 |
+
generator=generator,
|
| 923 |
+
latents=latents,
|
| 924 |
+
latent_interpolation_method=latent_interpolation_method,
|
| 925 |
+
)
|
| 926 |
+
|
| 927 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 928 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 929 |
+
|
| 930 |
+
# 8. Add image embeds for IP-Adapter
|
| 931 |
+
added_cond_kwargs = (
|
| 932 |
+
{"image_embeds": image_embeds}
|
| 933 |
+
if ip_adapter_image is not None or ip_adapter_image_embeds is not None
|
| 934 |
+
else None
|
| 935 |
+
)
|
| 936 |
+
|
| 937 |
+
# 9. Denoising loop
|
| 938 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 939 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 940 |
+
for i, t in enumerate(timesteps):
|
| 941 |
+
# expand the latents if we are doing classifier free guidance
|
| 942 |
+
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
| 943 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 944 |
+
|
| 945 |
+
# predict the noise residual
|
| 946 |
+
noise_pred = self.unet(
|
| 947 |
+
latent_model_input,
|
| 948 |
+
t,
|
| 949 |
+
encoder_hidden_states=prompt_embeds,
|
| 950 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 951 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 952 |
+
).sample
|
| 953 |
+
|
| 954 |
+
# perform guidance
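# classifier-free guidance combines the two halves of the batch: noise_pred = noise_uncond + guidance_scale * (noise_text - noise_uncond)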
|
| 955 |
+
if do_classifier_free_guidance:
|
| 956 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 957 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 958 |
+
|
| 959 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 960 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
| 961 |
+
|
| 962 |
+
# call the callback, if provided
|
| 963 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 964 |
+
progress_bar.update()
|
| 965 |
+
if callback is not None and i % callback_steps == 0:
|
| 966 |
+
callback(i, t, latents)
|
| 967 |
+
|
| 968 |
+
if output_type == "latent":
|
| 969 |
+
return AnimateDiffPipelineOutput(frames=latents)
|
| 970 |
+
|
| 971 |
+
# 10. Post-processing
|
| 972 |
+
if output_type == "latent":
|
| 973 |
+
video = latents
|
| 974 |
+
else:
|
| 975 |
+
video_tensor = self.decode_latents(latents)
|
| 976 |
+
video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
|
| 977 |
+
|
| 978 |
+
# 11. Offload all models
|
| 979 |
+
self.maybe_free_model_hooks()
|
| 980 |
+
|
| 981 |
+
if not return_dict:
|
| 982 |
+
return (video,)
|
| 983 |
+
|
| 984 |
+
return AnimateDiffPipelineOutput(frames=video)
|
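The image-to-video `__call__` above conditions every generated frame on a single input image through the interpolated latents. A minimal usage sketch follows; the `custom_pipeline` name, base checkpoint and motion-adapter repository are illustrative assumptions, not confirmed by this diff:

```py
import torch
from diffusers import DiffusionPipeline, MotionAdapter
from diffusers.utils import export_to_gif, load_image

# Illustrative identifiers -- substitute the checkpoints you actually use.
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
pipe = DiffusionPipeline.from_pretrained(
    "emilianJR/epiCRealism",
    motion_adapter=adapter,
    custom_pipeline="pipeline_animatediff_img2video",  # assumed community-pipeline name
    torch_dtype=torch.float16,
).to("cuda")

image = load_image("input.png")
output = pipe(
    image=image,
    prompt="a girl smiling, cinematic lighting",
    strength=0.8,                         # higher values drift further from the input image
    latent_interpolation_method="slerp",  # or "lerp", or a custom callable
    num_frames=16,
    guidance_scale=7.5,
)
export_to_gif(output.frames[0], "animation.gif")
```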
v0.36.0/pipeline_animatediff_ipex.py
ADDED
|
@@ -0,0 +1,1002 @@
| 1 |
+
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 17 |
+
|
| 18 |
+
import intel_extension_for_pytorch as ipex
|
| 19 |
+
import torch
|
| 20 |
+
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
|
| 21 |
+
|
| 22 |
+
from diffusers.image_processor import PipelineImageInput
|
| 23 |
+
from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
|
| 24 |
+
from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel
|
| 25 |
+
from diffusers.models.lora import adjust_lora_scale_text_encoder
|
| 26 |
+
from diffusers.models.unets.unet_motion_model import MotionAdapter
|
| 27 |
+
from diffusers.pipelines.animatediff.pipeline_output import AnimateDiffPipelineOutput
|
| 28 |
+
from diffusers.pipelines.free_init_utils import FreeInitMixin
|
| 29 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 30 |
+
from diffusers.schedulers import (
|
| 31 |
+
DDIMScheduler,
|
| 32 |
+
DPMSolverMultistepScheduler,
|
| 33 |
+
EulerAncestralDiscreteScheduler,
|
| 34 |
+
EulerDiscreteScheduler,
|
| 35 |
+
LMSDiscreteScheduler,
|
| 36 |
+
PNDMScheduler,
|
| 37 |
+
)
|
| 38 |
+
from diffusers.utils import (
|
| 39 |
+
USE_PEFT_BACKEND,
|
| 40 |
+
logging,
|
| 41 |
+
replace_example_docstring,
|
| 42 |
+
scale_lora_layers,
|
| 43 |
+
unscale_lora_layers,
|
| 44 |
+
)
|
| 45 |
+
from diffusers.utils.torch_utils import randn_tensor
|
| 46 |
+
from diffusers.video_processor import VideoProcessor
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 50 |
+
|
| 51 |
+
EXAMPLE_DOC_STRING = """
|
| 52 |
+
Examples:
|
| 53 |
+
```py
|
| 54 |
+
>>> import torch
|
| 55 |
+
>>> from diffusers import MotionAdapter, AnimateDiffPipelineIpex, EulerDiscreteScheduler
|
| 56 |
+
>>> from diffusers.utils import export_to_gif
|
| 57 |
+
>>> from safetensors.torch import load_file
>>> from huggingface_hub import hf_hub_download
|
| 58 |
+
|
| 59 |
+
>>> device = "cpu"
|
| 60 |
+
>>> dtype = torch.float32
|
| 61 |
+
|
| 62 |
+
>>> # ByteDance/AnimateDiff-Lightning, a distilled version of AnimateDiff SD1.5 v2,
|
| 63 |
+
>>> # a lightning-fast text-to-video generation model which can generate videos
|
| 64 |
+
>>> # more than ten times faster than the original AnimateDiff.
|
| 65 |
+
>>> step = 8 # Options: [1,2,4,8]
|
| 66 |
+
>>> repo = "ByteDance/AnimateDiff-Lightning"
|
| 67 |
+
>>> ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
|
| 68 |
+
>>> base = "emilianJR/epiCRealism" # Choose your favorite base model.
|
| 69 |
+
|
| 70 |
+
>>> adapter = MotionAdapter().to(device, dtype)
|
| 71 |
+
>>> adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))
|
| 72 |
+
|
| 73 |
+
>>> pipe = AnimateDiffPipelineIpex.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
|
| 74 |
+
>>> pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
|
| 75 |
+
|
| 76 |
+
>>> # For Float32
|
| 77 |
+
>>> pipe.prepare_for_ipex(torch.float32, prompt = "A girl smiling")
|
| 78 |
+
>>> # For BFloat16
|
| 79 |
+
>>> pipe.prepare_for_ipex(torch.bfloat16, prompt = "A girl smiling")
|
| 80 |
+
|
| 81 |
+
>>> # For Float32
|
| 82 |
+
>>> output = pipe(prompt = "A girl smiling", guidance_scale=1.0, num_inference_steps = step)
|
| 83 |
+
>>> # For BFloat16
|
| 84 |
+
>>> with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
|
| 85 |
+
>>> output = pipe(prompt = "A girl smiling", guidance_scale=1.0, num_inference_steps = step)
|
| 86 |
+
|
| 87 |
+
>>> frames = output.frames[0]
|
| 88 |
+
>>> export_to_gif(frames, "animation.gif")
|
| 89 |
+
```
|
| 90 |
+
"""
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class AnimateDiffPipelineIpex(
|
| 94 |
+
DiffusionPipeline,
|
| 95 |
+
StableDiffusionMixin,
|
| 96 |
+
TextualInversionLoaderMixin,
|
| 97 |
+
IPAdapterMixin,
|
| 98 |
+
LoraLoaderMixin,
|
| 99 |
+
FreeInitMixin,
|
| 100 |
+
):
|
| 101 |
+
r"""
|
| 102 |
+
Pipeline for text-to-video generation.
|
| 103 |
+
|
| 104 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 105 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 106 |
+
|
| 107 |
+
The pipeline also inherits the following loading methods:
|
| 108 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
| 109 |
+
- [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 110 |
+
- [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 111 |
+
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
|
| 112 |
+
|
| 113 |
+
Args:
|
| 114 |
+
vae ([`AutoencoderKL`]):
|
| 115 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 116 |
+
text_encoder ([`CLIPTextModel`]):
|
| 117 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 118 |
+
tokenizer (`CLIPTokenizer`):
|
| 119 |
+
A [`~transformers.CLIPTokenizer`] to tokenize text.
|
| 120 |
+
unet ([`UNet2DConditionModel`]):
|
| 121 |
+
A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
|
| 122 |
+
motion_adapter ([`MotionAdapter`]):
|
| 123 |
+
A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
|
| 124 |
+
scheduler ([`SchedulerMixin`]):
|
| 125 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 126 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 127 |
+
"""
|
| 128 |
+
|
| 129 |
+
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
|
| 130 |
+
_optional_components = ["feature_extractor", "image_encoder", "motion_adapter"]
|
| 131 |
+
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
|
| 132 |
+
|
| 133 |
+
def __init__(
|
| 134 |
+
self,
|
| 135 |
+
vae: AutoencoderKL,
|
| 136 |
+
text_encoder: CLIPTextModel,
|
| 137 |
+
tokenizer: CLIPTokenizer,
|
| 138 |
+
unet: Union[UNet2DConditionModel, UNetMotionModel],
|
| 139 |
+
motion_adapter: MotionAdapter,
|
| 140 |
+
scheduler: Union[
|
| 141 |
+
DDIMScheduler,
|
| 142 |
+
PNDMScheduler,
|
| 143 |
+
LMSDiscreteScheduler,
|
| 144 |
+
EulerDiscreteScheduler,
|
| 145 |
+
EulerAncestralDiscreteScheduler,
|
| 146 |
+
DPMSolverMultistepScheduler,
|
| 147 |
+
],
|
| 148 |
+
feature_extractor: CLIPImageProcessor = None,
|
| 149 |
+
image_encoder: CLIPVisionModelWithProjection = None,
|
| 150 |
+
):
|
| 151 |
+
super().__init__()
|
| 152 |
+
if isinstance(unet, UNet2DConditionModel):
|
| 153 |
+
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
|
| 154 |
+
|
| 155 |
+
self.register_modules(
|
| 156 |
+
vae=vae,
|
| 157 |
+
text_encoder=text_encoder,
|
| 158 |
+
tokenizer=tokenizer,
|
| 159 |
+
unet=unet,
|
| 160 |
+
motion_adapter=motion_adapter,
|
| 161 |
+
scheduler=scheduler,
|
| 162 |
+
feature_extractor=feature_extractor,
|
| 163 |
+
image_encoder=image_encoder,
|
| 164 |
+
)
|
| 165 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
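# e.g. four `block_out_channels` entries give 2 ** (4 - 1) = 8, i.e. the VAE downsamples images 8x into latent space (illustrative)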
|
| 166 |
+
self.video_processor = VideoProcessor(do_resize=False, vae_scale_factor=self.vae_scale_factor)
|
| 167 |
+
|
| 168 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
|
| 169 |
+
def encode_prompt(
|
| 170 |
+
self,
|
| 171 |
+
prompt,
|
| 172 |
+
device,
|
| 173 |
+
num_images_per_prompt,
|
| 174 |
+
do_classifier_free_guidance,
|
| 175 |
+
negative_prompt=None,
|
| 176 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 177 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 178 |
+
lora_scale: Optional[float] = None,
|
| 179 |
+
clip_skip: Optional[int] = None,
|
| 180 |
+
):
|
| 181 |
+
r"""
|
| 182 |
+
Encodes the prompt into text encoder hidden states.
|
| 183 |
+
|
| 184 |
+
Args:
|
| 185 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 186 |
+
prompt to be encoded
|
| 187 |
+
device: (`torch.device`):
|
| 188 |
+
torch device
|
| 189 |
+
num_images_per_prompt (`int`):
|
| 190 |
+
number of images that should be generated per prompt
|
| 191 |
+
do_classifier_free_guidance (`bool`):
|
| 192 |
+
whether to use classifier free guidance or not
|
| 193 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 194 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 195 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 196 |
+
less than `1`).
|
| 197 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 198 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 199 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 200 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 201 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 202 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 203 |
+
argument.
|
| 204 |
+
lora_scale (`float`, *optional*):
|
| 205 |
+
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 206 |
+
clip_skip (`int`, *optional*):
|
| 207 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 208 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 209 |
+
"""
|
| 210 |
+
# set lora scale so that monkey patched LoRA
|
| 211 |
+
# function of text encoder can correctly access it
|
| 212 |
+
if lora_scale is not None and isinstance(self, LoraLoaderMixin):
|
| 213 |
+
self._lora_scale = lora_scale
|
| 214 |
+
|
| 215 |
+
# dynamically adjust the LoRA scale
|
| 216 |
+
if not USE_PEFT_BACKEND:
|
| 217 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
| 218 |
+
else:
|
| 219 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
| 220 |
+
|
| 221 |
+
if prompt is not None and isinstance(prompt, str):
|
| 222 |
+
batch_size = 1
|
| 223 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 224 |
+
batch_size = len(prompt)
|
| 225 |
+
else:
|
| 226 |
+
batch_size = prompt_embeds.shape[0]
|
| 227 |
+
|
| 228 |
+
if prompt_embeds is None:
|
| 229 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 230 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 231 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 232 |
+
|
| 233 |
+
text_inputs = self.tokenizer(
|
| 234 |
+
prompt,
|
| 235 |
+
padding="max_length",
|
| 236 |
+
max_length=self.tokenizer.model_max_length,
|
| 237 |
+
truncation=True,
|
| 238 |
+
return_tensors="pt",
|
| 239 |
+
)
|
| 240 |
+
text_input_ids = text_inputs.input_ids
|
| 241 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 242 |
+
|
| 243 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
| 244 |
+
text_input_ids, untruncated_ids
|
| 245 |
+
):
|
| 246 |
+
removed_text = self.tokenizer.batch_decode(
|
| 247 |
+
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
|
| 248 |
+
)
|
| 249 |
+
logger.warning(
|
| 250 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
| 251 |
+
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
|
| 252 |
+
)
|
| 253 |
+
|
| 254 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 255 |
+
attention_mask = text_inputs.attention_mask.to(device)
|
| 256 |
+
else:
|
| 257 |
+
attention_mask = None
|
| 258 |
+
|
| 259 |
+
if clip_skip is None:
|
| 260 |
+
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
|
| 261 |
+
prompt_embeds = prompt_embeds[0]
|
| 262 |
+
else:
|
| 263 |
+
prompt_embeds = self.text_encoder(
|
| 264 |
+
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
|
| 265 |
+
)
|
| 266 |
+
# Access the `hidden_states` first, that contains a tuple of
|
| 267 |
+
# all the hidden states from the encoder layers. Then index into
|
| 268 |
+
# the tuple to access the hidden states from the desired layer.
|
| 269 |
+
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
|
| 270 |
+
# We also need to apply the final LayerNorm here to not mess with the
|
| 271 |
+
# representations. The `last_hidden_states` that we typically use for
|
| 272 |
+
# obtaining the final prompt representations passes through the LayerNorm
|
| 273 |
+
# layer.
|
| 274 |
+
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
|
| 275 |
+
|
| 276 |
+
if self.text_encoder is not None:
|
| 277 |
+
prompt_embeds_dtype = self.text_encoder.dtype
|
| 278 |
+
elif self.unet is not None:
|
| 279 |
+
prompt_embeds_dtype = self.unet.dtype
|
| 280 |
+
else:
|
| 281 |
+
prompt_embeds_dtype = prompt_embeds.dtype
|
| 282 |
+
|
| 283 |
+
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 284 |
+
|
| 285 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 286 |
+
# duplicate text embeddings for each generation per prompt, using mps friendly method
|
| 287 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 288 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 289 |
+
|
| 290 |
+
# get unconditional embeddings for classifier free guidance
|
| 291 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 292 |
+
uncond_tokens: List[str]
|
| 293 |
+
if negative_prompt is None:
|
| 294 |
+
uncond_tokens = [""] * batch_size
|
| 295 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 296 |
+
raise TypeError(
|
| 297 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 298 |
+
f" {type(prompt)}."
|
| 299 |
+
)
|
| 300 |
+
elif isinstance(negative_prompt, str):
|
| 301 |
+
uncond_tokens = [negative_prompt]
|
| 302 |
+
elif batch_size != len(negative_prompt):
|
| 303 |
+
raise ValueError(
|
| 304 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 305 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 306 |
+
" the batch size of `prompt`."
|
| 307 |
+
)
|
| 308 |
+
else:
|
| 309 |
+
uncond_tokens = negative_prompt
|
| 310 |
+
|
| 311 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 312 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 313 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
|
| 314 |
+
|
| 315 |
+
max_length = prompt_embeds.shape[1]
|
| 316 |
+
uncond_input = self.tokenizer(
|
| 317 |
+
uncond_tokens,
|
| 318 |
+
padding="max_length",
|
| 319 |
+
max_length=max_length,
|
| 320 |
+
truncation=True,
|
| 321 |
+
return_tensors="pt",
|
| 322 |
+
)
|
| 323 |
+
|
| 324 |
+
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
|
| 325 |
+
attention_mask = uncond_input.attention_mask.to(device)
|
| 326 |
+
else:
|
| 327 |
+
attention_mask = None
|
| 328 |
+
|
| 329 |
+
negative_prompt_embeds = self.text_encoder(
|
| 330 |
+
uncond_input.input_ids.to(device),
|
| 331 |
+
attention_mask=attention_mask,
|
| 332 |
+
)
|
| 333 |
+
negative_prompt_embeds = negative_prompt_embeds[0]
|
| 334 |
+
|
| 335 |
+
if do_classifier_free_guidance:
|
| 336 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 337 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 338 |
+
|
| 339 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
|
| 340 |
+
|
| 341 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 342 |
+
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
|
| 343 |
+
|
| 344 |
+
if self.text_encoder is not None:
|
| 345 |
+
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
|
| 346 |
+
# Retrieve the original scale by scaling back the LoRA layers
|
| 347 |
+
unscale_lora_layers(self.text_encoder, lora_scale)
|
| 348 |
+
|
| 349 |
+
return prompt_embeds, negative_prompt_embeds
|
| 350 |
+
|
| 351 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
|
| 352 |
+
def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
|
| 353 |
+
dtype = next(self.image_encoder.parameters()).dtype
|
| 354 |
+
|
| 355 |
+
if not isinstance(image, torch.Tensor):
|
| 356 |
+
image = self.feature_extractor(image, return_tensors="pt").pixel_values
|
| 357 |
+
|
| 358 |
+
image = image.to(device=device, dtype=dtype)
|
| 359 |
+
if output_hidden_states:
|
| 360 |
+
image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
|
| 361 |
+
image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
|
| 362 |
+
uncond_image_enc_hidden_states = self.image_encoder(
|
| 363 |
+
torch.zeros_like(image), output_hidden_states=True
|
| 364 |
+
).hidden_states[-2]
|
| 365 |
+
uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
|
| 366 |
+
num_images_per_prompt, dim=0
|
| 367 |
+
)
|
| 368 |
+
return image_enc_hidden_states, uncond_image_enc_hidden_states
|
| 369 |
+
else:
|
| 370 |
+
image_embeds = self.image_encoder(image).image_embeds
|
| 371 |
+
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
|
| 372 |
+
uncond_image_embeds = torch.zeros_like(image_embeds)
|
| 373 |
+
|
| 374 |
+
return image_embeds, uncond_image_embeds
|
| 375 |
+
|
| 376 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
|
| 377 |
+
def prepare_ip_adapter_image_embeds(
|
| 378 |
+
self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
|
| 379 |
+
):
|
| 380 |
+
if ip_adapter_image_embeds is None:
|
| 381 |
+
if not isinstance(ip_adapter_image, list):
|
| 382 |
+
ip_adapter_image = [ip_adapter_image]
|
| 383 |
+
|
| 384 |
+
if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
|
| 385 |
+
raise ValueError(
|
| 386 |
+
f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
|
| 387 |
+
)
|
| 388 |
+
|
| 389 |
+
image_embeds = []
|
| 390 |
+
for single_ip_adapter_image, image_proj_layer in zip(
|
| 391 |
+
ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
|
| 392 |
+
):
|
| 393 |
+
output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
|
| 394 |
+
single_image_embeds, single_negative_image_embeds = self.encode_image(
|
| 395 |
+
single_ip_adapter_image, device, 1, output_hidden_state
|
| 396 |
+
)
|
| 397 |
+
single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
|
| 398 |
+
single_negative_image_embeds = torch.stack(
|
| 399 |
+
[single_negative_image_embeds] * num_images_per_prompt, dim=0
|
| 400 |
+
)
|
| 401 |
+
|
| 402 |
+
if do_classifier_free_guidance:
|
| 403 |
+
single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
|
| 404 |
+
single_image_embeds = single_image_embeds.to(device)
|
| 405 |
+
|
| 406 |
+
image_embeds.append(single_image_embeds)
|
| 407 |
+
else:
|
| 408 |
+
repeat_dims = [1]
|
| 409 |
+
image_embeds = []
|
| 410 |
+
for single_image_embeds in ip_adapter_image_embeds:
|
| 411 |
+
if do_classifier_free_guidance:
|
| 412 |
+
single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
|
| 413 |
+
single_image_embeds = single_image_embeds.repeat(
|
| 414 |
+
num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
|
| 415 |
+
)
|
| 416 |
+
single_negative_image_embeds = single_negative_image_embeds.repeat(
|
| 417 |
+
num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
|
| 418 |
+
)
|
| 419 |
+
single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
|
| 420 |
+
else:
|
| 421 |
+
single_image_embeds = single_image_embeds.repeat(
|
| 422 |
+
num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
|
| 423 |
+
)
|
| 424 |
+
image_embeds.append(single_image_embeds)
|
| 425 |
+
|
| 426 |
+
return image_embeds
|
| 427 |
+
|
| 428 |
+
# Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
|
| 429 |
+
def decode_latents(self, latents):
|
| 430 |
+
latents = 1 / self.vae.config.scaling_factor * latents
|
| 431 |
+
|
| 432 |
+
batch_size, channels, num_frames, height, width = latents.shape
|
| 433 |
+
latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
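# illustrative shapes: (B, C, F, H, W) = (1, 4, 16, 64, 64) -> (16, 4, 64, 64); frames are folded into the batch dimension so the image VAE can decode each frame independently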
|
| 434 |
+
|
| 435 |
+
image = self.vae.decode(latents).sample
|
| 436 |
+
video = image[None, :].reshape((batch_size, num_frames, -1) + image.shape[2:]).permute(0, 2, 1, 3, 4)
|
| 437 |
+
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
|
| 438 |
+
video = video.float()
|
| 439 |
+
return video
|
| 440 |
+
|
| 441 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 442 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 443 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 444 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 445 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 446 |
+
# and should be between [0, 1]
|
| 447 |
+
|
| 448 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 449 |
+
extra_step_kwargs = {}
|
| 450 |
+
if accepts_eta:
|
| 451 |
+
extra_step_kwargs["eta"] = eta
|
| 452 |
+
|
| 453 |
+
# check if the scheduler accepts generator
|
| 454 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 455 |
+
if accepts_generator:
|
| 456 |
+
extra_step_kwargs["generator"] = generator
|
| 457 |
+
return extra_step_kwargs
|
| 458 |
+
|
| 459 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
|
| 460 |
+
def check_inputs(
|
| 461 |
+
self,
|
| 462 |
+
prompt,
|
| 463 |
+
height,
|
| 464 |
+
width,
|
| 465 |
+
negative_prompt=None,
|
| 466 |
+
prompt_embeds=None,
|
| 467 |
+
negative_prompt_embeds=None,
|
| 468 |
+
ip_adapter_image=None,
|
| 469 |
+
ip_adapter_image_embeds=None,
|
| 470 |
+
callback_on_step_end_tensor_inputs=None,
|
| 471 |
+
):
|
| 472 |
+
if height % 8 != 0 or width % 8 != 0:
|
| 473 |
+
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
|
| 474 |
+
|
| 475 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 476 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 477 |
+
):
|
| 478 |
+
raise ValueError(
|
| 479 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 480 |
+
)
|
| 481 |
+
|
| 482 |
+
if prompt is not None and prompt_embeds is not None:
|
| 483 |
+
raise ValueError(
|
| 484 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 485 |
+
" only forward one of the two."
|
| 486 |
+
)
|
| 487 |
+
elif prompt is None and prompt_embeds is None:
|
| 488 |
+
raise ValueError(
|
| 489 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 490 |
+
)
|
| 491 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 492 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 493 |
+
|
| 494 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 495 |
+
raise ValueError(
|
| 496 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 497 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 498 |
+
)
|
| 499 |
+
|
| 500 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 501 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 502 |
+
raise ValueError(
|
| 503 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 504 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 505 |
+
f" {negative_prompt_embeds.shape}."
|
| 506 |
+
)
|
| 507 |
+
|
| 508 |
+
if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
|
| 509 |
+
raise ValueError(
|
| 510 |
+
"Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
|
| 511 |
+
)
|
| 512 |
+
|
| 513 |
+
if ip_adapter_image_embeds is not None:
|
| 514 |
+
if not isinstance(ip_adapter_image_embeds, list):
|
| 515 |
+
raise ValueError(
|
| 516 |
+
f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
|
| 517 |
+
)
|
| 518 |
+
elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
|
| 519 |
+
raise ValueError(
|
| 520 |
+
f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
|
| 521 |
+
)
|
| 522 |
+
|
| 523 |
+
# Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents
|
| 524 |
+
def prepare_latents(
|
| 525 |
+
self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
|
| 526 |
+
):
|
| 527 |
+
shape = (
|
| 528 |
+
batch_size,
|
| 529 |
+
num_channels_latents,
|
| 530 |
+
num_frames,
|
| 531 |
+
height // self.vae_scale_factor,
|
| 532 |
+
width // self.vae_scale_factor,
|
| 533 |
+
)
|
| 534 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 535 |
+
raise ValueError(
|
| 536 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 537 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 538 |
+
)
|
| 539 |
+
|
| 540 |
+
if latents is None:
|
| 541 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=torch.float32)
|
| 542 |
+
else:
|
| 543 |
+
latents = latents.to(device)
|
| 544 |
+
|
| 545 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 546 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 547 |
+
return latents
|
| 548 |
+
|
| 549 |
+
@property
|
| 550 |
+
def guidance_scale(self):
|
| 551 |
+
return self._guidance_scale
|
| 552 |
+
|
| 553 |
+
@property
|
| 554 |
+
def clip_skip(self):
|
| 555 |
+
return self._clip_skip
|
| 556 |
+
|
| 557 |
+
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
|
| 558 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 559 |
+
# corresponds to doing no classifier free guidance.
|
| 560 |
+
@property
|
| 561 |
+
def do_classifier_free_guidance(self):
|
| 562 |
+
return self._guidance_scale > 1
|
| 563 |
+
|
| 564 |
+
@property
|
| 565 |
+
def cross_attention_kwargs(self):
|
| 566 |
+
return self._cross_attention_kwargs
|
| 567 |
+
|
| 568 |
+
@property
|
| 569 |
+
def num_timesteps(self):
|
| 570 |
+
return self._num_timesteps
|
| 571 |
+
|
| 572 |
+
@torch.no_grad()
|
| 573 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 574 |
+
def __call__(
|
| 575 |
+
self,
|
| 576 |
+
prompt: Union[str, List[str]] = None,
|
| 577 |
+
num_frames: Optional[int] = 16,
|
| 578 |
+
height: Optional[int] = None,
|
| 579 |
+
width: Optional[int] = None,
|
| 580 |
+
num_inference_steps: int = 50,
|
| 581 |
+
guidance_scale: float = 7.5,
|
| 582 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 583 |
+
num_videos_per_prompt: Optional[int] = 1,
|
| 584 |
+
eta: float = 0.0,
|
| 585 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 586 |
+
latents: Optional[torch.Tensor] = None,
|
| 587 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 588 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 589 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 590 |
+
ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
|
| 591 |
+
output_type: Optional[str] = "pil",
|
| 592 |
+
return_dict: bool = True,
|
| 593 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 594 |
+
clip_skip: Optional[int] = None,
|
| 595 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 596 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 597 |
+
):
|
| 598 |
+
r"""
|
| 599 |
+
The call function to the pipeline for generation.
|
| 600 |
+
|
| 601 |
+
Args:
|
| 602 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 603 |
+
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
|
| 604 |
+
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 605 |
+
The height in pixels of the generated video.
|
| 606 |
+
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
|
| 607 |
+
The width in pixels of the generated video.
|
| 608 |
+
num_frames (`int`, *optional*, defaults to 16):
|
| 609 |
+
The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second
|
| 610 |
+
amounts to 2 seconds of video.
|
| 611 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 612 |
+
The number of denoising steps. More denoising steps usually lead to higher quality videos at the
|
| 613 |
+
expense of slower inference.
|
| 614 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 615 |
+
A higher guidance scale value encourages the model to generate images closely linked to the text
|
| 616 |
+
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
|
| 617 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 618 |
+
The prompt or prompts to guide what not to include in image generation. If not defined, you need to
|
| 619 |
+
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
|
| 620 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 621 |
+
Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies
|
| 622 |
+
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
|
| 623 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 624 |
+
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
|
| 625 |
+
generation deterministic.
|
| 626 |
+
latents (`torch.Tensor`, *optional*):
|
| 627 |
+
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
|
| 628 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 629 |
+
tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
|
| 630 |
+
`(batch_size, num_channel, num_frames, height, width)`.
|
| 631 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 632 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
|
| 633 |
+
provided, text embeddings are generated from the `prompt` input argument.
|
| 634 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 635 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 636 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 637 |
+
ip_adapter_image: (`PipelineImageInput`, *optional*):
|
| 638 |
+
Optional image input to work with IP Adapters.
|
| 639 |
+
ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
|
| 640 |
+
Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the number of
|
| 641 |
+
IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
|
| 642 |
+
contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
|
| 643 |
+
provided, embeddings are computed from the `ip_adapter_image` input argument.
|
| 644 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 645 |
+
The output format of the generated video. Choose between `torch.Tensor`, `PIL.Image` or `np.array`.
|
| 646 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 647 |
+
Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
|
| 648 |
+
of a plain tuple.
|
| 649 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 650 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 651 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 652 |
+
clip_skip (`int`, *optional*):
|
| 653 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 654 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 655 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 656 |
+
A function that is called at the end of each denoising step during inference. The function is called
|
| 657 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 658 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 659 |
+
`callback_on_step_end_tensor_inputs`.
|
| 660 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 661 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 662 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 663 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 664 |
+
|
| 665 |
+
Examples:
|
| 666 |
+
|
| 667 |
+
Returns:
|
| 668 |
+
[`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
|
| 669 |
+
If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
|
| 670 |
+
returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
|
| 671 |
+
"""

        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        num_videos_per_prompt = 1

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            ip_adapter_image,
            ip_adapter_image_embeds,
            callback_on_step_end_tensor_inputs,
        )

        self._guidance_scale = guidance_scale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Encode input prompt
        text_encoder_lora_scale = (
            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
        )
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            device,
            num_videos_per_prompt,
            self.do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=text_encoder_lora_scale,
            clip_skip=self.clip_skip,
        )
        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
            image_embeds = self.prepare_ip_adapter_image_embeds(
                ip_adapter_image,
                ip_adapter_image_embeds,
                device,
                batch_size * num_videos_per_prompt,
                self.do_classifier_free_guidance,
            )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_videos_per_prompt,
            num_channels_latents,
            num_frames,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Add image embeds for IP-Adapter
        added_cond_kwargs = (
            {"image_embeds": image_embeds}
            if ip_adapter_image is not None or ip_adapter_image_embeds is not None
            else None
        )

        num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1
        for free_init_iter in range(num_free_init_iters):
            if self.free_init_enabled:
                latents, timesteps = self._apply_free_init(
                    latents, free_init_iter, num_inference_steps, device, latents.dtype, generator
                )

            self._num_timesteps = len(timesteps)
            num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order

            # 8. Denoising loop
            with self.progress_bar(total=self._num_timesteps) as progress_bar:
                for i, t in enumerate(timesteps):
                    # expand the latents if we are doing classifier free guidance
                    latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                    # predict the noise residual
                    noise_pred = self.unet(
                        latent_model_input,
                        t,
                        encoder_hidden_states=prompt_embeds,
                        # cross_attention_kwargs=cross_attention_kwargs,
                        # added_cond_kwargs=added_cond_kwargs,
                        # ).sample
                    )["sample"]

                    # perform guidance
                    if self.do_classifier_free_guidance:
                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                    # compute the previous noisy sample x_t -> x_t-1
                    latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                    if callback_on_step_end is not None:
                        callback_kwargs = {}
                        for k in callback_on_step_end_tensor_inputs:
                            callback_kwargs[k] = locals()[k]
                        callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                        latents = callback_outputs.pop("latents", latents)
                        prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                        negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                    # call the callback, if provided
                    if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                        progress_bar.update()

        # 9. Post processing
        if output_type == "latent":
            video = latents
        else:
            video_tensor = self.decode_latents(latents)
            video = self.video_processor.postprocess_video(video=video_tensor, output_type=output_type)

        # 10. Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return AnimateDiffPipelineOutput(frames=video)

    @torch.no_grad()
    def prepare_for_ipex(
        self,
        dtype=torch.float32,
        prompt: Union[str, List[str]] = None,
        num_frames: Optional[int] = 16,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_videos_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        clip_skip: Optional[int] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
    ):
        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        num_videos_per_prompt = 1

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            ip_adapter_image,
            ip_adapter_image_embeds,
            callback_on_step_end_tensor_inputs,
        )

        self._guidance_scale = guidance_scale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Encode input prompt
        text_encoder_lora_scale = (
            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
        )
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            device,
            num_videos_per_prompt,
            self.do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=text_encoder_lora_scale,
            clip_skip=self.clip_skip,
        )
        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_videos_per_prompt,
            num_channels_latents,
            num_frames,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        num_free_init_iters = self._free_init_num_iters if self.free_init_enabled else 1
        for free_init_iter in range(num_free_init_iters):
            if self.free_init_enabled:
                latents, timesteps = self._apply_free_init(
                    latents, free_init_iter, num_inference_steps, device, latents.dtype, generator
                )

            self._num_timesteps = len(timesteps)

        dummy = timesteps[0]
        latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
        latent_model_input = self.scheduler.scale_model_input(latent_model_input, dummy)

        self.unet = self.unet.to(memory_format=torch.channels_last)
        self.vae.decoder = self.vae.decoder.to(memory_format=torch.channels_last)
        self.text_encoder = self.text_encoder.to(memory_format=torch.channels_last)

        unet_input_example = {
            "sample": latent_model_input,
            "timestep": dummy,
            "encoder_hidden_states": prompt_embeds,
        }

        fake_latents = 1 / self.vae.config.scaling_factor * latents
        batch_size, channels, num_frames, height, width = fake_latents.shape
        fake_latents = fake_latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
        vae_decoder_input_example = fake_latents

        # optimize with ipex
        if dtype == torch.bfloat16:
            self.unet = ipex.optimize(self.unet.eval(), dtype=torch.bfloat16, inplace=True)
            self.vae.decoder = ipex.optimize(self.vae.decoder.eval(), dtype=torch.bfloat16, inplace=True)
            self.text_encoder = ipex.optimize(self.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
        elif dtype == torch.float32:
            self.unet = ipex.optimize(
                self.unet.eval(),
                dtype=torch.float32,
                inplace=True,
                # sample_input=unet_input_example,
                level="O1",
                weights_prepack=True,
                auto_kernel_selection=False,
            )
            self.vae.decoder = ipex.optimize(
                self.vae.decoder.eval(),
                dtype=torch.float32,
                inplace=True,
                level="O1",
                weights_prepack=True,
                auto_kernel_selection=False,
            )
            self.text_encoder = ipex.optimize(
                self.text_encoder.eval(),
                dtype=torch.float32,
                inplace=True,
                level="O1",
                weights_prepack=True,
                auto_kernel_selection=False,
            )
        else:
            raise ValueError(" The value of 'dtype' should be 'torch.bfloat16' or 'torch.float32' !")

        # trace unet model to get better performance on IPEX
        with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad():
            unet_trace_model = torch.jit.trace(
                self.unet, example_kwarg_inputs=unet_input_example, check_trace=False, strict=False
            )
            unet_trace_model = torch.jit.freeze(unet_trace_model)
            self.unet.forward = unet_trace_model.forward

        # trace vae.decoder model to get better performance on IPEX
        with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad():
            vae_decoder_trace_model = torch.jit.trace(
                self.vae.decoder, vae_decoder_input_example, check_trace=False, strict=False
            )
            vae_decoder_trace_model = torch.jit.freeze(vae_decoder_trace_model)
            self.vae.decoder.forward = vae_decoder_trace_model.forward
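`prepare_for_ipex` above has to be called once, with the same dtype and the same generation shapes that will be used at inference time, so that the frozen TorchScript graphs match the real inputs. A minimal usage sketch follows; the checkpoint ids, motion adapter and generation arguments are illustrative assumptions, not part of this diff:

```py
import torch
from diffusers import DiffusionPipeline, MotionAdapter

# Illustrative checkpoints -- any SD 1.5 base + AnimateDiff motion adapter pair is expected to work the same way.
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    motion_adapter=adapter,
    custom_pipeline="pipeline_animatediff_ipex",  # loads the community pipeline defined in this file
)

# Trace/optimize the UNet, VAE decoder and text encoder once, for the shapes used below.
pipe.prepare_for_ipex(torch.bfloat16, prompt="a panda surfing", num_frames=16, height=512, width=512)

# Subsequent calls reuse the frozen TorchScript graphs on CPU.
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    frames = pipe(prompt="a panda surfing", num_frames=16, height=512, width=512).frames[0]
```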
v0.36.0/pipeline_controlnet_xl_kolors.py
ADDED
@@ -0,0 +1,1338 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from transformers import (
    CLIPImageProcessor,
    CLIPVisionModelWithProjection,
)

from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import (
    FromSingleFileMixin,
    IPAdapterMixin,
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models import (
    AutoencoderKL,
    ControlNetModel,
    ImageProjection,
    MultiControlNetModel,
    UNet2DConditionModel,
)
from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    deprecate,
    is_invisible_watermark_available,
    logging,
    replace_example_docstring,
)
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor


if is_invisible_watermark_available():
    from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import KolorsControlNetPipeline, ControlNetModel
        >>> from diffusers.utils import load_image
        >>> import numpy as np
        >>> import cv2
        >>> from PIL import Image

        >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
        >>> negative_prompt = "low quality, bad quality, sketches"

        >>> # download an image
        >>> image = load_image(
        ...     "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
        ... )

        >>> # initialize the models and pipeline
        >>> controlnet_conditioning_scale = 0.5  # recommended for good generalization
        >>> controlnet = ControlNetModel.from_pretrained(
        ...     "Kwai-Kolors/Kolors-ControlNet-Canny",
        ...     use_safetensors=True,
        ...     torch_dtype=torch.float16
        ... )

        >>> pipe = KolorsControlNetPipeline.from_pretrained(
        ...     "Kwai-Kolors/Kolors-diffusers",
        ...     controlnet=controlnet,
        ...     variant="fp16",
        ...     use_safetensors=True,
        ...     torch_dtype=torch.float16
        ... )
        >>> pipe.enable_model_cpu_offload()

        >>> # get canny image
        >>> image = np.array(image)
        >>> image = cv2.Canny(image, 100, 200)
        >>> image = image[:, :, None]
        >>> image = np.concatenate([image, image, image], axis=2)
        >>> canny_image = Image.fromarray(image)

        >>> # generate image
        >>> image = pipe(
        ...     prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image
        ... ).images[0]
        ```
"""


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")

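# Note (illustrative, not part of the upstream file): `retrieve_latents` above is the standard helper for
# turning a VAE encoder output into latents, e.g.
#     init_latents = retrieve_latents(vae.encode(image), generator=generator) * vae.config.scaling_factor
# which is how img2img-style pipelines typically prepare image latents before adding noise.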
class KolorsControlNetPipeline(
|
| 127 |
+
DiffusionPipeline,
|
| 128 |
+
StableDiffusionMixin,
|
| 129 |
+
StableDiffusionXLLoraLoaderMixin,
|
| 130 |
+
FromSingleFileMixin,
|
| 131 |
+
IPAdapterMixin,
|
| 132 |
+
):
|
| 133 |
+
r"""
|
| 134 |
+
Pipeline for text-to-image generation using Kolors with ControlNet guidance.
|
| 135 |
+
|
| 136 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 137 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 138 |
+
|
| 139 |
+
The pipeline also inherits the following loading methods:
|
| 140 |
+
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.safetensors` files
|
| 141 |
+
- [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 142 |
+
- [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 143 |
+
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
|
| 144 |
+
|
| 145 |
+
Args:
|
| 146 |
+
vae ([`AutoencoderKL`]):
|
| 147 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 148 |
+
text_encoder ([`ChatGLMModel`]):
|
| 149 |
+
Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b).
|
| 150 |
+
tokenizer (`ChatGLMTokenizer`):
|
| 151 |
+
Tokenizer of class
|
| 152 |
+
[ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py).
|
| 153 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 154 |
+
controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
|
| 155 |
+
Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
|
| 156 |
+
as a list, the outputs from each ControlNet are added together to create one combined additional
|
| 157 |
+
conditioning.
|
| 158 |
+
scheduler ([`SchedulerMixin`]):
|
| 159 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 160 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 161 |
+
requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
|
| 162 |
+
Whether the `unet` requires an `aesthetic_score` condition to be passed during inference.
|
| 163 |
+
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
|
| 164 |
+
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
|
| 165 |
+
`Kwai-Kolors/Kolors-diffusers`.
|
| 166 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 167 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 168 |
+
"""
|
| 169 |
+
|
| 170 |
+
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
|
| 171 |
+
|
| 172 |
+
_optional_components = [
|
| 173 |
+
"tokenizer",
|
| 174 |
+
"text_encoder",
|
| 175 |
+
"feature_extractor",
|
| 176 |
+
"image_encoder",
|
| 177 |
+
]
|
| 178 |
+
_callback_tensor_inputs = [
|
| 179 |
+
"latents",
|
| 180 |
+
"prompt_embeds",
|
| 181 |
+
"negative_prompt_embeds",
|
| 182 |
+
"add_text_embeds",
|
| 183 |
+
"add_time_ids",
|
| 184 |
+
"negative_pooled_prompt_embeds",
|
| 185 |
+
"negative_add_time_ids",
|
| 186 |
+
"image",
|
| 187 |
+
]
|
| 188 |
+
|
| 189 |
+
def __init__(
|
| 190 |
+
self,
|
| 191 |
+
vae: AutoencoderKL,
|
| 192 |
+
text_encoder: ChatGLMModel,
|
| 193 |
+
tokenizer: ChatGLMTokenizer,
|
| 194 |
+
unet: UNet2DConditionModel,
|
| 195 |
+
controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
|
| 196 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 197 |
+
requires_aesthetics_score: bool = False,
|
| 198 |
+
force_zeros_for_empty_prompt: bool = True,
|
| 199 |
+
feature_extractor: CLIPImageProcessor = None,
|
| 200 |
+
image_encoder: CLIPVisionModelWithProjection = None,
|
| 201 |
+
add_watermarker: Optional[bool] = None,
|
| 202 |
+
):
|
| 203 |
+
super().__init__()
|
| 204 |
+
|
| 205 |
+
if isinstance(controlnet, (list, tuple)):
|
| 206 |
+
controlnet = MultiControlNetModel(controlnet)
|
| 207 |
+
|
| 208 |
+
self.register_modules(
|
| 209 |
+
vae=vae,
|
| 210 |
+
text_encoder=text_encoder,
|
| 211 |
+
tokenizer=tokenizer,
|
| 212 |
+
unet=unet,
|
| 213 |
+
controlnet=controlnet,
|
| 214 |
+
scheduler=scheduler,
|
| 215 |
+
feature_extractor=feature_extractor,
|
| 216 |
+
image_encoder=image_encoder,
|
| 217 |
+
)
|
| 218 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
|
| 219 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
|
| 220 |
+
self.control_image_processor = VaeImageProcessor(
|
| 221 |
+
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
| 222 |
+
)
|
| 223 |
+
|
| 224 |
+
if add_watermarker:
|
| 225 |
+
self.watermark = StableDiffusionXLWatermarker()
|
| 226 |
+
else:
|
| 227 |
+
self.watermark = None
|
| 228 |
+
|
| 229 |
+
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 230 |
+
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
|
| 231 |
+
|
| 232 |
+
def encode_prompt(
|
| 233 |
+
self,
|
| 234 |
+
prompt,
|
| 235 |
+
device: Optional[torch.device] = None,
|
| 236 |
+
num_images_per_prompt: int = 1,
|
| 237 |
+
do_classifier_free_guidance: bool = True,
|
| 238 |
+
negative_prompt=None,
|
| 239 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 240 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 241 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 242 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 243 |
+
lora_scale: Optional[float] = None,
|
| 244 |
+
):
|
| 245 |
+
r"""
|
| 246 |
+
Encodes the prompt into text encoder hidden states.
|
| 247 |
+
|
| 248 |
+
Args:
|
| 249 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 250 |
+
prompt to be encoded
|
| 251 |
+
device: (`torch.device`):
|
| 252 |
+
torch device
|
| 253 |
+
num_images_per_prompt (`int`):
|
| 254 |
+
number of images that should be generated per prompt
|
| 255 |
+
do_classifier_free_guidance (`bool`):
|
| 256 |
+
whether to use classifier free guidance or not
|
| 257 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 258 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 259 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 260 |
+
less than `1`).
|
| 261 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 262 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 263 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 264 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 265 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 266 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 267 |
+
argument.
|
| 268 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 269 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 270 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 271 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 272 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 273 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 274 |
+
input argument.
|
| 275 |
+
lora_scale (`float`, *optional*):
|
| 276 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 277 |
+
"""
|
| 278 |
+
device = device or self._execution_device
|
| 279 |
+
|
| 280 |
+
# set lora scale so that monkey patched LoRA
|
| 281 |
+
# function of text encoder can correctly access it
|
| 282 |
+
if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
|
| 283 |
+
self._lora_scale = lora_scale
|
| 284 |
+
|
| 285 |
+
if prompt is not None and isinstance(prompt, str):
|
| 286 |
+
batch_size = 1
|
| 287 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 288 |
+
batch_size = len(prompt)
|
| 289 |
+
else:
|
| 290 |
+
batch_size = prompt_embeds.shape[0]
|
| 291 |
+
|
| 292 |
+
# Define tokenizers and text encoders
|
| 293 |
+
tokenizers = [self.tokenizer]
|
| 294 |
+
text_encoders = [self.text_encoder]
|
| 295 |
+
|
| 296 |
+
if prompt_embeds is None:
|
| 297 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 298 |
+
prompt_embeds_list = []
|
| 299 |
+
for tokenizer, text_encoder in zip(tokenizers, text_encoders):
|
| 300 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 301 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
| 302 |
+
|
| 303 |
+
text_inputs = tokenizer(
|
| 304 |
+
prompt,
|
| 305 |
+
padding="max_length",
|
| 306 |
+
max_length=256,
|
| 307 |
+
truncation=True,
|
| 308 |
+
return_tensors="pt",
|
| 309 |
+
).to(self._execution_device)
|
| 310 |
+
output = text_encoder(
|
| 311 |
+
input_ids=text_inputs["input_ids"],
|
| 312 |
+
attention_mask=text_inputs["attention_mask"],
|
| 313 |
+
position_ids=text_inputs["position_ids"],
|
| 314 |
+
output_hidden_states=True,
|
| 315 |
+
)
|
| 316 |
+
prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
|
| 317 |
+
pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096]
|
| 318 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 319 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 320 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 321 |
+
|
| 322 |
+
prompt_embeds_list.append(prompt_embeds)
|
| 323 |
+
|
| 324 |
+
prompt_embeds = prompt_embeds_list[0]
|
| 325 |
+
|
| 326 |
+
# get unconditional embeddings for classifier free guidance
|
| 327 |
+
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
| 328 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
| 329 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
| 330 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
| 331 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 332 |
+
uncond_tokens: List[str]
|
| 333 |
+
if negative_prompt is None:
|
| 334 |
+
uncond_tokens = [""] * batch_size
|
| 335 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 336 |
+
raise TypeError(
|
| 337 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 338 |
+
f" {type(prompt)}."
|
| 339 |
+
)
|
| 340 |
+
elif isinstance(negative_prompt, str):
|
| 341 |
+
uncond_tokens = [negative_prompt]
|
| 342 |
+
elif batch_size != len(negative_prompt):
|
| 343 |
+
raise ValueError(
|
| 344 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 345 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 346 |
+
" the batch size of `prompt`."
|
| 347 |
+
)
|
| 348 |
+
else:
|
| 349 |
+
uncond_tokens = negative_prompt
|
| 350 |
+
|
| 351 |
+
negative_prompt_embeds_list = []
|
| 352 |
+
for tokenizer, text_encoder in zip(tokenizers, text_encoders):
|
| 353 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 354 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 355 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, tokenizer)
|
| 356 |
+
|
| 357 |
+
max_length = prompt_embeds.shape[1]
|
| 358 |
+
uncond_input = tokenizer(
|
| 359 |
+
uncond_tokens,
|
| 360 |
+
padding="max_length",
|
| 361 |
+
max_length=max_length,
|
| 362 |
+
truncation=True,
|
| 363 |
+
return_tensors="pt",
|
| 364 |
+
).to(self._execution_device)
|
| 365 |
+
output = text_encoder(
|
| 366 |
+
input_ids=uncond_input["input_ids"],
|
| 367 |
+
attention_mask=uncond_input["attention_mask"],
|
| 368 |
+
position_ids=uncond_input["position_ids"],
|
| 369 |
+
output_hidden_states=True,
|
| 370 |
+
)
|
| 371 |
+
negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
|
| 372 |
+
negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096]
|
| 373 |
+
|
| 374 |
+
if do_classifier_free_guidance:
|
| 375 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 376 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 377 |
+
|
| 378 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)
|
| 379 |
+
|
| 380 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 381 |
+
negative_prompt_embeds = negative_prompt_embeds.view(
|
| 382 |
+
batch_size * num_images_per_prompt, seq_len, -1
|
| 383 |
+
)
|
| 384 |
+
|
| 385 |
+
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
| 386 |
+
|
| 387 |
+
negative_prompt_embeds = negative_prompt_embeds_list[0]
|
| 388 |
+
|
| 389 |
+
bs_embed = pooled_prompt_embeds.shape[0]
|
| 390 |
+
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 391 |
+
bs_embed * num_images_per_prompt, -1
|
| 392 |
+
)
|
| 393 |
+
if do_classifier_free_guidance:
|
| 394 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 395 |
+
bs_embed * num_images_per_prompt, -1
|
| 396 |
+
)
|
| 397 |
+
|
| 398 |
+
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 399 |
+
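    # Note (illustrative, not part of the upstream file): `encode_prompt` can be called on its own to
    # pre-compute the ChatGLM embeddings; it returns (prompt_embeds, negative_prompt_embeds,
    # pooled_prompt_embeds, negative_pooled_prompt_embeds), which can be fed back to the pipeline via the
    # corresponding `*_embeds` arguments to avoid re-encoding the same prompt across calls.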
|
| 400 |
+
def prepare_ip_adapter_image_embeds(
|
| 401 |
+
self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
|
| 402 |
+
):
|
| 403 |
+
image_embeds = []
|
| 404 |
+
if do_classifier_free_guidance:
|
| 405 |
+
negative_image_embeds = []
|
| 406 |
+
if ip_adapter_image_embeds is None:
|
| 407 |
+
if not isinstance(ip_adapter_image, list):
|
| 408 |
+
ip_adapter_image = [ip_adapter_image]
|
| 409 |
+
|
| 410 |
+
if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
|
| 411 |
+
raise ValueError(
|
| 412 |
+
f"`ip_adapter_image` must have same length as the number of IP Adapters. "
|
| 413 |
+
f"Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
|
| 414 |
+
)
|
| 415 |
+
|
| 416 |
+
for single_ip_adapter_image, image_proj_layer in zip(
|
| 417 |
+
ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
|
| 418 |
+
):
|
| 419 |
+
output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
|
| 420 |
+
single_image_embeds, single_negative_image_embeds = self.encode_image(
|
| 421 |
+
single_ip_adapter_image, device, 1, output_hidden_state
|
| 422 |
+
)
|
| 423 |
+
|
| 424 |
+
image_embeds.append(single_image_embeds[None, :])
|
| 425 |
+
if do_classifier_free_guidance:
|
| 426 |
+
negative_image_embeds.append(single_negative_image_embeds[None, :])
|
| 427 |
+
else:
|
| 428 |
+
for single_image_embeds in ip_adapter_image_embeds:
|
| 429 |
+
if do_classifier_free_guidance:
|
| 430 |
+
single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
|
| 431 |
+
|
| 432 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
|
| 433 |
+
def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
|
| 434 |
+
dtype = next(self.image_encoder.parameters()).dtype
|
| 435 |
+
|
| 436 |
+
if not isinstance(image, torch.Tensor):
|
| 437 |
+
image = self.feature_extractor(image, return_tensors="pt").pixel_values
|
| 438 |
+
|
| 439 |
+
image = image.to(device=device, dtype=dtype)
|
| 440 |
+
if output_hidden_states:
|
| 441 |
+
image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
|
| 442 |
+
image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
|
| 443 |
+
uncond_image_enc_hidden_states = self.image_encoder(
|
| 444 |
+
torch.zeros_like(image), output_hidden_states=True
|
| 445 |
+
).hidden_states[-2]
|
| 446 |
+
uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
|
| 447 |
+
num_images_per_prompt, dim=0
|
| 448 |
+
)
|
| 449 |
+
return image_enc_hidden_states, uncond_image_enc_hidden_states
|
| 450 |
+
else:
|
| 451 |
+
image_embeds = self.image_encoder(image).image_embeds
|
| 452 |
+
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
|
| 453 |
+
uncond_image_embeds = torch.zeros_like(image_embeds)
|
| 454 |
+
|
| 455 |
+
return image_embeds, uncond_image_embeds
|
| 456 |
+
|
| 457 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 458 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 459 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 460 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 461 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 462 |
+
# and should be between [0, 1]
|
| 463 |
+
|
| 464 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 465 |
+
extra_step_kwargs = {}
|
| 466 |
+
if accepts_eta:
|
| 467 |
+
extra_step_kwargs["eta"] = eta
|
| 468 |
+
|
| 469 |
+
# check if the scheduler accepts generator
|
| 470 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 471 |
+
if accepts_generator:
|
| 472 |
+
extra_step_kwargs["generator"] = generator
|
| 473 |
+
return extra_step_kwargs
|
| 474 |
+
|
| 475 |
+
def check_inputs(
|
| 476 |
+
self,
|
| 477 |
+
prompt,
|
| 478 |
+
image,
|
| 479 |
+
num_inference_steps,
|
| 480 |
+
callback_steps,
|
| 481 |
+
negative_prompt=None,
|
| 482 |
+
prompt_embeds=None,
|
| 483 |
+
negative_prompt_embeds=None,
|
| 484 |
+
pooled_prompt_embeds=None,
|
| 485 |
+
negative_pooled_prompt_embeds=None,
|
| 486 |
+
ip_adapter_image=None,
|
| 487 |
+
ip_adapter_image_embeds=None,
|
| 488 |
+
controlnet_conditioning_scale=1.0,
|
| 489 |
+
control_guidance_start=0.0,
|
| 490 |
+
control_guidance_end=1.0,
|
| 491 |
+
callback_on_step_end_tensor_inputs=None,
|
| 492 |
+
):
|
| 493 |
+
if num_inference_steps is None:
|
| 494 |
+
raise ValueError("`num_inference_steps` cannot be None.")
|
| 495 |
+
elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
|
| 496 |
+
raise ValueError(
|
| 497 |
+
f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
|
| 498 |
+
f" {type(num_inference_steps)}."
|
| 499 |
+
)
|
| 500 |
+
|
| 501 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 502 |
+
raise ValueError(
|
| 503 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 504 |
+
f" {type(callback_steps)}."
|
| 505 |
+
)
|
| 506 |
+
|
| 507 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 508 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 509 |
+
):
|
| 510 |
+
raise ValueError(
|
| 511 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 512 |
+
)
|
| 513 |
+
|
| 514 |
+
if prompt is not None and prompt_embeds is not None:
|
| 515 |
+
raise ValueError(
|
| 516 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 517 |
+
" only forward one of the two."
|
| 518 |
+
)
|
| 519 |
+
elif prompt is None and prompt_embeds is None:
|
| 520 |
+
raise ValueError(
|
| 521 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 522 |
+
)
|
| 523 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 524 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 525 |
+
|
| 526 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 527 |
+
raise ValueError(
|
| 528 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 529 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 530 |
+
)
|
| 531 |
+
|
| 532 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 533 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 534 |
+
raise ValueError(
|
| 535 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 536 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 537 |
+
f" {negative_prompt_embeds.shape}."
|
| 538 |
+
)
|
| 539 |
+
|
| 540 |
+
if prompt_embeds is not None and pooled_prompt_embeds is None:
|
| 541 |
+
raise ValueError(
|
| 542 |
+
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
|
| 543 |
+
)
|
| 544 |
+
|
| 545 |
+
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
|
| 546 |
+
raise ValueError(
|
| 547 |
+
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
|
| 548 |
+
)
|
| 549 |
+
|
| 550 |
+
# `prompt` needs more sophisticated handling when there are multiple
|
| 551 |
+
# conditionings.
|
| 552 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 553 |
+
if isinstance(prompt, list):
|
| 554 |
+
logger.warning(
|
| 555 |
+
f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
|
| 556 |
+
" prompts. The conditionings will be fixed across the prompts."
|
| 557 |
+
)
|
| 558 |
+
|
| 559 |
+
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
|
| 560 |
+
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
|
| 561 |
+
)
|
| 562 |
+
|
| 563 |
+
# Check `controlnet_conditioning_scale`
|
| 564 |
+
if (
|
| 565 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 566 |
+
or is_compiled
|
| 567 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 568 |
+
):
|
| 569 |
+
if not isinstance(controlnet_conditioning_scale, float):
|
| 570 |
+
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
| 571 |
+
elif (
|
| 572 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 573 |
+
or is_compiled
|
| 574 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 575 |
+
):
|
| 576 |
+
if isinstance(controlnet_conditioning_scale, list):
|
| 577 |
+
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
|
| 578 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 579 |
+
elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
|
| 580 |
+
self.controlnet.nets
|
| 581 |
+
):
|
| 582 |
+
raise ValueError(
|
| 583 |
+
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
|
| 584 |
+
" the same length as the number of controlnets"
|
| 585 |
+
)
|
| 586 |
+
else:
|
| 587 |
+
assert False
|
| 588 |
+
|
| 589 |
+
if not isinstance(control_guidance_start, (tuple, list)):
|
| 590 |
+
control_guidance_start = [control_guidance_start]
|
| 591 |
+
|
| 592 |
+
if not isinstance(control_guidance_end, (tuple, list)):
|
| 593 |
+
control_guidance_end = [control_guidance_end]
|
| 594 |
+
|
| 595 |
+
if len(control_guidance_start) != len(control_guidance_end):
|
| 596 |
+
raise ValueError(
|
| 597 |
+
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
|
| 598 |
+
)
|
| 599 |
+
|
| 600 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 601 |
+
if len(control_guidance_start) != len(self.controlnet.nets):
|
| 602 |
+
raise ValueError(
|
| 603 |
+
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
|
| 604 |
+
)
|
| 605 |
+
|
| 606 |
+
for start, end in zip(control_guidance_start, control_guidance_end):
|
| 607 |
+
if start >= end:
|
| 608 |
+
raise ValueError(
|
| 609 |
+
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
|
| 610 |
+
)
|
| 611 |
+
if start < 0.0:
|
| 612 |
+
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
|
| 613 |
+
if end > 1.0:
|
| 614 |
+
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
|
| 615 |
+
|
| 616 |
+
if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
|
| 617 |
+
raise ValueError(
|
| 618 |
+
"Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
|
| 619 |
+
)
|
| 620 |
+
|
| 621 |
+
if ip_adapter_image_embeds is not None:
|
| 622 |
+
if not isinstance(ip_adapter_image_embeds, list):
|
| 623 |
+
raise ValueError(
|
| 624 |
+
f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
|
| 625 |
+
)
|
| 626 |
+
elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
|
| 627 |
+
raise ValueError(
|
| 628 |
+
f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
|
| 629 |
+
)
|
| 630 |
+
|
| 631 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image
|
| 632 |
+
def check_image(self, image, prompt, prompt_embeds):
|
| 633 |
+
image_is_pil = isinstance(image, PIL.Image.Image)
|
| 634 |
+
image_is_tensor = isinstance(image, torch.Tensor)
|
| 635 |
+
image_is_np = isinstance(image, np.ndarray)
|
| 636 |
+
image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
|
| 637 |
+
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
| 638 |
+
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
|
| 639 |
+
|
| 640 |
+
if (
|
| 641 |
+
not image_is_pil
|
| 642 |
+
and not image_is_tensor
|
| 643 |
+
and not image_is_np
|
| 644 |
+
and not image_is_pil_list
|
| 645 |
+
and not image_is_tensor_list
|
| 646 |
+
and not image_is_np_list
|
| 647 |
+
):
|
| 648 |
+
raise TypeError(
|
| 649 |
+
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
|
| 650 |
+
)
|
| 651 |
+
|
| 652 |
+
if image_is_pil:
|
| 653 |
+
image_batch_size = 1
|
| 654 |
+
else:
|
| 655 |
+
image_batch_size = len(image)
|
| 656 |
+
|
| 657 |
+
if prompt is not None and isinstance(prompt, str):
|
| 658 |
+
prompt_batch_size = 1
|
| 659 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 660 |
+
prompt_batch_size = len(prompt)
|
| 661 |
+
elif prompt_embeds is not None:
|
| 662 |
+
prompt_batch_size = prompt_embeds.shape[0]
|
| 663 |
+
|
| 664 |
+
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
| 665 |
+
raise ValueError(
|
| 666 |
+
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
| 667 |
+
)
|
| 668 |
+
|
| 669 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image
|
| 670 |
+
def prepare_control_image(
|
| 671 |
+
self,
|
| 672 |
+
image,
|
| 673 |
+
width,
|
| 674 |
+
height,
|
| 675 |
+
batch_size,
|
| 676 |
+
num_images_per_prompt,
|
| 677 |
+
device,
|
| 678 |
+
dtype,
|
| 679 |
+
do_classifier_free_guidance=False,
|
| 680 |
+
guess_mode=False,
|
| 681 |
+
):
|
| 682 |
+
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
| 683 |
+
image_batch_size = image.shape[0]
|
| 684 |
+
|
| 685 |
+
if image_batch_size == 1:
|
| 686 |
+
repeat_by = batch_size
|
| 687 |
+
else:
|
| 688 |
+
# image batch size is the same as prompt batch size
|
| 689 |
+
repeat_by = num_images_per_prompt
|
| 690 |
+
|
| 691 |
+
image = image.repeat_interleave(repeat_by, dim=0)
|
| 692 |
+
|
| 693 |
+
image = image.to(device=device, dtype=dtype)
|
| 694 |
+
|
| 695 |
+
if do_classifier_free_guidance and not guess_mode:
|
| 696 |
+
image = torch.cat([image] * 2)
|
| 697 |
+
|
| 698 |
+
return image
|
| 699 |
+
|
| 700 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 701 |
+
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
| 702 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 703 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 704 |
+
raise ValueError(
|
| 705 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 706 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 707 |
+
)
|
| 708 |
+
|
| 709 |
+
if latents is None:
|
| 710 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 711 |
+
else:
|
| 712 |
+
latents = latents.to(device)
|
| 713 |
+
|
| 714 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 715 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 716 |
+
return latents
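# --- Illustrative sketch (not part of the pipeline file above) ---
# prepare_latents() draws Gaussian noise of shape (batch, unet.in_channels, H / vae_scale, W / vae_scale)
# and scales it by the scheduler's init_noise_sigma. The pipeline uses diffusers' randn_tensor helper;
# plain torch.randn is used here only to keep the sketch self-contained.
import torch

batch_size, num_channels_latents, height, width, vae_scale_factor = 1, 4, 1024, 1024, 8
shape = (batch_size, num_channels_latents, height // vae_scale_factor, width // vae_scale_factor)
init_noise_sigma = 1.0  # DDIM/DPM-style schedulers use 1.0; Euler-style schedulers expose a larger value
latents = torch.randn(shape) * init_noise_sigma  # -> (1, 4, 128, 128)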
|
| 717 |
+
|
| 718 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
|
| 719 |
+
def prepare_latents_t2i(
|
| 720 |
+
self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None
|
| 721 |
+
):
|
| 722 |
+
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
| 723 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 724 |
+
raise ValueError(
|
| 725 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 726 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 727 |
+
)
|
| 728 |
+
|
| 729 |
+
if latents is None:
|
| 730 |
+
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 731 |
+
else:
|
| 732 |
+
latents = latents.to(device)
|
| 733 |
+
|
| 734 |
+
# scale the initial noise by the standard deviation required by the scheduler
|
| 735 |
+
latents = latents * self.scheduler.init_noise_sigma
|
| 736 |
+
return latents
|
| 737 |
+
|
| 738 |
+
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids
|
| 739 |
+
def _get_add_time_ids(
|
| 740 |
+
self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
|
| 741 |
+
):
|
| 742 |
+
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
| 743 |
+
|
| 744 |
+
passed_add_embed_dim = (
|
| 745 |
+
self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
|
| 746 |
+
)
|
| 747 |
+
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
|
| 748 |
+
|
| 749 |
+
if expected_add_embed_dim != passed_add_embed_dim:
|
| 750 |
+
raise ValueError(
|
| 751 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
|
| 752 |
+
)
|
| 753 |
+
|
| 754 |
+
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
|
| 755 |
+
return add_time_ids
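# --- Illustrative sketch (not part of the pipeline file above) ---
# _get_add_time_ids() packs the SDXL-style micro-conditioning into one vector of six numbers:
# (original_h, original_w, crop_top, crop_left, target_h, target_w). The method then checks that
# unet.config.addition_time_embed_dim * 6 + text_encoder_projection_dim matches the UNet's
# add_embedding input size before returning the tensor.
import torch

original_size, crops_coords_top_left, target_size = (1024, 1024), (0, 0), (1024, 1024)
add_time_ids = torch.tensor([list(original_size + crops_coords_top_left + target_size)], dtype=torch.float16)
print(add_time_ids)  # tensor([[1024., 1024., 0., 0., 1024., 1024.]], dtype=torch.float16)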
|
| 756 |
+
|
| 757 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
|
| 758 |
+
def upcast_vae(self):
|
| 759 |
+
deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
|
| 760 |
+
self.vae.to(dtype=torch.float32)
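# --- Illustrative sketch (not part of the pipeline file above) ---
# The SDXL-family VAE can overflow in float16, so the decode path at the end of __call__ temporarily
# upcasts it to float32 when vae.config.force_upcast is set, then casts back. A condensed version of
# that logic, with a hypothetical helper name:
import torch


def decode_with_optional_upcast(vae, latents):
    needs_upcasting = vae.dtype == torch.float16 and vae.config.force_upcast
    if needs_upcasting:
        vae.to(dtype=torch.float32)
        latents = latents.float()
    image = vae.decode(latents / vae.config.scaling_factor, return_dict=False)[0]
    if needs_upcasting:
        vae.to(dtype=torch.float16)  # cast back so later calls stay in fp16
    return image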
|
| 761 |
+
|
| 762 |
+
@property
|
| 763 |
+
def guidance_scale(self):
|
| 764 |
+
return self._guidance_scale
|
| 765 |
+
|
| 766 |
+
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
| 767 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 768 |
+
# corresponds to doing no classifier free guidance.
|
| 769 |
+
@property
|
| 770 |
+
def do_classifier_free_guidance(self):
|
| 771 |
+
return self._guidance_scale > 1
|
| 772 |
+
|
| 773 |
+
@property
|
| 774 |
+
def cross_attention_kwargs(self):
|
| 775 |
+
return self._cross_attention_kwargs
|
| 776 |
+
|
| 777 |
+
@property
|
| 778 |
+
def num_timesteps(self):
|
| 779 |
+
return self._num_timesteps
|
| 780 |
+
|
| 781 |
+
@torch.no_grad()
|
| 782 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 783 |
+
def __call__(
|
| 784 |
+
self,
|
| 785 |
+
prompt: Union[str, List[str]] = None,
|
| 786 |
+
image: PipelineImageInput = None,
|
| 787 |
+
height: Optional[int] = None,
|
| 788 |
+
width: Optional[int] = None,
|
| 789 |
+
num_inference_steps: int = 50,
|
| 790 |
+
guidance_scale: float = 5.0,
|
| 791 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 792 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 793 |
+
eta: float = 0.0,
|
| 794 |
+
guess_mode: bool = False,
|
| 795 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 796 |
+
latents: Optional[torch.Tensor] = None,
|
| 797 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 798 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 799 |
+
pooled_prompt_embeds: Optional[torch.Tensor] = None,
|
| 800 |
+
negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
|
| 801 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 802 |
+
ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
|
| 803 |
+
output_type: Optional[str] = "pil",
|
| 804 |
+
return_dict: bool = True,
|
| 805 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 806 |
+
controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
|
| 807 |
+
control_guidance_start: Union[float, List[float]] = 0.0,
|
| 808 |
+
control_guidance_end: Union[float, List[float]] = 1.0,
|
| 809 |
+
original_size: Tuple[int, int] = None,
|
| 810 |
+
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 811 |
+
target_size: Tuple[int, int] = None,
|
| 812 |
+
negative_original_size: Optional[Tuple[int, int]] = None,
|
| 813 |
+
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 814 |
+
negative_target_size: Optional[Tuple[int, int]] = None,
|
| 815 |
+
callback_on_step_end: Optional[
|
| 816 |
+
Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
|
| 817 |
+
] = None,
|
| 818 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 819 |
+
**kwargs,
|
| 820 |
+
):
|
| 821 |
+
r"""
|
| 822 |
+
Function invoked when calling the pipeline for generation.
|
| 823 |
+
|
| 824 |
+
Args:
|
| 825 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 826 |
+
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
|
| 828 |
+
image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
|
| 829 |
+
`List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
|
| 830 |
+
The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
|
| 831 |
+
the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also
|
| 832 |
+
be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
|
| 833 |
+
and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in
|
| 834 |
+
init, images must be passed as a list such that each element of the list can be correctly batched for
|
| 835 |
+
input to a single controlnet.
|
| 836 |
+
height (`int`, *optional*, defaults to the size of image):
|
| 837 |
+
The height in pixels of the generated image. Anything below 512 pixels won't work well for
|
| 838 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 839 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 840 |
+
width (`int`, *optional*, defaults to the size of image):
|
| 841 |
+
The width in pixels of the generated image. Anything below 512 pixels won't work well for
|
| 842 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 843 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 844 |
+
strength (`float`, *optional*, defaults to 0.8):
|
| 845 |
+
Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
|
| 846 |
+
starting point and more noise is added the higher the `strength`. The number of denoising steps depends
|
| 847 |
+
on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
|
| 848 |
+
process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
|
| 849 |
+
essentially ignores `image`.
|
| 850 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 851 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 852 |
+
expense of slower inference.
|
| 853 |
+
            guidance_scale (`float`, *optional*, defaults to 5.0):
|
| 854 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 855 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 856 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 857 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 858 |
+
usually at the expense of lower image quality.
|
| 859 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 860 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 861 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 862 |
+
less than `1`).
|
| 863 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 864 |
+
The number of images to generate per prompt.
|
| 865 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 866 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 867 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 868 |
+
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
| 869 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 870 |
+
to make generation deterministic.
|
| 871 |
+
latents (`torch.Tensor`, *optional*):
|
| 872 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 873 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 874 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 875 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 876 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 877 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 878 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 879 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 880 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 881 |
+
argument.
|
| 882 |
+
pooled_prompt_embeds (`torch.Tensor`, *optional*):
|
| 883 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 884 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 885 |
+
negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
|
| 886 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 887 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 888 |
+
input argument.
|
| 889 |
+
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
|
| 890 |
+
ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
|
| 891 |
+
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
|
| 892 |
+
IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
|
| 893 |
+
contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
|
| 894 |
+
provided, embeddings are computed from the `ip_adapter_image` input argument.
|
| 895 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 896 |
+
The output format of the generate image. Choose between
|
| 897 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 898 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 899 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 900 |
+
plain tuple.
|
| 901 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 902 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 903 |
+
`self.processor` in
|
| 904 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 905 |
+
            controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
|
| 906 |
+
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
|
| 907 |
+
to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
|
| 908 |
+
corresponding scale as a list.
|
| 909 |
+
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
|
| 910 |
+
The percentage of total steps at which the controlnet starts applying.
|
| 911 |
+
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 912 |
+
The percentage of total steps at which the controlnet stops applying.
|
| 913 |
+
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 914 |
+
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
| 915 |
+
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
|
| 916 |
+
explained in section 2.2 of
|
| 917 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 918 |
+
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 919 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 920 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 921 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 922 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 923 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 924 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 925 |
+
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
|
| 926 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 927 |
+
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 928 |
+
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
|
| 929 |
+
micro-conditioning as explained in section 2.2 of
|
| 930 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 931 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 932 |
+
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 933 |
+
To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
|
| 934 |
+
micro-conditioning as explained in section 2.2 of
|
| 935 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 936 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 937 |
+
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 938 |
+
To negatively condition the generation process based on a target image resolution. It should be as same
|
| 939 |
+
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 940 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 941 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 942 |
+
callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
|
| 943 |
+
A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
|
| 944 |
+
each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
|
| 945 |
+
DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
|
| 946 |
+
list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
|
| 947 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 948 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 949 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 950 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 951 |
+
|
| 952 |
+
Examples:
|
| 953 |
+
|
| 954 |
+
Returns:
|
| 955 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
| 956 |
+
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`
|
| 957 |
+
containing the output images.
|
| 958 |
+
"""
|
| 959 |
+
|
| 960 |
+
callback = kwargs.pop("callback", None)
|
| 961 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 962 |
+
|
| 963 |
+
if callback is not None:
|
| 964 |
+
deprecate(
|
| 965 |
+
"callback",
|
| 966 |
+
"1.0.0",
|
| 967 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 968 |
+
)
|
| 969 |
+
if callback_steps is not None:
|
| 970 |
+
deprecate(
|
| 971 |
+
"callback_steps",
|
| 972 |
+
"1.0.0",
|
| 973 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 974 |
+
)
|
| 975 |
+
|
| 976 |
+
if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
|
| 977 |
+
callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
|
| 978 |
+
|
| 979 |
+
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
| 980 |
+
|
| 981 |
+
# align format for control guidance
|
| 982 |
+
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
| 983 |
+
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
| 984 |
+
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
| 985 |
+
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
| 986 |
+
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
| 987 |
+
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
|
| 988 |
+
control_guidance_start, control_guidance_end = (
|
| 989 |
+
mult * [control_guidance_start],
|
| 990 |
+
mult * [control_guidance_end],
|
| 991 |
+
)
|
| 992 |
+
|
| 993 |
+
# from IPython import embed; embed()
|
| 994 |
+
# 1. Check inputs. Raise error if not correct
|
| 995 |
+
self.check_inputs(
|
| 996 |
+
prompt,
|
| 997 |
+
image,
|
| 998 |
+
num_inference_steps,
|
| 999 |
+
callback_steps,
|
| 1000 |
+
negative_prompt,
|
| 1001 |
+
prompt_embeds,
|
| 1002 |
+
negative_prompt_embeds,
|
| 1003 |
+
pooled_prompt_embeds,
|
| 1004 |
+
negative_pooled_prompt_embeds,
|
| 1005 |
+
ip_adapter_image,
|
| 1006 |
+
ip_adapter_image_embeds,
|
| 1007 |
+
controlnet_conditioning_scale,
|
| 1008 |
+
control_guidance_start,
|
| 1009 |
+
control_guidance_end,
|
| 1010 |
+
callback_on_step_end_tensor_inputs,
|
| 1011 |
+
)
|
| 1012 |
+
|
| 1013 |
+
self._guidance_scale = guidance_scale
|
| 1014 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 1015 |
+
|
| 1016 |
+
# 2. Define call parameters
|
| 1017 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1018 |
+
batch_size = 1
|
| 1019 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1020 |
+
batch_size = len(prompt)
|
| 1021 |
+
else:
|
| 1022 |
+
batch_size = prompt_embeds.shape[0]
|
| 1023 |
+
|
| 1024 |
+
device = self._execution_device
|
| 1025 |
+
|
| 1026 |
+
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
| 1027 |
+
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
| 1028 |
+
|
| 1029 |
+
# 3.1. Encode input prompt
|
| 1030 |
+
text_encoder_lora_scale = (
|
| 1031 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 1032 |
+
)
|
| 1033 |
+
(
|
| 1034 |
+
prompt_embeds,
|
| 1035 |
+
negative_prompt_embeds,
|
| 1036 |
+
pooled_prompt_embeds,
|
| 1037 |
+
negative_pooled_prompt_embeds,
|
| 1038 |
+
) = self.encode_prompt(
|
| 1039 |
+
prompt,
|
| 1040 |
+
device,
|
| 1041 |
+
num_images_per_prompt,
|
| 1042 |
+
self.do_classifier_free_guidance,
|
| 1043 |
+
negative_prompt,
|
| 1044 |
+
prompt_embeds=prompt_embeds,
|
| 1045 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1046 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 1047 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 1048 |
+
lora_scale=text_encoder_lora_scale,
|
| 1049 |
+
)
|
| 1050 |
+
|
| 1051 |
+
# 3.2 Encode ip_adapter_image
|
| 1052 |
+
if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
|
| 1053 |
+
image_embeds = self.prepare_ip_adapter_image_embeds(
|
| 1054 |
+
ip_adapter_image,
|
| 1055 |
+
ip_adapter_image_embeds,
|
| 1056 |
+
device,
|
| 1057 |
+
batch_size * num_images_per_prompt,
|
| 1058 |
+
self.do_classifier_free_guidance,
|
| 1059 |
+
)
|
| 1060 |
+
|
| 1061 |
+
if isinstance(controlnet, ControlNetModel):
|
| 1062 |
+
image = self.prepare_control_image(
|
| 1063 |
+
image=image,
|
| 1064 |
+
width=width,
|
| 1065 |
+
height=height,
|
| 1066 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1067 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1068 |
+
device=device,
|
| 1069 |
+
dtype=controlnet.dtype,
|
| 1070 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 1071 |
+
guess_mode=guess_mode,
|
| 1072 |
+
)
|
| 1073 |
+
height, width = image.shape[-2:]
|
| 1074 |
+
elif isinstance(controlnet, MultiControlNetModel):
|
| 1075 |
+
control_images = []
|
| 1076 |
+
|
| 1077 |
+
for control_image_ in image:
|
| 1078 |
+
control_image_ = self.prepare_control_image(
|
| 1079 |
+
image=control_image_,
|
| 1080 |
+
width=width,
|
| 1081 |
+
height=height,
|
| 1082 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1083 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1084 |
+
device=device,
|
| 1085 |
+
dtype=controlnet.dtype,
|
| 1086 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 1087 |
+
guess_mode=guess_mode,
|
| 1088 |
+
)
|
| 1089 |
+
|
| 1090 |
+
control_images.append(control_image_)
|
| 1091 |
+
|
| 1092 |
+
image = control_images
|
| 1093 |
+
height, width = image[0].shape[-2:]
|
| 1094 |
+
else:
|
| 1095 |
+
assert False
|
| 1096 |
+
|
| 1097 |
+
# 4. Prepare timesteps
|
| 1098 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 1099 |
+
|
| 1100 |
+
timesteps = self.scheduler.timesteps
|
| 1101 |
+
|
| 1102 |
+
# 5. Prepare latent variables
|
| 1103 |
+
num_channels_latents = self.unet.config.in_channels
|
| 1104 |
+
latents = self.prepare_latents(
|
| 1105 |
+
batch_size * num_images_per_prompt,
|
| 1106 |
+
num_channels_latents,
|
| 1107 |
+
height,
|
| 1108 |
+
width,
|
| 1109 |
+
prompt_embeds.dtype,
|
| 1110 |
+
device,
|
| 1111 |
+
generator,
|
| 1112 |
+
latents,
|
| 1113 |
+
)
|
| 1114 |
+
|
| 1115 |
+
# 6.5 Optionally get Guidance Scale Embedding
|
| 1116 |
+
timestep_cond = None
|
| 1117 |
+
if self.unet.config.time_cond_proj_dim is not None:
|
| 1118 |
+
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
|
| 1119 |
+
timestep_cond = self.get_guidance_scale_embedding(
|
| 1120 |
+
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
|
| 1121 |
+
).to(device=device, dtype=latents.dtype)
|
| 1122 |
+
|
| 1123 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1124 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1125 |
+
|
| 1126 |
+
# 7.1 Create tensor stating which controlnets to keep
|
| 1127 |
+
controlnet_keep = []
|
| 1128 |
+
for i in range(len(timesteps)):
|
| 1129 |
+
keeps = [
|
| 1130 |
+
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
|
| 1131 |
+
for s, e in zip(control_guidance_start, control_guidance_end)
|
| 1132 |
+
]
|
| 1133 |
+
controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
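# --- Illustrative sketch (not part of the pipeline file above) ---
# controlnet_keep stores, per timestep, a 0/1 factor per ControlNet: 1.0 while the step falls inside
# [control_guidance_start, control_guidance_end], 0.0 outside. With 10 steps and a window of
# (0.0, 0.5), only the first half of the schedule receives ControlNet residuals:
num_steps, start, end = 10, 0.0, 0.5
keeps = [1.0 - float(i / num_steps < start or (i + 1) / num_steps > end) for i in range(num_steps)]
print(keeps)  # [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]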
|
| 1134 |
+
|
| 1135 |
+
# 7.2 Prepare added time ids & embeddings
|
| 1136 |
+
if isinstance(image, list):
|
| 1137 |
+
original_size = original_size or image[0].shape[-2:]
|
| 1138 |
+
else:
|
| 1139 |
+
original_size = original_size or image.shape[-2:]
|
| 1140 |
+
target_size = target_size or (height, width)
|
| 1141 |
+
|
| 1142 |
+
# 7. Prepare added time ids & embeddings
|
| 1143 |
+
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
|
| 1144 |
+
|
| 1145 |
+
add_text_embeds = pooled_prompt_embeds
|
| 1146 |
+
add_time_ids = self._get_add_time_ids(
|
| 1147 |
+
original_size,
|
| 1148 |
+
crops_coords_top_left,
|
| 1149 |
+
target_size,
|
| 1150 |
+
dtype=prompt_embeds.dtype,
|
| 1151 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1152 |
+
)
|
| 1153 |
+
|
| 1154 |
+
if negative_original_size is not None and negative_target_size is not None:
|
| 1155 |
+
negative_add_time_ids = self._get_add_time_ids(
|
| 1156 |
+
negative_original_size,
|
| 1157 |
+
negative_crops_coords_top_left,
|
| 1158 |
+
negative_target_size,
|
| 1159 |
+
dtype=prompt_embeds.dtype,
|
| 1160 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1161 |
+
)
|
| 1162 |
+
else:
|
| 1163 |
+
negative_add_time_ids = add_time_ids
|
| 1164 |
+
|
| 1165 |
+
if self.do_classifier_free_guidance:
|
| 1166 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1167 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 1168 |
+
add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
|
| 1169 |
+
|
| 1170 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 1171 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 1172 |
+
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 1173 |
+
|
| 1174 |
+
# patch diffusers controlnet instance forward, undo
|
| 1175 |
+
# after denoising loop
|
| 1176 |
+
|
| 1177 |
+
patched_cn_models = []
|
| 1178 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 1179 |
+
cn_models_to_patch = self.controlnet.nets
|
| 1180 |
+
else:
|
| 1181 |
+
cn_models_to_patch = [self.controlnet]
|
| 1182 |
+
|
| 1183 |
+
        for cn_model in cn_models_to_patch:
            cn_og_forward = cn_model.forward

            # Bind the current model and its original forward as default arguments so that each
            # patched forward keeps its own pair (avoids the late-binding closure issue when
            # several ControlNets are patched in this loop).
            def _cn_patch_forward(*args, _cn_model=cn_model, _cn_og_forward=cn_og_forward, **kwargs):
                encoder_hidden_states = kwargs.pop("encoder_hidden_states")
                if _cn_model.encoder_hid_proj is not None and _cn_model.config.encoder_hid_dim_type == "text_proj":
                    # Ensure encoder_hidden_states is on the same device as the projection layer
                    encoder_hidden_states = encoder_hidden_states.to(_cn_model.encoder_hid_proj.weight.device)
                    encoder_hidden_states = _cn_model.encoder_hid_proj(encoder_hidden_states)
                return _cn_og_forward(*args, encoder_hidden_states=encoder_hidden_states, **kwargs)

            cn_model.forward = _cn_patch_forward
            patched_cn_models.append((cn_model, cn_og_forward))
|
| 1197 |
+
|
| 1198 |
+
# 8. Denoising loop
|
| 1199 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 1200 |
+
try:
|
| 1201 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1202 |
+
for i, t in enumerate(timesteps):
|
| 1203 |
+
# expand the latents if we are doing classifier free guidance
|
| 1204 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 1205 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1206 |
+
|
| 1207 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
|
| 1208 |
+
|
| 1209 |
+
# controlnet(s) inference
|
| 1210 |
+
if guess_mode and self.do_classifier_free_guidance:
|
| 1211 |
+
# Infer ControlNet only for the conditional batch.
|
| 1212 |
+
control_model_input = latents
|
| 1213 |
+
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
| 1214 |
+
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
|
| 1215 |
+
controlnet_added_cond_kwargs = {
|
| 1216 |
+
"text_embeds": add_text_embeds.chunk(2)[1],
|
| 1217 |
+
"time_ids": add_time_ids.chunk(2)[1],
|
| 1218 |
+
}
|
| 1219 |
+
else:
|
| 1220 |
+
control_model_input = latent_model_input
|
| 1221 |
+
controlnet_prompt_embeds = prompt_embeds
|
| 1222 |
+
controlnet_added_cond_kwargs = added_cond_kwargs
|
| 1223 |
+
|
| 1224 |
+
if isinstance(controlnet_keep[i], list):
|
| 1225 |
+
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
| 1226 |
+
else:
|
| 1227 |
+
controlnet_cond_scale = controlnet_conditioning_scale
|
| 1228 |
+
if isinstance(controlnet_cond_scale, list):
|
| 1229 |
+
controlnet_cond_scale = controlnet_cond_scale[0]
|
| 1230 |
+
cond_scale = controlnet_cond_scale * controlnet_keep[i]
|
| 1231 |
+
|
| 1232 |
+
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
| 1233 |
+
control_model_input,
|
| 1234 |
+
t,
|
| 1235 |
+
encoder_hidden_states=controlnet_prompt_embeds,
|
| 1236 |
+
controlnet_cond=image,
|
| 1237 |
+
conditioning_scale=cond_scale,
|
| 1238 |
+
guess_mode=guess_mode,
|
| 1239 |
+
added_cond_kwargs=controlnet_added_cond_kwargs,
|
| 1240 |
+
return_dict=False,
|
| 1241 |
+
)
|
| 1242 |
+
|
| 1243 |
+
if guess_mode and self.do_classifier_free_guidance:
|
| 1244 |
+
# Inferred ControlNet only for the conditional batch.
|
| 1245 |
+
# To apply the output of ControlNet to both the unconditional and conditional batches,
|
| 1246 |
+
# add 0 to the unconditional batch to keep it unchanged.
|
| 1247 |
+
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
|
| 1248 |
+
mid_block_res_sample = torch.cat(
|
| 1249 |
+
[torch.zeros_like(mid_block_res_sample), mid_block_res_sample]
|
| 1250 |
+
)
|
| 1251 |
+
|
| 1252 |
+
if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
|
| 1253 |
+
added_cond_kwargs["image_embeds"] = image_embeds
|
| 1254 |
+
|
| 1255 |
+
# predict the noise residual
|
| 1256 |
+
noise_pred = self.unet(
|
| 1257 |
+
latent_model_input,
|
| 1258 |
+
t,
|
| 1259 |
+
encoder_hidden_states=prompt_embeds,
|
| 1260 |
+
timestep_cond=timestep_cond,
|
| 1261 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 1262 |
+
down_block_additional_residuals=down_block_res_samples,
|
| 1263 |
+
mid_block_additional_residual=mid_block_res_sample,
|
| 1264 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1265 |
+
return_dict=False,
|
| 1266 |
+
)[0]
|
| 1267 |
+
|
| 1268 |
+
# perform guidance
|
| 1269 |
+
if self.do_classifier_free_guidance:
|
| 1270 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1271 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
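# --- Illustrative sketch (not part of the pipeline file above) ---
# Classifier-free guidance combines the two halves of the batched prediction as
# eps = eps_uncond + w * (eps_text - eps_uncond), with w = guidance_scale.
# Toy numbers showing the arithmetic (values are made up):
import torch

eps_uncond, eps_text, w = torch.tensor([0.1]), torch.tensor([0.5]), 5.0
eps = eps_uncond + w * (eps_text - eps_uncond)
print(eps)  # tensor([2.1000])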
|
| 1272 |
+
|
| 1273 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1274 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 1275 |
+
|
| 1276 |
+
if callback_on_step_end is not None:
|
| 1277 |
+
callback_kwargs = {}
|
| 1278 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 1279 |
+
callback_kwargs[k] = locals()[k]
|
| 1280 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 1281 |
+
|
| 1282 |
+
latents = callback_outputs.pop("latents", latents)
|
| 1283 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 1284 |
+
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
| 1285 |
+
add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
|
| 1286 |
+
negative_pooled_prompt_embeds = callback_outputs.pop(
|
| 1287 |
+
"negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
|
| 1288 |
+
)
|
| 1289 |
+
add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
|
| 1290 |
+
negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)
|
| 1291 |
+
image = callback_outputs.pop("image", image)
|
| 1292 |
+
|
| 1293 |
+
# call the callback, if provided
|
| 1294 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1295 |
+
progress_bar.update()
|
| 1296 |
+
if callback is not None and i % callback_steps == 0:
|
| 1297 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1298 |
+
callback(step_idx, t, latents)
|
| 1299 |
+
|
| 1300 |
+
finally:
|
| 1301 |
+
for cn_and_og in patched_cn_models:
|
| 1302 |
+
cn_and_og[0].forward = cn_and_og[1]
|
| 1303 |
+
|
| 1304 |
+
# If we do sequential model offloading, let's offload unet and controlnet
|
| 1305 |
+
# manually for max memory savings
|
| 1306 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 1307 |
+
self.unet.to("cpu")
|
| 1308 |
+
self.controlnet.to("cpu")
|
| 1309 |
+
torch.cuda.empty_cache()
|
| 1310 |
+
torch.cuda.ipc_collect()
|
| 1311 |
+
|
| 1312 |
+
if not output_type == "latent":
|
| 1313 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1314 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1315 |
+
|
| 1316 |
+
if needs_upcasting:
|
| 1317 |
+
self.upcast_vae()
|
| 1318 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1319 |
+
|
| 1320 |
+
latents = latents / self.vae.config.scaling_factor
|
| 1321 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 1322 |
+
|
| 1323 |
+
# cast back to fp16 if needed
|
| 1324 |
+
if needs_upcasting:
|
| 1325 |
+
self.vae.to(dtype=torch.float16)
|
| 1326 |
+
else:
|
| 1327 |
+
image = latents
|
| 1328 |
+
return StableDiffusionXLPipelineOutput(images=image)
|
| 1329 |
+
|
| 1330 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1331 |
+
|
| 1332 |
+
# Offload all models
|
| 1333 |
+
self.maybe_free_model_hooks()
|
| 1334 |
+
|
| 1335 |
+
if not return_dict:
|
| 1336 |
+
return (image,)
|
| 1337 |
+
|
| 1338 |
+
return StableDiffusionXLPipelineOutput(images=image)
|
v0.36.0/pipeline_controlnet_xl_kolors_img2img.py
ADDED
|
@@ -0,0 +1,1540 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from transformers import (
    CLIPImageProcessor,
    CLIPVisionModelWithProjection,
)

from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import (
    FromSingleFileMixin,
    IPAdapterMixin,
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models import (
    AutoencoderKL,
    ControlNetModel,
    ImageProjection,
    MultiControlNetModel,
    UNet2DConditionModel,
)
from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    deprecate,
    is_invisible_watermark_available,
    logging,
    replace_example_docstring,
)
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor


if is_invisible_watermark_available():
    from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
|
| 63 |
+
Examples:
|
| 64 |
+
```py
|
| 65 |
+
>>> import torch
|
| 66 |
+
>>> import numpy as np
|
| 67 |
+
>>> from PIL import Image
|
| 68 |
+
|
| 69 |
+
>>> from transformers import DPTImageProcessor, DPTForDepthEstimation
|
| 70 |
+
>>> from diffusers import ControlNetModel, KolorsControlNetImg2ImgPipeline
|
| 71 |
+
>>> from diffusers.utils import load_image
|
| 72 |
+
|
| 73 |
+
>>> depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda")
|
| 74 |
+
>>> feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
|
| 75 |
+
>>> controlnet = ControlNetModel.from_pretrained(
|
| 76 |
+
... "Kwai-Kolors/Kolors-ControlNet-Depth",
|
| 77 |
+
... use_safetensors=True,
|
| 78 |
+
... torch_dtype=torch.float16
|
| 79 |
+
... )
|
| 80 |
+
>>> pipe = KolorsControlNetImg2ImgPipeline.from_pretrained(
|
| 81 |
+
... "Kwai-Kolors/Kolors-diffusers",
|
| 82 |
+
... controlnet=controlnet,
|
| 83 |
+
... variant="fp16",
|
| 84 |
+
... use_safetensors=True,
|
| 85 |
+
... torch_dtype=torch.float16
|
| 86 |
+
... )
|
| 87 |
+
>>> pipe.enable_model_cpu_offload()
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
>>> def get_depth_map(image):
|
| 91 |
+
... image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda")
|
| 92 |
+
...
|
| 93 |
+
... with torch.no_grad(), torch.autocast("cuda"):
|
| 94 |
+
... depth_map = depth_estimator(image).predicted_depth
|
| 95 |
+
...
|
| 96 |
+
... depth_map = torch.nn.functional.interpolate(
|
| 97 |
+
... depth_map.unsqueeze(1),
|
| 98 |
+
... size=(1024, 1024),
|
| 99 |
+
... mode="bicubic",
|
| 100 |
+
... align_corners=False,
|
| 101 |
+
... )
|
| 102 |
+
... depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)
|
| 103 |
+
... depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)
|
| 104 |
+
... depth_map = (depth_map - depth_min) / (depth_max - depth_min)
|
| 105 |
+
... image = torch.cat([depth_map] * 3, dim=1)
|
| 106 |
+
... image = image.permute(0, 2, 3, 1).cpu().numpy()[0]
|
| 107 |
+
... image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))
|
| 108 |
+
... return image
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
>>> prompt = "A robot, 4k photo"
|
| 112 |
+
>>> image = load_image(
|
| 113 |
+
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
|
| 114 |
+
... "/kandinsky/cat.png"
|
| 115 |
+
... ).resize((1024, 1024))
|
| 116 |
+
>>> controlnet_conditioning_scale = 0.5 # recommended for good generalization
|
| 117 |
+
>>> depth_image = get_depth_map(image)
|
| 118 |
+
|
| 119 |
+
>>> images = pipe(
|
| 120 |
+
... prompt,
|
| 121 |
+
... image=image,
|
| 122 |
+
... control_image=depth_image,
|
| 123 |
+
... strength=0.80,
|
| 124 |
+
... num_inference_steps=50,
|
| 125 |
+
... controlnet_conditioning_scale=controlnet_conditioning_scale,
|
| 126 |
+
... ).images
|
| 127 |
+
>>> images[0].save(f"robot_cat.png")
|
| 128 |
+
```
|
| 129 |
+
"""
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
class KolorsControlNetImg2ImgPipeline(
    DiffusionPipeline,
    StableDiffusionMixin,
    StableDiffusionXLLoraLoaderMixin,
    FromSingleFileMixin,
    IPAdapterMixin,
):
r"""
|
| 154 |
+
Pipeline for image-to-image generation using Kolors with ControlNet guidance.
|
| 155 |
+
|
| 156 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 157 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 158 |
+
|
| 159 |
+
The pipeline also inherits the following loading methods:
|
| 160 |
+
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.safetensors` files
|
| 161 |
+
- [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 162 |
+
- [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 163 |
+
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
|
| 164 |
+
|
| 165 |
+
Args:
|
| 166 |
+
vae ([`AutoencoderKL`]):
|
| 167 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 168 |
+
text_encoder ([`ChatGLMModel`]):
|
| 169 |
+
Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b).
|
| 170 |
+
tokenizer (`ChatGLMTokenizer`):
|
| 171 |
+
Tokenizer of class
|
| 172 |
+
[ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py).
|
| 173 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 174 |
+
controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
|
| 175 |
+
            Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
            as a list, the outputs from each ControlNet are added together to create one combined additional
            conditioning.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
            Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the
            config of `stabilityai/stable-diffusion-xl-refiner-1-0`.
        force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
            Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
            `Kwai-Kolors/Kolors-diffusers`.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
    """

    model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"

    _optional_components = [
        "tokenizer",
        "text_encoder",
        "feature_extractor",
        "image_encoder",
    ]
    _callback_tensor_inputs = [
        "latents",
        "prompt_embeds",
        "negative_prompt_embeds",
        "add_text_embeds",
        "add_time_ids",
        "negative_pooled_prompt_embeds",
        "add_neg_time_ids",
        "control_image",
    ]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: ChatGLMModel,
        tokenizer: ChatGLMTokenizer,
        unet: UNet2DConditionModel,
        controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
        scheduler: KarrasDiffusionSchedulers,
        requires_aesthetics_score: bool = False,
        force_zeros_for_empty_prompt: bool = True,
        feature_extractor: CLIPImageProcessor = None,
        image_encoder: CLIPVisionModelWithProjection = None,
        add_watermarker: Optional[bool] = None,
    ):
        super().__init__()

        if isinstance(controlnet, (list, tuple)):
            controlnet = MultiControlNetModel(controlnet)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            controlnet=controlnet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
        self.control_image_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
        )

        if add_watermarker:
            self.watermark = StableDiffusionXLWatermarker()
        else:
            self.watermark = None

        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
        self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)

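    # Note on the constructor above (illustrative figures, assuming the standard 4-block SDXL/Kolors VAE):
    # `vae_scale_factor = 2 ** (len(block_out_channels) - 1)` evaluates to 2 ** 3 = 8, so a 1024x1024 RGB
    # input corresponds to 128x128 latents handled by `image_processor` and `control_image_processor`.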
    def encode_prompt(
        self,
        prompt,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
        """
        device = device or self._execution_device

        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
            self._lora_scale = lora_scale

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Define tokenizers and text encoders
        tokenizers = [self.tokenizer]
        text_encoders = [self.text_encoder]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            prompt_embeds_list = []
            for tokenizer, text_encoder in zip(tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    prompt = self.maybe_convert_prompt(prompt, tokenizer)

                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=256,
                    truncation=True,
                    return_tensors="pt",
                ).to(self._execution_device)
                output = text_encoder(
                    input_ids=text_inputs["input_ids"],
                    attention_mask=text_inputs["attention_mask"],
                    position_ids=text_inputs["position_ids"],
                    output_hidden_states=True,
                )
                prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
                pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone()  # [batch_size, 4096]
                bs_embed, seq_len, _ = prompt_embeds.shape
                prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
                prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

                prompt_embeds_list.append(prompt_embeds)

            # prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
            prompt_embeds = prompt_embeds_list[0]

        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            # negative_prompt = negative_prompt or ""
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            negative_prompt_embeds_list = []
            for tokenizer, text_encoder in zip(tokenizers, text_encoders):
                # textual inversion: process multi-vector tokens if necessary
                if isinstance(self, TextualInversionLoaderMixin):
                    uncond_tokens = self.maybe_convert_prompt(uncond_tokens, tokenizer)

                max_length = prompt_embeds.shape[1]
                uncond_input = tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                ).to(self._execution_device)
                output = text_encoder(
                    input_ids=uncond_input["input_ids"],
                    attention_mask=uncond_input["attention_mask"],
                    position_ids=uncond_input["position_ids"],
                    output_hidden_states=True,
                )
                negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
                negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone()  # [batch_size, 4096]

                if do_classifier_free_guidance:
                    # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                    seq_len = negative_prompt_embeds.shape[1]

                    negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)

                    negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                    negative_prompt_embeds = negative_prompt_embeds.view(
                        batch_size * num_images_per_prompt, seq_len, -1
                    )

                    # For classifier free guidance, we need to do two forward passes.
                    # Here we concatenate the unconditional and text embeddings into a single batch
                    # to avoid doing two forward passes

                negative_prompt_embeds_list.append(negative_prompt_embeds)

            # negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
            negative_prompt_embeds = negative_prompt_embeds_list[0]

        bs_embed = pooled_prompt_embeds.shape[0]
        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )
        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )

        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

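    # Note on `encode_prompt` above: the ChatGLM encoder apparently emits hidden states with the sequence
    # dimension first (hence the `.permute(1, 0, 2)` to obtain [batch, seq_len, hidden]), and the pooled
    # embedding is taken from the last token of the final hidden state, giving a 4096-dim vector per prompt.
    # Only the single ChatGLM encoder is used, so `prompt_embeds_list` always holds exactly one entry.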
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
        dtype = next(self.image_encoder.parameters()).dtype

        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor(image, return_tensors="pt").pixel_values

        image = image.to(device=device, dtype=dtype)
        if output_hidden_states:
            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_enc_hidden_states = self.image_encoder(
                torch.zeros_like(image), output_hidden_states=True
            ).hidden_states[-2]
            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
                num_images_per_prompt, dim=0
            )
            return image_enc_hidden_states, uncond_image_enc_hidden_states
        else:
            image_embeds = self.image_encoder(image).image_embeds
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_embeds = torch.zeros_like(image_embeds)

            return image_embeds, uncond_image_embeds

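    # Note on `encode_image` above: when `output_hidden_states` is truthy the penultimate hidden states are
    # returned (with an all-zeros image as the unconditional branch); otherwise the projected `image_embeds`
    # are used and the unconditional embedding is simply a zero tensor. Which branch applies is decided per
    # IP-Adapter projection layer in `prepare_ip_adapter_image_embeds` below.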
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
    def prepare_ip_adapter_image_embeds(
        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
    ):
        image_embeds = []
        if do_classifier_free_guidance:
            negative_image_embeds = []
        if ip_adapter_image_embeds is None:
            if not isinstance(ip_adapter_image, list):
                ip_adapter_image = [ip_adapter_image]

            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
                raise ValueError(
                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
                )

            for single_ip_adapter_image, image_proj_layer in zip(
                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
            ):
                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
                single_image_embeds, single_negative_image_embeds = self.encode_image(
                    single_ip_adapter_image, device, 1, output_hidden_state
                )

                image_embeds.append(single_image_embeds[None, :])
                if do_classifier_free_guidance:
                    negative_image_embeds.append(single_negative_image_embeds[None, :])
        else:
            for single_image_embeds in ip_adapter_image_embeds:
                if do_classifier_free_guidance:
                    single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
                    negative_image_embeds.append(single_negative_image_embeds)
                image_embeds.append(single_image_embeds)

        ip_adapter_image_embeds = []
        for i, single_image_embeds in enumerate(image_embeds):
            single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
            if do_classifier_free_guidance:
                single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
                single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)

            single_image_embeds = single_image_embeds.to(device=device)
            ip_adapter_image_embeds.append(single_image_embeds)

        return ip_adapter_image_embeds

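    # Note on `prepare_ip_adapter_image_embeds` above: with classifier-free guidance enabled, each returned
    # tensor is the concatenation [negative_embeds, positive_embeds] along the batch dimension, repeated
    # `num_images_per_prompt` times, matching the `chunk(2)` layout expected when pre-computed embeds are
    # passed in via `ip_adapter_image_embeds`.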
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for others.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        image,
        strength,
        num_inference_steps,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        pooled_prompt_embeds=None,
        negative_pooled_prompt_embeds=None,
        ip_adapter_image=None,
        ip_adapter_image_embeds=None,
        controlnet_conditioning_scale=1.0,
        control_guidance_start=0.0,
        control_guidance_end=1.0,
        callback_on_step_end_tensor_inputs=None,
    ):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
        if num_inference_steps is None:
            raise ValueError("`num_inference_steps` cannot be None.")
        elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
            raise ValueError(
                f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
                f" {type(num_inference_steps)}."
            )

        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
            )

        if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )

        # `prompt` needs more sophisticated handling when there are multiple
        # conditionings.
        if isinstance(self.controlnet, MultiControlNetModel):
            if isinstance(prompt, list):
                logger.warning(
                    f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
                    " prompts. The conditionings will be fixed across the prompts."
                )

        # Check `image`
        is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
            self.controlnet, torch._dynamo.eval_frame.OptimizedModule
        )

        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            self.check_image(image, prompt, prompt_embeds)
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if not isinstance(image, list):
                raise TypeError("For multiple controlnets: `image` must be type `list`")

            # When `image` is a nested list:
            # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
            elif any(isinstance(i, list) for i in image):
                raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
            elif len(image) != len(self.controlnet.nets):
                raise ValueError(
                    f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
                )

            for image_ in image:
                self.check_image(image_, prompt, prompt_embeds)
        else:
            assert False

        # Check `controlnet_conditioning_scale`
        if (
            isinstance(self.controlnet, ControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, ControlNetModel)
        ):
            if not isinstance(controlnet_conditioning_scale, float):
                raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
        elif (
            isinstance(self.controlnet, MultiControlNetModel)
            or is_compiled
            and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
        ):
            if isinstance(controlnet_conditioning_scale, list):
                if any(isinstance(i, list) for i in controlnet_conditioning_scale):
                    raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
            elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
                self.controlnet.nets
            ):
                raise ValueError(
                    "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
                    " the same length as the number of controlnets"
                )
        else:
            assert False

        if not isinstance(control_guidance_start, (tuple, list)):
            control_guidance_start = [control_guidance_start]

        if not isinstance(control_guidance_end, (tuple, list)):
            control_guidance_end = [control_guidance_end]

        if len(control_guidance_start) != len(control_guidance_end):
            raise ValueError(
                f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
            )

        if isinstance(self.controlnet, MultiControlNetModel):
            if len(control_guidance_start) != len(self.controlnet.nets):
                raise ValueError(
                    f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
                )

        for start, end in zip(control_guidance_start, control_guidance_end):
            if start >= end:
                raise ValueError(
                    f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
                )
            if start < 0.0:
                raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
            if end > 1.0:
                raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")

        if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
            raise ValueError(
                "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
            )

        if ip_adapter_image_embeds is not None:
            if not isinstance(ip_adapter_image_embeds, list):
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
                )
            elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
                )

    # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image
    def check_image(self, image, prompt, prompt_embeds):
        image_is_pil = isinstance(image, PIL.Image.Image)
        image_is_tensor = isinstance(image, torch.Tensor)
        image_is_np = isinstance(image, np.ndarray)
        image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
        image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
        image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)

        if (
            not image_is_pil
            and not image_is_tensor
            and not image_is_np
            and not image_is_pil_list
            and not image_is_tensor_list
            and not image_is_np_list
        ):
            raise TypeError(
                f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
            )

        if image_is_pil:
            image_batch_size = 1
        else:
            image_batch_size = len(image)

        if prompt is not None and isinstance(prompt, str):
            prompt_batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            prompt_batch_size = len(prompt)
        elif prompt_embeds is not None:
            prompt_batch_size = prompt_embeds.shape[0]

        if image_batch_size != 1 and image_batch_size != prompt_batch_size:
            raise ValueError(
                f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
            )

    # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image
    def prepare_control_image(
        self,
        image,
        width,
        height,
        batch_size,
        num_images_per_prompt,
        device,
        dtype,
        do_classifier_free_guidance=False,
        guess_mode=False,
    ):
        image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
        image_batch_size = image.shape[0]

        if image_batch_size == 1:
            repeat_by = batch_size
        else:
            # image batch size is the same as prompt batch size
            repeat_by = num_images_per_prompt

        image = image.repeat_interleave(repeat_by, dim=0)

        image = image.to(device=device, dtype=dtype)

        if do_classifier_free_guidance and not guess_mode:
            image = torch.cat([image] * 2)

        return image

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
        if hasattr(self.scheduler, "set_begin_index"):
            self.scheduler.set_begin_index(t_start * self.scheduler.order)

        return timesteps, num_inference_steps - t_start

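    # Note on `get_timesteps` above (illustrative arithmetic, assuming a first-order scheduler with
    # `order == 1`): with num_inference_steps=50 and strength=0.8, init_timestep = min(int(50 * 0.8), 50) = 40
    # and t_start = 50 - 40 = 10, so only the last 40 scheduler timesteps are used and 40 denoising steps run.
    # strength=1.0 keeps all timesteps, which is why `__call__` falls back to pure text-to-image latents then.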
    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents
    def prepare_latents(
        self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
    ):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        # Offload text encoder if `enable_model_cpu_offload` was enabled
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image

        else:
            # make sure the VAE is in float32 mode, as it overflows in float16
            if self.vae.config.force_upcast:
                image = image.float()
                self.vae.to(dtype=torch.float32)

            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            elif isinstance(generator, list):
                init_latents = [
                    retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
                    for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = retrieve_latents(self.vae.encode(image), generator=generator)

            if self.vae.config.force_upcast:
                self.vae.to(dtype)

            init_latents = init_latents.to(dtype)

            init_latents = self.vae.config.scaling_factor * init_latents

        if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
            # expand init_latents for batch_size
            additional_image_per_prompt = batch_size // init_latents.shape[0]
            init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
        elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
            raise ValueError(
                f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
            )
        else:
            init_latents = torch.cat([init_latents], dim=0)

        if add_noise:
            shape = init_latents.shape
            noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            # get latents
            init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents_t2i(
        self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None
    ):
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
    def _get_add_time_ids(
        self,
        original_size,
        crops_coords_top_left,
        target_size,
        aesthetic_score,
        negative_aesthetic_score,
        negative_original_size,
        negative_crops_coords_top_left,
        negative_target_size,
        dtype,
        text_encoder_projection_dim=None,
    ):
        if self.config.requires_aesthetics_score:
            add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
            add_neg_time_ids = list(
                negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
            )
        else:
            add_time_ids = list(original_size + crops_coords_top_left + target_size)
            add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)

        passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + 4096
        expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features

        if (
            expected_add_embed_dim > passed_add_embed_dim
            and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
        ):
            raise ValueError(
                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
            )
        elif (
            expected_add_embed_dim < passed_add_embed_dim
            and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
        ):
            raise ValueError(
                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
            )
        elif expected_add_embed_dim != passed_add_embed_dim:
            raise ValueError(
                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder.config.projection_dim`."
            )

        add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
        add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)

        return add_time_ids, add_neg_time_ids

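    # Note on `_get_add_time_ids` above (illustrative arithmetic): without aesthetics scores the ids are
    # original_size + crop coords + target_size = 6 integers, and the expected embedding width is
    # addition_time_embed_dim * 6 plus the hard-coded 4096 for the ChatGLM pooled projection. Assuming
    # addition_time_embed_dim = 256 (the usual SDXL value), that gives 256 * 6 + 4096 = 5632, which must
    # match `unet.add_embedding.linear_1.in_features`.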
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
    def upcast_vae(self):
        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
        self.vae.to(dtype=torch.float32)

    @property
    def guidance_scale(self):
        return self._guidance_scale

    # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: PipelineImageInput = None,
        control_image: PipelineImageInput = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        strength: float = 0.8,
        num_inference_steps: int = 50,
        guidance_scale: float = 5.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        guess_mode: bool = False,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        pooled_prompt_embeds: Optional[torch.Tensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
        control_guidance_start: Union[float, List[float]] = 0.0,
        control_guidance_end: Union[float, List[float]] = 1.0,
        original_size: Tuple[int, int] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        target_size: Tuple[int, int] = None,
        negative_original_size: Optional[Tuple[int, int]] = None,
        negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
        negative_target_size: Optional[Tuple[int, int]] = None,
        aesthetic_score: float = 6.0,
        negative_aesthetic_score: float = 2.5,
        callback_on_step_end: Optional[
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
                `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
                The initial image to be used as the starting point for the image generation process. Can also accept
                image latents as `image`; if latents are passed directly, they will not be encoded again.
            control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
                `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
                The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
                the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also
                be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
                and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in
                init, images must be passed as a list such that each element of the list can be correctly batched for
                input to a single controlnet.
            height (`int`, *optional*, defaults to the size of control_image):
                The height in pixels of the generated image. Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            width (`int`, *optional*, defaults to the size of control_image):
                The width in pixels of the generated image. Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            strength (`float`, *optional*, defaults to 0.8):
                Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
                starting point and more noise is added the higher the `strength`. The number of denoising steps depends
                on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
                process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
                essentially ignores `image`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
            ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
                Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
                IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
                contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
                provided, embeddings are computed from the `ip_adapter_image` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
                The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
                to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
                corresponding scale as a list.
            control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
                The percentage of total steps at which the controlnet starts applying.
            control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
                The percentage of total steps at which the controlnet stops applying.
            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
                explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                For most cases, `target_size` should be set to the desired height and width of the generated image. If
                not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
                section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a specific image resolution. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a target image resolution. It should be the
                same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2
                of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            aesthetic_score (`float`, *optional*, defaults to 6.0):
                Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
                Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
                Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
                simulate an aesthetic score of the generated image by influencing the negative text condition.
            callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
                A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
                each denoising step during inference, with the following arguments: `callback_on_step_end(self:
                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`
            containing the output images.
        """

callback = kwargs.pop("callback", None)
|
| 1147 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 1148 |
+
|
| 1149 |
+
if callback is not None:
|
| 1150 |
+
deprecate(
|
| 1151 |
+
"callback",
|
| 1152 |
+
"1.0.0",
|
| 1153 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 1154 |
+
)
|
| 1155 |
+
if callback_steps is not None:
|
| 1156 |
+
deprecate(
|
| 1157 |
+
"callback_steps",
|
| 1158 |
+
"1.0.0",
|
| 1159 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 1160 |
+
)
|
| 1161 |
+
|
| 1162 |
+
if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
|
| 1163 |
+
callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
|
| 1164 |
+
|
| 1165 |
+
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
| 1166 |
+
|
| 1167 |
+
# align format for control guidance
|
| 1168 |
+
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
| 1169 |
+
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
| 1170 |
+
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
| 1171 |
+
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
| 1172 |
+
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
| 1173 |
+
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
|
| 1174 |
+
control_guidance_start, control_guidance_end = (
|
| 1175 |
+
mult * [control_guidance_start],
|
| 1176 |
+
mult * [control_guidance_end],
|
| 1177 |
+
)
|
| 1178 |
+
|
| 1179 |
+
# from IPython import embed; embed()
|
| 1180 |
+
# 1. Check inputs. Raise error if not correct
|
| 1181 |
+
self.check_inputs(
|
| 1182 |
+
prompt,
|
| 1183 |
+
control_image,
|
| 1184 |
+
strength,
|
| 1185 |
+
num_inference_steps,
|
| 1186 |
+
callback_steps,
|
| 1187 |
+
negative_prompt,
|
| 1188 |
+
prompt_embeds,
|
| 1189 |
+
negative_prompt_embeds,
|
| 1190 |
+
pooled_prompt_embeds,
|
| 1191 |
+
negative_pooled_prompt_embeds,
|
| 1192 |
+
ip_adapter_image,
|
| 1193 |
+
ip_adapter_image_embeds,
|
| 1194 |
+
controlnet_conditioning_scale,
|
| 1195 |
+
control_guidance_start,
|
| 1196 |
+
control_guidance_end,
|
| 1197 |
+
callback_on_step_end_tensor_inputs,
|
| 1198 |
+
)
|
| 1199 |
+
|
| 1200 |
+
self._guidance_scale = guidance_scale
|
| 1201 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 1202 |
+
|
| 1203 |
+
# 2. Define call parameters
|
| 1204 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1205 |
+
batch_size = 1
|
| 1206 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1207 |
+
batch_size = len(prompt)
|
| 1208 |
+
else:
|
| 1209 |
+
batch_size = prompt_embeds.shape[0]
|
| 1210 |
+
|
| 1211 |
+
device = self._execution_device
|
| 1212 |
+
|
| 1213 |
+
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
| 1214 |
+
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
| 1215 |
+
|
| 1216 |
+
# 3.1. Encode input prompt
|
| 1217 |
+
text_encoder_lora_scale = (
|
| 1218 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 1219 |
+
)
|
| 1220 |
+
(
|
| 1221 |
+
prompt_embeds,
|
| 1222 |
+
negative_prompt_embeds,
|
| 1223 |
+
pooled_prompt_embeds,
|
| 1224 |
+
negative_pooled_prompt_embeds,
|
| 1225 |
+
) = self.encode_prompt(
|
| 1226 |
+
prompt,
|
| 1227 |
+
device,
|
| 1228 |
+
num_images_per_prompt,
|
| 1229 |
+
self.do_classifier_free_guidance,
|
| 1230 |
+
negative_prompt,
|
| 1231 |
+
prompt_embeds=prompt_embeds,
|
| 1232 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1233 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 1234 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 1235 |
+
lora_scale=text_encoder_lora_scale,
|
| 1236 |
+
)
|
| 1237 |
+
|
| 1238 |
+
# 3.2 Encode ip_adapter_image
|
| 1239 |
+
if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
|
| 1240 |
+
image_embeds = self.prepare_ip_adapter_image_embeds(
|
| 1241 |
+
ip_adapter_image,
|
| 1242 |
+
ip_adapter_image_embeds,
|
| 1243 |
+
device,
|
| 1244 |
+
batch_size * num_images_per_prompt,
|
| 1245 |
+
self.do_classifier_free_guidance,
|
| 1246 |
+
)
|
| 1247 |
+
|
| 1248 |
+
# 4. Prepare image and controlnet_conditioning_image
|
| 1249 |
+
image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
|
| 1250 |
+
|
| 1251 |
+
if isinstance(controlnet, ControlNetModel):
|
| 1252 |
+
control_image = self.prepare_control_image(
|
| 1253 |
+
image=control_image,
|
| 1254 |
+
width=width,
|
| 1255 |
+
height=height,
|
| 1256 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1257 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1258 |
+
device=device,
|
| 1259 |
+
dtype=controlnet.dtype,
|
| 1260 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 1261 |
+
guess_mode=guess_mode,
|
| 1262 |
+
)
|
| 1263 |
+
height, width = control_image.shape[-2:]
|
| 1264 |
+
elif isinstance(controlnet, MultiControlNetModel):
|
| 1265 |
+
control_images = []
|
| 1266 |
+
|
| 1267 |
+
for control_image_ in control_image:
|
| 1268 |
+
control_image_ = self.prepare_control_image(
|
| 1269 |
+
image=control_image_,
|
| 1270 |
+
width=width,
|
| 1271 |
+
height=height,
|
| 1272 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1273 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1274 |
+
device=device,
|
| 1275 |
+
dtype=controlnet.dtype,
|
| 1276 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 1277 |
+
guess_mode=guess_mode,
|
| 1278 |
+
)
|
| 1279 |
+
|
| 1280 |
+
control_images.append(control_image_)
|
| 1281 |
+
|
| 1282 |
+
control_image = control_images
|
| 1283 |
+
height, width = control_image[0].shape[-2:]
|
| 1284 |
+
else:
|
| 1285 |
+
assert False
|
| 1286 |
+
|
| 1287 |
+
# 5. Prepare timesteps
|
| 1288 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 1289 |
+
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
|
| 1290 |
+
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
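# The first timestep kept after trimming by `strength` is the noise level applied to the encoded init image below.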
|
| 1291 |
+
self._num_timesteps = len(timesteps)
|
| 1292 |
+
|
| 1293 |
+
# 6. Prepare latent variables
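# With strength >= 1.0 the init image is ignored and pure-noise latents are sampled (text-to-image start); otherwise the encoded image latents are noised to `latent_timestep`.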
|
| 1294 |
+
|
| 1295 |
+
num_channels_latents = self.unet.config.in_channels
|
| 1296 |
+
if latents is None:
|
| 1297 |
+
if strength >= 1.0:
|
| 1298 |
+
latents = self.prepare_latents_t2i(
|
| 1299 |
+
batch_size * num_images_per_prompt,
|
| 1300 |
+
num_channels_latents,
|
| 1301 |
+
height,
|
| 1302 |
+
width,
|
| 1303 |
+
prompt_embeds.dtype,
|
| 1304 |
+
device,
|
| 1305 |
+
generator,
|
| 1306 |
+
latents,
|
| 1307 |
+
)
|
| 1308 |
+
else:
|
| 1309 |
+
latents = self.prepare_latents(
|
| 1310 |
+
image,
|
| 1311 |
+
latent_timestep,
|
| 1312 |
+
batch_size,
|
| 1313 |
+
num_images_per_prompt,
|
| 1314 |
+
prompt_embeds.dtype,
|
| 1315 |
+
device,
|
| 1316 |
+
generator,
|
| 1317 |
+
True,
|
| 1318 |
+
)
|
| 1319 |
+
|
| 1320 |
+
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1321 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1322 |
+
|
| 1323 |
+
# 7.1 Create tensor stating which controlnets to keep
|
| 1324 |
+
controlnet_keep = []
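# Each entry is 1.0 while the current step fraction lies inside [control_guidance_start, control_guidance_end] for the corresponding ControlNet and 0.0 outside it, gating the ControlNet residuals per step.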
|
| 1325 |
+
for i in range(len(timesteps)):
|
| 1326 |
+
keeps = [
|
| 1327 |
+
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
|
| 1328 |
+
for s, e in zip(control_guidance_start, control_guidance_end)
|
| 1329 |
+
]
|
| 1330 |
+
controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
|
| 1331 |
+
|
| 1332 |
+
# 7.2 Prepare added time ids & embeddings
|
| 1333 |
+
if isinstance(control_image, list):
|
| 1334 |
+
original_size = original_size or control_image[0].shape[-2:]
|
| 1335 |
+
else:
|
| 1336 |
+
original_size = original_size or control_image.shape[-2:]
|
| 1337 |
+
target_size = target_size or (height, width)
|
| 1338 |
+
|
| 1339 |
+
# 7. Prepare added time ids & embeddings
|
| 1340 |
+
if negative_original_size is None:
|
| 1341 |
+
negative_original_size = original_size
|
| 1342 |
+
if negative_target_size is None:
|
| 1343 |
+
negative_target_size = target_size
|
| 1344 |
+
|
| 1345 |
+
add_text_embeds = pooled_prompt_embeds
|
| 1346 |
+
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
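# The projection dim for the SDXL-style added time ids is taken from the pooled text embedding (4096-dimensional for Kolors' ChatGLM text encoder) rather than from a CLIP text-projection layer.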
|
| 1347 |
+
|
| 1348 |
+
add_time_ids, add_neg_time_ids = self._get_add_time_ids(
|
| 1349 |
+
original_size,
|
| 1350 |
+
crops_coords_top_left,
|
| 1351 |
+
target_size,
|
| 1352 |
+
aesthetic_score,
|
| 1353 |
+
negative_aesthetic_score,
|
| 1354 |
+
negative_original_size,
|
| 1355 |
+
negative_crops_coords_top_left,
|
| 1356 |
+
negative_target_size,
|
| 1357 |
+
dtype=prompt_embeds.dtype,
|
| 1358 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1359 |
+
)
|
| 1360 |
+
|
| 1361 |
+
if self.do_classifier_free_guidance:
|
| 1362 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1363 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 1364 |
+
add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
|
| 1365 |
+
add_neg_time_ids = torch.cat([add_neg_time_ids, add_neg_time_ids], dim=0)
|
| 1366 |
+
|
| 1367 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 1368 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 1369 |
+
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 1370 |
+
add_neg_time_ids = add_neg_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 1371 |
+
|
| 1372 |
+
# patch diffusers controlnet instance forward, undo
|
| 1373 |
+
# after denoising loop
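# The wrapper defined below projects `encoder_hidden_states` through the ControlNet's `encoder_hid_proj` (when `encoder_hid_dim_type == "text_proj"`) before delegating to the stored original forward; the original forwards are restored in the `finally` block after the loop.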
|
| 1374 |
+
|
| 1375 |
+
patched_cn_models = []
|
| 1376 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 1377 |
+
cn_models_to_patch = self.controlnet.nets
|
| 1378 |
+
else:
|
| 1379 |
+
cn_models_to_patch = [self.controlnet]
|
| 1380 |
+
|
| 1381 |
+
for cn_model in cn_models_to_patch:
|
| 1382 |
+
cn_og_forward = cn_model.forward
|
| 1383 |
+
|
| 1384 |
+
# Bind the loop variables as default arguments so each patched forward keeps its own model and
# original forward (avoids Python's late-binding closures when several ControlNets are patched).
def _cn_patch_forward(*args, cn_model=cn_model, cn_og_forward=cn_og_forward, **kwargs):
|
| 1385 |
+
encoder_hidden_states = kwargs["encoder_hidden_states"]
|
| 1386 |
+
if cn_model.encoder_hid_proj is not None and cn_model.config.encoder_hid_dim_type == "text_proj":
|
| 1387 |
+
# Ensure encoder_hidden_states is on the same device as the projection layer
|
| 1388 |
+
encoder_hidden_states = encoder_hidden_states.to(cn_model.encoder_hid_proj.weight.device)
|
| 1389 |
+
encoder_hidden_states = cn_model.encoder_hid_proj(encoder_hidden_states)
|
| 1390 |
+
kwargs.pop("encoder_hidden_states")
|
| 1391 |
+
return cn_og_forward(*args, encoder_hidden_states=encoder_hidden_states, **kwargs)
|
| 1392 |
+
|
| 1393 |
+
cn_model.forward = _cn_patch_forward
|
| 1394 |
+
patched_cn_models.append((cn_model, cn_og_forward))
|
| 1395 |
+
|
| 1396 |
+
# 8. Denoising loop
|
| 1397 |
+
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
| 1398 |
+
|
| 1399 |
+
try:
|
| 1400 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1401 |
+
for i, t in enumerate(timesteps):
|
| 1402 |
+
# expand the latents if we are doing classifier free guidance
|
| 1403 |
+
latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
|
| 1404 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1405 |
+
|
| 1406 |
+
added_cond_kwargs = {
|
| 1407 |
+
"text_embeds": add_text_embeds,
|
| 1408 |
+
"time_ids": add_time_ids,
|
| 1409 |
+
"neg_time_ids": add_neg_time_ids,
|
| 1410 |
+
}
|
| 1411 |
+
|
| 1412 |
+
# controlnet(s) inference
|
| 1413 |
+
if guess_mode and self.do_classifier_free_guidance:
|
| 1414 |
+
# Infer ControlNet only for the conditional batch.
|
| 1415 |
+
control_model_input = latents
|
| 1416 |
+
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
|
| 1417 |
+
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
|
| 1418 |
+
controlnet_added_cond_kwargs = {
|
| 1419 |
+
"text_embeds": add_text_embeds.chunk(2)[1],
|
| 1420 |
+
"time_ids": add_time_ids.chunk(2)[1],
|
| 1421 |
+
"neg_time_ids": add_neg_time_ids.chunk(2)[1],
|
| 1422 |
+
}
|
| 1423 |
+
else:
|
| 1424 |
+
control_model_input = latent_model_input
|
| 1425 |
+
controlnet_prompt_embeds = prompt_embeds
|
| 1426 |
+
controlnet_added_cond_kwargs = added_cond_kwargs
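# The effective per-step ControlNet strength is the user-provided scale multiplied by the keep flag from `controlnet_keep`, so conditioning is switched off outside the configured guidance window.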
|
| 1427 |
+
|
| 1428 |
+
if isinstance(controlnet_keep[i], list):
|
| 1429 |
+
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
|
| 1430 |
+
else:
|
| 1431 |
+
controlnet_cond_scale = controlnet_conditioning_scale
|
| 1432 |
+
if isinstance(controlnet_cond_scale, list):
|
| 1433 |
+
controlnet_cond_scale = controlnet_cond_scale[0]
|
| 1434 |
+
cond_scale = controlnet_cond_scale * controlnet_keep[i]
|
| 1435 |
+
|
| 1436 |
+
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
| 1437 |
+
control_model_input,
|
| 1438 |
+
t,
|
| 1439 |
+
encoder_hidden_states=controlnet_prompt_embeds,
|
| 1440 |
+
controlnet_cond=control_image,
|
| 1441 |
+
conditioning_scale=cond_scale,
|
| 1442 |
+
guess_mode=guess_mode,
|
| 1443 |
+
added_cond_kwargs=controlnet_added_cond_kwargs,
|
| 1444 |
+
return_dict=False,
|
| 1445 |
+
)
|
| 1446 |
+
|
| 1447 |
+
if guess_mode and self.do_classifier_free_guidance:
|
| 1448 |
+
# Inferred ControlNet only for the conditional batch.
|
| 1449 |
+
# To apply the output of ControlNet to both the unconditional and conditional batches,
|
| 1450 |
+
# add 0 to the unconditional batch to keep it unchanged.
|
| 1451 |
+
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
|
| 1452 |
+
mid_block_res_sample = torch.cat(
|
| 1453 |
+
[torch.zeros_like(mid_block_res_sample), mid_block_res_sample]
|
| 1454 |
+
)
|
| 1455 |
+
|
| 1456 |
+
if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
|
| 1457 |
+
added_cond_kwargs["image_embeds"] = image_embeds
|
| 1458 |
+
|
| 1459 |
+
# predict the noise residual
|
| 1460 |
+
noise_pred = self.unet(
|
| 1461 |
+
latent_model_input,
|
| 1462 |
+
t,
|
| 1463 |
+
encoder_hidden_states=prompt_embeds,
|
| 1464 |
+
cross_attention_kwargs=self.cross_attention_kwargs,
|
| 1465 |
+
down_block_additional_residuals=down_block_res_samples,
|
| 1466 |
+
mid_block_additional_residual=mid_block_res_sample,
|
| 1467 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1468 |
+
return_dict=False,
|
| 1469 |
+
)[0]
|
| 1470 |
+
|
| 1471 |
+
# perform guidance
|
| 1472 |
+
if self.do_classifier_free_guidance:
|
| 1473 |
+
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
| 1474 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1475 |
+
|
| 1476 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1477 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 1478 |
+
|
| 1479 |
+
if callback_on_step_end is not None:
|
| 1480 |
+
callback_kwargs = {}
|
| 1481 |
+
for k in callback_on_step_end_tensor_inputs:
|
| 1482 |
+
callback_kwargs[k] = locals()[k]
|
| 1483 |
+
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
|
| 1484 |
+
|
| 1485 |
+
latents = callback_outputs.pop("latents", latents)
|
| 1486 |
+
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
|
| 1487 |
+
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
|
| 1488 |
+
add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
|
| 1489 |
+
negative_pooled_prompt_embeds = callback_outputs.pop(
|
| 1490 |
+
"negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
|
| 1491 |
+
)
|
| 1492 |
+
add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
|
| 1493 |
+
add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids)
|
| 1494 |
+
control_image = callback_outputs.pop("control_image", control_image)
|
| 1495 |
+
|
| 1496 |
+
# call the callback, if provided
|
| 1497 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1498 |
+
progress_bar.update()
|
| 1499 |
+
if callback is not None and i % callback_steps == 0:
|
| 1500 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1501 |
+
callback(step_idx, t, latents)
|
| 1502 |
+
finally:
|
| 1503 |
+
for cn_and_og in patched_cn_models:
|
| 1504 |
+
cn_and_og[0].forward = cn_and_og[1]
|
| 1505 |
+
|
| 1506 |
+
# If we do sequential model offloading, let's offload unet and controlnet
|
| 1507 |
+
# manually for max memory savings
|
| 1508 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 1509 |
+
self.unet.to("cpu")
|
| 1510 |
+
self.controlnet.to("cpu")
|
| 1511 |
+
torch.cuda.empty_cache()
|
| 1512 |
+
torch.cuda.ipc_collect()
|
| 1513 |
+
|
| 1514 |
+
if not output_type == "latent":
|
| 1515 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1516 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1517 |
+
|
| 1518 |
+
if needs_upcasting:
|
| 1519 |
+
self.upcast_vae()
|
| 1520 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1521 |
+
|
| 1522 |
+
latents = latents / self.vae.config.scaling_factor
|
| 1523 |
+
image = self.vae.decode(latents, return_dict=False)[0]
|
| 1524 |
+
|
| 1525 |
+
# cast back to fp16 if needed
|
| 1526 |
+
if needs_upcasting:
|
| 1527 |
+
self.vae.to(dtype=torch.float16)
|
| 1528 |
+
else:
|
| 1529 |
+
image = latents
|
| 1530 |
+
return StableDiffusionXLPipelineOutput(images=image)
|
| 1531 |
+
|
| 1532 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1533 |
+
|
| 1534 |
+
# Offload all models
|
| 1535 |
+
self.maybe_free_model_hooks()
|
| 1536 |
+
|
| 1537 |
+
if not return_dict:
|
| 1538 |
+
return (image,)
|
| 1539 |
+
|
| 1540 |
+
return StableDiffusionXLPipelineOutput(images=image)
|
v0.36.0/pipeline_controlnet_xl_kolors_inpaint.py
ADDED
|
@@ -0,0 +1,1854 @@
|
|
| 1 |
+
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import PIL.Image
|
| 20 |
+
import torch
|
| 21 |
+
import torch.nn.functional as F
|
| 22 |
+
from transformers import (
|
| 23 |
+
CLIPImageProcessor,
|
| 24 |
+
CLIPVisionModelWithProjection,
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
|
| 28 |
+
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
|
| 29 |
+
from diffusers.loaders import (
|
| 30 |
+
FromSingleFileMixin,
|
| 31 |
+
IPAdapterMixin,
|
| 32 |
+
StableDiffusionXLLoraLoaderMixin,
|
| 33 |
+
TextualInversionLoaderMixin,
|
| 34 |
+
)
|
| 35 |
+
from diffusers.models import (
|
| 36 |
+
AutoencoderKL,
|
| 37 |
+
ControlNetModel,
|
| 38 |
+
ImageProjection,
|
| 39 |
+
MultiControlNetModel,
|
| 40 |
+
UNet2DConditionModel,
|
| 41 |
+
)
|
| 42 |
+
from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
|
| 43 |
+
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
|
| 44 |
+
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
|
| 45 |
+
from diffusers.schedulers import KarrasDiffusionSchedulers
|
| 46 |
+
from diffusers.utils import deprecate, is_invisible_watermark_available, logging, replace_example_docstring
|
| 47 |
+
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
if is_invisible_watermark_available():
|
| 51 |
+
from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
|
| 52 |
+
|
| 53 |
+
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
EXAMPLE_DOC_STRING = """
|
| 57 |
+
Examples:
|
| 58 |
+
```py
|
| 59 |
+
>>> from diffusers import KolorsControlNetInpaintPipeline, ControlNetModel
|
| 60 |
+
>>> from diffusers.utils import load_image
|
| 61 |
+
>>> from PIL import Image
|
| 62 |
+
>>> import numpy as np
|
| 63 |
+
>>> import torch
|
| 64 |
+
>>> import cv2
|
| 65 |
+
|
| 66 |
+
>>> init_image = load_image(
|
| 67 |
+
... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"
|
| 68 |
+
... )
|
| 69 |
+
>>> init_image = init_image.resize((1024, 1024))
|
| 70 |
+
|
| 71 |
+
>>> generator = torch.Generator(device="cpu").manual_seed(1)
|
| 72 |
+
|
| 73 |
+
>>> mask_image = load_image(
|
| 74 |
+
... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"
|
| 75 |
+
... )
|
| 76 |
+
>>> mask_image = mask_image.resize((1024, 1024))
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
>>> def make_canny_condition(image):
|
| 80 |
+
... image = np.array(image)
|
| 81 |
+
... image = cv2.Canny(image, 100, 200)
|
| 82 |
+
... image = image[:, :, None]
|
| 83 |
+
... image = np.concatenate([image, image, image], axis=2)
|
| 84 |
+
... image = Image.fromarray(image)
|
| 85 |
+
... return image
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
>>> control_image = make_canny_condition(init_image)
|
| 89 |
+
|
| 90 |
+
>>> controlnet = ControlNetModel.from_pretrained(
|
| 91 |
+
... "Kwai-Kolors/Kolors-ControlNet-Canny",
|
| 92 |
+
... use_safetensors=True,
|
| 93 |
+
... torch_dtype=torch.float16
|
| 94 |
+
... )
|
| 95 |
+
>>> pipe = KolorsControlNetInpaintPipeline.from_pretrained(
|
| 96 |
+
... "Kwai-Kolors/Kolors-diffusers",
|
| 97 |
+
... controlnet=controlnet,
|
| 98 |
+
... variant="fp16",
|
| 99 |
+
... use_safetensors=True,
|
| 100 |
+
... torch_dtype=torch.float16
|
| 101 |
+
... )
|
| 102 |
+
|
| 103 |
+
>>> pipe.enable_model_cpu_offload()
|
| 104 |
+
|
| 105 |
+
>>> # generate image
|
| 106 |
+
>>> image = pipe(
|
| 107 |
+
... "a handsome man with ray-ban sunglasses",
|
| 108 |
+
... num_inference_steps=20,
|
| 109 |
+
... generator=generator,
|
| 110 |
+
... eta=1.0,
|
| 111 |
+
... image=init_image,
|
| 112 |
+
... mask_image=mask_image,
|
| 113 |
+
... control_image=control_image,
|
| 114 |
+
... ).images[0]
|
| 115 |
+
```
|
| 116 |
+
"""
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
|
| 120 |
+
def retrieve_latents(
|
| 121 |
+
encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
|
| 122 |
+
):
|
| 123 |
+
if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
|
| 124 |
+
return encoder_output.latent_dist.sample(generator)
|
| 125 |
+
elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
|
| 126 |
+
return encoder_output.latent_dist.mode()
|
| 127 |
+
elif hasattr(encoder_output, "latents"):
|
| 128 |
+
return encoder_output.latents
|
| 129 |
+
else:
|
| 130 |
+
raise AttributeError("Could not access latents of provided encoder_output")
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
|
| 134 |
+
def retrieve_timesteps(
|
| 135 |
+
scheduler,
|
| 136 |
+
num_inference_steps: Optional[int] = None,
|
| 137 |
+
device: Optional[Union[str, torch.device]] = None,
|
| 138 |
+
timesteps: Optional[List[int]] = None,
|
| 139 |
+
sigmas: Optional[List[float]] = None,
|
| 140 |
+
**kwargs,
|
| 141 |
+
):
|
| 142 |
+
"""
|
| 143 |
+
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
|
| 144 |
+
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
|
| 145 |
+
|
| 146 |
+
Args:
|
| 147 |
+
scheduler (`SchedulerMixin`):
|
| 148 |
+
The scheduler to get timesteps from.
|
| 149 |
+
num_inference_steps (`int`):
|
| 150 |
+
The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
|
| 151 |
+
must be `None`.
|
| 152 |
+
device (`str` or `torch.device`, *optional*):
|
| 153 |
+
The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
|
| 154 |
+
timesteps (`List[int]`, *optional*):
|
| 155 |
+
Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
|
| 156 |
+
`num_inference_steps` and `sigmas` must be `None`.
|
| 157 |
+
sigmas (`List[float]`, *optional*):
|
| 158 |
+
Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
|
| 159 |
+
`num_inference_steps` and `timesteps` must be `None`.
|
| 160 |
+
|
| 161 |
+
Returns:
|
| 162 |
+
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
|
| 163 |
+
second element is the number of inference steps.
|
| 164 |
+
"""
|
| 165 |
+
if timesteps is not None and sigmas is not None:
|
| 166 |
+
raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
|
| 167 |
+
if timesteps is not None:
|
| 168 |
+
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 169 |
+
if not accepts_timesteps:
|
| 170 |
+
raise ValueError(
|
| 171 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 172 |
+
f" timestep schedules. Please check whether you are using the correct scheduler."
|
| 173 |
+
)
|
| 174 |
+
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
|
| 175 |
+
timesteps = scheduler.timesteps
|
| 176 |
+
num_inference_steps = len(timesteps)
|
| 177 |
+
elif sigmas is not None:
|
| 178 |
+
accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
|
| 179 |
+
if not accept_sigmas:
|
| 180 |
+
raise ValueError(
|
| 181 |
+
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
|
| 182 |
+
f" sigmas schedules. Please check whether you are using the correct scheduler."
|
| 183 |
+
)
|
| 184 |
+
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
|
| 185 |
+
timesteps = scheduler.timesteps
|
| 186 |
+
num_inference_steps = len(timesteps)
|
| 187 |
+
else:
|
| 188 |
+
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
|
| 189 |
+
timesteps = scheduler.timesteps
|
| 190 |
+
return timesteps, num_inference_steps
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
class KolorsControlNetInpaintPipeline(
|
| 194 |
+
DiffusionPipeline,
|
| 195 |
+
StableDiffusionMixin,
|
| 196 |
+
StableDiffusionXLLoraLoaderMixin,
|
| 197 |
+
FromSingleFileMixin,
|
| 198 |
+
IPAdapterMixin,
|
| 199 |
+
):
|
| 200 |
+
r"""
|
| 201 |
+
Pipeline for inpainting using Kolors with ControlNet guidance.
|
| 202 |
+
|
| 203 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
| 204 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
| 205 |
+
|
| 206 |
+
The pipeline also inherits the following loading methods:
|
| 207 |
+
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.safetensors` files
|
| 208 |
+
- [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
| 209 |
+
- [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
| 210 |
+
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
|
| 211 |
+
|
| 212 |
+
Args:
|
| 213 |
+
vae ([`AutoencoderKL`]):
|
| 214 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 215 |
+
text_encoder ([`ChatGLMModel`]):
|
| 216 |
+
Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b).
|
| 217 |
+
tokenizer (`ChatGLMTokenizer`):
|
| 218 |
+
Tokenizer of class
|
| 219 |
+
[ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py).
|
| 220 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
| 221 |
+
controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
|
| 222 |
+
Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
|
| 223 |
+
as a list, the outputs from each ControlNet are added together to create one combined additional
|
| 224 |
+
conditioning.
|
| 225 |
+
scheduler ([`SchedulerMixin`]):
|
| 226 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 227 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 228 |
+
requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
|
| 229 |
+
Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the
|
| 230 |
+
config of `stabilityai/stable-diffusion-xl-refiner-1-0`.
|
| 231 |
+
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
|
| 232 |
+
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
|
| 233 |
+
`Kwai-Kolors/Kolors-diffusers`.
|
| 234 |
+
feature_extractor ([`~transformers.CLIPImageProcessor`]):
|
| 235 |
+
A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
|
| 236 |
+
"""
|
| 237 |
+
|
| 238 |
+
model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
|
| 239 |
+
|
| 240 |
+
_optional_components = [
|
| 241 |
+
"tokenizer",
|
| 242 |
+
"text_encoder",
|
| 243 |
+
"feature_extractor",
|
| 244 |
+
"image_encoder",
|
| 245 |
+
]
|
| 246 |
+
_callback_tensor_inputs = [
|
| 247 |
+
"latents",
|
| 248 |
+
"prompt_embeds",
|
| 249 |
+
"negative_prompt_embeds",
|
| 250 |
+
"add_text_embeds",
|
| 251 |
+
"add_time_ids",
|
| 252 |
+
"negative_pooled_prompt_embeds",
|
| 253 |
+
"add_neg_time_ids",
|
| 254 |
+
"mask",
|
| 255 |
+
"masked_image_latents",
|
| 256 |
+
"control_image",
|
| 257 |
+
]
|
| 258 |
+
|
| 259 |
+
def __init__(
|
| 260 |
+
self,
|
| 261 |
+
vae: AutoencoderKL,
|
| 262 |
+
text_encoder: ChatGLMModel,
|
| 263 |
+
tokenizer: ChatGLMTokenizer,
|
| 264 |
+
unet: UNet2DConditionModel,
|
| 265 |
+
controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
|
| 266 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 267 |
+
requires_aesthetics_score: bool = False,
|
| 268 |
+
force_zeros_for_empty_prompt: bool = True,
|
| 269 |
+
feature_extractor: CLIPImageProcessor = None,
|
| 270 |
+
image_encoder: CLIPVisionModelWithProjection = None,
|
| 271 |
+
add_watermarker: Optional[bool] = None,
|
| 272 |
+
):
|
| 273 |
+
super().__init__()
|
| 274 |
+
|
| 275 |
+
if isinstance(controlnet, (list, tuple)):
|
| 276 |
+
controlnet = MultiControlNetModel(controlnet)
|
| 277 |
+
|
| 278 |
+
self.register_modules(
|
| 279 |
+
vae=vae,
|
| 280 |
+
text_encoder=text_encoder,
|
| 281 |
+
tokenizer=tokenizer,
|
| 282 |
+
unet=unet,
|
| 283 |
+
controlnet=controlnet,
|
| 284 |
+
scheduler=scheduler,
|
| 285 |
+
feature_extractor=feature_extractor,
|
| 286 |
+
image_encoder=image_encoder,
|
| 287 |
+
)
|
| 288 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
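# Typically 8 for the SDXL-style VAE used by Kolors, i.e. latents have 1/8 of the pixel resolution.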
|
| 289 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
|
| 290 |
+
self.control_image_processor = VaeImageProcessor(
|
| 291 |
+
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
|
| 292 |
+
)
|
| 293 |
+
self.mask_processor = VaeImageProcessor(
|
| 294 |
+
vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
|
| 295 |
+
)
|
| 296 |
+
|
| 297 |
+
if add_watermarker:
|
| 298 |
+
self.watermark = StableDiffusionXLWatermarker()
|
| 299 |
+
else:
|
| 300 |
+
self.watermark = None
|
| 301 |
+
|
| 302 |
+
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
| 303 |
+
self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
|
| 304 |
+
|
| 305 |
+
def encode_prompt(
|
| 306 |
+
self,
|
| 307 |
+
prompt,
|
| 308 |
+
device: Optional[torch.device] = None,
|
| 309 |
+
num_images_per_prompt: int = 1,
|
| 310 |
+
do_classifier_free_guidance: bool = True,
|
| 311 |
+
negative_prompt=None,
|
| 312 |
+
prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 313 |
+
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 314 |
+
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 315 |
+
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
|
| 316 |
+
lora_scale: Optional[float] = None,
|
| 317 |
+
):
|
| 318 |
+
r"""
|
| 319 |
+
Encodes the prompt into text encoder hidden states.
|
| 320 |
+
|
| 321 |
+
Args:
|
| 322 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 323 |
+
prompt to be encoded
|
| 324 |
+
device: (`torch.device`):
|
| 325 |
+
torch device
|
| 326 |
+
num_images_per_prompt (`int`):
|
| 327 |
+
number of images that should be generated per prompt
|
| 328 |
+
do_classifier_free_guidance (`bool`):
|
| 329 |
+
whether to use classifier free guidance or not
|
| 330 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 331 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 332 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 333 |
+
less than `1`).
|
| 334 |
+
prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 335 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 336 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 337 |
+
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 338 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 339 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 340 |
+
argument.
|
| 341 |
+
pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 342 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 343 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 344 |
+
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
|
| 345 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 346 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 347 |
+
input argument.
|
| 348 |
+
lora_scale (`float`, *optional*):
|
| 349 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 350 |
+
"""
|
| 351 |
+
device = device or self._execution_device
|
| 352 |
+
|
| 353 |
+
# set lora scale so that monkey patched LoRA
|
| 354 |
+
# function of text encoder can correctly access it
|
| 355 |
+
if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
|
| 356 |
+
self._lora_scale = lora_scale
|
| 357 |
+
|
| 358 |
+
if prompt is not None and isinstance(prompt, str):
|
| 359 |
+
batch_size = 1
|
| 360 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 361 |
+
batch_size = len(prompt)
|
| 362 |
+
else:
|
| 363 |
+
batch_size = prompt_embeds.shape[0]
|
| 364 |
+
|
| 365 |
+
# Define tokenizers and text encoders
|
| 366 |
+
tokenizers = [self.tokenizer]
|
| 367 |
+
text_encoders = [self.text_encoder]
|
| 368 |
+
|
| 369 |
+
if prompt_embeds is None:
|
| 370 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 371 |
+
prompt_embeds_list = []
|
| 372 |
+
for tokenizer, text_encoder in zip(tokenizers, text_encoders):
|
| 373 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 374 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
| 375 |
+
|
| 376 |
+
text_inputs = tokenizer(
|
| 377 |
+
prompt,
|
| 378 |
+
padding="max_length",
|
| 379 |
+
max_length=256,
|
| 380 |
+
truncation=True,
|
| 381 |
+
return_tensors="pt",
|
| 382 |
+
).to(self._execution_device)
|
| 383 |
+
output = text_encoder(
|
| 384 |
+
input_ids=text_inputs["input_ids"],
|
| 385 |
+
attention_mask=text_inputs["attention_mask"],
|
| 386 |
+
position_ids=text_inputs["position_ids"],
|
| 387 |
+
output_hidden_states=True,
|
| 388 |
+
)
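# ChatGLM returns hidden states shaped (seq_len, batch, dim): the penultimate layer is permuted to (batch, seq_len, dim) for cross-attention, and the pooled embedding is the last layer's final token state.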
|
| 389 |
+
prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
|
| 390 |
+
pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096]
|
| 391 |
+
bs_embed, seq_len, _ = prompt_embeds.shape
|
| 392 |
+
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 393 |
+
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
|
| 394 |
+
prompt_embeds_list.append(prompt_embeds)
|
| 395 |
+
|
| 396 |
+
# prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
| 397 |
+
prompt_embeds = prompt_embeds_list[0]
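# Kolors uses a single text encoder, so the list holds exactly one entry; the dual-encoder concat used by SDXL is left commented out above.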
|
| 398 |
+
|
| 399 |
+
# get unconditional embeddings for classifier free guidance
|
| 400 |
+
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
| 401 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
| 402 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
| 403 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
| 404 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
| 405 |
+
# negative_prompt = negative_prompt or ""
|
| 406 |
+
uncond_tokens: List[str]
|
| 407 |
+
if negative_prompt is None:
|
| 408 |
+
uncond_tokens = [""] * batch_size
|
| 409 |
+
elif prompt is not None and type(prompt) is not type(negative_prompt):
|
| 410 |
+
raise TypeError(
|
| 411 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
| 412 |
+
f" {type(prompt)}."
|
| 413 |
+
)
|
| 414 |
+
elif isinstance(negative_prompt, str):
|
| 415 |
+
uncond_tokens = [negative_prompt]
|
| 416 |
+
elif batch_size != len(negative_prompt):
|
| 417 |
+
raise ValueError(
|
| 418 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
| 419 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
| 420 |
+
" the batch size of `prompt`."
|
| 421 |
+
)
|
| 422 |
+
else:
|
| 423 |
+
uncond_tokens = negative_prompt
|
| 424 |
+
|
| 425 |
+
negative_prompt_embeds_list = []
|
| 426 |
+
for tokenizer, text_encoder in zip(tokenizers, text_encoders):
|
| 427 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 428 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 429 |
+
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, tokenizer)
|
| 430 |
+
|
| 431 |
+
max_length = prompt_embeds.shape[1]
|
| 432 |
+
uncond_input = tokenizer(
|
| 433 |
+
uncond_tokens,
|
| 434 |
+
padding="max_length",
|
| 435 |
+
max_length=max_length,
|
| 436 |
+
truncation=True,
|
| 437 |
+
return_tensors="pt",
|
| 438 |
+
).to(self._execution_device)
|
| 439 |
+
output = text_encoder(
|
| 440 |
+
input_ids=uncond_input["input_ids"],
|
| 441 |
+
attention_mask=uncond_input["attention_mask"],
|
| 442 |
+
position_ids=uncond_input["position_ids"],
|
| 443 |
+
output_hidden_states=True,
|
| 444 |
+
)
|
| 445 |
+
negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
|
| 446 |
+
negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096]
|
| 447 |
+
|
| 448 |
+
if do_classifier_free_guidance:
|
| 449 |
+
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
|
| 450 |
+
seq_len = negative_prompt_embeds.shape[1]
|
| 451 |
+
|
| 452 |
+
negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)
|
| 453 |
+
|
| 454 |
+
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
|
| 455 |
+
negative_prompt_embeds = negative_prompt_embeds.view(
|
| 456 |
+
batch_size * num_images_per_prompt, seq_len, -1
|
| 457 |
+
)
|
| 458 |
+
|
| 459 |
+
# For classifier free guidance, we need to do two forward passes.
|
| 460 |
+
# Here we concatenate the unconditional and text embeddings into a single batch
|
| 461 |
+
# to avoid doing two forward passes
|
| 462 |
+
|
| 463 |
+
negative_prompt_embeds_list.append(negative_prompt_embeds)
|
| 464 |
+
|
| 465 |
+
# negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
|
| 466 |
+
negative_prompt_embeds = negative_prompt_embeds_list[0]
|
| 467 |
+
|
| 468 |
+
bs_embed = pooled_prompt_embeds.shape[0]
|
| 469 |
+
pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 470 |
+
bs_embed * num_images_per_prompt, -1
|
| 471 |
+
)
|
| 472 |
+
if do_classifier_free_guidance:
|
| 473 |
+
negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
|
| 474 |
+
bs_embed * num_images_per_prompt, -1
|
| 475 |
+
)
|
| 476 |
+
|
| 477 |
+
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
|
| 478 |
+
|
| 479 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
|
| 480 |
+
def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
|
| 481 |
+
dtype = next(self.image_encoder.parameters()).dtype
|
| 482 |
+
|
| 483 |
+
if not isinstance(image, torch.Tensor):
|
| 484 |
+
image = self.feature_extractor(image, return_tensors="pt").pixel_values
|
| 485 |
+
|
| 486 |
+
image = image.to(device=device, dtype=dtype)
|
| 487 |
+
if output_hidden_states:
|
| 488 |
+
image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
|
| 489 |
+
image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
|
| 490 |
+
uncond_image_enc_hidden_states = self.image_encoder(
|
| 491 |
+
torch.zeros_like(image), output_hidden_states=True
|
| 492 |
+
).hidden_states[-2]
|
| 493 |
+
uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
|
| 494 |
+
num_images_per_prompt, dim=0
|
| 495 |
+
)
|
| 496 |
+
return image_enc_hidden_states, uncond_image_enc_hidden_states
|
| 497 |
+
else:
|
| 498 |
+
image_embeds = self.image_encoder(image).image_embeds
|
| 499 |
+
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
|
| 500 |
+
uncond_image_embeds = torch.zeros_like(image_embeds)
|
| 501 |
+
|
| 502 |
+
return image_embeds, uncond_image_embeds
|
| 503 |
+
|
| 504 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
|
| 505 |
+
def prepare_ip_adapter_image_embeds(
|
| 506 |
+
self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
|
| 507 |
+
):
|
| 508 |
+
image_embeds = []
|
| 509 |
+
if do_classifier_free_guidance:
|
| 510 |
+
negative_image_embeds = []
|
| 511 |
+
if ip_adapter_image_embeds is None:
|
| 512 |
+
if not isinstance(ip_adapter_image, list):
|
| 513 |
+
ip_adapter_image = [ip_adapter_image]
|
| 514 |
+
|
| 515 |
+
if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
|
| 516 |
+
raise ValueError(
|
| 517 |
+
f"`ip_adapter_image` must have same length as the number of IP Adapters. Got "
|
| 518 |
+
f"{len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
|
| 519 |
+
)
|
| 520 |
+
|
| 521 |
+
for single_ip_adapter_image, image_proj_layer in zip(
|
| 522 |
+
ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
|
| 523 |
+
):
|
| 524 |
+
output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
|
| 525 |
+
single_image_embeds, single_negative_image_embeds = self.encode_image(
|
| 526 |
+
single_ip_adapter_image, device, 1, output_hidden_state
|
| 527 |
+
)
|
| 528 |
+
|
| 529 |
+
image_embeds.append(single_image_embeds[None, :])
|
| 530 |
+
if do_classifier_free_guidance:
|
| 531 |
+
negative_image_embeds.append(single_negative_image_embeds[None, :])
|
| 532 |
+
else:
|
| 533 |
+
for single_image_embeds in ip_adapter_image_embeds:
|
| 534 |
+
if do_classifier_free_guidance:
|
| 535 |
+
single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
|
| 536 |
+
negative_image_embeds.append(single_negative_image_embeds)
|
| 537 |
+
image_embeds.append(single_image_embeds)
|
| 538 |
+
|
| 539 |
+
ip_adapter_image_embeds = []
|
| 540 |
+
for i, single_image_embeds in enumerate(image_embeds):
|
| 541 |
+
single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
|
| 542 |
+
if do_classifier_free_guidance:
|
| 543 |
+
single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
|
| 544 |
+
single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)
|
| 545 |
+
|
| 546 |
+
single_image_embeds = single_image_embeds.to(device=device)
|
| 547 |
+
ip_adapter_image_embeds.append(single_image_embeds)
|
| 548 |
+
|
| 549 |
+
return ip_adapter_image_embeds
|
| 550 |
+
|
| 551 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
|
| 552 |
+
def prepare_extra_step_kwargs(self, generator, eta):
|
| 553 |
+
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
|
| 554 |
+
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
|
| 555 |
+
# eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
|
| 556 |
+
# and should be between [0, 1]
|
| 557 |
+
|
| 558 |
+
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 559 |
+
extra_step_kwargs = {}
|
| 560 |
+
if accepts_eta:
|
| 561 |
+
extra_step_kwargs["eta"] = eta
|
| 562 |
+
|
| 563 |
+
# check if the scheduler accepts generator
|
| 564 |
+
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
|
| 565 |
+
if accepts_generator:
|
| 566 |
+
extra_step_kwargs["generator"] = generator
|
| 567 |
+
return extra_step_kwargs
|
| 568 |
+
|
| 569 |
+
def check_inputs(
|
| 570 |
+
self,
|
| 571 |
+
prompt,
|
| 572 |
+
image,
|
| 573 |
+
strength,
|
| 574 |
+
num_inference_steps,
|
| 575 |
+
callback_steps,
|
| 576 |
+
negative_prompt=None,
|
| 577 |
+
prompt_embeds=None,
|
| 578 |
+
negative_prompt_embeds=None,
|
| 579 |
+
pooled_prompt_embeds=None,
|
| 580 |
+
negative_pooled_prompt_embeds=None,
|
| 581 |
+
ip_adapter_image=None,
|
| 582 |
+
ip_adapter_image_embeds=None,
|
| 583 |
+
controlnet_conditioning_scale=1.0,
|
| 584 |
+
control_guidance_start=0.0,
|
| 585 |
+
control_guidance_end=1.0,
|
| 586 |
+
callback_on_step_end_tensor_inputs=None,
|
| 587 |
+
):
|
| 588 |
+
if strength < 0 or strength > 1:
|
| 589 |
+
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
|
| 590 |
+
if num_inference_steps is None:
|
| 591 |
+
raise ValueError("`num_inference_steps` cannot be None.")
|
| 592 |
+
elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
|
| 593 |
+
raise ValueError(
|
| 594 |
+
f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
|
| 595 |
+
f" {type(num_inference_steps)}."
|
| 596 |
+
)
|
| 597 |
+
|
| 598 |
+
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
|
| 599 |
+
raise ValueError(
|
| 600 |
+
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
|
| 601 |
+
f" {type(callback_steps)}."
|
| 602 |
+
)
|
| 603 |
+
|
| 604 |
+
if callback_on_step_end_tensor_inputs is not None and not all(
|
| 605 |
+
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
|
| 606 |
+
):
|
| 607 |
+
raise ValueError(
|
| 608 |
+
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
|
| 609 |
+
)
|
| 610 |
+
|
| 611 |
+
if prompt is not None and prompt_embeds is not None:
|
| 612 |
+
raise ValueError(
|
| 613 |
+
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
|
| 614 |
+
" only forward one of the two."
|
| 615 |
+
)
|
| 616 |
+
elif prompt is None and prompt_embeds is None:
|
| 617 |
+
raise ValueError(
|
| 618 |
+
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
|
| 619 |
+
)
|
| 620 |
+
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
|
| 621 |
+
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
|
| 622 |
+
|
| 623 |
+
if negative_prompt is not None and negative_prompt_embeds is not None:
|
| 624 |
+
raise ValueError(
|
| 625 |
+
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
|
| 626 |
+
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
|
| 627 |
+
)
|
| 628 |
+
|
| 629 |
+
if prompt_embeds is not None and negative_prompt_embeds is not None:
|
| 630 |
+
if prompt_embeds.shape != negative_prompt_embeds.shape:
|
| 631 |
+
raise ValueError(
|
| 632 |
+
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
|
| 633 |
+
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
|
| 634 |
+
f" {negative_prompt_embeds.shape}."
|
| 635 |
+
)
|
| 636 |
+
|
| 637 |
+
if prompt_embeds is not None and pooled_prompt_embeds is None:
|
| 638 |
+
raise ValueError(
|
| 639 |
+
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
|
| 640 |
+
)
|
| 641 |
+
|
| 642 |
+
if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
|
| 643 |
+
raise ValueError(
|
| 644 |
+
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
|
| 645 |
+
)
|
| 646 |
+
|
| 647 |
+
# `prompt` needs more sophisticated handling when there are multiple
|
| 648 |
+
# conditionings.
|
| 649 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 650 |
+
if isinstance(prompt, list):
|
| 651 |
+
logger.warning(
|
| 652 |
+
f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
|
| 653 |
+
" prompts. The conditionings will be fixed across the prompts."
|
| 654 |
+
)
|
| 655 |
+
|
| 656 |
+
# Check `image`
|
| 657 |
+
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
|
| 658 |
+
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
|
| 659 |
+
)
|
| 660 |
+
|
| 661 |
+
if (
|
| 662 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 663 |
+
or is_compiled
|
| 664 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 665 |
+
):
|
| 666 |
+
self.check_image(image, prompt, prompt_embeds)
|
| 667 |
+
elif (
|
| 668 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 669 |
+
or is_compiled
|
| 670 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 671 |
+
):
|
| 672 |
+
if not isinstance(image, list):
|
| 673 |
+
raise TypeError("For multiple controlnets: `image` must be type `list`")
|
| 674 |
+
|
| 675 |
+
# When `image` is a nested list:
|
| 676 |
+
# (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
|
| 677 |
+
elif any(isinstance(i, list) for i in image):
|
| 678 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 679 |
+
elif len(image) != len(self.controlnet.nets):
|
| 680 |
+
raise ValueError(
|
| 681 |
+
f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
|
| 682 |
+
)
|
| 683 |
+
|
| 684 |
+
for image_ in image:
|
| 685 |
+
self.check_image(image_, prompt, prompt_embeds)
|
| 686 |
+
else:
|
| 687 |
+
assert False
|
| 688 |
+
|
| 689 |
+
# Check `controlnet_conditioning_scale`
|
| 690 |
+
if (
|
| 691 |
+
isinstance(self.controlnet, ControlNetModel)
|
| 692 |
+
or is_compiled
|
| 693 |
+
and isinstance(self.controlnet._orig_mod, ControlNetModel)
|
| 694 |
+
):
|
| 695 |
+
if not isinstance(controlnet_conditioning_scale, float):
|
| 696 |
+
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
|
| 697 |
+
elif (
|
| 698 |
+
isinstance(self.controlnet, MultiControlNetModel)
|
| 699 |
+
or is_compiled
|
| 700 |
+
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
|
| 701 |
+
):
|
| 702 |
+
if isinstance(controlnet_conditioning_scale, list):
|
| 703 |
+
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
|
| 704 |
+
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
|
| 705 |
+
elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
|
| 706 |
+
self.controlnet.nets
|
| 707 |
+
):
|
| 708 |
+
raise ValueError(
|
| 709 |
+
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
|
| 710 |
+
" the same length as the number of controlnets"
|
| 711 |
+
)
|
| 712 |
+
else:
|
| 713 |
+
assert False
|
| 714 |
+
|
| 715 |
+
if not isinstance(control_guidance_start, (tuple, list)):
|
| 716 |
+
control_guidance_start = [control_guidance_start]
|
| 717 |
+
|
| 718 |
+
if not isinstance(control_guidance_end, (tuple, list)):
|
| 719 |
+
control_guidance_end = [control_guidance_end]
|
| 720 |
+
|
| 721 |
+
if len(control_guidance_start) != len(control_guidance_end):
|
| 722 |
+
raise ValueError(
|
| 723 |
+
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
|
| 724 |
+
)
|
| 725 |
+
|
| 726 |
+
if isinstance(self.controlnet, MultiControlNetModel):
|
| 727 |
+
if len(control_guidance_start) != len(self.controlnet.nets):
|
| 728 |
+
raise ValueError(
|
| 729 |
+
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
|
| 730 |
+
)
|
| 731 |
+
|
| 732 |
+
for start, end in zip(control_guidance_start, control_guidance_end):
|
| 733 |
+
if start >= end:
|
| 734 |
+
raise ValueError(
|
| 735 |
+
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
|
| 736 |
+
)
|
| 737 |
+
if start < 0.0:
|
| 738 |
+
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
|
| 739 |
+
if end > 1.0:
|
| 740 |
+
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
|
| 741 |
+
|
| 742 |
+
if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
|
| 743 |
+
raise ValueError(
|
| 744 |
+
"Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
|
| 745 |
+
)
|
| 746 |
+
|
| 747 |
+
if ip_adapter_image_embeds is not None:
|
| 748 |
+
if not isinstance(ip_adapter_image_embeds, list):
|
| 749 |
+
raise ValueError(
|
| 750 |
+
f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
|
| 751 |
+
)
|
| 752 |
+
elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
|
| 753 |
+
raise ValueError(
|
| 754 |
+
f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
|
| 755 |
+
)
|
| 756 |
+
|
| 757 |
+
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image
|
| 758 |
+
def check_image(self, image, prompt, prompt_embeds):
|
| 759 |
+
image_is_pil = isinstance(image, PIL.Image.Image)
|
| 760 |
+
image_is_tensor = isinstance(image, torch.Tensor)
|
| 761 |
+
image_is_np = isinstance(image, np.ndarray)
|
| 762 |
+
image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
|
| 763 |
+
image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
|
| 764 |
+
image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
|
| 765 |
+
|
| 766 |
+
if (
|
| 767 |
+
not image_is_pil
|
| 768 |
+
and not image_is_tensor
|
| 769 |
+
and not image_is_np
|
| 770 |
+
and not image_is_pil_list
|
| 771 |
+
and not image_is_tensor_list
|
| 772 |
+
and not image_is_np_list
|
| 773 |
+
):
|
| 774 |
+
raise TypeError(
|
| 775 |
+
f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
|
| 776 |
+
)
|
| 777 |
+
|
| 778 |
+
if image_is_pil:
|
| 779 |
+
image_batch_size = 1
|
| 780 |
+
else:
|
| 781 |
+
image_batch_size = len(image)
|
| 782 |
+
|
| 783 |
+
if prompt is not None and isinstance(prompt, str):
|
| 784 |
+
prompt_batch_size = 1
|
| 785 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 786 |
+
prompt_batch_size = len(prompt)
|
| 787 |
+
elif prompt_embeds is not None:
|
| 788 |
+
prompt_batch_size = prompt_embeds.shape[0]
|
| 789 |
+
|
| 790 |
+
if image_batch_size != 1 and image_batch_size != prompt_batch_size:
|
| 791 |
+
raise ValueError(
|
| 792 |
+
f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
|
| 793 |
+
)

    # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image
    def prepare_control_image(
        self,
        image,
        width,
        height,
        batch_size,
        num_images_per_prompt,
        device,
        dtype,
        do_classifier_free_guidance=False,
        guess_mode=False,
    ):
        image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
        image_batch_size = image.shape[0]

        if image_batch_size == 1:
            repeat_by = batch_size
        else:
            # image batch size is the same as prompt batch size
            repeat_by = num_images_per_prompt

        image = image.repeat_interleave(repeat_by, dim=0)

        image = image.to(device=device, dtype=dtype)

        if do_classifier_free_guidance and not guess_mode:
            image = torch.cat([image] * 2)

        return image

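    # For example (illustrative numbers, not taken from the code above): with batch_size=2,
    # num_images_per_prompt=1 and a single conditioning image, `prepare_control_image` repeats the
    # image to a batch of 2 and doubles it again to 4 when classifier-free guidance is enabled and
    # `guess_mode` is off, so it lines up with the [negative, positive] prompt embedding batch.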
    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps
    def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
        # get the original timestep using init_timestep
        if denoising_start is None:
            init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
            t_start = max(num_inference_steps - init_timestep, 0)
        else:
            t_start = 0

        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]

        # Strength is irrelevant if we directly request a timestep to start at;
        # that is, strength is determined by the denoising_start instead.
        if denoising_start is not None:
            discrete_timestep_cutoff = int(
                round(
                    self.scheduler.config.num_train_timesteps
                    - (denoising_start * self.scheduler.config.num_train_timesteps)
                )
            )

            num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
            if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
                # if the scheduler is a 2nd order scheduler we might have to do +1
                # because `num_inference_steps` might be even given that every timestep
                # (except the highest one) is duplicated. If `num_inference_steps` is even it would
                # mean that we cut the timesteps in the middle of the denoising step
                # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
                # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler
                num_inference_steps = num_inference_steps + 1

            # because t_n+1 >= t_n, we slice the timesteps starting from the end
            timesteps = timesteps[-num_inference_steps:]
            return timesteps, num_inference_steps

        return timesteps, num_inference_steps - t_start

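    # Worked example (illustrative values only): with num_inference_steps=50 and strength=0.6,
    # init_timestep = min(int(50 * 0.6), 50) = 30 and t_start = 50 - 30 = 20, so denoising runs over
    # the last 30 scheduler timesteps and `get_timesteps` returns num_inference_steps - t_start = 30.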
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents
|
| 864 |
+
def prepare_latents(
|
| 865 |
+
self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
|
| 866 |
+
):
|
| 867 |
+
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
|
| 868 |
+
raise ValueError(
|
| 869 |
+
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
|
| 870 |
+
)
|
| 871 |
+
|
| 872 |
+
# Offload text encoder if `enable_model_cpu_offload` was enabled
|
| 873 |
+
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
| 874 |
+
torch.cuda.empty_cache()
|
| 875 |
+
torch.cuda.ipc_collect()
|
| 876 |
+
|
| 877 |
+
image = image.to(device=device, dtype=dtype)
|
| 878 |
+
|
| 879 |
+
batch_size = batch_size * num_images_per_prompt
|
| 880 |
+
|
| 881 |
+
if image.shape[1] == 4:
|
| 882 |
+
init_latents = image
|
| 883 |
+
|
| 884 |
+
else:
|
| 885 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 886 |
+
if self.vae.config.force_upcast:
|
| 887 |
+
image = image.float()
|
| 888 |
+
self.vae.to(dtype=torch.float32)
|
| 889 |
+
|
| 890 |
+
if isinstance(generator, list) and len(generator) != batch_size:
|
| 891 |
+
raise ValueError(
|
| 892 |
+
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
| 893 |
+
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
| 894 |
+
)
|
| 895 |
+
|
| 896 |
+
elif isinstance(generator, list):
|
| 897 |
+
init_latents = [
|
| 898 |
+
retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
|
| 899 |
+
for i in range(batch_size)
|
| 900 |
+
]
|
| 901 |
+
init_latents = torch.cat(init_latents, dim=0)
|
| 902 |
+
else:
|
| 903 |
+
init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
|
| 904 |
+
|
| 905 |
+
if self.vae.config.force_upcast:
|
| 906 |
+
self.vae.to(dtype)
|
| 907 |
+
|
| 908 |
+
init_latents = init_latents.to(dtype)
|
| 909 |
+
|
| 910 |
+
init_latents = self.vae.config.scaling_factor * init_latents
|
| 911 |
+
|
| 912 |
+
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
|
| 913 |
+
# expand init_latents for batch_size
|
| 914 |
+
additional_image_per_prompt = batch_size // init_latents.shape[0]
|
| 915 |
+
init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
|
| 916 |
+
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
|
| 917 |
+
raise ValueError(
|
| 918 |
+
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
|
| 919 |
+
)
|
| 920 |
+
else:
|
| 921 |
+
init_latents = torch.cat([init_latents], dim=0)
|
| 922 |
+
|
| 923 |
+
if add_noise:
|
| 924 |
+
shape = init_latents.shape
|
| 925 |
+
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
| 926 |
+
# get latents
|
| 927 |
+
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
|
| 928 |
+
|
| 929 |
+
latents = init_latents
|
| 930 |
+
|
| 931 |
+
return latents

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents_t2i(
        self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None
    ):
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

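    # For example (assuming the usual SDXL-style VAE with vae_scale_factor=8 and 4 latent channels):
    # height=width=1024 and batch_size=1 give a latent `shape` of (1, 4, 128, 128) above.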
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
|
| 954 |
+
def _get_add_time_ids(
|
| 955 |
+
self,
|
| 956 |
+
original_size,
|
| 957 |
+
crops_coords_top_left,
|
| 958 |
+
target_size,
|
| 959 |
+
aesthetic_score,
|
| 960 |
+
negative_aesthetic_score,
|
| 961 |
+
negative_original_size,
|
| 962 |
+
negative_crops_coords_top_left,
|
| 963 |
+
negative_target_size,
|
| 964 |
+
dtype,
|
| 965 |
+
text_encoder_projection_dim=None,
|
| 966 |
+
):
|
| 967 |
+
if self.config.requires_aesthetics_score:
|
| 968 |
+
add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
|
| 969 |
+
add_neg_time_ids = list(
|
| 970 |
+
negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
|
| 971 |
+
)
|
| 972 |
+
else:
|
| 973 |
+
add_time_ids = list(original_size + crops_coords_top_left + target_size)
|
| 974 |
+
add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)
|
| 975 |
+
|
| 976 |
+
passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + 4096
|
| 977 |
+
expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
|
| 978 |
+
|
| 979 |
+
if (
|
| 980 |
+
expected_add_embed_dim > passed_add_embed_dim
|
| 981 |
+
and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
|
| 982 |
+
):
|
| 983 |
+
raise ValueError(
|
| 984 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
|
| 985 |
+
)
|
| 986 |
+
elif (
|
| 987 |
+
expected_add_embed_dim < passed_add_embed_dim
|
| 988 |
+
and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
|
| 989 |
+
):
|
| 990 |
+
raise ValueError(
|
| 991 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
|
| 992 |
+
)
|
| 993 |
+
elif expected_add_embed_dim != passed_add_embed_dim:
|
| 994 |
+
raise ValueError(
|
| 995 |
+
f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder.config.projection_dim`."
|
| 996 |
+
)
|
| 997 |
+
|
| 998 |
+
add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
|
| 999 |
+
add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
|
| 1000 |
+
|
| 1001 |
+
return add_time_ids, add_neg_time_ids
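        # NOTE: the hard-coded `+ 4096` in `passed_add_embed_dim` above presumably corresponds to the
        # pooled text-embedding width of the Kolors text encoder (4096), playing the role that
        # `text_encoder_projection_dim` plays in the stock SDXL pipelines; with the usual
        # addition_time_embed_dim of 256 and 6 time ids this gives 256 * 6 + 4096 = 5632.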

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
    def upcast_vae(self):
        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
        self.vae.to(dtype=torch.float32)

    @property
    def denoising_end(self):
        return self._denoising_end

    @property
    def denoising_start(self):
        return self._denoising_start

    @property
    def guidance_scale(self):
        return self._guidance_scale

    # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
        dtype = image.dtype
        if self.vae.config.force_upcast:
            image = image.float()
            self.vae.to(dtype=torch.float32)

        if isinstance(generator, list):
            image_latents = [
                retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
                for i in range(image.shape[0])
            ]
            image_latents = torch.cat(image_latents, dim=0)
        else:
            image_latents = retrieve_latents(self.vae.encode(image), generator=generator)

        if self.vae.config.force_upcast:
            self.vae.to(dtype)

        image_latents = image_latents.to(dtype)
        image_latents = self.vae.config.scaling_factor * image_latents

        return image_latents

    def prepare_mask_latents(
        self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
    ):
        # resize the mask to latents shape as we concatenate the mask to the latents
        # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
        # and half precision
        mask = torch.nn.functional.interpolate(
            mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
        )
        mask = mask.to(device=device, dtype=dtype)

        # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
        if mask.shape[0] < batch_size:
            if not batch_size % mask.shape[0] == 0:
                raise ValueError(
                    "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
                    f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
                    " of masks that you pass is divisible by the total requested batch size."
                )
            mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)

        mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask

        if masked_image is not None and masked_image.shape[1] == 4:
            masked_image_latents = masked_image
        else:
            masked_image_latents = None

        if masked_image is not None:
            if masked_image_latents is None:
                masked_image = masked_image.to(device=device, dtype=dtype)
                masked_image_latents = self._encode_vae_image(masked_image, generator=generator)

            if masked_image_latents.shape[0] < batch_size:
                if not batch_size % masked_image_latents.shape[0] == 0:
                    raise ValueError(
                        "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                        f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
                        " Make sure the number of images that you pass is divisible by the total requested batch size."
                    )
                masked_image_latents = masked_image_latents.repeat(
                    batch_size // masked_image_latents.shape[0], 1, 1, 1
                )

            masked_image_latents = (
                torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
            )

            # aligning device to prevent device errors when concating it with the latent model input
            masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)

        return mask, masked_image_latents

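    # Shape sketch for the inpainting inputs (illustrative, assuming a 9-channel inpainting UNet,
    # vae_scale_factor=8 and classifier-free guidance): for a 1024x1024 image and batch size 1,
    # `prepare_mask_latents` returns a mask of shape (2, 1, 128, 128) and masked-image latents of
    # shape (2, 4, 128, 128), which are later concatenated with the (2, 4, 128, 128) noisy latents.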
@torch.no_grad()
|
| 1112 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 1113 |
+
def __call__(
|
| 1114 |
+
self,
|
| 1115 |
+
prompt: Union[str, List[str]] = None,
|
| 1116 |
+
image: PipelineImageInput = None,
|
| 1117 |
+
mask_image: PipelineImageInput = None,
|
| 1118 |
+
control_image: PipelineImageInput = None,
|
| 1119 |
+
masked_image_latents: torch.Tensor = None,
|
| 1120 |
+
height: Optional[int] = None,
|
| 1121 |
+
width: Optional[int] = None,
|
| 1122 |
+
padding_mask_crop: Optional[int] = None,
|
| 1123 |
+
strength: float = 0.9999,
|
| 1124 |
+
num_inference_steps: int = 50,
|
| 1125 |
+
timesteps: List[int] = None,
|
| 1126 |
+
sigmas: List[float] = None,
|
| 1127 |
+
denoising_start: Optional[float] = None,
|
| 1128 |
+
denoising_end: Optional[float] = None,
|
| 1129 |
+
guidance_scale: float = 7.5,
|
| 1130 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 1131 |
+
num_images_per_prompt: Optional[int] = 1,
|
| 1132 |
+
eta: float = 0.0,
|
| 1133 |
+
guess_mode: bool = False,
|
| 1134 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 1135 |
+
latents: Optional[torch.Tensor] = None,
|
| 1136 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 1137 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 1138 |
+
pooled_prompt_embeds: Optional[torch.Tensor] = None,
|
| 1139 |
+
negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
|
| 1140 |
+
ip_adapter_image: Optional[PipelineImageInput] = None,
|
| 1141 |
+
ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
|
| 1142 |
+
output_type: Optional[str] = "pil",
|
| 1143 |
+
return_dict: bool = True,
|
| 1144 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 1145 |
+
controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
|
| 1146 |
+
control_guidance_start: Union[float, List[float]] = 0.0,
|
| 1147 |
+
control_guidance_end: Union[float, List[float]] = 1.0,
|
| 1148 |
+
guidance_rescale: float = 0.0,
|
| 1149 |
+
original_size: Tuple[int, int] = None,
|
| 1150 |
+
crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 1151 |
+
target_size: Tuple[int, int] = None,
|
| 1152 |
+
negative_original_size: Optional[Tuple[int, int]] = None,
|
| 1153 |
+
negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
|
| 1154 |
+
negative_target_size: Optional[Tuple[int, int]] = None,
|
| 1155 |
+
aesthetic_score: float = 6.0,
|
| 1156 |
+
negative_aesthetic_score: float = 2.5,
|
| 1157 |
+
callback_on_step_end: Optional[
|
| 1158 |
+
Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
|
| 1159 |
+
] = None,
|
| 1160 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 1161 |
+
**kwargs,
|
| 1162 |
+
):
|
| 1163 |
+
r"""
|
| 1164 |
+
Function invoked when calling the pipeline for generation.
|
| 1165 |
+
|
| 1166 |
+
Args:
|
| 1167 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 1168 |
+
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
|
| 1170 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
| 1171 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
| 1172 |
+
used in both text-encoders
|
| 1173 |
+
image (`PIL.Image.Image`):
|
| 1174 |
+
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
|
| 1175 |
+
be masked out with `mask_image` and repainted according to `prompt`.
|
| 1176 |
+
mask_image (`PIL.Image.Image`):
|
| 1177 |
+
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
| 1178 |
+
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
|
| 1179 |
+
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
|
| 1180 |
+
instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
| 1181 |
+
control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
|
| 1182 |
+
`List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
|
| 1183 |
+
The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
|
| 1184 |
+
the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also
|
| 1185 |
+
be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
|
| 1186 |
+
and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in
|
| 1187 |
+
init, images must be passed as a list such that each element of the list can be correctly batched for
|
| 1188 |
+
input to a single controlnet.
|
| 1189 |
+
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 1190 |
+
The height in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 1191 |
+
Anything below 512 pixels won't work well for
|
| 1192 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 1193 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 1194 |
+
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
| 1195 |
+
The width in pixels of the generated image. This is set to 1024 by default for the best results.
|
| 1196 |
+
Anything below 512 pixels won't work well for
|
| 1197 |
+
[stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
|
| 1198 |
+
and checkpoints that are not specifically fine-tuned on low resolutions.
|
| 1199 |
+
padding_mask_crop (`int`, *optional*, defaults to `None`):
|
| 1200 |
+
The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to
|
| 1201 |
+
image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region
|
| 1202 |
+
                with the same aspect ratio as the image that contains all of the masked area, and then expand that area based
|
| 1203 |
+
on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before
|
| 1204 |
+
resizing to the original image size for inpainting. This is useful when the masked area is small while
|
| 1205 |
+
the image is large and contain information irrelevant for inpainting, such as background.
|
| 1206 |
+
strength (`float`, *optional*, defaults to 0.9999):
|
| 1207 |
+
Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be
|
| 1208 |
+
between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the
|
| 1209 |
+
`strength`. The number of denoising steps depends on the amount of noise initially added. When
|
| 1210 |
+
`strength` is 1, added noise will be maximum and the denoising process will run for the full number of
|
| 1211 |
+
iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked
|
| 1212 |
+
portion of the reference `image`. Note that in the case of `denoising_start` being declared as an
|
| 1213 |
+
integer, the value of `strength` will be ignored.
|
| 1214 |
+
num_inference_steps (`int`, *optional*, defaults to 50):
|
| 1215 |
+
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
| 1216 |
+
expense of slower inference.
|
| 1217 |
+
timesteps (`List[int]`, *optional*):
|
| 1218 |
+
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
|
| 1219 |
+
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
|
| 1220 |
+
passed will be used. Must be in descending order.
|
| 1221 |
+
sigmas (`List[float]`, *optional*):
|
| 1222 |
+
Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
|
| 1223 |
+
their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
|
| 1224 |
+
will be used.
|
| 1225 |
+
denoising_start (`float`, *optional*):
|
| 1226 |
+
When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
|
| 1227 |
+
bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
|
| 1228 |
+
it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
|
| 1229 |
+
strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
|
| 1230 |
+
is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
|
| 1231 |
+
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
|
| 1232 |
+
denoising_end (`float`, *optional*):
|
| 1233 |
+
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
|
| 1234 |
+
completed before it is intentionally prematurely terminated. As a result, the returned sample will
|
| 1235 |
+
still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
|
| 1236 |
+
denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
|
| 1237 |
+
final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
|
| 1238 |
+
forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
|
| 1239 |
+
Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
|
| 1240 |
+
guidance_scale (`float`, *optional*, defaults to 7.5):
|
| 1241 |
+
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
|
| 1242 |
+
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
| 1243 |
+
Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
|
| 1244 |
+
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
| 1245 |
+
usually at the expense of lower image quality.
|
| 1246 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 1247 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 1248 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 1249 |
+
less than `1`).
|
| 1250 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 1251 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 1252 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 1253 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 1254 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 1255 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 1256 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 1257 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1258 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 1259 |
+
argument.
|
| 1260 |
+
pooled_prompt_embeds (`torch.Tensor`, *optional*):
|
| 1261 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 1262 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
| 1263 |
+
negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
|
| 1264 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 1265 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 1266 |
+
input argument.
|
| 1267 |
+
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
|
| 1268 |
+
ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
|
| 1269 |
+
Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
|
| 1270 |
+
IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
|
| 1271 |
+
contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
|
| 1272 |
+
provided, embeddings are computed from the `ip_adapter_image` input argument.
|
| 1273 |
+
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
| 1274 |
+
The number of images to generate per prompt.
|
| 1275 |
+
eta (`float`, *optional*, defaults to 0.0):
|
| 1276 |
+
Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
|
| 1277 |
+
[`schedulers.DDIMScheduler`], will be ignored for others.
|
| 1278 |
+
generator (`torch.Generator`, *optional*):
|
| 1279 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 1280 |
+
to make generation deterministic.
|
| 1281 |
+
latents (`torch.Tensor`, *optional*):
|
| 1282 |
+
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
| 1283 |
+
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
| 1284 |
+
tensor will be generated by sampling using the supplied random `generator`.
|
| 1285 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 1286 |
+
The output format of the generate image. Choose between
|
| 1287 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 1288 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 1289 |
+
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
| 1290 |
+
plain tuple.
|
| 1291 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 1292 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 1293 |
+
`self.processor` in
|
| 1294 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 1295 |
+
            controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
|
| 1296 |
+
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
|
| 1297 |
+
to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
|
| 1298 |
+
corresponding scale as a list.
|
| 1299 |
+
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
|
| 1300 |
+
The percentage of total steps at which the controlnet starts applying.
|
| 1301 |
+
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
|
| 1302 |
+
The percentage of total steps at which the controlnet stops applying.
|
| 1303 |
+
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1304 |
+
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
| 1305 |
+
`original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
|
| 1306 |
+
explained in section 2.2 of
|
| 1307 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1308 |
+
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 1309 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 1310 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 1311 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 1312 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1313 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1314 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 1315 |
+
not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
|
| 1316 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1317 |
+
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1318 |
+
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
|
| 1319 |
+
micro-conditioning as explained in section 2.2 of
|
| 1320 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 1321 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 1322 |
+
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 1323 |
+
To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
|
| 1324 |
+
micro-conditioning as explained in section 2.2 of
|
| 1325 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 1326 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 1327 |
+
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 1328 |
+
To negatively condition the generation process based on a target image resolution. It should be as same
|
| 1329 |
+
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 1330 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 1331 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 1332 |
+
aesthetic_score (`float`, *optional*, defaults to 6.0):
|
| 1333 |
+
Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
|
| 1334 |
+
Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 1335 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 1336 |
+
negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
|
| 1337 |
+
Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 1338 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
|
| 1339 |
+
simulate an aesthetic score of the generated image by influencing the negative text condition.
|
| 1340 |
+
callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
|
| 1341 |
+
A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
|
| 1342 |
+
each denoising step during the inference. with the following arguments: `callback_on_step_end(self:
|
| 1343 |
+
DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
|
| 1344 |
+
list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
|
| 1345 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 1346 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 1347 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 1348 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 1349 |
+
|
| 1350 |
+
Examples:
|
| 1351 |
+
|
| 1352 |
+
Returns:
|
| 1353 |
+
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
|
| 1354 |
+
[`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
|
| 1355 |
+
            `tuple`. When returning a tuple, the first element is a list with the generated images.
|
| 1356 |
+
"""
|
| 1357 |
+
|
| 1358 |
+
callback = kwargs.pop("callback", None)
|
| 1359 |
+
callback_steps = kwargs.pop("callback_steps", None)
|
| 1360 |
+
|
| 1361 |
+
if callback is not None:
|
| 1362 |
+
deprecate(
|
| 1363 |
+
"callback",
|
| 1364 |
+
"1.0.0",
|
| 1365 |
+
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 1366 |
+
)
|
| 1367 |
+
if callback_steps is not None:
|
| 1368 |
+
deprecate(
|
| 1369 |
+
"callback_steps",
|
| 1370 |
+
"1.0.0",
|
| 1371 |
+
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
|
| 1372 |
+
)
|
| 1373 |
+
|
| 1374 |
+
if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
|
| 1375 |
+
callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
|
| 1376 |
+
|
| 1377 |
+
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
|
| 1378 |
+
|
| 1379 |
+
# align format for control guidance
|
| 1380 |
+
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
|
| 1381 |
+
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
|
| 1382 |
+
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
|
| 1383 |
+
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
|
| 1384 |
+
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
|
| 1385 |
+
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
|
| 1386 |
+
control_guidance_start, control_guidance_end = (
|
| 1387 |
+
mult * [control_guidance_start],
|
| 1388 |
+
mult * [control_guidance_end],
|
| 1389 |
+
)
|
| 1390 |
+
|
| 1391 |
+
|
| 1392 |
+
# 1. Check inputs. Raise error if not correct
|
| 1393 |
+
self.check_inputs(
|
| 1394 |
+
prompt,
|
| 1395 |
+
control_image,
|
| 1396 |
+
strength,
|
| 1397 |
+
num_inference_steps,
|
| 1398 |
+
callback_steps,
|
| 1399 |
+
negative_prompt,
|
| 1400 |
+
prompt_embeds,
|
| 1401 |
+
negative_prompt_embeds,
|
| 1402 |
+
pooled_prompt_embeds,
|
| 1403 |
+
negative_pooled_prompt_embeds,
|
| 1404 |
+
ip_adapter_image,
|
| 1405 |
+
ip_adapter_image_embeds,
|
| 1406 |
+
controlnet_conditioning_scale,
|
| 1407 |
+
control_guidance_start,
|
| 1408 |
+
control_guidance_end,
|
| 1409 |
+
callback_on_step_end_tensor_inputs,
|
| 1410 |
+
)
|
| 1411 |
+
|
| 1412 |
+
self._guidance_scale = guidance_scale
|
| 1413 |
+
self._cross_attention_kwargs = cross_attention_kwargs
|
| 1414 |
+
self._denoising_end = denoising_end
|
| 1415 |
+
self._denoising_start = denoising_start
|
| 1416 |
+
|
| 1417 |
+
# 2. Define call parameters
|
| 1418 |
+
if prompt is not None and isinstance(prompt, str):
|
| 1419 |
+
batch_size = 1
|
| 1420 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 1421 |
+
batch_size = len(prompt)
|
| 1422 |
+
else:
|
| 1423 |
+
batch_size = prompt_embeds.shape[0]
|
| 1424 |
+
|
| 1425 |
+
device = self._execution_device
|
| 1426 |
+
|
| 1427 |
+
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
| 1428 |
+
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
|
| 1429 |
+
|
| 1430 |
+
# 3.1. Encode input prompt
|
| 1431 |
+
text_encoder_lora_scale = (
|
| 1432 |
+
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
|
| 1433 |
+
)
|
| 1434 |
+
(
|
| 1435 |
+
prompt_embeds,
|
| 1436 |
+
negative_prompt_embeds,
|
| 1437 |
+
pooled_prompt_embeds,
|
| 1438 |
+
negative_pooled_prompt_embeds,
|
| 1439 |
+
) = self.encode_prompt(
|
| 1440 |
+
prompt,
|
| 1441 |
+
device,
|
| 1442 |
+
num_images_per_prompt,
|
| 1443 |
+
self.do_classifier_free_guidance,
|
| 1444 |
+
negative_prompt,
|
| 1445 |
+
prompt_embeds=prompt_embeds,
|
| 1446 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 1447 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 1448 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 1449 |
+
lora_scale=text_encoder_lora_scale,
|
| 1450 |
+
)
|
| 1451 |
+
|
| 1452 |
+
# 3.2 Encode ip_adapter_image
|
| 1453 |
+
if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
|
| 1454 |
+
image_embeds = self.prepare_ip_adapter_image_embeds(
|
| 1455 |
+
ip_adapter_image,
|
| 1456 |
+
ip_adapter_image_embeds,
|
| 1457 |
+
device,
|
| 1458 |
+
batch_size * num_images_per_prompt,
|
| 1459 |
+
self.do_classifier_free_guidance,
|
| 1460 |
+
)
|
| 1461 |
+
|
| 1462 |
+
# 4. Prepare image, mask, and controlnet_conditioning_image
|
| 1463 |
+
if isinstance(controlnet, ControlNetModel):
|
| 1464 |
+
control_image = self.prepare_control_image(
|
| 1465 |
+
image=control_image,
|
| 1466 |
+
width=width,
|
| 1467 |
+
height=height,
|
| 1468 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1469 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1470 |
+
device=device,
|
| 1471 |
+
dtype=controlnet.dtype,
|
| 1472 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 1473 |
+
guess_mode=guess_mode,
|
| 1474 |
+
)
|
| 1475 |
+
height, width = control_image.shape[-2:]
|
| 1476 |
+
elif isinstance(controlnet, MultiControlNetModel):
|
| 1477 |
+
control_images = []
|
| 1478 |
+
|
| 1479 |
+
for control_image_ in control_image:
|
| 1480 |
+
control_image_ = self.prepare_control_image(
|
| 1481 |
+
image=control_image_,
|
| 1482 |
+
width=width,
|
| 1483 |
+
height=height,
|
| 1484 |
+
batch_size=batch_size * num_images_per_prompt,
|
| 1485 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 1486 |
+
device=device,
|
| 1487 |
+
dtype=controlnet.dtype,
|
| 1488 |
+
do_classifier_free_guidance=self.do_classifier_free_guidance,
|
| 1489 |
+
guess_mode=guess_mode,
|
| 1490 |
+
)
|
| 1491 |
+
|
| 1492 |
+
control_images.append(control_image_)
|
| 1493 |
+
|
| 1494 |
+
control_image = control_images
|
| 1495 |
+
height, width = control_image[0].shape[-2:]
|
| 1496 |
+
else:
|
| 1497 |
+
assert False
|
| 1498 |
+
|
| 1499 |
+
        # 5. set timesteps
        def denoising_value_valid(dnv):
            return isinstance(dnv, float) and 0 < dnv < 1

        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas
        )
        timesteps, num_inference_steps = self.get_timesteps(
            num_inference_steps,
            strength,
            device,
            denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None,
        )
        # check that number of inference steps is not < 1 - as this doesn't make sense
        if num_inference_steps < 1:
            raise ValueError(
                f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
                f" steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
            )
        # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
        is_strength_max = strength == 1.0

# 6. Preprocess mask and image
|
| 1524 |
+
if padding_mask_crop is not None:
|
| 1525 |
+
crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
|
| 1526 |
+
resize_mode = "fill"
|
| 1527 |
+
else:
|
| 1528 |
+
crops_coords = None
|
| 1529 |
+
resize_mode = "default"
|
| 1530 |
+
|
| 1531 |
+
original_image = image
|
| 1532 |
+
init_image = self.image_processor.preprocess(
|
| 1533 |
+
image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
|
| 1534 |
+
)
|
| 1535 |
+
init_image = init_image.to(dtype=torch.float32)
|
| 1536 |
+
|
| 1537 |
+
mask = self.mask_processor.preprocess(
|
| 1538 |
+
mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
|
| 1539 |
+
)
|
| 1540 |
+
|
| 1541 |
+
if masked_image_latents is not None:
|
| 1542 |
+
masked_image = masked_image_latents
|
| 1543 |
+
elif init_image.shape[1] == 4:
|
| 1544 |
+
# if images are in latent space, we can't mask it
|
| 1545 |
+
masked_image = None
|
| 1546 |
+
else:
|
| 1547 |
+
masked_image = init_image * (mask < 0.5)
|
| 1548 |
+
|
| 1549 |
+
# 7. Prepare latent variables
|
| 1550 |
+
num_channels_latents = self.vae.config.latent_channels
|
| 1551 |
+
num_channels_unet = self.unet.config.in_channels
|
| 1552 |
+
return_image_latents = num_channels_unet == 4
|
| 1553 |
+
|
| 1554 |
+
if latents is None:
|
| 1555 |
+
if strength >= 1.0:
|
| 1556 |
+
latents = self.prepare_latents_t2i(
|
| 1557 |
+
batch_size * num_images_per_prompt,
|
| 1558 |
+
num_channels_latents,
|
| 1559 |
+
height,
|
| 1560 |
+
width,
|
| 1561 |
+
prompt_embeds.dtype,
|
| 1562 |
+
device,
|
| 1563 |
+
generator,
|
| 1564 |
+
latents,
|
| 1565 |
+
)
|
| 1566 |
+
else:
|
| 1567 |
+
latents = self.prepare_latents(
|
| 1568 |
+
init_image,
|
| 1569 |
+
latent_timestep,
|
| 1570 |
+
batch_size,
|
| 1571 |
+
num_images_per_prompt,
|
| 1572 |
+
prompt_embeds.dtype,
|
| 1573 |
+
device,
|
| 1574 |
+
generator,
|
| 1575 |
+
True,
|
| 1576 |
+
)
|
| 1577 |
+
|
| 1578 |
+
# 8. Prepare mask latent variables
|
| 1579 |
+
mask, masked_image_latents = self.prepare_mask_latents(
|
| 1580 |
+
mask,
|
| 1581 |
+
masked_image,
|
| 1582 |
+
batch_size * num_images_per_prompt,
|
| 1583 |
+
height,
|
| 1584 |
+
width,
|
| 1585 |
+
prompt_embeds.dtype,
|
| 1586 |
+
device,
|
| 1587 |
+
generator,
|
| 1588 |
+
self.do_classifier_free_guidance,
|
| 1589 |
+
)
|
| 1590 |
+
|
| 1591 |
+
# 9. Check that sizes of mask, masked image and latents match
|
| 1592 |
+
if num_channels_unet == 9:
|
| 1593 |
+
# default case for stable-diffusion-v1-5/stable-diffusion-inpainting
|
| 1594 |
+
num_channels_mask = mask.shape[1]
|
| 1595 |
+
num_channels_masked_image = masked_image_latents.shape[1]
|
| 1596 |
+
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
|
| 1597 |
+
raise ValueError(
|
| 1598 |
+
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
|
| 1599 |
+
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
|
| 1600 |
+
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
|
| 1601 |
+
f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of"
|
| 1602 |
+
" `pipeline.unet` or your `mask_image` or `image` input."
|
| 1603 |
+
)
|
| 1604 |
+
elif num_channels_unet != 4:
|
| 1605 |
+
raise ValueError(
|
| 1606 |
+
f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
|
| 1607 |
+
)
|
| 1608 |
+
|
| 1609 |
+
# 8.1. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 1610 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 1611 |
+
|
| 1612 |
+
        # 8.2 Create tensor stating which controlnets to keep
        controlnet_keep = []
        for i in range(len(timesteps)):
            keeps = [
                1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
                for s, e in zip(control_guidance_start, control_guidance_end)
            ]
            controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)

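        # For example (illustrative values): with 10 timesteps, control_guidance_start=0.0 and
        # control_guidance_end=0.8, `keeps` is 1.0 for steps i=0..7 and 0.0 for i=8 and i=9, because
        # (i + 1) / 10 exceeds 0.8 from i=8 onwards, so the ControlNet residuals are dropped for the
        # last two steps.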
# 9 Prepare added time ids & embeddings
|
| 1622 |
+
if isinstance(control_image, list):
|
| 1623 |
+
original_size = original_size or control_image[0].shape[-2:]
|
| 1624 |
+
else:
|
| 1625 |
+
original_size = original_size or control_image.shape[-2:]
|
| 1626 |
+
target_size = target_size or (height, width)
|
| 1627 |
+
|
| 1628 |
+
if negative_original_size is None:
|
| 1629 |
+
negative_original_size = original_size
|
| 1630 |
+
if negative_target_size is None:
|
| 1631 |
+
negative_target_size = target_size
|
| 1632 |
+
|
| 1633 |
+
add_text_embeds = pooled_prompt_embeds
|
| 1634 |
+
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
|
| 1635 |
+
|
| 1636 |
+
add_time_ids, add_neg_time_ids = self._get_add_time_ids(
|
| 1637 |
+
original_size,
|
| 1638 |
+
crops_coords_top_left,
|
| 1639 |
+
target_size,
|
| 1640 |
+
aesthetic_score,
|
| 1641 |
+
negative_aesthetic_score,
|
| 1642 |
+
negative_original_size,
|
| 1643 |
+
negative_crops_coords_top_left,
|
| 1644 |
+
negative_target_size,
|
| 1645 |
+
dtype=prompt_embeds.dtype,
|
| 1646 |
+
text_encoder_projection_dim=text_encoder_projection_dim,
|
| 1647 |
+
)
|
| 1648 |
+
|
| 1649 |
+
if self.do_classifier_free_guidance:
|
| 1650 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 1651 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 1652 |
+
add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
|
| 1653 |
+
add_neg_time_ids = torch.cat([add_neg_time_ids, add_neg_time_ids], dim=0)
|
| 1654 |
+
|
| 1655 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 1656 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 1657 |
+
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 1658 |
+
add_neg_time_ids = add_neg_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 1659 |
+
|
| 1660 |
+
# 10. Denoising loop
|
| 1661 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 1662 |
+
|
| 1663 |
+
if (
|
| 1664 |
+
self.denoising_end is not None
|
| 1665 |
+
and self.denoising_start is not None
|
| 1666 |
+
and denoising_value_valid(self.denoising_end)
|
| 1667 |
+
and denoising_value_valid(self.denoising_start)
|
| 1668 |
+
and self.denoising_start >= self.denoising_end
|
| 1669 |
+
):
|
| 1670 |
+
raise ValueError(
|
| 1671 |
+
f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: "
|
| 1672 |
+
+ f" {self.denoising_end} when using type float."
|
| 1673 |
+
)
|
| 1674 |
+
elif self.denoising_end is not None and denoising_value_valid(self.denoising_end):
|
| 1675 |
+
discrete_timestep_cutoff = int(
|
| 1676 |
+
round(
|
| 1677 |
+
self.scheduler.config.num_train_timesteps
|
| 1678 |
+
- (self.denoising_end * self.scheduler.config.num_train_timesteps)
|
| 1679 |
+
)
|
| 1680 |
+
)
|
| 1681 |
+
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
|
| 1682 |
+
timesteps = timesteps[:num_inference_steps]
|
| 1683 |
+
|
| 1684 |
+
# 11.1 Optionally get Guidance Scale Embedding
|
| 1685 |
+
timestep_cond = None
|
| 1686 |
+
if self.unet.config.time_cond_proj_dim is not None:
|
| 1687 |
+
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
|
| 1688 |
+
timestep_cond = self.get_guidance_scale_embedding(
|
| 1689 |
+
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
|
| 1690 |
+
).to(device=device, dtype=latents.dtype)
|
| 1691 |
+
|
| 1692 |
+
# patch diffusers controlnet instance forward, undo
|
| 1693 |
+
# after denoising loop
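        # The wrapper below routes `encoder_hidden_states` through the ControlNet's `encoder_hid_proj`
        # (when it is configured with `encoder_hid_dim_type == "text_proj"`) so the text-encoder hidden
        # states are projected to the width the ControlNet blocks expect; the original `forward` methods
        # are restored in the `finally` block after the denoising loop.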

        patched_cn_models = []
        if isinstance(self.controlnet, MultiControlNetModel):
            cn_models_to_patch = self.controlnet.nets
        else:
            cn_models_to_patch = [self.controlnet]

        for cn_model in cn_models_to_patch:
            cn_og_forward = cn_model.forward

            def _cn_patch_forward(*args, **kwargs):
                encoder_hidden_states = kwargs["encoder_hidden_states"]
                if cn_model.encoder_hid_proj is not None and cn_model.config.encoder_hid_dim_type == "text_proj":
                    # Ensure encoder_hidden_states is on the same device as the projection layer
                    encoder_hidden_states = encoder_hidden_states.to(cn_model.encoder_hid_proj.weight.device)
                    encoder_hidden_states = cn_model.encoder_hid_proj(encoder_hidden_states)
                kwargs.pop("encoder_hidden_states")
                return cn_og_forward(*args, encoder_hidden_states=encoder_hidden_states, **kwargs)

            cn_model.forward = _cn_patch_forward
            patched_cn_models.append((cn_model, cn_og_forward))

        try:
            with self.progress_bar(total=num_inference_steps) as progress_bar:
                for i, t in enumerate(timesteps):
                    # expand the latents if we are doing classifier free guidance
                    latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                    latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                    if num_channels_unet == 9:
                        latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)

                    added_cond_kwargs = {
                        "text_embeds": add_text_embeds,
                        "time_ids": add_time_ids,
                        "neg_time_ids": add_neg_time_ids,
                    }

                    # controlnet(s) inference
                    if guess_mode and self.do_classifier_free_guidance:
                        # Infer ControlNet only for the conditional batch.
                        control_model_input = latents
                        control_model_input = self.scheduler.scale_model_input(control_model_input, t)
                        controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
                        controlnet_added_cond_kwargs = {
                            "text_embeds": add_text_embeds.chunk(2)[1],
                            "time_ids": add_time_ids.chunk(2)[1],
                            "neg_time_ids": add_neg_time_ids.chunk(2)[1],
                        }
                    else:
                        control_model_input = latent_model_input
                        controlnet_prompt_embeds = prompt_embeds
                        controlnet_added_cond_kwargs = added_cond_kwargs

                    if isinstance(controlnet_keep[i], list):
                        cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
                    else:
                        controlnet_cond_scale = controlnet_conditioning_scale
                        if isinstance(controlnet_cond_scale, list):
                            controlnet_cond_scale = controlnet_cond_scale[0]
                        cond_scale = controlnet_cond_scale * controlnet_keep[i]

                    down_block_res_samples, mid_block_res_sample = self.controlnet(
                        control_model_input,
                        t,
                        encoder_hidden_states=controlnet_prompt_embeds,
                        controlnet_cond=control_image,
                        conditioning_scale=cond_scale,
                        guess_mode=guess_mode,
                        added_cond_kwargs=controlnet_added_cond_kwargs,
                        return_dict=False,
                    )

                    if guess_mode and self.do_classifier_free_guidance:
                        # Inferred ControlNet only for the conditional batch.
                        # To apply the output of ControlNet to both the unconditional and conditional batches,
                        # add 0 to the unconditional batch to keep it unchanged.
                        down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
                        mid_block_res_sample = torch.cat(
                            [torch.zeros_like(mid_block_res_sample), mid_block_res_sample]
                        )

                    if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
                        added_cond_kwargs["image_embeds"] = image_embeds

                    # predict the noise residual
                    noise_pred = self.unet(
                        latent_model_input,
                        t,
                        encoder_hidden_states=prompt_embeds,
                        cross_attention_kwargs=self.cross_attention_kwargs,
                        down_block_additional_residuals=down_block_res_samples,
                        mid_block_additional_residual=mid_block_res_sample,
                        added_cond_kwargs=added_cond_kwargs,
                        return_dict=False,
                    )[0]

                    # perform guidance
                    if self.do_classifier_free_guidance:
                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                    # compute the previous noisy sample x_t -> x_t-1
                    latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                    if callback_on_step_end is not None:
                        callback_kwargs = {}
                        for k in callback_on_step_end_tensor_inputs:
                            callback_kwargs[k] = locals()[k]
                        callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                        latents = callback_outputs.pop("latents", latents)
                        prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                        negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
                        control_image = callback_outputs.pop("control_image", control_image)

                    # call the callback, if provided
                    if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                        progress_bar.update()
                        if callback is not None and i % callback_steps == 0:
                            step_idx = i // getattr(self.scheduler, "order", 1)
                            callback(step_idx, t, latents)
        finally:
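            # restore the original ControlNet forward methods patched before the denoising loop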
            for cn_and_og in patched_cn_models:
                cn_and_og[0].forward = cn_and_og[1]

        # If we do sequential model offloading, let's offload unet and controlnet
        # manually for max memory savings
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.unet.to("cpu")
            self.controlnet.to("cpu")
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()

        if not output_type == "latent":
            # make sure the VAE is in float32 mode, as it overflows in float16
            needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast

            if needs_upcasting:
                self.upcast_vae()
                latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)

            latents = latents / self.vae.config.scaling_factor
            image = self.vae.decode(latents, return_dict=False)[0]

            # cast back to fp16 if needed
            if needs_upcasting:
                self.vae.to(dtype=torch.float16)
        else:
            image = latents
            return StableDiffusionXLPipelineOutput(images=image)

        image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return StableDiffusionXLPipelineOutput(images=image)

v0.36.0/pipeline_demofusion_sdxl.py
ADDED
@@ -0,0 +1,1382 @@
import inspect
import os
import random
import warnings
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import (
    FromSingleFileMixin,
    StableDiffusionLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    deprecate,
    is_accelerate_available,
    is_accelerate_version,
    is_invisible_watermark_available,
    logging,
    replace_example_docstring,
)
from diffusers.utils.torch_utils import randn_tensor


if is_invisible_watermark_available():
    from diffusers.pipelines.stable_diffusion_xl.watermark import (
        StableDiffusionXLWatermarker,
    )


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import StableDiffusionXLPipeline

        >>> pipe = StableDiffusionXLPipeline.from_pretrained(
        ...     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> prompt = "a photo of an astronaut riding a horse on mars"
        >>> image = pipe(prompt).images[0]
        ```
"""


def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3):
    x_coord = torch.arange(kernel_size)
    gaussian_1d = torch.exp(-((x_coord - (kernel_size - 1) / 2) ** 2) / (2 * sigma**2))
    gaussian_1d = gaussian_1d / gaussian_1d.sum()
    gaussian_2d = gaussian_1d[:, None] * gaussian_1d[None, :]
    kernel = gaussian_2d[None, None, :, :].repeat(channels, 1, 1, 1)

    return kernel


def gaussian_filter(latents, kernel_size=3, sigma=1.0):
    channels = latents.shape[1]
    kernel = gaussian_kernel(kernel_size, sigma, channels).to(latents.device, latents.dtype)
    blurred_latents = F.conv2d(latents, kernel, padding=kernel_size // 2, groups=channels)

    return blurred_latents
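

# Note: `gaussian_kernel` builds one 2D Gaussian filter per latent channel and `gaussian_filter` applies it
# depthwise (`groups=channels`), so each channel is blurred independently; these helpers are used later in
# this file to low-pass filter the latents.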


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://huggingface.co/papers/2305.08891). See Section 3.4
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg


class DemoFusionSDXLPipeline(
    DiffusionPipeline,
    StableDiffusionMixin,
    FromSingleFileMixin,
    StableDiffusionLoraLoaderMixin,
    TextualInversionLoaderMixin,
):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion XL.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    In addition the pipeline inherits the following loading methods:
        - *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
        - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]

    as well as the following saving methods:
        - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion XL uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        text_encoder_2 ([` CLIPTextModelWithProjection`]):
            Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
            specifically the
            [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
            variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        tokenizer_2 (`CLIPTokenizer`):
            Second Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
            Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
            `stabilityai/stable-diffusion-xl-base-1-0`.
        add_watermarker (`bool`, *optional*):
            Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
            watermark output images. If not defined, it will default to True if the package is installed, otherwise no
            watermarker will be used.
    """

    model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        text_encoder_2: CLIPTextModelWithProjection,
        tokenizer: CLIPTokenizer,
        tokenizer_2: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        force_zeros_for_empty_prompt: bool = True,
        add_watermarker: Optional[bool] = None,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            text_encoder_2=text_encoder_2,
            tokenizer=tokenizer,
            tokenizer_2=tokenizer_2,
            unet=unet,
            scheduler=scheduler,
        )
        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.default_sample_size = (
            self.unet.config.sample_size
            if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size")
            else 128
        )

        add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()

        if add_watermarker:
            self.watermark = StableDiffusionXLWatermarker()
        else:
            self.watermark = None

    def encode_prompt(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        pooled_prompt_embeds: Optional[torch.Tensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
        """
        device = device or self._execution_device

        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
            adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Define tokenizers and text encoders
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )

        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt
            # textual inversion: process multi-vector tokens if necessary
            prompt_embeds_list = []
            prompts = [prompt, prompt_2]
            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    prompt = self.maybe_convert_prompt(prompt, tokenizer)

                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )

                text_input_ids = text_inputs.input_ids
                untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                    text_input_ids, untruncated_ids
                ):
                    removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
                    logger.warning(
                        "The following part of your input was truncated because CLIP can only handle sequences up to"
                        f" {tokenizer.model_max_length} tokens: {removed_text}"
                    )

                prompt_embeds = text_encoder(
                    text_input_ids.to(device),
                    output_hidden_states=True,
                )

                # We are only ALWAYS interested in the pooled output of the final text encoder
                if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2:
                    pooled_prompt_embeds = prompt_embeds[0]

                prompt_embeds = prompt_embeds.hidden_states[-2]

                prompt_embeds_list.append(prompt_embeds)

            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)

        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt_2 = negative_prompt_2 or negative_prompt

            uncond_tokens: List[str]
            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt, negative_prompt_2]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = [negative_prompt, negative_prompt_2]

            negative_prompt_embeds_list = []
            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)

                max_length = prompt_embeds.shape[1]
                uncond_input = tokenizer(
                    negative_prompt,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )

                negative_prompt_embeds = text_encoder(
                    uncond_input.input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2:
                    negative_pooled_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]

                negative_prompt_embeds_list.append(negative_prompt_embeds)

            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)

        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )
        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )

        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        prompt_2,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        negative_prompt_2=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        pooled_prompt_embeds=None,
        negative_pooled_prompt_embeds=None,
        num_images_per_prompt=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt_2 is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
            )

        if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )

        # DemoFusion specific checks
        if max(height, width) % 1024 != 0:
            raise ValueError(
                f"the larger one of `height` and `width` has to be divisible by 1024 but are {height} and {width}."
            )

        if num_images_per_prompt != 1:
            warnings.warn("num_images_per_prompt != 1 is not supported by DemoFusion and will be ignored.")
            num_images_per_prompt = 1

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
        add_time_ids = list(original_size + crops_coords_top_left + target_size)

        passed_add_embed_dim = (
            self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
        )
        expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features

        if expected_add_embed_dim != passed_add_embed_dim:
            raise ValueError(
                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
            )

        add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
        return add_time_ids

    def get_views(self, height, width, window_size=128, stride=64, random_jitter=False):
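        # Split the latent canvas into overlapping `window_size` x `window_size` patches (latent-space units),
        # stepping by `stride` and clamping at the borders. With `random_jitter`, interior patches are shifted
        # by a small random offset and all coordinates are offset by `jitter_range`, which presumes the latents
        # are padded accordingly before these views are indexed.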
        height //= self.vae_scale_factor
        width //= self.vae_scale_factor
        num_blocks_height = int((height - window_size) / stride - 1e-6) + 2 if height > window_size else 1
        num_blocks_width = int((width - window_size) / stride - 1e-6) + 2 if width > window_size else 1
        total_num_blocks = int(num_blocks_height * num_blocks_width)
        views = []
        for i in range(total_num_blocks):
            h_start = int((i // num_blocks_width) * stride)
            h_end = h_start + window_size
            w_start = int((i % num_blocks_width) * stride)
            w_end = w_start + window_size

            if h_end > height:
                h_start = int(h_start + height - h_end)
                h_end = int(height)
            if w_end > width:
                w_start = int(w_start + width - w_end)
                w_end = int(width)
            if h_start < 0:
                h_end = int(h_end - h_start)
                h_start = 0
            if w_start < 0:
                w_end = int(w_end - w_start)
                w_start = 0

            if random_jitter:
                jitter_range = (window_size - stride) // 4
                w_jitter = 0
                h_jitter = 0
                if (w_start != 0) and (w_end != width):
                    w_jitter = random.randint(-jitter_range, jitter_range)
                elif (w_start == 0) and (w_end != width):
                    w_jitter = random.randint(-jitter_range, 0)
                elif (w_start != 0) and (w_end == width):
                    w_jitter = random.randint(0, jitter_range)
                if (h_start != 0) and (h_end != height):
                    h_jitter = random.randint(-jitter_range, jitter_range)
                elif (h_start == 0) and (h_end != height):
                    h_jitter = random.randint(-jitter_range, 0)
                elif (h_start != 0) and (h_end == height):
                    h_jitter = random.randint(0, jitter_range)
                h_start += h_jitter + jitter_range
                h_end += h_jitter + jitter_range
                w_start += w_jitter + jitter_range
                w_end += w_jitter + jitter_range

            views.append((h_start, h_end, w_start, w_end))
        return views

    def tiled_decode(self, latents, current_height, current_width):
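        # Decode a large latent tensor patch by patch to bound VAE memory usage: each core patch is decoded
        # together with `pad_size` of surrounding context, the padded border is cropped after decoding, and
        # overlapping pixel contributions are averaged via the `count` buffer.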
        core_size = self.unet.config.sample_size // 4
        core_stride = core_size
        pad_size = self.unet.config.sample_size // 4 * 3
        decoder_view_batch_size = 1

        views = self.get_views(current_height, current_width, stride=core_stride, window_size=core_size)
        views_batch = [views[i : i + decoder_view_batch_size] for i in range(0, len(views), decoder_view_batch_size)]
        latents_ = F.pad(latents, (pad_size, pad_size, pad_size, pad_size), "constant", 0)
        image = torch.zeros(latents.size(0), 3, current_height, current_width).to(latents.device)
        count = torch.zeros_like(image).to(latents.device)
        # get the latents corresponding to the current view coordinates
        with self.progress_bar(total=len(views_batch)) as progress_bar:
            for j, batch_view in enumerate(views_batch):
                len(batch_view)
                latents_for_view = torch.cat(
                    [
                        latents_[:, :, h_start : h_end + pad_size * 2, w_start : w_end + pad_size * 2]
                        for h_start, h_end, w_start, w_end in batch_view
                    ]
                )
                image_patch = self.vae.decode(latents_for_view / self.vae.config.scaling_factor, return_dict=False)[0]
                h_start, h_end, w_start, w_end = views[j]
                h_start, h_end, w_start, w_end = (
                    h_start * self.vae_scale_factor,
                    h_end * self.vae_scale_factor,
                    w_start * self.vae_scale_factor,
                    w_end * self.vae_scale_factor,
                )
                p_h_start, p_h_end, p_w_start, p_w_end = (
                    pad_size * self.vae_scale_factor,
                    image_patch.size(2) - pad_size * self.vae_scale_factor,
                    pad_size * self.vae_scale_factor,
                    image_patch.size(3) - pad_size * self.vae_scale_factor,
                )
                image[:, :, h_start:h_end, w_start:w_end] += image_patch[:, :, p_h_start:p_h_end, p_w_start:p_w_end]
                count[:, :, h_start:h_end, w_start:w_end] += 1
                progress_bar.update()
        image = image / count

        return image

    def upcast_vae(self):
        deprecate("upcast_vae", "1.0.0", "`upcast_vae` is deprecated. Please use `pipe.vae.to(torch.float32)`")
        self.vae.to(dtype=torch.float32)

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        prompt_2: Optional[Union[str, List[str]]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        denoising_end: Optional[float] = None,
        guidance_scale: float = 5.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt_2: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        pooled_prompt_embeds: Optional[torch.Tensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = False,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guidance_rescale: float = 0.0,
        original_size: Optional[Tuple[int, int]] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        target_size: Optional[Tuple[int, int]] = None,
        negative_original_size: Optional[Tuple[int, int]] = None,
        negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
        negative_target_size: Optional[Tuple[int, int]] = None,
        ################### DemoFusion specific parameters ####################
        view_batch_size: int = 16,
        multi_decoder: bool = True,
        stride: Optional[int] = 64,
        cosine_scale_1: Optional[float] = 3.0,
        cosine_scale_2: Optional[float] = 1.0,
        cosine_scale_3: Optional[float] = 1.0,
        sigma: Optional[float] = 0.8,
        show_image: bool = False,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image. This is set to 1024 by default for the best results.
                Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image. This is set to 1024 by default for the best results.
                Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            denoising_end (`float`, *optional*):
                When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
                completed before it is intentionally prematurely terminated. As a result, the returned sample will
                still retain a substantial amount of noise as determined by the discrete timesteps selected by the
                scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
                "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
                Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://huggingface.co/papers/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://huggingface.co/papers/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
                of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 745 |
+
`self.processor` in
|
| 746 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 747 |
+
guidance_rescale (`float`, *optional*, defaults to 0.7):
|
| 748 |
+
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
|
| 749 |
+
Flawed](https://huggingface.co/papers/2305.08891) `guidance_scale` is defined as `φ` in equation 16. of
|
| 750 |
+
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891).
|
| 751 |
+
Guidance rescale factor should fix overexposure when using zero terminal SNR.
|
| 752 |
+
original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 753 |
+
If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
|
| 754 |
+
`original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
|
| 755 |
+
explained in section 2.2 of
|
| 756 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 757 |
+
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 758 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 759 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 760 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 761 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 762 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 763 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 764 |
+
not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
|
| 765 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 766 |
+
negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 767 |
+
To negatively condition the generation process based on a specific image resolution. Part of SDXL's
|
| 768 |
+
micro-conditioning as explained in section 2.2 of
|
| 769 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 770 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 771 |
+
negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 772 |
+
To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
|
| 773 |
+
micro-conditioning as explained in section 2.2 of
|
| 774 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 775 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 776 |
+
negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 777 |
+
To negatively condition the generation process based on a target image resolution. It should be as same
|
| 778 |
+
as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 779 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
|
| 780 |
+
information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
|
| 781 |
+
################### DemoFusion specific parameters ####################
|
| 782 |
+
view_batch_size (`int`, defaults to 16):
|
| 783 |
+
The batch size for multiple denoising paths. Typically, a larger batch size can result in higher
|
| 784 |
+
efficiency but comes with increased GPU memory requirements.
|
| 785 |
+
multi_decoder (`bool`, defaults to True):
|
| 786 |
+
Determine whether to use a tiled decoder. Generally, when the resolution exceeds 3072x3072,
|
| 787 |
+
a tiled decoder becomes necessary.
|
| 788 |
+
stride (`int`, defaults to 64):
|
| 789 |
+
The stride of moving local patches. A smaller stride is better for alleviating seam issues,
|
| 790 |
+
but it also introduces additional computational overhead and inference time.
|
| 791 |
+
cosine_scale_1 (`float`, defaults to 3):
|
| 792 |
+
Control the strength of skip-residual. For specific impacts, please refer to Appendix C
|
| 793 |
+
in the DemoFusion paper.
|
| 794 |
+
cosine_scale_2 (`float`, defaults to 1):
|
| 795 |
+
Control the strength of dilated sampling. For specific impacts, please refer to Appendix C
|
| 796 |
+
in the DemoFusion paper.
|
| 797 |
+
cosine_scale_3 (`float`, defaults to 1):
|
| 798 |
+
Control the strength of the gaussian filter. For specific impacts, please refer to Appendix C
|
| 799 |
+
in the DemoFusion paper.
|
| 800 |
+
sigma (`float`, defaults to 1):
|
| 801 |
+
The standard value of the gaussian filter.
|
| 802 |
+
show_image (`bool`, defaults to False):
|
| 803 |
+
Determine whether to show intermediate results during generation.
|
| 804 |
+
|
| 805 |
+
Examples:
|
| 806 |
+
|
| 807 |
+
Returns:
|
| 808 |
+
a `list` with the generated images at each phase.
|
| 809 |
+
"""
|
| 810 |
+
|
| 811 |
+
# 0. Default height and width to unet
|
| 812 |
+
height = height or self.default_sample_size * self.vae_scale_factor
|
| 813 |
+
width = width or self.default_sample_size * self.vae_scale_factor
|
| 814 |
+
|
| 815 |
+
x1_size = self.default_sample_size * self.vae_scale_factor
|
| 816 |
+
|
| 817 |
+
height_scale = height / x1_size
|
| 818 |
+
width_scale = width / x1_size
|
| 819 |
+
scale_num = int(max(height_scale, width_scale))
|
| 820 |
+
aspect_ratio = min(height_scale, width_scale) / max(height_scale, width_scale)
|
| 821 |
+
|
| 822 |
+
original_size = original_size or (height, width)
|
| 823 |
+
target_size = target_size or (height, width)
|
| 824 |
+
|
| 825 |
+
# 1. Check inputs. Raise error if not correct
|
| 826 |
+
self.check_inputs(
|
| 827 |
+
prompt,
|
| 828 |
+
prompt_2,
|
| 829 |
+
height,
|
| 830 |
+
width,
|
| 831 |
+
callback_steps,
|
| 832 |
+
negative_prompt,
|
| 833 |
+
negative_prompt_2,
|
| 834 |
+
prompt_embeds,
|
| 835 |
+
negative_prompt_embeds,
|
| 836 |
+
pooled_prompt_embeds,
|
| 837 |
+
negative_pooled_prompt_embeds,
|
| 838 |
+
num_images_per_prompt,
|
| 839 |
+
)
|
| 840 |
+
|
| 841 |
+
# 2. Define call parameters
|
| 842 |
+
if prompt is not None and isinstance(prompt, str):
|
| 843 |
+
batch_size = 1
|
| 844 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 845 |
+
batch_size = len(prompt)
|
| 846 |
+
else:
|
| 847 |
+
batch_size = prompt_embeds.shape[0]
|
| 848 |
+
|
| 849 |
+
device = self._execution_device
|
| 850 |
+
|
| 851 |
+
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
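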
|
| 852 |
+
# of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
|
| 853 |
+
# corresponds to doing no classifier free guidance.
|
| 854 |
+
do_classifier_free_guidance = guidance_scale > 1.0
|
| 855 |
+
|
| 856 |
+
# 3. Encode input prompt
|
| 857 |
+
text_encoder_lora_scale = (
|
| 858 |
+
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 859 |
+
)
|
| 860 |
+
(
|
| 861 |
+
prompt_embeds,
|
| 862 |
+
negative_prompt_embeds,
|
| 863 |
+
pooled_prompt_embeds,
|
| 864 |
+
negative_pooled_prompt_embeds,
|
| 865 |
+
) = self.encode_prompt(
|
| 866 |
+
prompt=prompt,
|
| 867 |
+
prompt_2=prompt_2,
|
| 868 |
+
device=device,
|
| 869 |
+
num_images_per_prompt=num_images_per_prompt,
|
| 870 |
+
do_classifier_free_guidance=do_classifier_free_guidance,
|
| 871 |
+
negative_prompt=negative_prompt,
|
| 872 |
+
negative_prompt_2=negative_prompt_2,
|
| 873 |
+
prompt_embeds=prompt_embeds,
|
| 874 |
+
negative_prompt_embeds=negative_prompt_embeds,
|
| 875 |
+
pooled_prompt_embeds=pooled_prompt_embeds,
|
| 876 |
+
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
|
| 877 |
+
lora_scale=text_encoder_lora_scale,
|
| 878 |
+
)
|
| 879 |
+
|
| 880 |
+
# 4. Prepare timesteps
|
| 881 |
+
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
| 882 |
+
|
| 883 |
+
timesteps = self.scheduler.timesteps
|
| 884 |
+
|
| 885 |
+
# 5. Prepare latent variables
|
| 886 |
+
num_channels_latents = self.unet.config.in_channels
|
| 887 |
+
latents = self.prepare_latents(
|
| 888 |
+
batch_size * num_images_per_prompt,
|
| 889 |
+
num_channels_latents,
|
| 890 |
+
height // scale_num,
|
| 891 |
+
width // scale_num,
|
| 892 |
+
prompt_embeds.dtype,
|
| 893 |
+
device,
|
| 894 |
+
generator,
|
| 895 |
+
latents,
|
| 896 |
+
)
|
| 897 |
+
|
| 898 |
+
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
| 899 |
+
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
| 900 |
+
|
| 901 |
+
# 7. Prepare added time ids & embeddings
|
| 902 |
+
add_text_embeds = pooled_prompt_embeds
|
| 903 |
+
add_time_ids = self._get_add_time_ids(
|
| 904 |
+
original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
|
| 905 |
+
)
|
| 906 |
+
if negative_original_size is not None and negative_target_size is not None:
|
| 907 |
+
negative_add_time_ids = self._get_add_time_ids(
|
| 908 |
+
negative_original_size,
|
| 909 |
+
negative_crops_coords_top_left,
|
| 910 |
+
negative_target_size,
|
| 911 |
+
dtype=prompt_embeds.dtype,
|
| 912 |
+
)
|
| 913 |
+
else:
|
| 914 |
+
negative_add_time_ids = add_time_ids
|
| 915 |
+
|
| 916 |
+
if do_classifier_free_guidance:
|
| 917 |
+
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
|
| 918 |
+
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
|
| 919 |
+
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
|
| 920 |
+
|
| 921 |
+
prompt_embeds = prompt_embeds.to(device)
|
| 922 |
+
add_text_embeds = add_text_embeds.to(device)
|
| 923 |
+
add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
|
| 924 |
+
|
| 925 |
+
# 8. Denoising loop
|
| 926 |
+
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
|
| 927 |
+
|
| 928 |
+
# 7.1 Apply denoising_end
|
| 929 |
+
if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
|
| 930 |
+
discrete_timestep_cutoff = int(
|
| 931 |
+
round(
|
| 932 |
+
self.scheduler.config.num_train_timesteps
|
| 933 |
+
- (denoising_end * self.scheduler.config.num_train_timesteps)
|
| 934 |
+
)
|
| 935 |
+
)
|
| 936 |
+
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
|
| 937 |
+
timesteps = timesteps[:num_inference_steps]
|
| 938 |
+
|
| 939 |
+
output_images = []
|
| 940 |
+
|
| 941 |
+
############################################################### Phase 1 #################################################################
|
| 942 |
+
|
| 943 |
+
print("### Phase 1 Denoising ###")
|
| 944 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 945 |
+
for i, t in enumerate(timesteps):
|
| 946 |
+
latents_for_view = latents
|
| 947 |
+
|
| 948 |
+
# expand the latents if we are doing classifier free guidance
|
| 949 |
+
latent_model_input = latents.repeat_interleave(2, dim=0) if do_classifier_free_guidance else latents
|
| 950 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 951 |
+
|
| 952 |
+
# predict the noise residual
|
| 953 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
|
| 954 |
+
noise_pred = self.unet(
|
| 955 |
+
latent_model_input,
|
| 956 |
+
t,
|
| 957 |
+
encoder_hidden_states=prompt_embeds,
|
| 958 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 959 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 960 |
+
return_dict=False,
|
| 961 |
+
)[0]
|
| 962 |
+
|
| 963 |
+
# perform guidance
|
| 964 |
+
if do_classifier_free_guidance:
|
| 965 |
+
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
|
| 966 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 967 |
+
|
| 968 |
+
if do_classifier_free_guidance and guidance_rescale > 0.0:
|
| 969 |
+
# Based on 3.4. in https://huggingface.co/papers/2305.08891
|
| 970 |
+
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
|
| 971 |
+
|
| 972 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 973 |
+
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
|
| 974 |
+
|
| 975 |
+
# call the callback, if provided
|
| 976 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 977 |
+
progress_bar.update()
|
| 978 |
+
if callback is not None and i % callback_steps == 0:
|
| 979 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 980 |
+
callback(step_idx, t, latents)
|
| 981 |
+
|
| 982 |
+
anchor_mean = latents.mean()
|
| 983 |
+
anchor_std = latents.std()
|
| 984 |
+
if not output_type == "latent":
|
| 985 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 986 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 987 |
+
|
| 988 |
+
if needs_upcasting:
|
| 989 |
+
self.upcast_vae()
|
| 990 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 991 |
+
print("### Phase 1 Decoding ###")
|
| 992 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 993 |
+
# cast back to fp16 if needed
|
| 994 |
+
if needs_upcasting:
|
| 995 |
+
self.vae.to(dtype=torch.float16)
|
| 996 |
+
|
| 997 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 998 |
+
if show_image:
|
| 999 |
+
plt.figure(figsize=(10, 10))
|
| 1000 |
+
plt.imshow(image[0])
|
| 1001 |
+
plt.axis("off") # Turn off axis numbers and ticks
|
| 1002 |
+
plt.show()
|
| 1003 |
+
output_images.append(image[0])
|
| 1004 |
+
|
| 1005 |
+
####################################################### Phase 2+ #####################################################
|
| 1006 |
+
|
| 1007 |
+
for current_scale_num in range(2, scale_num + 1):
|
| 1008 |
+
print("### Phase {} Denoising ###".format(current_scale_num))
|
| 1009 |
+
current_height = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
|
| 1010 |
+
current_width = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
|
| 1011 |
+
if height > width:
|
| 1012 |
+
current_width = int(current_width * aspect_ratio)
|
| 1013 |
+
else:
|
| 1014 |
+
current_height = int(current_height * aspect_ratio)
|
| 1015 |
+
|
| 1016 |
+
latents = F.interpolate(
|
| 1017 |
+
latents,
|
| 1018 |
+
size=(int(current_height / self.vae_scale_factor), int(current_width / self.vae_scale_factor)),
|
| 1019 |
+
mode="bicubic",
|
| 1020 |
+
)
|
| 1021 |
+
|
| 1022 |
+
noise_latents = []
|
| 1023 |
+
noise = torch.randn_like(latents)
|
| 1024 |
+
for timestep in timesteps:
|
| 1025 |
+
noise_latent = self.scheduler.add_noise(latents, noise, timestep.unsqueeze(0))
|
| 1026 |
+
noise_latents.append(noise_latent)
|
| 1027 |
+
latents = noise_latents[0]
|
| 1028 |
+
|
| 1029 |
+
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
| 1030 |
+
for i, t in enumerate(timesteps):
|
| 1031 |
+
count = torch.zeros_like(latents)
|
| 1032 |
+
value = torch.zeros_like(latents)
|
| 1033 |
+
cosine_factor = (
|
| 1034 |
+
0.5
|
| 1035 |
+
* (
|
| 1036 |
+
1
|
| 1037 |
+
+ torch.cos(
|
| 1038 |
+
torch.pi
|
| 1039 |
+
* (self.scheduler.config.num_train_timesteps - t)
|
| 1040 |
+
/ self.scheduler.config.num_train_timesteps
|
| 1041 |
+
)
|
| 1042 |
+
).cpu()
|
| 1043 |
+
)
|
| 1044 |
+
|
| 1045 |
+
c1 = cosine_factor**cosine_scale_1
|
| 1046 |
+
latents = latents * (1 - c1) + noise_latents[i] * c1
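                # Editorial note: cosine_factor = 0.5 * (1 + cos(pi * (T - t) / T)) decays from ~1 at the
                # first (noisiest) timestep to 0 at the last one, so c1 = cosine_factor ** cosine_scale_1
                # lets the skip-residual (the re-noised, upsampled result of the previous phase) dominate
                # early and fade out as denoising progresses; a larger cosine_scale_1 makes it fade sooner.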
|
| 1047 |
+
|
| 1048 |
+
############################################# MultiDiffusion #############################################
|
| 1049 |
+
|
| 1050 |
+
views = self.get_views(
|
| 1051 |
+
current_height,
|
| 1052 |
+
current_width,
|
| 1053 |
+
stride=stride,
|
| 1054 |
+
window_size=self.unet.config.sample_size,
|
| 1055 |
+
random_jitter=True,
|
| 1056 |
+
)
|
| 1057 |
+
views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
|
| 1058 |
+
|
| 1059 |
+
jitter_range = (self.unet.config.sample_size - stride) // 4
|
| 1060 |
+
latents_ = F.pad(latents, (jitter_range, jitter_range, jitter_range, jitter_range), "constant", 0)
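                # Editorial note: `get_views` tiles the current latent canvas into overlapping
                # `sample_size` x `sample_size` windows spaced `stride` latents apart (MultiDiffusion-style
                # local views), with a small random jitter each step; the canvas is padded by
                # `jitter_range = (sample_size - stride) // 4` so jittered windows never leave the canvas.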
|
| 1061 |
+
|
| 1062 |
+
count_local = torch.zeros_like(latents_)
|
| 1063 |
+
value_local = torch.zeros_like(latents_)
|
| 1064 |
+
|
| 1065 |
+
for j, batch_view in enumerate(views_batch):
|
| 1066 |
+
vb_size = len(batch_view)
|
| 1067 |
+
|
| 1068 |
+
# get the latents corresponding to the current view coordinates
|
| 1069 |
+
latents_for_view = torch.cat(
|
| 1070 |
+
[
|
| 1071 |
+
latents_[:, :, h_start:h_end, w_start:w_end]
|
| 1072 |
+
for h_start, h_end, w_start, w_end in batch_view
|
| 1073 |
+
]
|
| 1074 |
+
)
|
| 1075 |
+
|
| 1076 |
+
# expand the latents if we are doing classifier free guidance
|
| 1077 |
+
latent_model_input = latents_for_view
|
| 1078 |
+
latent_model_input = (
|
| 1079 |
+
latent_model_input.repeat_interleave(2, dim=0)
|
| 1080 |
+
if do_classifier_free_guidance
|
| 1081 |
+
else latent_model_input
|
| 1082 |
+
)
|
| 1083 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1084 |
+
|
| 1085 |
+
prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
|
| 1086 |
+
add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
|
| 1087 |
+
add_time_ids_input = []
|
| 1088 |
+
for h_start, h_end, w_start, w_end in batch_view:
|
| 1089 |
+
add_time_ids_ = add_time_ids.clone()
|
| 1090 |
+
add_time_ids_[:, 2] = h_start * self.vae_scale_factor
|
| 1091 |
+
add_time_ids_[:, 3] = w_start * self.vae_scale_factor
|
| 1092 |
+
add_time_ids_input.append(add_time_ids_)
|
| 1093 |
+
add_time_ids_input = torch.cat(add_time_ids_input)
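                        # Editorial note: SDXL's `add_time_ids` are laid out as
                        # (original_h, original_w, crop_top, crop_left, target_h, target_w); indices 2 and 3
                        # are overwritten here so each local view is conditioned as a crop taken at its own
                        # patch position (converted from latent to pixel coordinates via `vae_scale_factor`).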
|
| 1094 |
+
|
| 1095 |
+
# predict the noise residual
|
| 1096 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
|
| 1097 |
+
noise_pred = self.unet(
|
| 1098 |
+
latent_model_input,
|
| 1099 |
+
t,
|
| 1100 |
+
encoder_hidden_states=prompt_embeds_input,
|
| 1101 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1102 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1103 |
+
return_dict=False,
|
| 1104 |
+
)[0]
|
| 1105 |
+
|
| 1106 |
+
if do_classifier_free_guidance:
|
| 1107 |
+
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
|
| 1108 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1109 |
+
|
| 1110 |
+
if do_classifier_free_guidance and guidance_rescale > 0.0:
|
| 1111 |
+
# Based on 3.4. in https://huggingface.co/papers/2305.08891
|
| 1112 |
+
noise_pred = rescale_noise_cfg(
|
| 1113 |
+
noise_pred, noise_pred_text, guidance_rescale=guidance_rescale
|
| 1114 |
+
)
|
| 1115 |
+
|
| 1116 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1117 |
+
self.scheduler._init_step_index(t)
|
| 1118 |
+
latents_denoised_batch = self.scheduler.step(
|
| 1119 |
+
noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False
|
| 1120 |
+
)[0]
|
| 1121 |
+
|
| 1122 |
+
# extract value from batch
|
| 1123 |
+
for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip(
|
| 1124 |
+
latents_denoised_batch.chunk(vb_size), batch_view
|
| 1125 |
+
):
|
| 1126 |
+
value_local[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
|
| 1127 |
+
count_local[:, :, h_start:h_end, w_start:w_end] += 1
|
| 1128 |
+
|
| 1129 |
+
value_local = value_local[
|
| 1130 |
+
:,
|
| 1131 |
+
:,
|
| 1132 |
+
jitter_range : jitter_range + current_height // self.vae_scale_factor,
|
| 1133 |
+
jitter_range : jitter_range + current_width // self.vae_scale_factor,
|
| 1134 |
+
]
|
| 1135 |
+
count_local = count_local[
|
| 1136 |
+
:,
|
| 1137 |
+
:,
|
| 1138 |
+
jitter_range : jitter_range + current_height // self.vae_scale_factor,
|
| 1139 |
+
jitter_range : jitter_range + current_width // self.vae_scale_factor,
|
| 1140 |
+
]
|
| 1141 |
+
|
| 1142 |
+
c2 = cosine_factor**cosine_scale_2
|
| 1143 |
+
|
| 1144 |
+
value += value_local / count_local * (1 - c2)
|
| 1145 |
+
count += torch.ones_like(value_local) * (1 - c2)
|
| 1146 |
+
|
| 1147 |
+
############################################# Dilated Sampling #############################################
|
| 1148 |
+
|
| 1149 |
+
views = [[h, w] for h in range(current_scale_num) for w in range(current_scale_num)]
|
| 1150 |
+
views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
|
| 1151 |
+
|
| 1152 |
+
h_pad = (current_scale_num - (latents.size(2) % current_scale_num)) % current_scale_num
|
| 1153 |
+
w_pad = (current_scale_num - (latents.size(3) % current_scale_num)) % current_scale_num
|
| 1154 |
+
latents_ = F.pad(latents, (w_pad, 0, h_pad, 0), "constant", 0)
|
| 1155 |
+
|
| 1156 |
+
count_global = torch.zeros_like(latents_)
|
| 1157 |
+
value_global = torch.zeros_like(latents_)
|
| 1158 |
+
|
| 1159 |
+
c3 = 0.99 * cosine_factor**cosine_scale_3 + 1e-2
|
| 1160 |
+
std_, mean_ = latents_.std(), latents_.mean()
|
| 1161 |
+
latents_gaussian = gaussian_filter(
|
| 1162 |
+
latents_, kernel_size=(2 * current_scale_num - 1), sigma=sigma * c3
|
| 1163 |
+
)
|
| 1164 |
+
latents_gaussian = (
|
| 1165 |
+
latents_gaussian - latents_gaussian.mean()
|
| 1166 |
+
) / latents_gaussian.std() * std_ + mean_
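                    # Editorial note: the strided slices `latents_[:, :, h::s, w::s]` (s = current_scale_num)
                    # form s*s shifted sub-grids, each at the base resolution, which are denoised as global
                    # views ("dilated sampling"); the Gaussian-blurred copy is renormalized back to the
                    # original mean/std and fed to the UNet, with the blur strength annealed over time by
                    # c3 = 0.99 * cosine_factor ** cosine_scale_3 + 1e-2.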
|
| 1167 |
+
|
| 1168 |
+
for j, batch_view in enumerate(views_batch):
|
| 1169 |
+
latents_for_view = torch.cat(
|
| 1170 |
+
[latents_[:, :, h::current_scale_num, w::current_scale_num] for h, w in batch_view]
|
| 1171 |
+
)
|
| 1172 |
+
latents_for_view_gaussian = torch.cat(
|
| 1173 |
+
[latents_gaussian[:, :, h::current_scale_num, w::current_scale_num] for h, w in batch_view]
|
| 1174 |
+
)
|
| 1175 |
+
|
| 1176 |
+
vb_size = latents_for_view.size(0)
|
| 1177 |
+
|
| 1178 |
+
# expand the latents if we are doing classifier free guidance
|
| 1179 |
+
latent_model_input = latents_for_view_gaussian
|
| 1180 |
+
latent_model_input = (
|
| 1181 |
+
latent_model_input.repeat_interleave(2, dim=0)
|
| 1182 |
+
if do_classifier_free_guidance
|
| 1183 |
+
else latent_model_input
|
| 1184 |
+
)
|
| 1185 |
+
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
|
| 1186 |
+
|
| 1187 |
+
prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
|
| 1188 |
+
add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
|
| 1189 |
+
add_time_ids_input = torch.cat([add_time_ids] * vb_size)
|
| 1190 |
+
|
| 1191 |
+
# predict the noise residual
|
| 1192 |
+
added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
|
| 1193 |
+
noise_pred = self.unet(
|
| 1194 |
+
latent_model_input,
|
| 1195 |
+
t,
|
| 1196 |
+
encoder_hidden_states=prompt_embeds_input,
|
| 1197 |
+
cross_attention_kwargs=cross_attention_kwargs,
|
| 1198 |
+
added_cond_kwargs=added_cond_kwargs,
|
| 1199 |
+
return_dict=False,
|
| 1200 |
+
)[0]
|
| 1201 |
+
|
| 1202 |
+
if do_classifier_free_guidance:
|
| 1203 |
+
noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
|
| 1204 |
+
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
| 1205 |
+
|
| 1206 |
+
if do_classifier_free_guidance and guidance_rescale > 0.0:
|
| 1207 |
+
# Based on 3.4. in https://huggingface.co/papers/2305.08891
|
| 1208 |
+
noise_pred = rescale_noise_cfg(
|
| 1209 |
+
noise_pred, noise_pred_text, guidance_rescale=guidance_rescale
|
| 1210 |
+
)
|
| 1211 |
+
|
| 1212 |
+
# compute the previous noisy sample x_t -> x_t-1
|
| 1213 |
+
self.scheduler._init_step_index(t)
|
| 1214 |
+
latents_denoised_batch = self.scheduler.step(
|
| 1215 |
+
noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False
|
| 1216 |
+
)[0]
|
| 1217 |
+
|
| 1218 |
+
# extract value from batch
|
| 1219 |
+
for latents_view_denoised, (h, w) in zip(latents_denoised_batch.chunk(vb_size), batch_view):
|
| 1220 |
+
value_global[:, :, h::current_scale_num, w::current_scale_num] += latents_view_denoised
|
| 1221 |
+
count_global[:, :, h::current_scale_num, w::current_scale_num] += 1
|
| 1222 |
+
|
| 1223 |
+
c2 = cosine_factor**cosine_scale_2
|
| 1224 |
+
|
| 1225 |
+
value_global = value_global[:, :, h_pad:, w_pad:]
|
| 1226 |
+
|
| 1227 |
+
value += value_global * c2
|
| 1228 |
+
count += torch.ones_like(value_global) * c2
|
| 1229 |
+
|
| 1230 |
+
###########################################################
|
| 1231 |
+
|
| 1232 |
+
latents = torch.where(count > 0, value / count, value)
|
| 1233 |
+
|
| 1234 |
+
# call the callback, if provided
|
| 1235 |
+
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
| 1236 |
+
progress_bar.update()
|
| 1237 |
+
if callback is not None and i % callback_steps == 0:
|
| 1238 |
+
step_idx = i // getattr(self.scheduler, "order", 1)
|
| 1239 |
+
callback(step_idx, t, latents)
|
| 1240 |
+
|
| 1241 |
+
#########################################################################################################################################
|
| 1242 |
+
|
| 1243 |
+
latents = (latents - latents.mean()) / latents.std() * anchor_std + anchor_mean
|
| 1244 |
+
if not output_type == "latent":
|
| 1245 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1246 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1247 |
+
|
| 1248 |
+
if needs_upcasting:
|
| 1249 |
+
self.upcast_vae()
|
| 1250 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1251 |
+
|
| 1252 |
+
print("### Phase {} Decoding ###".format(current_scale_num))
|
| 1253 |
+
if multi_decoder:
|
| 1254 |
+
image = self.tiled_decode(latents, current_height, current_width)
|
| 1255 |
+
else:
|
| 1256 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1257 |
+
|
| 1258 |
+
# cast back to fp16 if needed
|
| 1259 |
+
if needs_upcasting:
|
| 1260 |
+
self.vae.to(dtype=torch.float16)
|
| 1261 |
+
else:
|
| 1262 |
+
image = latents
|
| 1263 |
+
|
| 1264 |
+
if not output_type == "latent":
|
| 1265 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1266 |
+
if show_image:
|
| 1267 |
+
plt.figure(figsize=(10, 10))
|
| 1268 |
+
plt.imshow(image[0])
|
| 1269 |
+
plt.axis("off") # Turn off axis numbers and ticks
|
| 1270 |
+
plt.show()
|
| 1271 |
+
output_images.append(image[0])
|
| 1272 |
+
|
| 1273 |
+
# Offload all models
|
| 1274 |
+
self.maybe_free_model_hooks()
|
| 1275 |
+
|
| 1276 |
+
return output_images
|
| 1277 |
+
|
| 1278 |
+
# Override to properly handle the loading and unloading of the additional text encoder.
|
| 1279 |
+
def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
|
| 1280 |
+
# We could have accessed the unet config from `lora_state_dict()` too. We pass
|
| 1281 |
+
# it here explicitly to be able to tell that it's coming from an SDXL
|
| 1282 |
+
# pipeline.
|
| 1283 |
+
|
| 1284 |
+
# Remove any existing hooks.
|
| 1285 |
+
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
|
| 1286 |
+
from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
|
| 1287 |
+
else:
|
| 1288 |
+
raise ImportError("Offloading requires `accelerate v0.17.0` or higher.")
|
| 1289 |
+
|
| 1290 |
+
is_model_cpu_offload = False
|
| 1291 |
+
is_sequential_cpu_offload = False
|
| 1292 |
+
recursive = False
|
| 1293 |
+
for _, component in self.components.items():
|
| 1294 |
+
if isinstance(component, torch.nn.Module):
|
| 1295 |
+
if hasattr(component, "_hf_hook"):
|
| 1296 |
+
is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
|
| 1297 |
+
is_sequential_cpu_offload = (
|
| 1298 |
+
isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
|
| 1299 |
+
or hasattr(component._hf_hook, "hooks")
|
| 1300 |
+
and isinstance(component._hf_hook.hooks[0], AlignDevicesHook)
|
| 1301 |
+
)
|
| 1302 |
+
logger.info(
|
| 1303 |
+
"Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
|
| 1304 |
+
)
|
| 1305 |
+
recursive = is_sequential_cpu_offload
|
| 1306 |
+
remove_hook_from_module(component, recurse=recursive)
|
| 1307 |
+
state_dict, network_alphas = self.lora_state_dict(
|
| 1308 |
+
pretrained_model_name_or_path_or_dict,
|
| 1309 |
+
unet_config=self.unet.config,
|
| 1310 |
+
**kwargs,
|
| 1311 |
+
)
|
| 1312 |
+
self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
|
| 1313 |
+
|
| 1314 |
+
text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
|
| 1315 |
+
if len(text_encoder_state_dict) > 0:
|
| 1316 |
+
self.load_lora_into_text_encoder(
|
| 1317 |
+
text_encoder_state_dict,
|
| 1318 |
+
network_alphas=network_alphas,
|
| 1319 |
+
text_encoder=self.text_encoder,
|
| 1320 |
+
prefix="text_encoder",
|
| 1321 |
+
lora_scale=self.lora_scale,
|
| 1322 |
+
)
|
| 1323 |
+
|
| 1324 |
+
text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
|
| 1325 |
+
if len(text_encoder_2_state_dict) > 0:
|
| 1326 |
+
self.load_lora_into_text_encoder(
|
| 1327 |
+
text_encoder_2_state_dict,
|
| 1328 |
+
network_alphas=network_alphas,
|
| 1329 |
+
text_encoder=self.text_encoder_2,
|
| 1330 |
+
prefix="text_encoder_2",
|
| 1331 |
+
lora_scale=self.lora_scale,
|
| 1332 |
+
)
|
| 1333 |
+
|
| 1334 |
+
# Offload back.
|
| 1335 |
+
if is_model_cpu_offload:
|
| 1336 |
+
self.enable_model_cpu_offload()
|
| 1337 |
+
elif is_sequential_cpu_offload:
|
| 1338 |
+
self.enable_sequential_cpu_offload()
|
| 1339 |
+
|
| 1340 |
+
@classmethod
|
| 1341 |
+
def save_lora_weights(
|
| 1342 |
+
cls,
|
| 1343 |
+
save_directory: Union[str, os.PathLike],
|
| 1344 |
+
unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
| 1345 |
+
text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
| 1346 |
+
text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
|
| 1347 |
+
is_main_process: bool = True,
|
| 1348 |
+
weight_name: str = None,
|
| 1349 |
+
save_function: Callable = None,
|
| 1350 |
+
safe_serialization: bool = True,
|
| 1351 |
+
):
|
| 1352 |
+
state_dict = {}
|
| 1353 |
+
|
| 1354 |
+
def pack_weights(layers, prefix):
|
| 1355 |
+
layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
|
| 1356 |
+
layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
|
| 1357 |
+
return layers_state_dict
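        # Editorial note: `pack_weights` only namespaces the keys, e.g. (illustrative)
        # "down_blocks.0....lora.up.weight" -> "unet.down_blocks.0....lora.up.weight", so the UNet and
        # both text-encoder LoRA layers can be stored together in a single flat state dict.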
|
| 1358 |
+
|
| 1359 |
+
if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
|
| 1360 |
+
raise ValueError(
|
| 1361 |
+
"You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
|
| 1362 |
+
)
|
| 1363 |
+
|
| 1364 |
+
if unet_lora_layers:
|
| 1365 |
+
state_dict.update(pack_weights(unet_lora_layers, "unet"))
|
| 1366 |
+
|
| 1367 |
+
if text_encoder_lora_layers and text_encoder_2_lora_layers:
|
| 1368 |
+
state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
|
| 1369 |
+
state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
|
| 1370 |
+
|
| 1371 |
+
cls.write_lora_layers(
|
| 1372 |
+
state_dict=state_dict,
|
| 1373 |
+
save_directory=save_directory,
|
| 1374 |
+
is_main_process=is_main_process,
|
| 1375 |
+
weight_name=weight_name,
|
| 1376 |
+
save_function=save_function,
|
| 1377 |
+
safe_serialization=safe_serialization,
|
| 1378 |
+
)
|
| 1379 |
+
|
| 1380 |
+
def _remove_text_encoder_monkey_patch(self):
|
| 1381 |
+
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
|
| 1382 |
+
self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
|
v0.36.0/pipeline_fabric.py
ADDED
@@ -0,0 +1,755 @@
# Copyright 2025 FABRIC authors and the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Union

import torch
from packaging import version
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, UNet2DConditionModel
from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models.attention import BasicTransformerBlock
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.schedulers import EulerAncestralDiscreteScheduler, KarrasDiffusionSchedulers
from diffusers.utils import (
    deprecate,
    logging,
    replace_example_docstring,
)
from diffusers.utils.torch_utils import randn_tensor


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import DiffusionPipeline
        >>> import torch

        >>> model_id = "dreamlike-art/dreamlike-photoreal-2.0"
        >>> pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, custom_pipeline="pipeline_fabric")
        >>> pipe = pipe.to("cuda")
        >>> prompt = "a giant standing in a fantasy landscape best quality"
        >>> liked = []  # list of images for positive feedback
        >>> disliked = []  # list of images for negative feedback
        >>> image = pipe(prompt, num_images=4, liked=liked, disliked=disliked).images[0]
        ```
"""

class FabricCrossAttnProcessor:
|
| 58 |
+
def __init__(self):
|
| 59 |
+
self.attention_probs = None
|
| 60 |
+
|
| 61 |
+
def __call__(
|
| 62 |
+
self,
|
| 63 |
+
attn,
|
| 64 |
+
hidden_states,
|
| 65 |
+
encoder_hidden_states=None,
|
| 66 |
+
attention_mask=None,
|
| 67 |
+
weights=None,
|
| 68 |
+
lora_scale=1.0,
|
| 69 |
+
):
|
| 70 |
+
batch_size, sequence_length, _ = (
|
| 71 |
+
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
|
| 72 |
+
)
|
| 73 |
+
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
|
| 74 |
+
|
| 75 |
+
if isinstance(attn.processor, LoRAAttnProcessor):
|
| 76 |
+
query = attn.to_q(hidden_states) + lora_scale * attn.processor.to_q_lora(hidden_states)
|
| 77 |
+
else:
|
| 78 |
+
query = attn.to_q(hidden_states)
|
| 79 |
+
|
| 80 |
+
if encoder_hidden_states is None:
|
| 81 |
+
encoder_hidden_states = hidden_states
|
| 82 |
+
elif attn.norm_cross:
|
| 83 |
+
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
|
| 84 |
+
|
| 85 |
+
if isinstance(attn.processor, LoRAAttnProcessor):
|
| 86 |
+
key = attn.to_k(encoder_hidden_states) + lora_scale * attn.processor.to_k_lora(encoder_hidden_states)
|
| 87 |
+
value = attn.to_v(encoder_hidden_states) + lora_scale * attn.processor.to_v_lora(encoder_hidden_states)
|
| 88 |
+
else:
|
| 89 |
+
key = attn.to_k(encoder_hidden_states)
|
| 90 |
+
value = attn.to_v(encoder_hidden_states)
|
| 91 |
+
|
| 92 |
+
query = attn.head_to_batch_dim(query)
|
| 93 |
+
key = attn.head_to_batch_dim(key)
|
| 94 |
+
value = attn.head_to_batch_dim(value)
|
| 95 |
+
|
| 96 |
+
attention_probs = attn.get_attention_scores(query, key, attention_mask)
|
| 97 |
+
|
| 98 |
+
if weights is not None:
|
| 99 |
+
if weights.shape[0] != 1:
|
| 100 |
+
weights = weights.repeat_interleave(attn.heads, dim=0)
|
| 101 |
+
attention_probs = attention_probs * weights[:, None]
|
| 102 |
+
attention_probs = attention_probs / attention_probs.sum(dim=-1, keepdim=True)
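        # Editorial note: `weights` rescales the attention paid to the concatenated feedback keys/values
        # (FABRIC's liked/disliked reference images); after scaling, each row is renormalized to sum to 1
        # so the result remains a valid attention distribution.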
|
| 103 |
+
|
| 104 |
+
hidden_states = torch.bmm(attention_probs, value)
|
| 105 |
+
hidden_states = attn.batch_to_head_dim(hidden_states)
|
| 106 |
+
|
| 107 |
+
# linear proj
|
| 108 |
+
if isinstance(attn.processor, LoRAAttnProcessor):
|
| 109 |
+
hidden_states = attn.to_out[0](hidden_states) + lora_scale * attn.processor.to_out_lora(hidden_states)
|
| 110 |
+
else:
|
| 111 |
+
hidden_states = attn.to_out[0](hidden_states)
|
| 112 |
+
# dropout
|
| 113 |
+
hidden_states = attn.to_out[1](hidden_states)
|
| 114 |
+
|
| 115 |
+
return hidden_states
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class FabricPipeline(DiffusionPipeline):
|
| 119 |
+
r"""
|
| 120 |
+
Pipeline for text-to-image generation using Stable Diffusion and conditioning the results using feedback images.
|
| 121 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
|
| 122 |
+
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
|
| 123 |
+
|
| 124 |
+
Args:
|
| 125 |
+
vae ([`AutoencoderKL`]):
|
| 126 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
| 127 |
+
text_encoder ([`~transformers.CLIPTextModel`]):
|
| 128 |
+
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
| 129 |
+
tokenizer ([`~transformers.CLIPTokenizer`]):
|
| 130 |
+
A `CLIPTokenizer` to tokenize text.
|
| 131 |
+
unet ([`UNet2DConditionModel`]):
|
| 132 |
+
A `UNet2DConditionModel` to denoise the encoded image latents.
|
| 133 |
+
scheduler ([`EulerAncestralDiscreteScheduler`]):
|
| 134 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
| 135 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
| 136 |
+
safety_checker ([`StableDiffusionSafetyChecker`]):
|
| 137 |
+
Classification module that estimates whether generated images could be considered offensive or harmful.
|
| 138 |
+
Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
|
| 139 |
+
about a model's potential harms.
|
| 140 |
+
"""
|
| 141 |
+
|
| 142 |
+
def __init__(
|
| 143 |
+
self,
|
| 144 |
+
vae: AutoencoderKL,
|
| 145 |
+
text_encoder: CLIPTextModel,
|
| 146 |
+
tokenizer: CLIPTokenizer,
|
| 147 |
+
unet: UNet2DConditionModel,
|
| 148 |
+
scheduler: KarrasDiffusionSchedulers,
|
| 149 |
+
requires_safety_checker: bool = True,
|
| 150 |
+
):
|
| 151 |
+
super().__init__()
|
| 152 |
+
|
| 153 |
+
is_unet_version_less_0_9_0 = (
|
| 154 |
+
unet is not None
|
| 155 |
+
and hasattr(unet.config, "_diffusers_version")
|
| 156 |
+
and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
|
| 157 |
+
)
|
| 158 |
+
is_unet_sample_size_less_64 = (
|
| 159 |
+
unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
|
| 160 |
+
)
|
| 161 |
+
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
|
| 162 |
+
deprecation_message = (
|
| 163 |
+
"The configuration file of the unet has set the default `sample_size` to smaller than"
|
| 164 |
+
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
|
| 165 |
+
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
|
| 166 |
+
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 167 |
+
" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
|
| 168 |
+
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
|
| 169 |
+
" in the config might lead to incorrect results in future versions. If you have downloaded this"
|
| 170 |
+
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
|
| 171 |
+
" the `unet/config.json` file"
|
| 172 |
+
)
|
| 173 |
+
|
| 174 |
+
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
|
| 175 |
+
new_config = dict(unet.config)
|
| 176 |
+
new_config["sample_size"] = 64
|
| 177 |
+
unet._internal_dict = FrozenDict(new_config)
|
| 178 |
+
|
| 179 |
+
self.register_modules(
|
| 180 |
+
unet=unet,
|
| 181 |
+
vae=vae,
|
| 182 |
+
text_encoder=text_encoder,
|
| 183 |
+
tokenizer=tokenizer,
|
| 184 |
+
scheduler=scheduler,
|
| 185 |
+
)
|
| 186 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
| 187 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
| 188 |
+
|
| 189 |
+
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
|
| 190 |
+
def _encode_prompt(
|
| 191 |
+
self,
|
| 192 |
+
prompt,
|
| 193 |
+
device,
|
| 194 |
+
num_images_per_prompt,
|
| 195 |
+
do_classifier_free_guidance,
|
| 196 |
+
negative_prompt=None,
|
| 197 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
| 198 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 199 |
+
lora_scale: Optional[float] = None,
|
| 200 |
+
):
|
| 201 |
+
r"""
|
| 202 |
+
Encodes the prompt into text encoder hidden states.
|
| 203 |
+
|
| 204 |
+
Args:
|
| 205 |
+
prompt (`str` or `List[str]`, *optional*):
|
| 206 |
+
prompt to be encoded
|
| 207 |
+
device (`torch.device`):
|
| 208 |
+
torch device
|
| 209 |
+
num_images_per_prompt (`int`):
|
| 210 |
+
number of images that should be generated per prompt
|
| 211 |
+
do_classifier_free_guidance (`bool`):
|
| 212 |
+
whether to use classifier free guidance or not
|
| 213 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 214 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 215 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 216 |
+
less than `1`).
|
| 217 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
| 218 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
| 219 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
| 220 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 221 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 222 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 223 |
+
argument.
|
| 224 |
+
lora_scale (`float`, *optional*):
|
| 225 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
| 226 |
+
"""
|
| 227 |
+
# set lora scale so that monkey patched LoRA
|
| 228 |
+
# function of text encoder can correctly access it
|
| 229 |
+
if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
|
| 230 |
+
self._lora_scale = lora_scale
|
| 231 |
+
|
| 232 |
+
if prompt is not None and isinstance(prompt, str):
|
| 233 |
+
batch_size = 1
|
| 234 |
+
elif prompt is not None and isinstance(prompt, list):
|
| 235 |
+
batch_size = len(prompt)
|
| 236 |
+
else:
|
| 237 |
+
batch_size = prompt_embeds.shape[0]
|
| 238 |
+
|
| 239 |
+
if prompt_embeds is None:
|
| 240 |
+
# textual inversion: process multi-vector tokens if necessary
|
| 241 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
| 242 |
+
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
|
| 243 |
+
|
| 244 |
+
text_inputs = self.tokenizer(
|
| 245 |
+
prompt,
|
| 246 |
+
padding="max_length",
|
| 247 |
+
max_length=self.tokenizer.model_max_length,
|
| 248 |
+
truncation=True,
|
| 249 |
+
return_tensors="pt",
|
| 250 |
+
)
|
| 251 |
+
text_input_ids = text_inputs.input_ids
|
| 252 |
+
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
| 253 |
+
|
| 254 |
+
            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

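    # Editor's note (descriptive comment, not in the upstream file): the next two helpers implement
    # the FABRIC feedback mechanism. `get_unet_hidden_states` temporarily swaps every self-attention
    # (`attn1`) forward in the UNet for a wrapper that records the incoming hidden states while the
    # noised feedback latents are passed through, then restores the original forwards and returns
    # the cached states.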
    def get_unet_hidden_states(self, z_all, t, prompt_embd):
        cached_hidden_states = []
        for module in self.unet.modules():
            if isinstance(module, BasicTransformerBlock):

                def new_forward(self, hidden_states, *args, **kwargs):
                    cached_hidden_states.append(hidden_states.clone().detach().cpu())
                    return self.old_forward(hidden_states, *args, **kwargs)

                module.attn1.old_forward = module.attn1.forward
                module.attn1.forward = new_forward.__get__(module.attn1)

        # run forward pass to cache hidden states, output can be discarded
        _ = self.unet(z_all, t, encoder_hidden_states=prompt_embd)

        # restore original forward pass
        for module in self.unet.modules():
            if isinstance(module, BasicTransformerBlock):
                module.attn1.forward = module.attn1.old_forward
                del module.attn1.old_forward

        return cached_hidden_states

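    # Editor's note (descriptive comment, not in the upstream file): this method patches `attn1`
    # again, but now each block attends over its own hidden states concatenated with the cached
    # feedback states, using `FabricCrossAttnProcessor` so the extra tokens receive per-block
    # attention weights: the conditional half attends to the liked-image states (scaled by
    # `pos_weights`) and the unconditional half to the disliked-image states (scaled by `neg_weights`).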
    def unet_forward_with_cached_hidden_states(
        self,
        z_all,
        t,
        prompt_embd,
        cached_pos_hiddens: Optional[List[torch.Tensor]] = None,
        cached_neg_hiddens: Optional[List[torch.Tensor]] = None,
        pos_weights=(0.8, 0.8),
        neg_weights=(0.5, 0.5),
    ):
        if cached_pos_hiddens is None and cached_neg_hiddens is None:
            return self.unet(z_all, t, encoder_hidden_states=prompt_embd)

        local_pos_weights = torch.linspace(*pos_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist()
        local_neg_weights = torch.linspace(*neg_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist()
        for block, pos_weight, neg_weight in zip(
            self.unet.down_blocks + [self.unet.mid_block] + self.unet.up_blocks,
            local_pos_weights + [pos_weights[1]] + local_pos_weights[::-1],
            local_neg_weights + [neg_weights[1]] + local_neg_weights[::-1],
        ):
            for module in block.modules():
                if isinstance(module, BasicTransformerBlock):

                    def new_forward(
                        self,
                        hidden_states,
                        pos_weight=pos_weight,
                        neg_weight=neg_weight,
                        **kwargs,
                    ):
                        cond_hiddens, uncond_hiddens = hidden_states.chunk(2, dim=0)
                        batch_size, d_model = cond_hiddens.shape[:2]
                        device, dtype = hidden_states.device, hidden_states.dtype

                        weights = torch.ones(batch_size, d_model, device=device, dtype=dtype)
                        out_pos = self.old_forward(hidden_states)
                        out_neg = self.old_forward(hidden_states)

                        if cached_pos_hiddens is not None:
                            cached_pos_hs = cached_pos_hiddens.pop(0).to(hidden_states.device)
                            cond_pos_hs = torch.cat([cond_hiddens, cached_pos_hs], dim=1)
                            pos_weights = weights.clone().repeat(1, 1 + cached_pos_hs.shape[1] // d_model)
                            pos_weights[:, d_model:] = pos_weight
                            attn_with_weights = FabricCrossAttnProcessor()
                            out_pos = attn_with_weights(
                                self,
                                cond_hiddens,
                                encoder_hidden_states=cond_pos_hs,
                                weights=pos_weights,
                            )
                        else:
                            out_pos = self.old_forward(cond_hiddens)

                        if cached_neg_hiddens is not None:
                            cached_neg_hs = cached_neg_hiddens.pop(0).to(hidden_states.device)
                            uncond_neg_hs = torch.cat([uncond_hiddens, cached_neg_hs], dim=1)
                            neg_weights = weights.clone().repeat(1, 1 + cached_neg_hs.shape[1] // d_model)
                            neg_weights[:, d_model:] = neg_weight
                            attn_with_weights = FabricCrossAttnProcessor()
                            out_neg = attn_with_weights(
                                self,
                                uncond_hiddens,
                                encoder_hidden_states=uncond_neg_hs,
                                weights=neg_weights,
                            )
                        else:
                            out_neg = self.old_forward(uncond_hiddens)

                        out = torch.cat([out_pos, out_neg], dim=0)
                        return out

                    module.attn1.old_forward = module.attn1.forward
                    module.attn1.forward = new_forward.__get__(module.attn1)

        out = self.unet(z_all, t, encoder_hidden_states=prompt_embd)

        # restore original forward pass
        for module in self.unet.modules():
            if isinstance(module, BasicTransformerBlock):
                module.attn1.forward = module.attn1.old_forward
                del module.attn1.old_forward

        return out

    def preprocess_feedback_images(self, images, vae, dim, device, dtype, generator) -> torch.Tensor:
        images_t = [self.image_to_tensor(img, dim, dtype) for img in images]
        images_t = torch.stack(images_t).to(device)
        latents = vae.config.scaling_factor * vae.encode(images_t).latent_dist.sample(generator)

        return torch.cat([latents], dim=0)

    def check_inputs(
        self,
        prompt,
        negative_prompt=None,
        liked=None,
        disliked=None,
        height=None,
        width=None,
    ):
        if prompt is None:
            raise ValueError("Provide `prompt`. Cannot leave `prompt` undefined.")
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and (
            not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list)
        ):
            raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")

        if liked is not None and not isinstance(liked, list):
            raise ValueError(f"`liked` has to be of type `list` but is {type(liked)}")

        if disliked is not None and not isinstance(disliked, list):
            raise ValueError(f"`disliked` has to be of type `list` but is {type(disliked)}")

        if height is not None and not isinstance(height, int):
            raise ValueError(f"`height` has to be of type `int` but is {type(height)}")

        if width is not None and not isinstance(width, int):
            raise ValueError(f"`width` has to be of type `int` but is {type(width)}")

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Optional[Union[str, List[str]]] = "",
        negative_prompt: Optional[Union[str, List[str]]] = "lowres, bad anatomy, bad hands, cropped, worst quality",
        liked: Optional[Union[List[str], List[Image.Image]]] = [],
        disliked: Optional[Union[List[str], List[Image.Image]]] = [],
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        height: int = 512,
        width: int = 512,
        return_dict: bool = True,
        num_images: int = 4,
        guidance_scale: float = 7.0,
        num_inference_steps: int = 20,
        output_type: Optional[str] = "pil",
        feedback_start_ratio: float = 0.33,
        feedback_end_ratio: float = 0.66,
        min_weight: float = 0.05,
        max_weight: float = 0.8,
        neg_scale: float = 0.5,
        pos_bottleneck_scale: float = 1.0,
        neg_bottleneck_scale: float = 1.0,
        latents: Optional[torch.Tensor] = None,
    ):
        r"""
        The call function to the pipeline for generation. Generates a trajectory of images with binary feedback. The
        feedback can be given as a list of liked and disliked images.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`
                instead.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            liked (`List[Image.Image]` or `List[str]`, *optional*):
                Encourages images with liked features.
            disliked (`List[Image.Image]` or `List[str]`, *optional*):
                Discourages images with disliked features.
            generator (`torch.Generator` or `List[torch.Generator]` or `int`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) or an `int` to
                make generation deterministic.
            height (`int`, *optional*, defaults to 512):
                Height of the generated image.
            width (`int`, *optional*, defaults to 512):
                Width of the generated image.
            num_images (`int`, *optional*, defaults to 4):
                The number of images to generate per prompt.
            guidance_scale (`float`, *optional*, defaults to 7.0):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            num_inference_steps (`int`, *optional*, defaults to 20):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            feedback_start_ratio (`float`, *optional*, defaults to `.33`):
                Start point for providing feedback (between 0 and 1).
            feedback_end_ratio (`float`, *optional*, defaults to `.66`):
                End point for providing feedback (between 0 and 1).
            min_weight (`float`, *optional*, defaults to `.05`):
                Minimum weight for feedback.
            max_weight (`float`, *optional*, defaults to `.8`):
                Maximum weight for feedback.
            neg_scale (`float`, *optional*, defaults to `.5`):
                Scale factor for negative feedback.

        Examples:

        Returns:
            [`~pipelines.fabric.FabricPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.

        """

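        # Editor's note (descriptive comment, not in the upstream file): overview of the procedure
        # below. Feedback images are VAE-encoded once, re-noised to the current timestep at every
        # denoising step, their self-attention states are cached via `get_unet_hidden_states`, and
        # the main UNet call then attends to those cached states (conditional half -> liked images,
        # unconditional half -> disliked images) before a standard classifier-free-guidance update.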
        self.check_inputs(prompt, negative_prompt, liked, disliked)

        device = self._execution_device
        dtype = self.unet.dtype

        if isinstance(prompt, str) and prompt is not None:
            batch_size = 1
        elif isinstance(prompt, list) and prompt is not None:
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if isinstance(negative_prompt, str):
            negative_prompt = negative_prompt
        elif isinstance(negative_prompt, list):
            negative_prompt = negative_prompt
        else:
            assert len(negative_prompt) == batch_size

        shape = (
            batch_size * num_images,
            self.unet.config.in_channels,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )
        latent_noise = randn_tensor(
            shape,
            device=device,
            dtype=dtype,
            generator=generator,
        )

        positive_latents = (
            self.preprocess_feedback_images(liked, self.vae, (height, width), device, dtype, generator)
            if liked and len(liked) > 0
            else torch.tensor(
                [],
                device=device,
                dtype=dtype,
            )
        )
        negative_latents = (
            self.preprocess_feedback_images(disliked, self.vae, (height, width), device, dtype, generator)
            if disliked and len(disliked) > 0
            else torch.tensor(
                [],
                device=device,
                dtype=dtype,
            )
        )

        do_classifier_free_guidance = guidance_scale > 0.1

        (prompt_neg_embs, prompt_pos_embs) = self._encode_prompt(
            prompt,
            device,
            num_images,
            do_classifier_free_guidance,
            negative_prompt,
        ).split([num_images * batch_size, num_images * batch_size])

        batched_prompt_embd = torch.cat([prompt_pos_embs, prompt_neg_embs], dim=0)

        null_tokens = self.tokenizer(
            [""],
            return_tensors="pt",
            max_length=self.tokenizer.model_max_length,
            padding="max_length",
            truncation=True,
        )

        if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
            attention_mask = null_tokens.attention_mask.to(device)
        else:
            attention_mask = None

        null_prompt_emb = self.text_encoder(
            input_ids=null_tokens.input_ids.to(device),
            attention_mask=attention_mask,
        ).last_hidden_state

        null_prompt_emb = null_prompt_emb.to(device=device, dtype=dtype)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        latent_noise = latent_noise * self.scheduler.init_noise_sigma

        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order

        ref_start_idx = round(len(timesteps) * feedback_start_ratio)
        ref_end_idx = round(len(timesteps) * feedback_end_ratio)

        with self.progress_bar(total=num_inference_steps) as pbar:
            for i, t in enumerate(timesteps):
                sigma = self.scheduler.sigma_t[t] if hasattr(self.scheduler, "sigma_t") else 0
                if hasattr(self.scheduler, "sigmas"):
                    sigma = self.scheduler.sigmas[i]

                alpha_hat = 1 / (sigma**2 + 1)

                z_single = self.scheduler.scale_model_input(latent_noise, t)
                z_all = torch.cat([z_single] * 2, dim=0)
                z_ref = torch.cat([positive_latents, negative_latents], dim=0)

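                # Editor's note (descriptive comment, not in the upstream file): feedback is applied
                # at full strength (max_weight) only in the middle portion of the denoising trajectory,
                # between feedback_start_ratio and feedback_end_ratio; outside that window the
                # feedback attention weight drops to min_weight.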
                if i >= ref_start_idx and i <= ref_end_idx:
                    weight_factor = max_weight
                else:
                    weight_factor = min_weight

                pos_ws = (weight_factor, weight_factor * pos_bottleneck_scale)
                neg_ws = (weight_factor * neg_scale, weight_factor * neg_scale * neg_bottleneck_scale)

                if z_ref.size(0) > 0 and weight_factor > 0:
                    noise = torch.randn_like(z_ref)
                    if isinstance(self.scheduler, EulerAncestralDiscreteScheduler):
                        z_ref_noised = (alpha_hat**0.5 * z_ref + (1 - alpha_hat) ** 0.5 * noise).type(dtype)
                    else:
                        z_ref_noised = self.scheduler.add_noise(z_ref, noise, t)

                    ref_prompt_embd = torch.cat(
                        [null_prompt_emb] * (len(positive_latents) + len(negative_latents)), dim=0
                    )
                    cached_hidden_states = self.get_unet_hidden_states(z_ref_noised, t, ref_prompt_embd)

                    n_pos, n_neg = positive_latents.shape[0], negative_latents.shape[0]
                    cached_pos_hs, cached_neg_hs = [], []
                    for hs in cached_hidden_states:
                        cached_pos, cached_neg = hs.split([n_pos, n_neg], dim=0)
                        cached_pos = cached_pos.view(1, -1, *cached_pos.shape[2:]).expand(num_images, -1, -1)
                        cached_neg = cached_neg.view(1, -1, *cached_neg.shape[2:]).expand(num_images, -1, -1)
                        cached_pos_hs.append(cached_pos)
                        cached_neg_hs.append(cached_neg)

                    if n_pos == 0:
                        cached_pos_hs = None
                    if n_neg == 0:
                        cached_neg_hs = None
                else:
                    cached_pos_hs, cached_neg_hs = None, None
                unet_out = self.unet_forward_with_cached_hidden_states(
                    z_all,
                    t,
                    prompt_embd=batched_prompt_embd,
                    cached_pos_hiddens=cached_pos_hs,
                    cached_neg_hiddens=cached_neg_hs,
                    pos_weights=pos_ws,
                    neg_weights=neg_ws,
                )[0]

                noise_cond, noise_uncond = unet_out.chunk(2)
                guidance = noise_cond - noise_uncond
                noise_pred = noise_uncond + guidance_scale * guidance
                latent_noise = self.scheduler.step(noise_pred, t, latent_noise)[0]

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    pbar.update()

        y = self.vae.decode(latent_noise / self.vae.config.scaling_factor, return_dict=False)[0]
        imgs = self.image_processor.postprocess(
            y,
            output_type=output_type,
        )

        if not return_dict:
            return imgs

        return StableDiffusionPipelineOutput(imgs, False)

    def image_to_tensor(self, image: Union[str, Image.Image], dim: tuple, dtype):
        """
        Convert a PIL image (given directly or as a file path) to a torch tensor for further processing.
        """
        if isinstance(image, str):
            image = Image.open(image)
        if not image.mode == "RGB":
            image = image.convert("RGB")
        image = self.image_processor.preprocess(image, height=dim[0], width=dim[1])[0]
        return image.type(dtype)
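Editor's note (not part of the uploaded file): a minimal usage sketch of the FABRIC community pipeline above. The checkpoint id, prompts, and generation settings are illustrative assumptions; the pipeline is loaded by file name via the standard `custom_pipeline` argument.

import torch
from diffusers import DiffusionPipeline

# Load a Stable Diffusion checkpoint with the community pipeline defined above
# ("pipeline_fabric" refers to this file; the checkpoint id is a placeholder).
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
    custom_pipeline="pipeline_fabric",
).to("cuda")

prompt = "a photo of a mountain cabin at dusk"

# Round 1: plain generation, no feedback yet.
round_1 = pipe(prompt=prompt, num_images=4, num_inference_steps=20).images

# Round 2: mark one result as liked; its cached self-attention states steer the new batch.
round_2 = pipe(
    prompt=prompt,
    liked=[round_1[0]],
    num_images=4,
    num_inference_steps=20,
).images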
v0.36.0/pipeline_faithdiff_stable_diffusion_xl.py
ADDED
The diff for this file is too large to render.
See raw diff