Upload folder using huggingface_hub
main/README.md
CHANGED
@@ -5488,7 +5488,7 @@ Editing at Scale", many thanks to their contribution!
 
 This implementation of Flux Kontext allows users to pass multiple reference images. Each image is encoded separately, and the resulting latent vectors are concatenated.
 
-As explained in Section 3 of [the paper](https://
+As explained in Section 3 of [the paper](https://huggingface.co/papers/2506.15742), the model's sequence concatenation mechanism can extend its capabilities to handle multiple reference images. However, note that the current version of Flux Kontext was not trained for this use case. In practice, stacking along the first axis does not yield correct results, while stacking along the other two axes appears to work.
 
 ## Example Usage
 
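As a purely illustrative companion to the sentence added above, here is a hedged sketch of calling the pipeline with several reference images. The `custom_pipeline` name, the `image` argument accepting a list, and the image URLs are assumptions for illustration, not confirmed by this diff; the README's own "## Example Usage" section is the authoritative reference.

```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# Hypothetical loading call: the custom_pipeline name is an assumed placeholder.
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev",
    custom_pipeline="pipeline_flux_kontext_multiple_images",  # assumed name
    torch_dtype=torch.bfloat16,
).to("cuda")

# Placeholder reference images; each one is encoded separately and the
# resulting latent sequences are concatenated before denoising.
ref_1 = load_image("https://example.com/reference_1.png")
ref_2 = load_image("https://example.com/reference_2.png")

image = pipe(
    prompt="Place both subjects in the same scene",
    image=[ref_1, ref_2],  # assumed: a list of reference images
).images[0]
image.save("kontext_multi_reference.png")
```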
main/regional_prompting_stable_diffusion.py
CHANGED
@@ -490,7 +490,7 @@ class RegionalPromptingStableDiffusionPipeline(
     def prepare_extra_step_kwargs(self, generator, eta):
         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-        # eta corresponds to η in DDIM paper: https://
+        # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
         # and should be between [0, 1]
 
         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
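For readers unfamiliar with the signature check referenced in the hunk above, a minimal standalone sketch of the same pattern follows. `build_extra_step_kwargs` is a hypothetical helper mirroring `prepare_extra_step_kwargs`; only the `eta` handling is shown.

```python
import inspect

from diffusers import DDIMScheduler, EulerDiscreteScheduler


def build_extra_step_kwargs(scheduler, eta=0.0):
    # Forward `eta` only when the scheduler's `step()` signature accepts it;
    # DDIMScheduler does, most other schedulers do not, so the argument is
    # silently dropped instead of causing a TypeError.
    extra_step_kwargs = {}
    if "eta" in inspect.signature(scheduler.step).parameters:
        extra_step_kwargs["eta"] = eta
    return extra_step_kwargs


print(build_extra_step_kwargs(DDIMScheduler(), eta=0.5))           # {'eta': 0.5}
print(build_extra_step_kwargs(EulerDiscreteScheduler(), eta=0.5))  # {}
```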
@@ -841,7 +841,7 @@ class RegionalPromptingStableDiffusionPipeline(
             num_images_per_prompt (`int`, *optional*, defaults to 1):
                 The number of images to generate per prompt.
             eta (`float`, *optional*, defaults to 0.0):
-                Corresponds to parameter eta (η) from the [DDIM](https://
+                Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies
                 to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
             generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                 A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make

@@ -872,7 +872,7 @@ class RegionalPromptingStableDiffusionPipeline(
                 [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
             guidance_rescale (`float`, *optional*, defaults to 0.0):
                 Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
-                Flawed](https://
+                Flawed](https://huggingface.co/papers/2305.08891). Guidance rescale factor should fix overexposure when
                 using zero terminal SNR.
             clip_skip (`int`, *optional*):
                 Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that

@@ -1062,7 +1062,7 @@ class RegionalPromptingStableDiffusionPipeline(
                     noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
 
                 if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
-                    # Based on 3.4. in https://
+                    # Based on 3.4. in https://huggingface.co/papers/2305.08891
                     noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
 
                 # compute the previous noisy sample x_t -> x_t-1

@@ -1668,7 +1668,7 @@ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
     r"""
     Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on
     Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
-    Flawed](https://
+    Flawed](https://huggingface.co/papers/2305.08891).
 
     Args:
         noise_cfg (`torch.Tensor`):
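For context on the Section 3.4 reference above, the rescaling that `rescale_noise_cfg` performs can be sketched as below. This is a reconstruction of the widely used diffusers helper, not a line taken from this diff, so treat the shapes and the toy inputs as illustrative.

```python
import torch


def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    # Match the per-sample std of the guided prediction to the std of the
    # text-conditional prediction (Section 3.4), then blend the rescaled and
    # original predictions by `guidance_rescale` to counteract overexposure.
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg


# Toy tensors standing in for UNet epsilon predictions.
noise_uncond = torch.randn(2, 4, 64, 64)
noise_text = torch.randn(2, 4, 64, 64)
noise_cfg = noise_uncond + 7.5 * (noise_text - noise_uncond)
print(rescale_noise_cfg(noise_cfg, noise_text, guidance_rescale=0.7).shape)  # torch.Size([2, 4, 64, 64])
```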