diff --git a/README.md b/README.md
index bdbf7e6858e3..140e2ff49b9c 100644
--- a/README.md
+++ b/README.md
@@ -20,21 +20,11 @@ limitations under the License.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
### This fork is designed for use with Skquark's [**Stable Diffusion Deluxe** Web App](https://colab.research.google.com/github/Skquark/AI-Friends/blob/main/Stable_Diffusion_Deluxe.ipynb) and [**Enhanced Stable Diffusion**](https://colab.research.google.com/github/Skquark/structured-prompt-generator/blob/main/Enhanced_Stable_Diffusion_with_diffusers.ipynb) Colab Notebooks. Check out [DiffusionDeluxe.com](https://diffusiondeluxe.com) to see which features are integrated into the app's easy-to-use UI, created by Alan Bedian.
diff --git a/docker/diffusers-doc-builder/Dockerfile b/docker/diffusers-doc-builder/Dockerfile
index cd8fa66983c5..c9fc62707cb0 100644
--- a/docker/diffusers-doc-builder/Dockerfile
+++ b/docker/diffusers-doc-builder/Dockerfile
@@ -42,7 +42,7 @@ RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
huggingface-hub \
Jinja2 \
librosa \
- numpy \
+ numpy==1.26.4 \
scipy \
tensorboard \
transformers \
diff --git a/docker/diffusers-flax-cpu/Dockerfile b/docker/diffusers-flax-cpu/Dockerfile
index 0f944e9c0843..86a49171d290 100644
--- a/docker/diffusers-flax-cpu/Dockerfile
+++ b/docker/diffusers-flax-cpu/Dockerfile
@@ -40,7 +40,7 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
huggingface-hub \
Jinja2 \
librosa \
- numpy \
+ numpy==1.26.4 \
scipy \
tensorboard \
transformers
diff --git a/docker/diffusers-flax-tpu/Dockerfile b/docker/diffusers-flax-tpu/Dockerfile
index c4af36608bac..b40cd55a1c16 100644
--- a/docker/diffusers-flax-tpu/Dockerfile
+++ b/docker/diffusers-flax-tpu/Dockerfile
@@ -41,8 +41,8 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
hf-doc-builder \
huggingface-hub \
Jinja2 \
- librosa \
- numpy \
+ librosa \
+ numpy==1.26.4 \
scipy \
tensorboard \
transformers
diff --git a/docker/diffusers-onnxruntime-cpu/Dockerfile b/docker/diffusers-onnxruntime-cpu/Dockerfile
index a14aedc0dc91..a5a6e98605cb 100644
--- a/docker/diffusers-onnxruntime-cpu/Dockerfile
+++ b/docker/diffusers-onnxruntime-cpu/Dockerfile
@@ -40,7 +40,7 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
huggingface-hub \
Jinja2 \
librosa \
- numpy \
+ numpy==1.26.4 \
scipy \
tensorboard \
transformers
diff --git a/docker/diffusers-onnxruntime-cuda/Dockerfile b/docker/diffusers-onnxruntime-cuda/Dockerfile
index c12f96748d18..20192175538e 100644
--- a/docker/diffusers-onnxruntime-cuda/Dockerfile
+++ b/docker/diffusers-onnxruntime-cuda/Dockerfile
@@ -40,7 +40,7 @@ RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
huggingface-hub \
Jinja2 \
librosa \
- numpy \
+ numpy==1.26.4 \
scipy \
tensorboard \
transformers
diff --git a/docker/diffusers-pytorch-compile-cuda/Dockerfile b/docker/diffusers-pytorch-compile-cuda/Dockerfile
index fbee3389edc2..eac7e5fb14fb 100644
--- a/docker/diffusers-pytorch-compile-cuda/Dockerfile
+++ b/docker/diffusers-pytorch-compile-cuda/Dockerfile
@@ -39,7 +39,7 @@ RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
huggingface-hub \
Jinja2 \
librosa \
- numpy \
+ numpy==1.26.4 \
scipy \
tensorboard \
transformers
diff --git a/docker/diffusers-pytorch-cpu/Dockerfile b/docker/diffusers-pytorch-cpu/Dockerfile
index af48ac415ec6..e2986e0dd166 100644
--- a/docker/diffusers-pytorch-cpu/Dockerfile
+++ b/docker/diffusers-pytorch-cpu/Dockerfile
@@ -40,7 +40,7 @@ RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
huggingface-hub \
Jinja2 \
librosa \
- numpy \
+ numpy==1.26.4 \
scipy \
tensorboard \
transformers matplotlib
diff --git a/docker/diffusers-pytorch-cuda/Dockerfile b/docker/diffusers-pytorch-cuda/Dockerfile
index 88805f2140d7..f672b7536e29 100644
--- a/docker/diffusers-pytorch-cuda/Dockerfile
+++ b/docker/diffusers-pytorch-cuda/Dockerfile
@@ -39,7 +39,7 @@ RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
huggingface-hub \
Jinja2 \
librosa \
- numpy \
+ numpy==1.26.4 \
scipy \
tensorboard \
transformers \
diff --git a/docker/diffusers-pytorch-xformers-cuda/Dockerfile b/docker/diffusers-pytorch-xformers-cuda/Dockerfile
index 503b01a66019..998e1a5fd2ff 100644
--- a/docker/diffusers-pytorch-xformers-cuda/Dockerfile
+++ b/docker/diffusers-pytorch-xformers-cuda/Dockerfile
@@ -39,7 +39,7 @@ RUN python3.10 -m pip install --no-cache-dir --upgrade pip uv==0.1.11 && \
huggingface-hub \
Jinja2 \
librosa \
- numpy \
+ numpy==1.26.4 \
scipy \
tensorboard \
transformers \
diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml
index 02f063b6016e..c306e1eb99e7 100644
--- a/docs/source/en/_toctree.yml
+++ b/docs/source/en/_toctree.yml
@@ -253,6 +253,8 @@
title: PriorTransformer
- local: api/models/controlnet
title: ControlNetModel
+ - local: api/models/controlnet_sd3
+ title: SD3ControlNetModel
title: Models
- isExpanded: false
sections:
@@ -276,6 +278,8 @@
title: Consistency Models
- local: api/pipelines/controlnet
title: ControlNet
+ - local: api/pipelines/controlnet_sd3
+ title: ControlNet with Stable Diffusion 3
- local: api/pipelines/controlnet_sdxl
title: ControlNet with Stable Diffusion XL
- local: api/pipelines/controlnetxs
diff --git a/docs/source/en/api/loaders/single_file.md b/docs/source/en/api/loaders/single_file.md
index 80b494ceb2e7..0af0ce6488d4 100644
--- a/docs/source/en/api/loaders/single_file.md
+++ b/docs/source/en/api/loaders/single_file.md
@@ -35,6 +35,7 @@ The [`~loaders.FromSingleFileMixin.from_single_file`] method allows you to load:
- [`StableDiffusionXLInstructPix2PixPipeline`]
- [`StableDiffusionXLControlNetPipeline`]
- [`StableDiffusionXLKDiffusionPipeline`]
+- [`StableDiffusion3Pipeline`]
- [`LatentConsistencyModelPipeline`]
- [`LatentConsistencyModelImg2ImgPipeline`]
- [`StableDiffusionControlNetXSPipeline`]
@@ -49,6 +50,7 @@ The [`~loaders.FromSingleFileMixin.from_single_file`] method allows you to load:
- [`StableCascadeUNet`]
- [`AutoencoderKL`]
- [`ControlNetModel`]
+- [`SD3Transformer2DModel`]
## FromSingleFileMixin
diff --git a/docs/source/en/api/models/controlnet_sd3.md b/docs/source/en/api/models/controlnet_sd3.md
new file mode 100644
index 000000000000..59db64546fa2
--- /dev/null
+++ b/docs/source/en/api/models/controlnet_sd3.md
@@ -0,0 +1,42 @@
+
+
+# SD3ControlNetModel
+
+SD3ControlNetModel is an implementation of ControlNet for Stable Diffusion 3.
+
+The ControlNet model was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. It provides a greater degree of control over text-to-image generation by conditioning the model on additional inputs such as edge maps, depth maps, segmentation maps, and keypoints for pose detection.
+
+The abstract from the paper is:
+
+*We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with "zero convolutions" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, eg, edges, depth, segmentation, human pose, etc, with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.*
+
+## Loading from the original format
+
+By default, the [`SD3ControlNetModel`] should be loaded with [`~ModelMixin.from_pretrained`].
+
+```py
+from diffusers import StableDiffusion3ControlNetPipeline
+from diffusers.models import SD3ControlNetModel, SD3MultiControlNetModel
+
+controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny")
+pipe = StableDiffusion3ControlNetPipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet)
+```
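+
+`SD3MultiControlNetModel` (imported above) can wrap several ControlNets so that they condition the same generation together. A minimal sketch, reusing the imports from the example above; the second repository name is illustrative:
+
+```py
+canny = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny")
+pose = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Pose")
+
+# wrap both ControlNets so the pipeline applies them jointly
+multi_controlnet = SD3MultiControlNetModel([canny, pose])
+pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=multi_controlnet
+)
+```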
+
+## SD3ControlNetModel
+
+[[autodoc]] SD3ControlNetModel
+
+## SD3ControlNetOutput
+
+[[autodoc]] models.controlnet_sd3.SD3ControlNetOutput
+
diff --git a/docs/source/en/api/pipelines/controlnet_sd3.md b/docs/source/en/api/pipelines/controlnet_sd3.md
new file mode 100644
index 000000000000..31dd21f1dd36
--- /dev/null
+++ b/docs/source/en/api/pipelines/controlnet_sd3.md
@@ -0,0 +1,39 @@
+
+
+# ControlNet with Stable Diffusion 3
+
+StableDiffusion3ControlNetPipeline is an implementation of ControlNet for Stable Diffusion 3.
+
+ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://huggingface.co/papers/2302.05543) by Lvmin Zhang, Anyi Rao, and Maneesh Agrawala.
+
+With a ControlNet model, you can provide an additional control image to condition and control Stable Diffusion generation. For example, if you provide a depth map, the ControlNet model generates an image that preserves the spatial information from the depth map. It is a more flexible and accurate way to control the image generation process.
+
+The abstract from the paper is:
+
+*We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with "zero convolutions" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, eg, edges, depth, segmentation, human pose, etc, with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.*
+
+This code is implemented by [the InstantX Team](https://huggingface.co/InstantX). You can find pre-trained checkpoints for SD3-ControlNet on the [InstantX](https://huggingface.co/InstantX) Hub profile.
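+
+The snippet below is a minimal sketch of a text-to-image run with a Canny-conditioned ControlNet; the checkpoint name and the control image URL are assumptions based on the InstantX Hub profile linked above, so substitute your own control image if needed.
+
+```py
+import torch
+from diffusers import SD3ControlNetModel, StableDiffusion3ControlNetPipeline
+from diffusers.utils import load_image
+
+# load the SD3 ControlNet and plug it into the SD3 pipeline
+controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
+pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
+)
+pipe.to("cuda")
+
+# any Canny edge map works as the control image; this URL is illustrative
+control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")
+prompt = "a photo of an astronaut riding a horse, detailed, 8k"
+
+image = pipe(prompt, control_image=control_image, controlnet_conditioning_scale=0.7).images[0]
+image.save("sd3-controlnet-canny.png")
+```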
+
+
+
+Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines.
+
+
+
+## StableDiffusion3ControlNetPipeline
+[[autodoc]] StableDiffusion3ControlNetPipeline
+ - all
+ - __call__
+
+## StableDiffusion3PipelineOutput
+[[autodoc]] pipelines.stable_diffusion_3.pipeline_output.StableDiffusion3PipelineOutput
diff --git a/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_3.md b/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_3.md
index 4c38ba85356c..309a25332698 100644
--- a/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_3.md
+++ b/docs/source/en/api/pipelines/stable_diffusion/stable_diffusion_3.md
@@ -21,9 +21,9 @@ The abstract from the paper is:
## Usage Example
-_As the model is gated, before using it with diffusers you first need to go to the [Stable Diffusion 3 Medium Hugging Face page](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers), fill in the form and accept the gate. Once you are in, you need to login so that your system knows you’ve accepted the gate._
+_As the model is gated, before using it with diffusers you first need to go to the [Stable Diffusion 3 Medium Hugging Face page](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers), fill in the form and accept the gate. Once you are in, you need to login so that your system knows you’ve accepted the gate._
-Use the command below to log in:
+Use the command below to log in:
```bash
huggingface-cli login
@@ -197,6 +197,27 @@ image.save("sd3_hello_world.png")
Check out the full script [here](https://gist.github.com/sayakpaul/508d89d7aad4f454900813da5d42ca97).
+## Tiny AutoEncoder for Stable Diffusion 3
+
+Tiny AutoEncoder for Stable Diffusion 3 (TAESD3) is a tiny distilled version of Stable Diffusion 3's VAE by [Ollin Boer Bohan](https://github.com/madebyollin/taesd) that can decode [`StableDiffusion3Pipeline`] latents almost instantly.
+
+To use with Stable Diffusion 3:
+
+```python
+import torch
+from diffusers import StableDiffusion3Pipeline, AutoencoderTiny
+
+pipe = StableDiffusion3Pipeline.from_pretrained(
+ "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
+)
+pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd3", torch_dtype=torch.float16)
+pipe = pipe.to("cuda")
+
+prompt = "slice of delicious New York-style berry cheesecake"
+image = pipe(prompt, num_inference_steps=25).images[0]
+image.save("cheesecake.png")
+```
+
## Loading the original checkpoints via `from_single_file`
The `SD3Transformer2DModel` and `StableDiffusion3Pipeline` classes support loading the original checkpoints via the `from_single_file` method. This method allows you to load the original checkpoint files that were used to train the models.
@@ -211,17 +232,38 @@ model = SD3Transformer2DModel.from_single_file("https://huggingface.co/stability
## Loading the single checkpoint for the `StableDiffusion3Pipeline`
+### Loading the single file checkpoint without T5
+
```python
+import torch
from diffusers import StableDiffusion3Pipeline
-from transformers import T5EncoderModel
-text_encoder_3 = T5EncoderModel.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", subfolder="text_encoder_3", torch_dtype=torch.float16)
-pipe = StableDiffusion3Pipeline.from_single_file("https://huggingface.co/stabilityai/stable-diffusion-3-medium/blob/main/sd3_medium_incl_clips.safetensors", torch_dtype=torch.float16, text_encoder_3=text_encoder_3)
+pipe = StableDiffusion3Pipeline.from_single_file(
+ "https://huggingface.co/stabilityai/stable-diffusion-3-medium/blob/main/sd3_medium_incl_clips.safetensors",
+ torch_dtype=torch.float16,
+ text_encoder_3=None
+)
+pipe.enable_model_cpu_offload()
+
+image = pipe("a picture of a cat holding a sign that says hello world").images[0]
+image.save('sd3-single-file.png')
```
-
-`from_single_file` support for the `fp8` version of the checkpoints is coming soon. Watch this space.
-
+### Loading the single file checkpoint with T5
+
+```python
+import torch
+from diffusers import StableDiffusion3Pipeline
+
+pipe = StableDiffusion3Pipeline.from_single_file(
+ "https://huggingface.co/stabilityai/stable-diffusion-3-medium/blob/main/sd3_medium_incl_clips_t5xxlfp8.safetensors",
+ torch_dtype=torch.float16,
+)
+pipe.enable_model_cpu_offload()
+
+image = pipe("a picture of a cat holding a sign that says hello world").images[0]
+image.save('sd3-single-file-t5-fp8.png')
+```
## StableDiffusion3Pipeline
diff --git a/docs/source/en/training/controlnet.md b/docs/source/en/training/controlnet.md
index 99343a142e82..89b333524d45 100644
--- a/docs/source/en/training/controlnet.md
+++ b/docs/source/en/training/controlnet.md
@@ -349,7 +349,7 @@ control_image = load_image("./conditioning_image_1.png")
prompt = "pale golden rod circle with old lace background"
generator = torch.manual_seed(0)
-image = pipe(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0]
+image = pipeline(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0]
image.save("./output.png")
```
@@ -363,4 +363,4 @@ The SDXL training script is discussed in more detail in the [SDXL training](sdxl
Congratulations on training your own ControlNet! To learn more about how to use your new model, the following guides may be helpful:
-- Learn how to [use a ControlNet](../using-diffusers/controlnet) for inference on a variety of tasks.
\ No newline at end of file
+- Learn how to [use a ControlNet](../using-diffusers/controlnet) for inference on a variety of tasks.
diff --git a/docs/source/en/training/text2image.md b/docs/source/en/training/text2image.md
index f69e9a710e8f..c8abb189a91f 100644
--- a/docs/source/en/training/text2image.md
+++ b/docs/source/en/training/text2image.md
@@ -181,7 +181,7 @@ accelerate launch --mixed_precision="fp16" train_text_to_image.py \
--max_train_steps=15000 \
--learning_rate=1e-05 \
--max_grad_norm=1 \
- --enable_xformers_memory_efficient_attention
+ --enable_xformers_memory_efficient_attention \
--lr_scheduler="constant" --lr_warmup_steps=0 \
--output_dir="sd-naruto-model" \
--push_to_hub
diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
index 72287c9f6537..1cab12ac5df2 100644
--- a/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
+++ b/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py
@@ -71,7 +71,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py b/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
index 32e882acb56e..ca311128e043 100644
--- a/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
+++ b/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py
@@ -78,7 +78,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/community/marigold_depth_estimation.py b/examples/community/marigold_depth_estimation.py
index 0ed7107d6b5c..dc8b253dbf6f 100644
--- a/examples/community/marigold_depth_estimation.py
+++ b/examples/community/marigold_depth_estimation.py
@@ -43,7 +43,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
class MarigoldDepthOutput(BaseOutput):
diff --git a/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py b/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py
index faf1ee5b5a1f..bedecf7a2eae 100644
--- a/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py
+++ b/examples/consistency_distillation/train_lcm_distill_lora_sd_wds.py
@@ -73,7 +73,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/consistency_distillation/train_lcm_distill_lora_sdxl.py b/examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
index 7754a8e08b87..19a1f5abaaa2 100644
--- a/examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
+++ b/examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
@@ -66,7 +66,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/consistency_distillation/train_lcm_distill_lora_sdxl_wds.py b/examples/consistency_distillation/train_lcm_distill_lora_sdxl_wds.py
index e72de4b7877d..293ceaff12d6 100644
--- a/examples/consistency_distillation/train_lcm_distill_lora_sdxl_wds.py
+++ b/examples/consistency_distillation/train_lcm_distill_lora_sdxl_wds.py
@@ -79,7 +79,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/consistency_distillation/train_lcm_distill_sd_wds.py b/examples/consistency_distillation/train_lcm_distill_sd_wds.py
index e80bb5571f6a..0e457ff41327 100644
--- a/examples/consistency_distillation/train_lcm_distill_sd_wds.py
+++ b/examples/consistency_distillation/train_lcm_distill_sd_wds.py
@@ -72,7 +72,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/consistency_distillation/train_lcm_distill_sdxl_wds.py b/examples/consistency_distillation/train_lcm_distill_sdxl_wds.py
index 96b323ff92f7..58545c49a897 100644
--- a/examples/consistency_distillation/train_lcm_distill_sdxl_wds.py
+++ b/examples/consistency_distillation/train_lcm_distill_sdxl_wds.py
@@ -78,7 +78,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/controlnet/train_controlnet.py b/examples/controlnet/train_controlnet.py
index 61d97a4f1d55..b4efe0bd574a 100644
--- a/examples/controlnet/train_controlnet.py
+++ b/examples/controlnet/train_controlnet.py
@@ -60,7 +60,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/controlnet/train_controlnet_flax.py b/examples/controlnet/train_controlnet_flax.py
index 85f7b57fbe47..4cc386ecaeb0 100644
--- a/examples/controlnet/train_controlnet_flax.py
+++ b/examples/controlnet/train_controlnet_flax.py
@@ -60,7 +60,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = logging.getLogger(__name__)
diff --git a/examples/controlnet/train_controlnet_sdxl.py b/examples/controlnet/train_controlnet_sdxl.py
index e09747fe5677..c05a70507af3 100644
--- a/examples/controlnet/train_controlnet_sdxl.py
+++ b/examples/controlnet/train_controlnet_sdxl.py
@@ -61,7 +61,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
if is_torch_npu_available():
diff --git a/examples/custom_diffusion/train_custom_diffusion.py b/examples/custom_diffusion/train_custom_diffusion.py
index 7b8d9c7dbb10..936246ab5649 100644
--- a/examples/custom_diffusion/train_custom_diffusion.py
+++ b/examples/custom_diffusion/train_custom_diffusion.py
@@ -63,7 +63,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/dreambooth/README_sd3.md b/examples/dreambooth/README_sd3.md
index bdeb089ac526..5139b118aa43 100644
--- a/examples/dreambooth/README_sd3.md
+++ b/examples/dreambooth/README_sd3.md
@@ -106,6 +106,9 @@ To better track our training experiments, we're using the following flags in the
* `report_to="wandb"` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`.
* `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.
+> [!NOTE]
+> If you want to train using long prompts with the T5 text encoder, you can use `--max_sequence_length` to set the token limit. The default is 77, but it can be increased to as high as 512. Note that this will use more resources and may slow down the training in some cases.
+
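+For illustration, here is a minimal sketch of passing the flag alongside the usual DreamBooth LoRA arguments; the model name, data directory, and step count below are placeholders, so adjust them to your own setup:
+
+```bash
+accelerate launch train_dreambooth_lora_sd3.py \
+  --pretrained_model_name_or_path="stabilityai/stable-diffusion-3-medium-diffusers" \
+  --instance_data_dir="dog" \
+  --instance_prompt="a photo of sks dog" \
+  --output_dir="trained-sd3-lora" \
+  --resolution=1024 \
+  --train_batch_size=1 \
+  --max_train_steps=500 \
+  --max_sequence_length=256
+```
+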
> [!TIP]
> You can pass `--use_8bit_adam` to reduce the memory requirements of training. Make sure to install `bitsandbytes` if you want to do so.
diff --git a/examples/dreambooth/requirements_sd3.txt b/examples/dreambooth/requirements_sd3.txt
index 84e418376b48..1c1ad1f3a738 100644
--- a/examples/dreambooth/requirements_sd3.txt
+++ b/examples/dreambooth/requirements_sd3.txt
@@ -4,4 +4,5 @@ transformers>=4.41.2
ftfy
tensorboard
Jinja2
-peft== 0.11.1
\ No newline at end of file
+peft==0.11.1
+sentencepiece
\ No newline at end of file
diff --git a/examples/dreambooth/train_dreambooth.py b/examples/dreambooth/train_dreambooth.py
index ef4b7455cc7f..57fd49f57ce7 100644
--- a/examples/dreambooth/train_dreambooth.py
+++ b/examples/dreambooth/train_dreambooth.py
@@ -63,7 +63,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/dreambooth/train_dreambooth_flax.py b/examples/dreambooth/train_dreambooth_flax.py
index ff09ff20cd95..df5fa741ed0b 100644
--- a/examples/dreambooth/train_dreambooth_flax.py
+++ b/examples/dreambooth/train_dreambooth_flax.py
@@ -35,7 +35,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
# Cache compiled models across invocations of this script.
cc.initialize_cache(os.path.expanduser("~/.cache/jax/compilation_cache"))
diff --git a/examples/dreambooth/train_dreambooth_lora.py b/examples/dreambooth/train_dreambooth_lora.py
index 25b720b654b6..c8af49ac0373 100644
--- a/examples/dreambooth/train_dreambooth_lora.py
+++ b/examples/dreambooth/train_dreambooth_lora.py
@@ -70,7 +70,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/dreambooth/train_dreambooth_lora_sd3.py b/examples/dreambooth/train_dreambooth_lora_sd3.py
index 5e8bc7bab818..74c745739975 100644
--- a/examples/dreambooth/train_dreambooth_lora_sd3.py
+++ b/examples/dreambooth/train_dreambooth_lora_sd3.py
@@ -53,7 +53,11 @@
StableDiffusion3Pipeline,
)
from diffusers.optimization import get_scheduler
-from diffusers.training_utils import cast_training_params
+from diffusers.training_utils import (
+ cast_training_params,
+ compute_density_for_timestep_sampling,
+ compute_loss_weighting_for_sd3,
+)
from diffusers.utils import (
check_min_version,
convert_unet_state_dict_to_peft,
@@ -67,7 +71,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.28.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
@@ -298,6 +302,12 @@ def parse_args(input_args=None):
default=None,
help="The prompt to specify images in the same class as provided instance images.",
)
+ parser.add_argument(
+ "--max_sequence_length",
+ type=int,
+ default=77,
+ help="Maximum sequence length to use with with the T5 text encoder",
+ )
parser.add_argument(
"--validation_prompt",
type=str,
@@ -467,11 +477,20 @@ def parse_args(input_args=None):
),
)
parser.add_argument(
- "--weighting_scheme", type=str, default="sigma_sqrt", choices=["sigma_sqrt", "logit_normal", "mode"]
+ "--weighting_scheme", type=str, default="sigma_sqrt", choices=["sigma_sqrt", "logit_normal", "mode", "cosmap"]
+ )
+ parser.add_argument(
+ "--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme."
+ )
+ parser.add_argument(
+ "--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme."
+ )
+ parser.add_argument(
+ "--mode_scale",
+ type=float,
+ default=1.29,
+ help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.",
)
- parser.add_argument("--logit_mean", type=float, default=0.0)
- parser.add_argument("--logit_std", type=float, default=1.0)
- parser.add_argument("--mode_scale", type=float, default=1.29)
parser.add_argument(
"--optimizer",
type=str,
@@ -830,6 +849,7 @@ def tokenize_prompt(tokenizer, prompt):
def _encode_prompt_with_t5(
text_encoder,
tokenizer,
+ max_sequence_length,
prompt=None,
num_images_per_prompt=1,
device=None,
@@ -840,7 +860,7 @@ def _encode_prompt_with_t5(
text_inputs = tokenizer(
prompt,
padding="max_length",
- max_length=77,
+ max_length=max_sequence_length,
truncation=True,
add_special_tokens=True,
return_tensors="pt",
@@ -897,6 +917,7 @@ def encode_prompt(
text_encoders,
tokenizers,
prompt: str,
+ max_sequence_length,
device=None,
num_images_per_prompt: int = 1,
):
@@ -924,6 +945,7 @@ def encode_prompt(
t5_prompt_embed = _encode_prompt_with_t5(
text_encoders[-1],
tokenizers[-1],
+ max_sequence_length,
prompt=prompt,
num_images_per_prompt=num_images_per_prompt,
device=device if device is not None else text_encoders[-1].device,
@@ -1297,7 +1319,9 @@ def load_model_hook(models, input_dir):
def compute_text_embeddings(prompt, text_encoders, tokenizers):
with torch.no_grad():
- prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt)
+ prompt_embeds, pooled_prompt_embeds = encode_prompt(
+ text_encoders, tokenizers, prompt, args.max_sequence_length
+ )
prompt_embeds = prompt_embeds.to(accelerator.device)
pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device)
return prompt_embeds, pooled_prompt_embeds
@@ -1316,6 +1340,9 @@ def compute_text_embeddings(prompt, text_encoders, tokenizers):
# Clear the memory here
if not train_dataset.custom_instance_prompts:
del tokenizers, text_encoders
+ # Explicitly delete the objects as well, otherwise only the lists are deleted and the original references remain, preventing garbage collection
+ del tokenizer_one, tokenizer_two, tokenizer_three
+ del text_encoder_one, text_encoder_two, text_encoder_three
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
@@ -1462,7 +1489,15 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
bsz = model_input.shape[0]
# Sample a random timestep for each image
- indices = torch.randint(0, noise_scheduler_copy.config.num_train_timesteps, (bsz,))
+ # for weighting schemes where we sample timesteps non-uniformly
+ u = compute_density_for_timestep_sampling(
+ weighting_scheme=args.weighting_scheme,
+ batch_size=bsz,
+ logit_mean=args.logit_mean,
+ logit_std=args.logit_std,
+ mode_scale=args.mode_scale,
+ )
+ indices = (u * noise_scheduler_copy.config.num_train_timesteps).long()
timesteps = noise_scheduler_copy.timesteps[indices].to(device=model_input.device)
# Add noise according to flow matching.
@@ -1482,20 +1517,11 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
# Preconditioning of the model outputs.
model_pred = model_pred * (-sigmas) + noisy_model_input
- # TODO (kashif, sayakpaul): weighting sceme needs to be experimented with :)
- if args.weighting_scheme == "sigma_sqrt":
- weighting = (sigmas**-2.0).float()
- elif args.weighting_scheme == "logit_normal":
- # See 3.1 in the SD3 paper ($rf/lognorm(0.00,1.00)$).
- u = torch.normal(mean=args.logit_mean, std=args.logit_std, size=(bsz,), device=accelerator.device)
- weighting = torch.nn.functional.sigmoid(u)
- elif args.weighting_scheme == "mode":
- # See sec 3.1 in the SD3 paper (20).
- u = torch.rand(size=(bsz,), device=accelerator.device)
- weighting = 1 - u - args.mode_scale * (torch.cos(math.pi * u / 2) ** 2 - 1 + u)
-
- # simplified flow matching aka 0-rectified flow matching loss
- # target = model_input - noise
+ # these weighting schemes use a uniform timestep sampling
+ # and instead post-weight the loss
+ weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas)
+
+ # flow matching loss
target = model_input
if args.with_prior_preservation:
diff --git a/examples/dreambooth/train_dreambooth_lora_sdxl.py b/examples/dreambooth/train_dreambooth_lora_sdxl.py
index a6ffb9fa42ae..0c0358406889 100644
--- a/examples/dreambooth/train_dreambooth_lora_sdxl.py
+++ b/examples/dreambooth/train_dreambooth_lora_sdxl.py
@@ -78,7 +78,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
@@ -1289,8 +1289,8 @@ def load_model_hook(models, input_dir):
models = [unet_]
if args.train_text_encoder:
models.extend([text_encoder_one_, text_encoder_two_])
- # only upcast trainable parameters (LoRA) into fp32
- cast_training_params(models)
+ # only upcast trainable parameters (LoRA) into fp32
+ cast_training_params(models)
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
diff --git a/examples/dreambooth/train_dreambooth_sd3.py b/examples/dreambooth/train_dreambooth_sd3.py
index adcea652db74..e516f49eb876 100644
--- a/examples/dreambooth/train_dreambooth_sd3.py
+++ b/examples/dreambooth/train_dreambooth_sd3.py
@@ -51,6 +51,7 @@
StableDiffusion3Pipeline,
)
from diffusers.optimization import get_scheduler
+from diffusers.training_utils import compute_density_for_timestep_sampling, compute_loss_weighting_for_sd3
from diffusers.utils import (
check_min_version,
is_wandb_available,
@@ -63,7 +64,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.28.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
@@ -297,6 +298,12 @@ def parse_args(input_args=None):
default=None,
help="The prompt to specify images in the same class as provided instance images.",
)
+ parser.add_argument(
+ "--max_sequence_length",
+ type=int,
+ default=77,
+ help="Maximum sequence length to use with with the T5 text encoder",
+ )
parser.add_argument(
"--validation_prompt",
type=str,
@@ -465,11 +472,20 @@ def parse_args(input_args=None):
),
)
parser.add_argument(
- "--weighting_scheme", type=str, default="sigma_sqrt", choices=["sigma_sqrt", "logit_normal", "mode"]
+ "--weighting_scheme", type=str, default="sigma_sqrt", choices=["sigma_sqrt", "logit_normal", "mode", "cosmap"]
+ )
+ parser.add_argument(
+ "--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme."
+ )
+ parser.add_argument(
+ "--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme."
+ )
+ parser.add_argument(
+ "--mode_scale",
+ type=float,
+ default=1.29,
+ help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.",
)
- parser.add_argument("--logit_mean", type=float, default=0.0)
- parser.add_argument("--logit_std", type=float, default=1.0)
- parser.add_argument("--mode_scale", type=float, default=1.29)
parser.add_argument(
"--optimizer",
type=str,
@@ -828,6 +844,7 @@ def tokenize_prompt(tokenizer, prompt):
def _encode_prompt_with_t5(
text_encoder,
tokenizer,
+ max_sequence_length,
prompt=None,
num_images_per_prompt=1,
device=None,
@@ -838,7 +855,7 @@ def _encode_prompt_with_t5(
text_inputs = tokenizer(
prompt,
padding="max_length",
- max_length=77,
+ max_length=max_sequence_length,
truncation=True,
add_special_tokens=True,
return_tensors="pt",
@@ -895,6 +912,7 @@ def encode_prompt(
text_encoders,
tokenizers,
prompt: str,
+ max_sequence_length,
device=None,
num_images_per_prompt: int = 1,
):
@@ -922,6 +940,7 @@ def encode_prompt(
t5_prompt_embed = _encode_prompt_with_t5(
text_encoders[-1],
tokenizers[-1],
+ max_sequence_length,
prompt=prompt,
num_images_per_prompt=num_images_per_prompt,
device=device if device is not None else text_encoders[-1].device,
@@ -1324,7 +1343,9 @@ def load_model_hook(models, input_dir):
def compute_text_embeddings(prompt, text_encoders, tokenizers):
with torch.no_grad():
- prompt_embeds, pooled_prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt)
+ prompt_embeds, pooled_prompt_embeds = encode_prompt(
+ text_encoders, tokenizers, prompt, args.max_sequence_length
+ )
prompt_embeds = prompt_embeds.to(accelerator.device)
pooled_prompt_embeds = pooled_prompt_embeds.to(accelerator.device)
return prompt_embeds, pooled_prompt_embeds
@@ -1347,6 +1368,9 @@ def compute_text_embeddings(prompt, text_encoders, tokenizers):
# Clear the memory here
if not args.train_text_encoder and not train_dataset.custom_instance_prompts:
del tokenizers, text_encoders
+ # Explicitly delete the objects as well, otherwise only the lists are deleted and the original references remain, preventing garbage collection
+ del tokenizer_one, tokenizer_two, tokenizer_three
+ del text_encoder_one, text_encoder_two, text_encoder_three
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
@@ -1526,7 +1550,15 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
bsz = model_input.shape[0]
# Sample a random timestep for each image
- indices = torch.randint(0, noise_scheduler_copy.config.num_train_timesteps, (bsz,))
+ # for weighting schemes where we sample timesteps non-uniformly
+ u = compute_density_for_timestep_sampling(
+ weighting_scheme=args.weighting_scheme,
+ batch_size=bsz,
+ logit_mean=args.logit_mean,
+ logit_std=args.logit_std,
+ mode_scale=args.mode_scale,
+ )
+ indices = (u * noise_scheduler_copy.config.num_train_timesteps).long()
timesteps = noise_scheduler_copy.timesteps[indices].to(device=model_input.device)
# Add noise according to flow matching.
@@ -1560,21 +1592,11 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
# Follow: Section 5 of https://arxiv.org/abs/2206.00364.
# Preconditioning of the model outputs.
model_pred = model_pred * (-sigmas) + noisy_model_input
+ # these weighting schemes use a uniform timestep sampling
+ # and instead post-weight the loss
+ weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas)
- # TODO (kashif, sayakpaul): weighting sceme needs to be experimented with :)
- if args.weighting_scheme == "sigma_sqrt":
- weighting = (sigmas**-2.0).float()
- elif args.weighting_scheme == "logit_normal":
- # See 3.1 in the SD3 paper ($rf/lognorm(0.00,1.00)$).
- u = torch.normal(mean=args.logit_mean, std=args.logit_std, size=(bsz,), device=accelerator.device)
- weighting = torch.nn.functional.sigmoid(u)
- elif args.weighting_scheme == "mode":
- # See sec 3.1 in the SD3 paper (20).
- u = torch.rand(size=(bsz,), device=accelerator.device)
- weighting = 1 - u - args.mode_scale * (torch.cos(math.pi * u / 2) ** 2 - 1 + u)
-
- # simplified flow matching aka 0-rectified flow matching loss
- # target = model_input - noise
+ # flow matching loss
target = model_input
if args.with_prior_preservation:
diff --git a/examples/instruct_pix2pix/train_instruct_pix2pix.py b/examples/instruct_pix2pix/train_instruct_pix2pix.py
index 7517b7c5983d..050cb858acda 100644
--- a/examples/instruct_pix2pix/train_instruct_pix2pix.py
+++ b/examples/instruct_pix2pix/train_instruct_pix2pix.py
@@ -57,7 +57,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py b/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py
index 16043ac2701e..bb6fa1b284c3 100644
--- a/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py
+++ b/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py
@@ -60,7 +60,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py b/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py
index 109e1a1c984a..fbcbef4d93ba 100644
--- a/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py
+++ b/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py
@@ -52,7 +52,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/kandinsky2_2/text_to_image/train_text_to_image_lora_decoder.py b/examples/kandinsky2_2/text_to_image/train_text_to_image_lora_decoder.py
index af8327ec1b85..ca9879d3bc74 100644
--- a/examples/kandinsky2_2/text_to_image/train_text_to_image_lora_decoder.py
+++ b/examples/kandinsky2_2/text_to_image/train_text_to_image_lora_decoder.py
@@ -46,7 +46,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/kandinsky2_2/text_to_image/train_text_to_image_lora_prior.py b/examples/kandinsky2_2/text_to_image/train_text_to_image_lora_prior.py
index f14fa627ab9e..fa91b5833a58 100644
--- a/examples/kandinsky2_2/text_to_image/train_text_to_image_lora_prior.py
+++ b/examples/kandinsky2_2/text_to_image/train_text_to_image_lora_prior.py
@@ -46,7 +46,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py b/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py
index 377eb81e8212..60ac2b471e43 100644
--- a/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py
+++ b/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py
@@ -51,7 +51,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/research_projects/scheduled_huber_loss_training/dreambooth/train_dreambooth_lora_sdxl.py b/examples/research_projects/scheduled_huber_loss_training/dreambooth/train_dreambooth_lora_sdxl.py
index e8c9cb796ab1..00f95509be5a 100644
--- a/examples/research_projects/scheduled_huber_loss_training/dreambooth/train_dreambooth_lora_sdxl.py
+++ b/examples/research_projects/scheduled_huber_loss_training/dreambooth/train_dreambooth_lora_sdxl.py
@@ -1363,8 +1363,8 @@ def load_model_hook(models, input_dir):
models = [unet_]
if args.train_text_encoder:
models.extend([text_encoder_one_, text_encoder_two_])
- # only upcast trainable parameters (LoRA) into fp32
- cast_training_params(models)
+ # only upcast trainable parameters (LoRA) into fp32
+ cast_training_params(models)
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
diff --git a/examples/t2i_adapter/train_t2i_adapter_sdxl.py b/examples/t2i_adapter/train_t2i_adapter_sdxl.py
index d99c368910a0..2a2302bc9764 100644
--- a/examples/t2i_adapter/train_t2i_adapter_sdxl.py
+++ b/examples/t2i_adapter/train_t2i_adapter_sdxl.py
@@ -60,7 +60,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/text_to_image/train_text_to_image.py b/examples/text_to_image/train_text_to_image.py
index bf2557e35f0c..1d36fd8cc79a 100644
--- a/examples/text_to_image/train_text_to_image.py
+++ b/examples/text_to_image/train_text_to_image.py
@@ -57,7 +57,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/text_to_image/train_text_to_image_flax.py b/examples/text_to_image/train_text_to_image_flax.py
index 474cca3595e3..6060902b76e9 100644
--- a/examples/text_to_image/train_text_to_image_flax.py
+++ b/examples/text_to_image/train_text_to_image_flax.py
@@ -49,7 +49,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = logging.getLogger(__name__)
diff --git a/examples/text_to_image/train_text_to_image_lora.py b/examples/text_to_image/train_text_to_image_lora.py
index 16ac971c08a0..bd93f3e37c7f 100644
--- a/examples/text_to_image/train_text_to_image_lora.py
+++ b/examples/text_to_image/train_text_to_image_lora.py
@@ -56,7 +56,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/text_to_image/train_text_to_image_lora_sdxl.py b/examples/text_to_image/train_text_to_image_lora_sdxl.py
index 3d266811ffe8..af7eeb805292 100644
--- a/examples/text_to_image/train_text_to_image_lora_sdxl.py
+++ b/examples/text_to_image/train_text_to_image_lora_sdxl.py
@@ -68,7 +68,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
if is_torch_npu_available():
diff --git a/examples/text_to_image/train_text_to_image_sdxl.py b/examples/text_to_image/train_text_to_image_sdxl.py
index 19abbc9ec682..73b0961ec504 100644
--- a/examples/text_to_image/train_text_to_image_sdxl.py
+++ b/examples/text_to_image/train_text_to_image_sdxl.py
@@ -55,7 +55,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
if is_torch_npu_available():
diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py
index cd19cae776ed..f91c8cf5d573 100644
--- a/examples/textual_inversion/textual_inversion.py
+++ b/examples/textual_inversion/textual_inversion.py
@@ -81,7 +81,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/textual_inversion/textual_inversion_flax.py b/examples/textual_inversion/textual_inversion_flax.py
index a62b36c4de3e..2da280b705f1 100644
--- a/examples/textual_inversion/textual_inversion_flax.py
+++ b/examples/textual_inversion/textual_inversion_flax.py
@@ -56,7 +56,7 @@
# ------------------------------------------------------------------------------
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = logging.getLogger(__name__)
diff --git a/examples/textual_inversion/textual_inversion_sdxl.py b/examples/textual_inversion/textual_inversion_sdxl.py
index f13835d928a8..5bd9165fc5ab 100644
--- a/examples/textual_inversion/textual_inversion_sdxl.py
+++ b/examples/textual_inversion/textual_inversion_sdxl.py
@@ -76,7 +76,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__)
diff --git a/examples/unconditional_image_generation/train_unconditional.py b/examples/unconditional_image_generation/train_unconditional.py
index c1f37eae21d6..06a6108b4b5b 100644
--- a/examples/unconditional_image_generation/train_unconditional.py
+++ b/examples/unconditional_image_generation/train_unconditional.py
@@ -29,7 +29,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/vqgan/train_vqgan.py b/examples/vqgan/train_vqgan.py
index e33eef777264..df823808cf3b 100644
--- a/examples/vqgan/train_vqgan.py
+++ b/examples/vqgan/train_vqgan.py
@@ -50,7 +50,7 @@
import wandb
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/wuerstchen/text_to_image/train_text_to_image_lora_prior.py b/examples/wuerstchen/text_to_image/train_text_to_image_lora_prior.py
index 44fbee463b9f..1fb133d6678f 100644
--- a/examples/wuerstchen/text_to_image/train_text_to_image_lora_prior.py
+++ b/examples/wuerstchen/text_to_image/train_text_to_image_lora_prior.py
@@ -50,7 +50,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/wuerstchen/text_to_image/train_text_to_image_prior.py b/examples/wuerstchen/text_to_image/train_text_to_image_prior.py
index 1f015b0b2005..6617bc22667f 100644
--- a/examples/wuerstchen/text_to_image/train_text_to_image_prior.py
+++ b/examples/wuerstchen/text_to_image/train_text_to_image_prior.py
@@ -51,7 +51,7 @@
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.29.0.dev0")
+check_min_version("0.30.0.dev0")
logger = get_logger(__name__, log_level="INFO")
diff --git a/setup.py b/setup.py
index fd82a2adb2c6..262aee2aa62b 100644
--- a/setup.py
+++ b/setup.py
@@ -95,7 +95,7 @@
# 2. once modified, run: `make deps_table_update` to update src/diffusers/dependency_versions_table.py
_deps = [
"Pillow", # keep the PIL.Image.Resampling deprecation away
- "accelerate>=0.29.3",
+ "accelerate>=0.31.0",
"compel==0.1.8",
"datasets",
"filelock",
@@ -132,7 +132,7 @@
"tensorboard",
"torch>=1.4",
"torchvision",
- "transformers>=4.25.1",
+ "transformers>=4.41.2",
"urllib3<=2.0.0",
"black",
]
@@ -254,7 +254,7 @@ def run(self):
setup(
name="diffusers",
- version="0.29.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
+ version="0.30.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
description="State-of-the-art diffusion in PyTorch and JAX.",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py
index 94dfa9fa5b4d..cad0ca544026 100644
--- a/src/diffusers/__init__.py
+++ b/src/diffusers/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "0.29.0.dev0"
+__version__ = "0.30.0.dev0"
from typing import TYPE_CHECKING
@@ -91,6 +91,8 @@
"MultiAdapter",
"PixArtTransformer2DModel",
"PriorTransformer",
+ "SD3ControlNetModel",
+ "SD3MultiControlNetModel",
"SD3Transformer2DModel",
"StableCascadeUNet",
"T2IAdapter",
@@ -278,6 +280,7 @@
"StableCascadeCombinedPipeline",
"StableCascadeDecoderPipeline",
"StableCascadePriorPipeline",
+ "StableDiffusion3ControlNetPipeline",
"StableDiffusion3Img2ImgPipeline",
"StableDiffusion3Pipeline",
"StableDiffusionAdapterPipeline",
@@ -501,6 +504,8 @@
MultiAdapter,
PixArtTransformer2DModel,
PriorTransformer,
+ SD3ControlNetModel,
+ SD3MultiControlNetModel,
SD3Transformer2DModel,
T2IAdapter,
T5FilmDecoder,
@@ -666,6 +671,7 @@
StableCascadeCombinedPipeline,
StableCascadeDecoderPipeline,
StableCascadePriorPipeline,
+ StableDiffusion3ControlNetPipeline,
StableDiffusion3Img2ImgPipeline,
StableDiffusion3Pipeline,
StableDiffusionAdapterPipeline,
diff --git a/src/diffusers/dependency_versions_table.py b/src/diffusers/dependency_versions_table.py
index 9413be5e4eed..9e7bf242eca7 100644
--- a/src/diffusers/dependency_versions_table.py
+++ b/src/diffusers/dependency_versions_table.py
@@ -3,7 +3,7 @@
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
- "accelerate": "accelerate>=0.29.3",
+ "accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
@@ -40,7 +40,7 @@
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
- "transformers": "transformers>=4.25.1",
+ "transformers": "transformers>=4.41.2",
"urllib3": "urllib3<=2.0.0",
"black": "black",
}
diff --git a/src/diffusers/image_processor.py b/src/diffusers/image_processor.py
index a49586e51c08..8738ff49fa0f 100644
--- a/src/diffusers/image_processor.py
+++ b/src/diffusers/image_processor.py
@@ -569,7 +569,7 @@ def preprocess(
channel = image.shape[1]
# don't need any preprocess if the image is latents
- if channel == 4:
+ if channel == self.vae_latent_channels:
return image
height, width = self.get_default_height_width(image, height, width)
@@ -585,7 +585,6 @@ def preprocess(
FutureWarning,
)
do_normalize = False
-
if do_normalize:
image = self.normalize(image)
diff --git a/src/diffusers/loaders/lora.py b/src/diffusers/loaders/lora.py
index 120a89533db6..194183896dae 100644
--- a/src/diffusers/loaders/lora.py
+++ b/src/diffusers/loaders/lora.py
@@ -462,17 +462,18 @@ def load_lora_into_text_encoder(
text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict)
for name, _ in text_encoder_attn_modules(text_encoder):
- rank_key = f"{name}.out_proj.lora_B.weight"
- rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1]
-
- patch_mlp = any(".mlp." in key for key in text_encoder_lora_state_dict.keys())
- if patch_mlp:
- for name, _ in text_encoder_mlp_modules(text_encoder):
- rank_key_fc1 = f"{name}.fc1.lora_B.weight"
- rank_key_fc2 = f"{name}.fc2.lora_B.weight"
-
- rank[rank_key_fc1] = text_encoder_lora_state_dict[rank_key_fc1].shape[1]
- rank[rank_key_fc2] = text_encoder_lora_state_dict[rank_key_fc2].shape[1]
+ for module in ("out_proj", "q_proj", "k_proj", "v_proj"):
+ rank_key = f"{name}.{module}.lora_B.weight"
+ if rank_key not in text_encoder_lora_state_dict:
+ continue
+ rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1]
+
+ for name, _ in text_encoder_mlp_modules(text_encoder):
+ for module in ("fc1", "fc2"):
+ rank_key = f"{name}.{module}.lora_B.weight"
+ if rank_key not in text_encoder_lora_state_dict:
+ continue
+ rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1]
if network_alphas is not None:
alpha_keys = [
diff --git a/src/diffusers/loaders/single_file.py b/src/diffusers/loaders/single_file.py
index d2b69b234ab0..d7bf67288c0a 100644
--- a/src/diffusers/loaders/single_file.py
+++ b/src/diffusers/loaders/single_file.py
@@ -28,9 +28,11 @@
_legacy_load_safety_checker,
_legacy_load_scheduler,
create_diffusers_clip_model_from_ldm,
+ create_diffusers_t5_model_from_checkpoint,
fetch_diffusers_config,
fetch_original_config,
is_clip_model_in_single_file,
+ is_t5_in_single_file,
load_single_file_checkpoint,
)
@@ -118,6 +120,16 @@ def load_single_file_sub_model(
is_legacy_loading=is_legacy_loading,
)
+ elif is_transformers_model and is_t5_in_single_file(checkpoint):
+ loaded_sub_model = create_diffusers_t5_model_from_checkpoint(
+ class_obj,
+ checkpoint=checkpoint,
+ config=cached_model_config_path,
+ subfolder=name,
+ torch_dtype=torch_dtype,
+ local_files_only=local_files_only,
+ )
+
elif is_tokenizer and is_legacy_loading:
loaded_sub_model = _legacy_load_clip_tokenizer(
class_obj, checkpoint=checkpoint, config=cached_model_config_path, local_files_only=local_files_only
diff --git a/src/diffusers/loaders/single_file_model.py b/src/diffusers/loaders/single_file_model.py
index f576ecf262cf..f537a3f44917 100644
--- a/src/diffusers/loaders/single_file_model.py
+++ b/src/diffusers/loaders/single_file_model.py
@@ -276,16 +276,18 @@ def from_single_file(cls, pretrained_model_link_or_path_or_dict: Optional[str] =
if is_accelerate_available():
unexpected_keys = load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype)
- if model._keys_to_ignore_on_load_unexpected is not None:
- for pat in model._keys_to_ignore_on_load_unexpected:
- unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
- if len(unexpected_keys) > 0:
- logger.warning(
- f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}"
- )
else:
- model.load_state_dict(diffusers_format_checkpoint)
+ _, unexpected_keys = model.load_state_dict(diffusers_format_checkpoint, strict=False)
+
+ if model._keys_to_ignore_on_load_unexpected is not None:
+ for pat in model._keys_to_ignore_on_load_unexpected:
+ unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
+
+ if len(unexpected_keys) > 0:
+ logger.warning(
+ f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}"
+ )
if torch_dtype is not None:
model.to(torch_dtype)
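Note: with this change both loading paths (accelerate and plain `load_state_dict(..., strict=False)`) report unexpected keys the same way. A rough sketch of the filtering, with made-up keys and ignore patterns:

```py
import re

unexpected_keys = ["model_ema.decay", "decoder.mid_block.attentions.0.to_q.weight"]
keys_to_ignore_on_load_unexpected = [r"^model_ema\."]  # hypothetical ignore pattern

# Drop keys that match any ignore pattern, then warn about whatever is left.
for pat in keys_to_ignore_on_load_unexpected:
    unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

if len(unexpected_keys) > 0:
    print(f"Some weights of the checkpoint were not used: {', '.join(unexpected_keys)}")
```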
diff --git a/src/diffusers/loaders/single_file_utils.py b/src/diffusers/loaders/single_file_utils.py
index d788aa11d37d..e0a660020a06 100644
--- a/src/diffusers/loaders/single_file_utils.py
+++ b/src/diffusers/loaders/single_file_utils.py
@@ -252,7 +252,6 @@
LDM_CLIP_PREFIX_TO_REMOVE = [
"cond_stage_model.transformer.",
"conditioner.embedders.0.transformer.",
- "text_encoders.clip_l.transformer.",
]
OPEN_CLIP_PREFIX = "conditioner.embedders.0.model."
LDM_OPEN_CLIP_TEXT_PROJECTION_DIM = 1024
@@ -399,11 +398,14 @@ def is_open_clip_sdxl_model(checkpoint):
def is_open_clip_sd3_model(checkpoint):
- is_open_clip_sdxl_refiner_model(checkpoint)
+ if CHECKPOINT_KEY_NAMES["open_clip_sd3"] in checkpoint:
+ return True
+
+ return False
def is_open_clip_sdxl_refiner_model(checkpoint):
- if CHECKPOINT_KEY_NAMES["open_clip_sd3"] in checkpoint:
+ if CHECKPOINT_KEY_NAMES["open_clip_sdxl_refiner"] in checkpoint:
return True
return False
@@ -1233,11 +1235,14 @@ def convert_ldm_vae_checkpoint(checkpoint, config):
return new_checkpoint
-def convert_ldm_clip_checkpoint(checkpoint):
+def convert_ldm_clip_checkpoint(checkpoint, remove_prefix=None):
keys = list(checkpoint.keys())
text_model_dict = {}
- remove_prefixes = LDM_CLIP_PREFIX_TO_REMOVE
+ remove_prefixes = []
+ remove_prefixes.extend(LDM_CLIP_PREFIX_TO_REMOVE)
+ if remove_prefix:
+ remove_prefixes.append(remove_prefix)
for key in keys:
for prefix in remove_prefixes:
@@ -1263,8 +1268,6 @@ def convert_open_clip_checkpoint(
else:
text_proj_dim = LDM_OPEN_CLIP_TEXT_PROJECTION_DIM
- text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids")
-
keys = list(checkpoint.keys())
keys_to_ignore = SD_2_TEXT_ENCODER_KEYS_TO_IGNORE
@@ -1313,9 +1316,6 @@ def convert_open_clip_checkpoint(
else:
text_model_dict[diffusers_key] = checkpoint.get(key)
- if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings.position_ids)):
- text_model_dict.pop("text_model.embeddings.position_ids", None)
-
return text_model_dict
@@ -1376,6 +1376,13 @@ def create_diffusers_clip_model_from_ldm(
):
diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint)
+ elif (
+ is_clip_sd3_model(checkpoint)
+ and checkpoint[CHECKPOINT_KEY_NAMES["clip_sd3"]].shape[-1] == position_embedding_dim
+ ):
+ diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint, "text_encoders.clip_l.transformer.")
+ diffusers_format_checkpoint["text_projection.weight"] = torch.eye(position_embedding_dim)
+
elif is_open_clip_model(checkpoint):
prefix = "cond_stage_model.model."
diffusers_format_checkpoint = convert_open_clip_checkpoint(model, checkpoint, prefix=prefix)
@@ -1391,26 +1398,28 @@ def create_diffusers_clip_model_from_ldm(
prefix = "conditioner.embedders.0.model."
diffusers_format_checkpoint = convert_open_clip_checkpoint(model, checkpoint, prefix=prefix)
- elif is_open_clip_sd3_model(checkpoint):
- prefix = "text_encoders.clip_g.transformer."
- diffusers_format_checkpoint = convert_open_clip_checkpoint(model, checkpoint, prefix=prefix)
+ elif (
+ is_open_clip_sd3_model(checkpoint)
+ and checkpoint[CHECKPOINT_KEY_NAMES["open_clip_sd3"]].shape[-1] == position_embedding_dim
+ ):
+ diffusers_format_checkpoint = convert_ldm_clip_checkpoint(checkpoint, "text_encoders.clip_g.transformer.")
else:
raise ValueError("The provided checkpoint does not seem to contain a valid CLIP model.")
if is_accelerate_available():
unexpected_keys = load_model_dict_into_meta(model, diffusers_format_checkpoint, dtype=torch_dtype)
- if model._keys_to_ignore_on_load_unexpected is not None:
- for pat in model._keys_to_ignore_on_load_unexpected:
- unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
+ else:
+ _, unexpected_keys = model.load_state_dict(diffusers_format_checkpoint, strict=False)
- if len(unexpected_keys) > 0:
- logger.warning(
- f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}"
- )
+ if model._keys_to_ignore_on_load_unexpected is not None:
+ for pat in model._keys_to_ignore_on_load_unexpected:
+ unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
- else:
- model.load_state_dict(diffusers_format_checkpoint)
+ if len(unexpected_keys) > 0:
+ logger.warning(
+ f"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \n {[', '.join(unexpected_keys)]}"
+ )
if torch_dtype is not None:
model.to(torch_dtype)
@@ -1755,7 +1764,7 @@ def convert_sd3_t5_checkpoint_to_diffusers(checkpoint):
keys = list(checkpoint.keys())
text_model_dict = {}
- remove_prefixes = ["text_encoders.t5xxl.transformer.encoder."]
+ remove_prefixes = ["text_encoders.t5xxl.transformer."]
for key in keys:
for prefix in remove_prefixes:
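Note: widening the T5 prefix from `text_encoders.t5xxl.transformer.encoder.` to `text_encoders.t5xxl.transformer.` keeps keys that live outside `encoder.` (such as the shared token embedding). A standalone sketch of the prefix stripping these converters perform, with example keys:

```py
# Example checkpoint keys (illustrative); values would normally be tensors.
checkpoint = {
    "text_encoders.t5xxl.transformer.encoder.block.0.layer.0.SelfAttention.q.weight": "...",
    "text_encoders.t5xxl.transformer.shared.weight": "...",
}
remove_prefixes = ["text_encoders.t5xxl.transformer."]

text_model_dict = {}
for key in list(checkpoint.keys()):
    for prefix in remove_prefixes:
        if key.startswith(prefix):
            # Strip the prefix so the key matches the transformers T5 module layout.
            text_model_dict[key[len(prefix):]] = checkpoint[key]

print(sorted(text_model_dict))
# ['encoder.block.0.layer.0.SelfAttention.q.weight', 'shared.weight']
```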
diff --git a/src/diffusers/models/__init__.py b/src/diffusers/models/__init__.py
index b28fc537d99d..e863c8010a3d 100644
--- a/src/diffusers/models/__init__.py
+++ b/src/diffusers/models/__init__.py
@@ -33,6 +33,7 @@
_import_structure["autoencoders.consistency_decoder_vae"] = ["ConsistencyDecoderVAE"]
_import_structure["autoencoders.vq_model"] = ["VQModel"]
_import_structure["controlnet"] = ["ControlNetModel"]
+ _import_structure["controlnet_sd3"] = ["SD3ControlNetModel", "SD3MultiControlNetModel"]
_import_structure["controlnet_xs"] = ["ControlNetXSAdapter", "UNetControlNetXSModel"]
_import_structure["embeddings"] = ["ImageProjection"]
_import_structure["modeling_utils"] = ["ModelMixin"]
@@ -74,6 +75,7 @@
VQModel,
)
from .controlnet import ControlNetModel
+ from .controlnet_sd3 import SD3ControlNetModel, SD3MultiControlNetModel
from .controlnet_xs import ControlNetXSAdapter, UNetControlNetXSModel
from .embeddings import ImageProjection
from .modeling_utils import ModelMixin
diff --git a/src/diffusers/models/autoencoders/autoencoder_tiny.py b/src/diffusers/models/autoencoders/autoencoder_tiny.py
index 39b885b45222..6e503478fe2b 100644
--- a/src/diffusers/models/autoencoders/autoencoder_tiny.py
+++ b/src/diffusers/models/autoencoders/autoencoder_tiny.py
@@ -111,6 +111,7 @@ def __init__(
latent_shift: float = 0.5,
force_upcast: bool = False,
scaling_factor: float = 1.0,
+ shift_factor: float = 0.0,
):
super().__init__()
diff --git a/src/diffusers/models/controlnet_sd3.py b/src/diffusers/models/controlnet_sd3.py
new file mode 100644
index 000000000000..d32b662b463b
--- /dev/null
+++ b/src/diffusers/models/controlnet_sd3.py
@@ -0,0 +1,418 @@
+# Copyright 2024 Stability AI, The HuggingFace Team and The InstantX Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+
+from ..configuration_utils import ConfigMixin, register_to_config
+from ..loaders import FromOriginalModelMixin, PeftAdapterMixin
+from ..models.attention import JointTransformerBlock
+from ..models.attention_processor import Attention, AttentionProcessor
+from ..models.modeling_utils import ModelMixin
+from ..utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers
+from .controlnet import BaseOutput, zero_module
+from .embeddings import CombinedTimestepTextProjEmbeddings, PatchEmbed
+from .transformers.transformer_2d import Transformer2DModelOutput
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+@dataclass
+class SD3ControlNetOutput(BaseOutput):
+ controlnet_block_samples: Tuple[torch.Tensor]
+
+
+class SD3ControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
+ _supports_gradient_checkpointing = True
+
+ @register_to_config
+ def __init__(
+ self,
+ sample_size: int = 128,
+ patch_size: int = 2,
+ in_channels: int = 16,
+ num_layers: int = 18,
+ attention_head_dim: int = 64,
+ num_attention_heads: int = 18,
+ joint_attention_dim: int = 4096,
+ caption_projection_dim: int = 1152,
+ pooled_projection_dim: int = 2048,
+ out_channels: int = 16,
+ pos_embed_max_size: int = 96,
+ ):
+ super().__init__()
+ default_out_channels = in_channels
+ self.out_channels = out_channels if out_channels is not None else default_out_channels
+ self.inner_dim = num_attention_heads * attention_head_dim
+
+ self.pos_embed = PatchEmbed(
+ height=sample_size,
+ width=sample_size,
+ patch_size=patch_size,
+ in_channels=in_channels,
+ embed_dim=self.inner_dim,
+ pos_embed_max_size=pos_embed_max_size,
+ )
+ self.time_text_embed = CombinedTimestepTextProjEmbeddings(
+ embedding_dim=self.inner_dim, pooled_projection_dim=pooled_projection_dim
+ )
+ self.context_embedder = nn.Linear(joint_attention_dim, caption_projection_dim)
+
+ # `attention_head_dim` is doubled to account for the mixing.
+        # It needs to be crafted when we get the actual checkpoints.
+ self.transformer_blocks = nn.ModuleList(
+ [
+ JointTransformerBlock(
+ dim=self.inner_dim,
+ num_attention_heads=num_attention_heads,
+ attention_head_dim=self.inner_dim,
+ context_pre_only=False,
+ )
+ for i in range(num_layers)
+ ]
+ )
+
+ # controlnet_blocks
+ self.controlnet_blocks = nn.ModuleList([])
+ for _ in range(len(self.transformer_blocks)):
+ controlnet_block = nn.Linear(self.inner_dim, self.inner_dim)
+ controlnet_block = zero_module(controlnet_block)
+ self.controlnet_blocks.append(controlnet_block)
+ pos_embed_input = PatchEmbed(
+ height=sample_size,
+ width=sample_size,
+ patch_size=patch_size,
+ in_channels=in_channels,
+ embed_dim=self.inner_dim,
+ pos_embed_type=None,
+ )
+ self.pos_embed_input = zero_module(pos_embed_input)
+
+ self.gradient_checkpointing = False
+
+ # Copied from diffusers.models.unets.unet_3d_condition.UNet3DConditionModel.enable_forward_chunking
+ def enable_forward_chunking(self, chunk_size: Optional[int] = None, dim: int = 0) -> None:
+ """
+        Sets the feed-forward layers to use [feed forward
+        chunking](https://huggingface.co/blog/reformer#2-chunked-feed-forward-layers).
+
+ Parameters:
+ chunk_size (`int`, *optional*):
+ The chunk size of the feed-forward layers. If not specified, will run feed-forward layer individually
+ over each tensor of dim=`dim`.
+ dim (`int`, *optional*, defaults to `0`):
+ The dimension over which the feed-forward computation should be chunked. Choose between dim=0 (batch)
+ or dim=1 (sequence length).
+ """
+ if dim not in [0, 1]:
+ raise ValueError(f"Make sure to set `dim` to either 0 or 1, not {dim}")
+
+ # By default chunk size is 1
+ chunk_size = chunk_size or 1
+
+ def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int):
+ if hasattr(module, "set_chunk_feed_forward"):
+ module.set_chunk_feed_forward(chunk_size=chunk_size, dim=dim)
+
+ for child in module.children():
+ fn_recursive_feed_forward(child, chunk_size, dim)
+
+ for module in self.children():
+ fn_recursive_feed_forward(module, chunk_size, dim)
+
+ @property
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
+ r"""
+ Returns:
+            `dict` of attention processors: A dictionary containing all attention processors used in the model,
+            indexed by their weight names.
+ """
+ # set recursively
+ processors = {}
+
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
+ if hasattr(module, "get_processor"):
+ processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
+
+ for sub_name, child in module.named_children():
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
+
+ return processors
+
+ for name, module in self.named_children():
+ fn_recursive_add_processors(name, module, processors)
+
+ return processors
+
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+ r"""
+ Sets the attention processor to use to compute attention.
+
+ Parameters:
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
+ for **all** `Attention` layers.
+
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
+ processor. This is strongly recommended when setting trainable attention processors.
+
+ """
+ count = len(self.attn_processors.keys())
+
+ if isinstance(processor, dict) and len(processor) != count:
+ raise ValueError(
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
+ )
+
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
+ if hasattr(module, "set_processor"):
+ if not isinstance(processor, dict):
+ module.set_processor(processor)
+ else:
+ module.set_processor(processor.pop(f"{name}.processor"))
+
+ for sub_name, child in module.named_children():
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
+
+ for name, module in self.named_children():
+ fn_recursive_attn_processor(name, module, processor)
+
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
+ def fuse_qkv_projections(self):
+ """
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
+ are fused. For cross-attention modules, key and value projection matrices are fused.
+
+
+        <Tip warning={true}>
+
+        This API is 🧪 experimental.
+
+        </Tip>
+        """
+ self.original_attn_processors = None
+
+ for _, attn_processor in self.attn_processors.items():
+ if "Added" in str(attn_processor.__class__.__name__):
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
+
+ self.original_attn_processors = self.attn_processors
+
+ for module in self.modules():
+ if isinstance(module, Attention):
+ module.fuse_projections(fuse=True)
+
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
+ def unfuse_qkv_projections(self):
+ """Disables the fused QKV projection if enabled.
+
+        <Tip warning={true}>
+
+        This API is 🧪 experimental.
+
+        </Tip>
+
+        """
+ if self.original_attn_processors is not None:
+ self.set_attn_processor(self.original_attn_processors)
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if hasattr(module, "gradient_checkpointing"):
+ module.gradient_checkpointing = value
+
+ @classmethod
+ def from_transformer(cls, transformer, num_layers=None, load_weights_from_transformer=True):
+ config = transformer.config
+ config["num_layers"] = num_layers or config.num_layers
+ controlnet = cls(**config)
+
+ if load_weights_from_transformer:
+ controlnet.pos_embed.load_state_dict(transformer.pos_embed.state_dict(), strict=False)
+ controlnet.time_text_embed.load_state_dict(transformer.time_text_embed.state_dict(), strict=False)
+ controlnet.context_embedder.load_state_dict(transformer.context_embedder.state_dict(), strict=False)
+ controlnet.transformer_blocks.load_state_dict(transformer.transformer_blocks.state_dict())
+
+ controlnet.pos_embed_input = zero_module(controlnet.pos_embed_input)
+
+ return controlnet
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ controlnet_cond: torch.Tensor,
+ conditioning_scale: float = 1.0,
+ encoder_hidden_states: torch.FloatTensor = None,
+ pooled_projections: torch.FloatTensor = None,
+ timestep: torch.LongTensor = None,
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+ return_dict: bool = True,
+ ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
+ """
+        The [`SD3ControlNetModel`] forward method.
+
+ Args:
+ hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`):
+ Input `hidden_states`.
+ controlnet_cond (`torch.Tensor`):
+ The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
+ conditioning_scale (`float`, defaults to `1.0`):
+ The scale factor for ControlNet outputs.
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`):
+ Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
+ pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): Embeddings projected
+ from the embeddings of input conditions.
+ timestep ( `torch.LongTensor`):
+ Used to indicate denoising step.
+ joint_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
+ tuple.
+
+ Returns:
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
+ `tuple` where the first element is the sample tensor.
+ """
+ if joint_attention_kwargs is not None:
+ joint_attention_kwargs = joint_attention_kwargs.copy()
+ lora_scale = joint_attention_kwargs.pop("scale", 1.0)
+ else:
+ lora_scale = 1.0
+
+ if USE_PEFT_BACKEND:
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
+ scale_lora_layers(self, lora_scale)
+ else:
+ if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
+ logger.warning(
+ "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
+ )
+
+ height, width = hidden_states.shape[-2:]
+
+ hidden_states = self.pos_embed(hidden_states) # takes care of adding positional embeddings too.
+ temb = self.time_text_embed(timestep, pooled_projections)
+ encoder_hidden_states = self.context_embedder(encoder_hidden_states)
+
+ # add
+ hidden_states = hidden_states + self.pos_embed_input(controlnet_cond)
+
+ block_res_samples = ()
+
+ for block in self.transformer_blocks:
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module, return_dict=None):
+ def custom_forward(*inputs):
+ if return_dict is not None:
+ return module(*inputs, return_dict=return_dict)
+ else:
+ return module(*inputs)
+
+ return custom_forward
+
+ ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(block),
+ hidden_states,
+ encoder_hidden_states,
+ temb,
+ **ckpt_kwargs,
+ )
+
+ else:
+ encoder_hidden_states, hidden_states = block(
+ hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb
+ )
+
+ block_res_samples = block_res_samples + (hidden_states,)
+
+ controlnet_block_res_samples = ()
+ for block_res_sample, controlnet_block in zip(block_res_samples, self.controlnet_blocks):
+ block_res_sample = controlnet_block(block_res_sample)
+ controlnet_block_res_samples = controlnet_block_res_samples + (block_res_sample,)
+
+ # 6. scaling
+ controlnet_block_res_samples = [sample * conditioning_scale for sample in controlnet_block_res_samples]
+
+ if USE_PEFT_BACKEND:
+ # remove `lora_scale` from each PEFT layer
+ unscale_lora_layers(self, lora_scale)
+
+ if not return_dict:
+ return (controlnet_block_res_samples,)
+
+ return SD3ControlNetOutput(controlnet_block_samples=controlnet_block_res_samples)
+
+
+class SD3MultiControlNetModel(ModelMixin):
+ r"""
+ `SD3ControlNetModel` wrapper class for Multi-SD3ControlNet
+
+ This module is a wrapper for multiple instances of the `SD3ControlNetModel`. The `forward()` API is designed to be
+ compatible with `SD3ControlNetModel`.
+
+ Args:
+ controlnets (`List[SD3ControlNetModel]`):
+ Provides additional conditioning to the unet during the denoising process. You must set multiple
+ `SD3ControlNetModel` as a list.
+ """
+
+ def __init__(self, controlnets):
+ super().__init__()
+ self.nets = nn.ModuleList(controlnets)
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+        controlnet_cond: List[torch.Tensor],
+ conditioning_scale: List[float],
+ pooled_projections: torch.FloatTensor,
+ encoder_hidden_states: torch.FloatTensor = None,
+ timestep: torch.LongTensor = None,
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+ return_dict: bool = True,
+ ) -> Union[SD3ControlNetOutput, Tuple]:
+ for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
+ block_samples = controlnet(
+ hidden_states=hidden_states,
+ timestep=timestep,
+ encoder_hidden_states=encoder_hidden_states,
+ pooled_projections=pooled_projections,
+ controlnet_cond=image,
+ conditioning_scale=scale,
+ joint_attention_kwargs=joint_attention_kwargs,
+ return_dict=return_dict,
+ )
+
+ # merge samples
+ if i == 0:
+ control_block_samples = block_samples
+ else:
+ control_block_samples = [
+ control_block_sample + block_sample
+ for control_block_sample, block_sample in zip(control_block_samples[0], block_samples[0])
+ ]
+ control_block_samples = (tuple(control_block_samples),)
+
+ return control_block_samples
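Note: a hedged usage sketch for the new model classes above. It assumes the SD3 medium weights are accessible (the repo is gated on the Hub) and only illustrates `from_transformer`, which copies the embedders and the first `num_layers` joint blocks and zero-initializes the conditioning patch embedder:

```py
import torch
from diffusers import SD3Transformer2DModel
from diffusers.models.controlnet_sd3 import SD3ControlNetModel, SD3MultiControlNetModel

# Load the base SD3 transformer (gated repo; access must be granted beforehand).
transformer = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    subfolder="transformer",
    torch_dtype=torch.float16,
)

# Build a ControlNet that reuses the transformer's weights for its first 12 joint blocks;
# the conditioning patch embedder starts zero-initialized.
controlnet = SD3ControlNetModel.from_transformer(transformer, num_layers=12)

# Several ControlNets can be combined; their block residuals are summed in forward().
multi_controlnet = SD3MultiControlNetModel([controlnet])
```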
diff --git a/src/diffusers/models/controlnet_xs.py b/src/diffusers/models/controlnet_xs.py
index c2a52a351532..a060dc1bbe1a 100644
--- a/src/diffusers/models/controlnet_xs.py
+++ b/src/diffusers/models/controlnet_xs.py
@@ -114,6 +114,7 @@ def get_down_block_adapter(
cross_attention_dim: Optional[int] = 1024,
add_downsample: bool = True,
upcast_attention: Optional[bool] = False,
+ use_linear_projection: Optional[bool] = True,
):
num_layers = 2 # only support sd + sdxl
@@ -152,7 +153,7 @@ def get_down_block_adapter(
in_channels=ctrl_out_channels,
num_layers=transformer_layers_per_block[i],
cross_attention_dim=cross_attention_dim,
- use_linear_projection=True,
+ use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
norm_num_groups=find_largest_factor(ctrl_out_channels, max_factor=max_norm_num_groups),
)
@@ -200,6 +201,7 @@ def get_mid_block_adapter(
num_attention_heads: Optional[int] = 1,
cross_attention_dim: Optional[int] = 1024,
upcast_attention: bool = False,
+ use_linear_projection: bool = True,
):
# Before the midblock application, information is concatted from base to control.
# Concat doesn't require change in number of channels
@@ -214,7 +216,7 @@ def get_mid_block_adapter(
resnet_groups=find_largest_factor(gcd(ctrl_channels, ctrl_channels + base_channels), max_norm_num_groups),
cross_attention_dim=cross_attention_dim,
num_attention_heads=num_attention_heads,
- use_linear_projection=True,
+ use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
)
@@ -308,6 +310,7 @@ def __init__(
transformer_layers_per_block: Union[int, Tuple[int]] = 1,
upcast_attention: bool = True,
max_norm_num_groups: int = 32,
+ use_linear_projection: bool = True,
):
super().__init__()
@@ -381,6 +384,7 @@ def __init__(
cross_attention_dim=cross_attention_dim[i],
add_downsample=not is_final_block,
upcast_attention=upcast_attention,
+ use_linear_projection=use_linear_projection,
)
)
@@ -393,6 +397,7 @@ def __init__(
num_attention_heads=num_attention_heads[-1],
cross_attention_dim=cross_attention_dim[-1],
upcast_attention=upcast_attention,
+ use_linear_projection=use_linear_projection,
)
# up
@@ -489,6 +494,7 @@ def from_unet(
transformer_layers_per_block=unet.config.transformer_layers_per_block,
upcast_attention=unet.config.upcast_attention,
max_norm_num_groups=unet.config.norm_num_groups,
+ use_linear_projection=unet.config.use_linear_projection,
)
# ensure that the ControlNetXSAdapter is the same dtype as the UNet2DConditionModel
@@ -538,6 +544,7 @@ def __init__(
addition_embed_type: Optional[str] = None,
addition_time_embed_dim: Optional[int] = None,
upcast_attention: bool = True,
+ use_linear_projection: bool = True,
time_cond_proj_dim: Optional[int] = None,
projection_class_embeddings_input_dim: Optional[int] = None,
# additional controlnet configs
@@ -595,7 +602,12 @@ def __init__(
time_embed_dim,
cond_proj_dim=time_cond_proj_dim,
)
- self.ctrl_time_embedding = TimestepEmbedding(in_channels=time_embed_input_dim, time_embed_dim=time_embed_dim)
+ if ctrl_learn_time_embedding:
+ self.ctrl_time_embedding = TimestepEmbedding(
+ in_channels=time_embed_input_dim, time_embed_dim=time_embed_dim
+ )
+ else:
+ self.ctrl_time_embedding = None
if addition_embed_type is None:
self.base_add_time_proj = None
@@ -632,6 +644,7 @@ def __init__(
cross_attention_dim=cross_attention_dim[i],
add_downsample=not is_final_block,
upcast_attention=upcast_attention,
+ use_linear_projection=use_linear_projection,
)
)
@@ -647,6 +660,7 @@ def __init__(
ctrl_num_attention_heads=ctrl_num_attention_heads[-1],
cross_attention_dim=cross_attention_dim[-1],
upcast_attention=upcast_attention,
+ use_linear_projection=use_linear_projection,
)
# # Create up blocks
@@ -690,6 +704,7 @@ def __init__(
add_upsample=not is_final_block,
upcast_attention=upcast_attention,
norm_num_groups=norm_num_groups,
+ use_linear_projection=use_linear_projection,
)
)
@@ -754,6 +769,7 @@ def from_unet(
"addition_embed_type",
"addition_time_embed_dim",
"upcast_attention",
+ "use_linear_projection",
"time_cond_proj_dim",
"projection_class_embeddings_input_dim",
]
@@ -1219,6 +1235,7 @@ def __init__(
cross_attention_dim: Optional[int] = 1024,
add_downsample: bool = True,
upcast_attention: Optional[bool] = False,
+ use_linear_projection: Optional[bool] = True,
):
super().__init__()
base_resnets = []
@@ -1270,7 +1287,7 @@ def __init__(
in_channels=base_out_channels,
num_layers=transformer_layers_per_block[i],
cross_attention_dim=cross_attention_dim,
- use_linear_projection=True,
+ use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
norm_num_groups=norm_num_groups,
)
@@ -1282,7 +1299,7 @@ def __init__(
in_channels=ctrl_out_channels,
num_layers=transformer_layers_per_block[i],
cross_attention_dim=cross_attention_dim,
- use_linear_projection=True,
+ use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
norm_num_groups=find_largest_factor(ctrl_out_channels, max_factor=ctrl_max_norm_num_groups),
)
@@ -1342,6 +1359,7 @@ def get_first_cross_attention(block):
ctrl_num_attention_heads = get_first_cross_attention(ctrl_downblock).heads
cross_attention_dim = get_first_cross_attention(base_downblock).cross_attention_dim
upcast_attention = get_first_cross_attention(base_downblock).upcast_attention
+ use_linear_projection = base_downblock.attentions[0].use_linear_projection
else:
has_crossattn = False
transformer_layers_per_block = None
@@ -1349,6 +1367,7 @@ def get_first_cross_attention(block):
ctrl_num_attention_heads = None
cross_attention_dim = None
upcast_attention = None
+ use_linear_projection = None
add_downsample = base_downblock.downsamplers is not None
# create model
@@ -1367,6 +1386,7 @@ def get_first_cross_attention(block):
cross_attention_dim=cross_attention_dim,
add_downsample=add_downsample,
upcast_attention=upcast_attention,
+ use_linear_projection=use_linear_projection,
)
# # load weights
@@ -1527,6 +1547,7 @@ def __init__(
ctrl_num_attention_heads: Optional[int] = 1,
cross_attention_dim: Optional[int] = 1024,
upcast_attention: bool = False,
+ use_linear_projection: Optional[bool] = True,
):
super().__init__()
@@ -1541,7 +1562,7 @@ def __init__(
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
num_attention_heads=base_num_attention_heads,
- use_linear_projection=True,
+ use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
)
@@ -1556,7 +1577,7 @@ def __init__(
),
cross_attention_dim=cross_attention_dim,
num_attention_heads=ctrl_num_attention_heads,
- use_linear_projection=True,
+ use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
)
@@ -1590,6 +1611,7 @@ def get_first_cross_attention(midblock):
ctrl_num_attention_heads = get_first_cross_attention(ctrl_midblock).heads
cross_attention_dim = get_first_cross_attention(base_midblock).cross_attention_dim
upcast_attention = get_first_cross_attention(base_midblock).upcast_attention
+ use_linear_projection = base_midblock.attentions[0].use_linear_projection
# create model
model = cls(
@@ -1603,6 +1625,7 @@ def get_first_cross_attention(midblock):
ctrl_num_attention_heads=ctrl_num_attention_heads,
cross_attention_dim=cross_attention_dim,
upcast_attention=upcast_attention,
+ use_linear_projection=use_linear_projection,
)
# load weights
@@ -1677,6 +1700,7 @@ def __init__(
cross_attention_dim: int = 1024,
add_upsample: bool = True,
upcast_attention: bool = False,
+ use_linear_projection: Optional[bool] = True,
):
super().__init__()
resnets = []
@@ -1714,7 +1738,7 @@ def __init__(
in_channels=out_channels,
num_layers=transformer_layers_per_block[i],
cross_attention_dim=cross_attention_dim,
- use_linear_projection=True,
+ use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
norm_num_groups=norm_num_groups,
)
@@ -1753,12 +1777,14 @@ def get_first_cross_attention(block):
num_attention_heads = get_first_cross_attention(base_upblock).heads
cross_attention_dim = get_first_cross_attention(base_upblock).cross_attention_dim
upcast_attention = get_first_cross_attention(base_upblock).upcast_attention
+ use_linear_projection = base_upblock.attentions[0].use_linear_projection
else:
has_crossattn = False
transformer_layers_per_block = None
num_attention_heads = None
cross_attention_dim = None
upcast_attention = None
+ use_linear_projection = None
add_upsample = base_upblock.upsamplers is not None
# create model
@@ -1776,6 +1802,7 @@ def get_first_cross_attention(block):
cross_attention_dim=cross_attention_dim,
add_upsample=add_upsample,
upcast_attention=upcast_attention,
+ use_linear_projection=use_linear_projection,
)
# load weights
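Note: the `use_linear_projection` plumbing above makes the adapter mirror whatever the base UNet was trained with instead of hard-coding `True`. A hedged sketch (repo id is illustrative; SD 1.x-style UNets were trained with `use_linear_projection=False`):

```py
from diffusers import ControlNetXSAdapter, UNet2DConditionModel

# Load a base UNet whose config carries use_linear_projection (False for SD 1.x).
unet = UNet2DConditionModel.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet"
)
print(unet.config.use_linear_projection)  # False

# from_unet now forwards use_linear_projection into the adapter blocks.
adapter = ControlNetXSAdapter.from_unet(unet, size_ratio=0.1)
```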
diff --git a/src/diffusers/models/modeling_utils.py b/src/diffusers/models/modeling_utils.py
index d927f2418f84..ab98d4cea5a4 100644
--- a/src/diffusers/models/modeling_utils.py
+++ b/src/diffusers/models/modeling_utils.py
@@ -462,7 +462,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
A map that specifies where each submodule should go. It doesn't need to be defined for each
parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
- same device.
+ same device. Defaults to `None`, meaning that the model will be loaded on CPU.
Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For
more information about each option see [designing a device
@@ -774,7 +774,12 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
else: # else let accelerate handle loading and dispatching.
# Load weights and dispatch according to the device_map
# by default the device_map is None and the weights are loaded on the CPU
+ force_hook = True
device_map = _determine_device_map(model, device_map, max_memory, torch_dtype)
+ if device_map is None and is_sharded:
+ # we load the parameters on the cpu
+ device_map = {"": "cpu"}
+ force_hook = False
try:
accelerate.load_checkpoint_and_dispatch(
model,
@@ -784,7 +789,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
offload_folder=offload_folder,
offload_state_dict=offload_state_dict,
dtype=torch_dtype,
- force_hooks=True,
+ force_hooks=force_hook,
strict=True,
)
except AttributeError as e:
@@ -808,12 +813,14 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P
model._temp_convert_self_to_deprecated_attention_blocks()
accelerate.load_checkpoint_and_dispatch(
model,
- model_file,
+ model_file if not is_sharded else sharded_ckpt_cached_folder,
device_map,
max_memory=max_memory,
offload_folder=offload_folder,
offload_state_dict=offload_state_dict,
dtype=torch_dtype,
+                    force_hooks=force_hook,
+ strict=True,
)
model._undo_temp_convert_self_to_deprecated_attention_blocks()
else:
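Note: with these changes, loading a diffusers checkpoint without a `device_map` places the weights on CPU (sharded checkpoints now get an explicit `device_map={"": "cpu"}` and no longer force accelerate hooks), while `device_map="auto"` still lets accelerate compute the placement and dispatch the shards. A hedged example (gated repo id used only for illustration; accelerate is required for `device_map`):

```py
import torch
from diffusers import SD3Transformer2DModel

# Default: no device_map, so the weights (sharded or not) end up on CPU.
model_cpu = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    subfolder="transformer",
    torch_dtype=torch.float16,
)

# Explicit device_map: accelerate computes the placement and dispatches the weights.
model_auto = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    subfolder="transformer",
    torch_dtype=torch.float16,
    device_map="auto",
)
```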
diff --git a/src/diffusers/models/transformers/transformer_sd3.py b/src/diffusers/models/transformers/transformer_sd3.py
index 4b159511e25d..677ed4e28700 100644
--- a/src/diffusers/models/transformers/transformer_sd3.py
+++ b/src/diffusers/models/transformers/transformer_sd3.py
@@ -1,4 +1,4 @@
-# Copyright 2024 Stability AI and The HuggingFace Team. All rights reserved.
+# Copyright 2024 Stability AI, The HuggingFace Team and The InstantX Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,7 +13,7 @@
# limitations under the License.
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, List, Optional, Union
import torch
import torch.nn as nn
@@ -245,6 +245,7 @@ def forward(
encoder_hidden_states: torch.FloatTensor = None,
pooled_projections: torch.FloatTensor = None,
timestep: torch.LongTensor = None,
+ block_controlnet_hidden_states: List = None,
joint_attention_kwargs: Optional[Dict[str, Any]] = None,
return_dict: bool = True,
) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
@@ -260,6 +261,8 @@ def forward(
from the embeddings of input conditions.
timestep ( `torch.LongTensor`):
Used to indicate denoising step.
+            block_controlnet_hidden_states (`list` of `torch.Tensor`):
+ A list of tensors that if specified are added to the residuals of transformer blocks.
joint_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
@@ -293,7 +296,7 @@ def forward(
temb = self.time_text_embed(timestep, pooled_projections)
encoder_hidden_states = self.context_embedder(encoder_hidden_states)
- for block in self.transformer_blocks:
+ for index_block, block in enumerate(self.transformer_blocks):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
@@ -306,7 +309,7 @@ def custom_forward(*inputs):
return custom_forward
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
- hidden_states = torch.utils.checkpoint.checkpoint(
+ encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
encoder_hidden_states,
@@ -319,6 +322,11 @@ def custom_forward(*inputs):
hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb
)
+ # controlnet residual
+ if block_controlnet_hidden_states is not None and block.context_pre_only is False:
+ interval_control = len(self.transformer_blocks) // len(block_controlnet_hidden_states)
+ hidden_states = hidden_states + block_controlnet_hidden_states[index_block // interval_control]
+
hidden_states = self.norm_out(hidden_states, temb)
hidden_states = self.proj_out(hidden_states)
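Note: the residual hook above maps a (possibly shorter) list of ControlNet outputs onto the transformer blocks by integer division, so each ControlNet sample is reused for a contiguous group of blocks. A small sketch of that index mapping, with illustrative sizes:

```py
num_blocks = 24      # transformer blocks in the base model (illustrative)
num_residuals = 12   # ControlNet block outputs (illustrative)

interval_control = num_blocks // num_residuals  # 2

# Block i receives controlnet residual i // interval_control.
mapping = {index_block: index_block // interval_control for index_block in range(num_blocks)}
print(mapping[0], mapping[1], mapping[2], mapping[23])  # 0 0 1 11
```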
diff --git a/src/diffusers/models/unets/unet_1d_blocks.py b/src/diffusers/models/unets/unet_1d_blocks.py
index 3c7c1cbecee9..8fc27e94c474 100644
--- a/src/diffusers/models/unets/unet_1d_blocks.py
+++ b/src/diffusers/models/unets/unet_1d_blocks.py
@@ -200,7 +200,7 @@ def __init__(
self.upsample = None
if add_upsample:
- self.upsample = Downsample1D(out_channels, use_conv=True)
+ self.upsample = Upsample1D(out_channels, use_conv=True)
self.downsample = None
if add_downsample:
diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py
index 8b2c8a1b2119..a2041d6ea07b 100644
--- a/src/diffusers/pipelines/__init__.py
+++ b/src/diffusers/pipelines/__init__.py
@@ -20,6 +20,7 @@
_dummy_objects = {}
_import_structure = {
"controlnet": [],
+ "controlnet_sd3": [],
"controlnet_xs": [],
"deprecated": [],
"latent_diffusion": [],
@@ -142,6 +143,11 @@
"StableDiffusionXLControlNetXSPipeline",
]
)
+ _import_structure["controlnet_sd3"].extend(
+ [
+ "StableDiffusion3ControlNetPipeline",
+ ]
+ )
_import_structure["deepfloyd_if"] = [
"IFImg2ImgPipeline",
"IFImg2ImgSuperResolutionPipeline",
@@ -394,6 +400,9 @@
StableDiffusionXLControlNetInpaintPipeline,
StableDiffusionXLControlNetPipeline,
)
+ from .controlnet_sd3 import (
+ StableDiffusion3ControlNetPipeline,
+ )
from .controlnet_xs import (
StableDiffusionControlNetXSPipeline,
StableDiffusionXLControlNetXSPipeline,
diff --git a/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py b/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py
index 626367029e33..a2c49652739f 100644
--- a/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py
+++ b/src/diffusers/pipelines/controlnet/pipeline_controlnet_inpaint.py
@@ -118,129 +118,6 @@ def retrieve_latents(
raise AttributeError("Could not access latents of provided encoder_output")
-# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.prepare_mask_and_masked_image
-def prepare_mask_and_masked_image(image, mask, height, width, return_image=False):
- """
- Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
- converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
- ``image`` and ``1`` for the ``mask``.
-
- The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
- binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
-
- Args:
- image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
- It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
- ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
- mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
- It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
- ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
-
-
- Raises:
- ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
- should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
- TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
- (ot the other way around).
-
- Returns:
- tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
- dimensions: ``batch x channels x height x width``.
- """
- deprecation_message = "The prepare_mask_and_masked_image method is deprecated and will be removed in a future version. Please use VaeImageProcessor.preprocess instead"
- deprecate(
- "prepare_mask_and_masked_image",
- "0.30.0",
- deprecation_message,
- )
- if image is None:
- raise ValueError("`image` input cannot be undefined.")
-
- if mask is None:
- raise ValueError("`mask_image` input cannot be undefined.")
-
- if isinstance(image, torch.Tensor):
- if not isinstance(mask, torch.Tensor):
- raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not")
-
- # Batch single image
- if image.ndim == 3:
- assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
- image = image.unsqueeze(0)
-
- # Batch and add channel dim for single mask
- if mask.ndim == 2:
- mask = mask.unsqueeze(0).unsqueeze(0)
-
- # Batch single mask or add channel dim
- if mask.ndim == 3:
- # Single batched mask, no channel dim or single mask not batched but channel dim
- if mask.shape[0] == 1:
- mask = mask.unsqueeze(0)
-
- # Batched masks no channel dim
- else:
- mask = mask.unsqueeze(1)
-
- assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
- assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
- assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
-
- # Check image is in [-1, 1]
- if image.min() < -1 or image.max() > 1:
- raise ValueError("Image should be in [-1, 1] range")
-
- # Check mask is in [0, 1]
- if mask.min() < 0 or mask.max() > 1:
- raise ValueError("Mask should be in [0, 1] range")
-
- # Binarize mask
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
-
- # Image as float32
- image = image.to(dtype=torch.float32)
- elif isinstance(mask, torch.Tensor):
- raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
- else:
- # preprocess image
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
- image = [image]
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
- # resize all images w.r.t passed height an width
- image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
- image = np.concatenate(image, axis=0)
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
- image = np.concatenate([i[None, :] for i in image], axis=0)
-
- image = image.transpose(0, 3, 1, 2)
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
-
- # preprocess mask
- if isinstance(mask, (PIL.Image.Image, np.ndarray)):
- mask = [mask]
-
- if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
- mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
- mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
- mask = mask.astype(np.float32) / 255.0
- elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
- mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
-
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
- mask = torch.from_numpy(mask)
-
- masked_image = image * (mask < 0.5)
-
- # n.b. ensure backwards compatibility as old function does not return image
- if return_image:
- return mask, masked_image, image
-
- return mask, masked_image
-
-
class StableDiffusionControlNetInpaintPipeline(
DiffusionPipeline,
StableDiffusionMixin,
diff --git a/src/diffusers/pipelines/controlnet_sd3/__init__.py b/src/diffusers/pipelines/controlnet_sd3/__init__.py
new file mode 100644
index 000000000000..65a3855a0adc
--- /dev/null
+++ b/src/diffusers/pipelines/controlnet_sd3/__init__.py
@@ -0,0 +1,53 @@
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ DIFFUSERS_SLOW_IMPORT,
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ get_objects_from_module,
+ is_flax_available,
+ is_torch_available,
+ is_transformers_available,
+)
+
+
+_dummy_objects = {}
+_import_structure = {}
+
+try:
+ if not (is_transformers_available() and is_torch_available()):
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ from ...utils import dummy_torch_and_transformers_objects # noqa F403
+
+ _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+ _import_structure["pipeline_stable_diffusion_3_controlnet"] = ["StableDiffusion3ControlNetPipeline"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+ try:
+ if not (is_transformers_available() and is_torch_available()):
+ raise OptionalDependencyNotAvailable()
+
+ except OptionalDependencyNotAvailable:
+ from ...utils.dummy_torch_and_transformers_objects import *
+ else:
+ from .pipeline_stable_diffusion_3_controlnet import StableDiffusion3ControlNetPipeline
+
+ try:
+ if not (is_transformers_available() and is_flax_available()):
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ from ...utils.dummy_flax_and_transformers_objects import * # noqa F403
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(
+ __name__,
+ globals()["__file__"],
+ _import_structure,
+ module_spec=__spec__,
+ )
+ for name, value in _dummy_objects.items():
+ setattr(sys.modules[__name__], name, value)
diff --git a/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py
new file mode 100644
index 000000000000..188408d4cb2e
--- /dev/null
+++ b/src/diffusers/pipelines/controlnet_sd3/pipeline_stable_diffusion_3_controlnet.py
@@ -0,0 +1,1062 @@
+# Copyright 2024 Stability AI, The HuggingFace Team and The InstantX Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import torch
+from transformers import (
+ CLIPTextModelWithProjection,
+ CLIPTokenizer,
+ T5EncoderModel,
+ T5TokenizerFast,
+)
+
+from ...image_processor import PipelineImageInput, VaeImageProcessor
+from ...loaders import FromSingleFileMixin, SD3LoraLoaderMixin
+from ...models.autoencoders import AutoencoderKL
+from ...models.controlnet_sd3 import SD3ControlNetModel, SD3MultiControlNetModel
+from ...models.transformers import SD3Transformer2DModel
+from ...schedulers import FlowMatchEulerDiscreteScheduler
+from ...utils import (
+ is_torch_xla_available,
+ logging,
+ replace_example_docstring,
+)
+from ...utils.torch_utils import randn_tensor
+from ..pipeline_utils import DiffusionPipeline
+from ..stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput
+
+
+if is_torch_xla_available():
+ import torch_xla.core.xla_model as xm
+
+ XLA_AVAILABLE = True
+else:
+ XLA_AVAILABLE = False
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+EXAMPLE_DOC_STRING = """
+ Examples:
+ ```py
+ >>> import torch
+ >>> from diffusers import StableDiffusion3ControlNetPipeline
+ >>> from diffusers.models import SD3ControlNetModel, SD3MultiControlNetModel
+ >>> from diffusers.utils import load_image
+
+ >>> controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
+
+ >>> pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
+ ... "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
+ ... )
+ >>> pipe.to("cuda")
+ >>> control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")
+ >>> prompt = "A girl holding a sign that says InstantX"
+ >>> image = pipe(prompt, control_image=control_image, controlnet_conditioning_scale=0.7).images[0]
+ >>> image.save("sd3.png")
+ ```
+"""
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+ scheduler,
+ num_inference_steps: Optional[int] = None,
+ device: Optional[Union[str, torch.device]] = None,
+ timesteps: Optional[List[int]] = None,
+ sigmas: Optional[List[float]] = None,
+ **kwargs,
+):
+ """
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+ Args:
+ scheduler (`SchedulerMixin`):
+ The scheduler to get timesteps from.
+ num_inference_steps (`int`):
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+ must be `None`.
+ device (`str` or `torch.device`, *optional*):
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+ timesteps (`List[int]`, *optional*):
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+ `num_inference_steps` and `sigmas` must be `None`.
+ sigmas (`List[float]`, *optional*):
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+ `num_inference_steps` and `timesteps` must be `None`.
+
+ Returns:
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+ second element is the number of inference steps.
+ """
+ if timesteps is not None and sigmas is not None:
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+ if timesteps is not None:
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+ if not accepts_timesteps:
+ raise ValueError(
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+ f" timestep schedules. Please check whether you are using the correct scheduler."
+ )
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ num_inference_steps = len(timesteps)
+ elif sigmas is not None:
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+ if not accept_sigmas:
+ raise ValueError(
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
+ )
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ num_inference_steps = len(timesteps)
+ else:
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ return timesteps, num_inference_steps
+
+
+class StableDiffusion3ControlNetPipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin):
+ r"""
+ Args:
+ transformer ([`SD3Transformer2DModel`]):
+ Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
+ scheduler ([`FlowMatchEulerDiscreteScheduler`]):
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
+ vae ([`AutoencoderKL`]):
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
+ text_encoder ([`CLIPTextModelWithProjection`]):
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
+ specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant,
+ with an additional added projection layer that is initialized with a diagonal matrix with the `hidden_size`
+ as its dimension.
+ text_encoder_2 ([`CLIPTextModelWithProjection`]):
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
+ specifically the
+ [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
+ variant.
+ text_encoder_3 ([`T5EncoderModel`]):
+ Frozen text-encoder. Stable Diffusion 3 uses
+ [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the
+ [t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
+ tokenizer (`CLIPTokenizer`):
+ Tokenizer of class
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+ tokenizer_2 (`CLIPTokenizer`):
+ Second Tokenizer of class
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+ tokenizer_3 (`T5TokenizerFast`):
+ Tokenizer of class
+ [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
+ controlnet ([`SD3ControlNetModel`] or `List[SD3ControlNetModel]` or [`SD3MultiControlNetModel`]):
+            Provides additional conditioning to the `transformer` during the denoising process. If you set multiple
+ ControlNets as a list, the outputs from each ControlNet are added together to create one combined
+ additional conditioning.
+ """
+
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->text_encoder_3->transformer->vae"
+ _optional_components = []
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "negative_pooled_prompt_embeds"]
+
+ def __init__(
+ self,
+ transformer: SD3Transformer2DModel,
+ scheduler: FlowMatchEulerDiscreteScheduler,
+ vae: AutoencoderKL,
+ text_encoder: CLIPTextModelWithProjection,
+ tokenizer: CLIPTokenizer,
+ text_encoder_2: CLIPTextModelWithProjection,
+ tokenizer_2: CLIPTokenizer,
+ text_encoder_3: T5EncoderModel,
+ tokenizer_3: T5TokenizerFast,
+ controlnet: Union[
+ SD3ControlNetModel, List[SD3ControlNetModel], Tuple[SD3ControlNetModel], SD3MultiControlNetModel
+ ],
+ ):
+ super().__init__()
+
+ self.register_modules(
+ vae=vae,
+ text_encoder=text_encoder,
+ text_encoder_2=text_encoder_2,
+ text_encoder_3=text_encoder_3,
+ tokenizer=tokenizer,
+ tokenizer_2=tokenizer_2,
+ tokenizer_3=tokenizer_3,
+ transformer=transformer,
+ scheduler=scheduler,
+ controlnet=controlnet,
+ )
+ self.vae_scale_factor = (
+ 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
+ )
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+ self.tokenizer_max_length = (
+ self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
+ )
+ self.default_sample_size = (
+ self.transformer.config.sample_size
+ if hasattr(self, "transformer") and self.transformer is not None
+ else 128
+ )
+
+ # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_t5_prompt_embeds
+ def _get_t5_prompt_embeds(
+ self,
+ prompt: Union[str, List[str]] = None,
+ num_images_per_prompt: int = 1,
+ max_sequence_length: int = 256,
+ device: Optional[torch.device] = None,
+ dtype: Optional[torch.dtype] = None,
+ ):
+ device = device or self._execution_device
+ dtype = dtype or self.text_encoder.dtype
+
+ prompt = [prompt] if isinstance(prompt, str) else prompt
+ batch_size = len(prompt)
+
+ if self.text_encoder_3 is None:
+ return torch.zeros(
+ (
+ batch_size * num_images_per_prompt,
+ self.tokenizer_max_length,
+ self.transformer.config.joint_attention_dim,
+ ),
+ device=device,
+ dtype=dtype,
+ )
+
+ text_inputs = self.tokenizer_3(
+ prompt,
+ padding="max_length",
+ max_length=max_sequence_length,
+ truncation=True,
+ add_special_tokens=True,
+ return_tensors="pt",
+ )
+ text_input_ids = text_inputs.input_ids
+ untruncated_ids = self.tokenizer_3(prompt, padding="longest", return_tensors="pt").input_ids
+
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+ removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
+ logger.warning(
+ "The following part of your input was truncated because `max_sequence_length` is set to "
+ f" {max_sequence_length} tokens: {removed_text}"
+ )
+
+ prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0]
+
+ dtype = self.text_encoder_3.dtype
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
+
+ _, seq_len, _ = prompt_embeds.shape
+
+ # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+ return prompt_embeds
+
+ # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_clip_prompt_embeds
+ def _get_clip_prompt_embeds(
+ self,
+ prompt: Union[str, List[str]],
+ num_images_per_prompt: int = 1,
+ device: Optional[torch.device] = None,
+ clip_skip: Optional[int] = None,
+ clip_model_index: int = 0,
+ ):
+ device = device or self._execution_device
+
+ clip_tokenizers = [self.tokenizer, self.tokenizer_2]
+ clip_text_encoders = [self.text_encoder, self.text_encoder_2]
+
+ tokenizer = clip_tokenizers[clip_model_index]
+ text_encoder = clip_text_encoders[clip_model_index]
+
+ prompt = [prompt] if isinstance(prompt, str) else prompt
+ batch_size = len(prompt)
+
+ text_inputs = tokenizer(
+ prompt,
+ padding="max_length",
+ max_length=self.tokenizer_max_length,
+ truncation=True,
+ return_tensors="pt",
+ )
+
+ text_input_ids = text_inputs.input_ids
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
+ logger.warning(
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
+ f" {self.tokenizer_max_length} tokens: {removed_text}"
+ )
+ prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
+ pooled_prompt_embeds = prompt_embeds[0]
+
+ if clip_skip is None:
+ prompt_embeds = prompt_embeds.hidden_states[-2]
+ else:
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
+
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
+
+ _, seq_len, _ = prompt_embeds.shape
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1)
+ pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1)
+
+ return prompt_embeds, pooled_prompt_embeds
+
+ # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.encode_prompt
+ def encode_prompt(
+ self,
+ prompt: Union[str, List[str]],
+ prompt_2: Union[str, List[str]],
+ prompt_3: Union[str, List[str]],
+ device: Optional[torch.device] = None,
+ num_images_per_prompt: int = 1,
+ do_classifier_free_guidance: bool = True,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
+ negative_prompt_3: Optional[Union[str, List[str]]] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ clip_skip: Optional[int] = None,
+ max_sequence_length: int = 256,
+ ):
+ r"""
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+ prompt to be encoded
+ prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
+ used in all text-encoders
+ prompt_3 (`str` or `List[str]`, *optional*):
+ The prompt or prompts to be sent to the `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is
+ used in all text-encoders
+ device: (`torch.device`):
+ torch device
+ num_images_per_prompt (`int`):
+ number of images that should be generated per prompt
+ do_classifier_free_guidance (`bool`):
+ whether to use classifier free guidance or not
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+ less than `1`).
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+ `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
+ negative_prompt_3 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
+ `text_encoder_3`. If not defined, `negative_prompt` is used in all the text-encoders.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+ input argument.
+ clip_skip (`int`, *optional*):
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+ the output of the pre-final layer will be used for computing the prompt embeddings.
+ max_sequence_length (`int`, defaults to 256): Maximum sequence length to use with the `prompt`.
+ """
+ device = device or self._execution_device
+
+ prompt = [prompt] if isinstance(prompt, str) else prompt
+ if prompt is not None:
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ if prompt_embeds is None:
+ prompt_2 = prompt_2 or prompt
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
+ prompt_3 = prompt_3 or prompt
+ prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3
+
+ prompt_embed, pooled_prompt_embed = self._get_clip_prompt_embeds(
+ prompt=prompt,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ clip_skip=clip_skip,
+ clip_model_index=0,
+ )
+ prompt_2_embed, pooled_prompt_2_embed = self._get_clip_prompt_embeds(
+ prompt=prompt_2,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ clip_skip=clip_skip,
+ clip_model_index=1,
+ )
+ clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1)
+
+ t5_prompt_embed = self._get_t5_prompt_embeds(
+ prompt=prompt_3,
+ num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
+ device=device,
+ )
+
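+ # Zero-pad the CLIP embeddings along the feature dimension to match the T5 hidden size before concatenating along the sequence dimension.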
+ clip_prompt_embeds = torch.nn.functional.pad(
+ clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1])
+ )
+
+ prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2)
+ pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1)
+
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
+ negative_prompt = negative_prompt or ""
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
+ negative_prompt_3 = negative_prompt_3 or negative_prompt
+
+ # normalize str to list
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+ negative_prompt_2 = (
+ batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
+ )
+ negative_prompt_3 = (
+ batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3
+ )
+
+ if prompt is not None and type(prompt) is not type(negative_prompt):
+ raise TypeError(
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
+ f" {type(prompt)}."
+ )
+ elif batch_size != len(negative_prompt):
+ raise ValueError(
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
+ " the batch size of `prompt`."
+ )
+
+ negative_prompt_embed, negative_pooled_prompt_embed = self._get_clip_prompt_embeds(
+ negative_prompt,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ clip_skip=None,
+ clip_model_index=0,
+ )
+ negative_prompt_2_embed, negative_pooled_prompt_2_embed = self._get_clip_prompt_embeds(
+ negative_prompt_2,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ clip_skip=None,
+ clip_model_index=1,
+ )
+ negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1)
+
+ t5_negative_prompt_embed = self._get_t5_prompt_embeds(
+ prompt=negative_prompt_3,
+ num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
+ device=device,
+ )
+
+ negative_clip_prompt_embeds = torch.nn.functional.pad(
+ negative_clip_prompt_embeds,
+ (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1]),
+ )
+
+ negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2)
+ negative_pooled_prompt_embeds = torch.cat(
+ [negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1
+ )
+
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
+
+ def check_inputs(
+ self,
+ prompt,
+ prompt_2,
+ prompt_3,
+ height,
+ width,
+ negative_prompt=None,
+ negative_prompt_2=None,
+ negative_prompt_3=None,
+ prompt_embeds=None,
+ negative_prompt_embeds=None,
+ pooled_prompt_embeds=None,
+ negative_pooled_prompt_embeds=None,
+ callback_on_step_end_tensor_inputs=None,
+ ):
+ if height % 8 != 0 or width % 8 != 0:
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+
+ if callback_on_step_end_tensor_inputs is not None and not all(
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+ ):
+ raise ValueError(
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+ )
+
+ if prompt is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt_2 is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt_3 is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt_3`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt is None and prompt_embeds is None:
+ raise ValueError(
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+ )
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
+ elif prompt_3 is not None and (not isinstance(prompt_3, str) and not isinstance(prompt_3, list)):
+ raise ValueError(f"`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}")
+
+ if negative_prompt is not None and negative_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+ )
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+ )
+ elif negative_prompt_3 is not None and negative_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`:"
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+ )
+
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
+ raise ValueError(
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
+ f" {negative_prompt_embeds.shape}."
+ )
+
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
+ raise ValueError(
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
+ )
+
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
+ raise ValueError(
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
+ )
+
+ # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.prepare_latents
+ def prepare_latents(
+ self,
+ batch_size,
+ num_channels_latents,
+ height,
+ width,
+ dtype,
+ device,
+ generator,
+ latents=None,
+ ):
+ if latents is not None:
+ return latents.to(device=device, dtype=dtype)
+
+ shape = (
+ batch_size,
+ num_channels_latents,
+ int(height) // self.vae_scale_factor,
+ int(width) // self.vae_scale_factor,
+ )
+
+ if isinstance(generator, list) and len(generator) != batch_size:
+ raise ValueError(
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ )
+
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+
+ return latents
+
+ def prepare_image(
+ self,
+ image,
+ width,
+ height,
+ batch_size,
+ num_images_per_prompt,
+ device,
+ dtype,
+ do_classifier_free_guidance=False,
+ guess_mode=False,
+ ):
+ if isinstance(image, torch.Tensor):
+ pass
+ else:
+ image = self.image_processor.preprocess(image, height=height, width=width)
+
+ image_batch_size = image.shape[0]
+
+ if image_batch_size == 1:
+ repeat_by = batch_size
+ else:
+ # image batch size is the same as prompt batch size
+ repeat_by = num_images_per_prompt
+
+ image = image.repeat_interleave(repeat_by, dim=0)
+
+ image = image.to(device=device, dtype=dtype)
+
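+ # Duplicate the conditioning image so it matches the doubled batch used for classifier-free guidance.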
+ if do_classifier_free_guidance and not guess_mode:
+ image = torch.cat([image] * 2)
+
+ return image
+
+ @property
+ def guidance_scale(self):
+ return self._guidance_scale
+
+ @property
+ def clip_skip(self):
+ return self._clip_skip
+
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ @property
+ def do_classifier_free_guidance(self):
+ return self._guidance_scale > 1
+
+ @property
+ def joint_attention_kwargs(self):
+ return self._joint_attention_kwargs
+
+ @property
+ def num_timesteps(self):
+ return self._num_timesteps
+
+ @property
+ def interrupt(self):
+ return self._interrupt
+
+ @torch.no_grad()
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ prompt_2: Optional[Union[str, List[str]]] = None,
+ prompt_3: Optional[Union[str, List[str]]] = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ num_inference_steps: int = 28,
+ timesteps: List[int] = None,
+ guidance_scale: float = 7.0,
+ control_guidance_start: Union[float, List[float]] = 0.0,
+ control_guidance_end: Union[float, List[float]] = 1.0,
+ control_image: PipelineImageInput = None,
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
+ controlnet_pooled_projections: Optional[torch.FloatTensor] = None,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
+ negative_prompt_3: Optional[Union[str, List[str]]] = None,
+ num_images_per_prompt: Optional[int] = 1,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+ clip_skip: Optional[int] = None,
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ ):
+ r"""
+ Function invoked when calling the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
+ instead.
+ prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` will
+ be used instead.
+ prompt_3 (`str` or `List[str]`, *optional*):
+ The prompt or prompts to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` will
+ be used instead.
+ height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
+ width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
+ num_inference_steps (`int`, *optional*, defaults to 28):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ timesteps (`List[int]`, *optional*):
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+ passed will be used. Must be in descending order.
+ guidance_scale (`float`, *optional*, defaults to 7.0):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
+ The percentage of total steps at which the ControlNet starts applying.
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
+ The percentage of total steps at which the ControlNet stops applying.
+ control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
+ `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
+ The ControlNet input condition to provide guidance to the `transformer` for generation. If the type is
+ specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted
+ as an image. The dimensions of the output image default to `image`'s dimensions. If height and/or
+ width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`,
+ images must be passed as a list such that each element of the list can be correctly batched for input
+ to a single ControlNet.
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
+ to the residual in the original `transformer`. If multiple ControlNets are specified in `init`, you can set
+ the corresponding scale as a list.
+ controlnet_pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`):
+ Embeddings projected from the embeddings of controlnet input conditions.
+ negative_prompt (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+ less than `1`).
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
+ `text_encoder_2`. If not defined, `negative_prompt` is used instead
+ negative_prompt_3 (`str` or `List[str]`, *optional*):
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
+ `text_encoder_3`. If not defined, `negative_prompt` is used instead
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
+ The number of images to generate per prompt.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+ tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
+ input argument.
+ output_type (`str`, *optional*, defaults to `"pil"`):
+ The output format of the generated image. Choose between
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] instead
+ of a plain tuple.
+ joint_attention_kwargs (`dict`, *optional*):
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
+ `self.processor` in
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+ clip_skip (`int`, *optional*):
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+ the output of the pre-final layer will be used for computing the prompt embeddings.
+ callback_on_step_end (`Callable`, *optional*):
+ A function that is called at the end of each denoising step during inference. The function is called
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+ `callback_on_step_end_tensor_inputs`.
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+ `._callback_tensor_inputs` attribute of your pipeline class.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] or `tuple`:
+ [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] if `return_dict` is True, otherwise a
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
+ """
+
+ height = height or self.default_sample_size * self.vae_scale_factor
+ width = width or self.default_sample_size * self.vae_scale_factor
+
+ # align format for control guidance
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
+ mult = len(self.controlnet.nets) if isinstance(self.controlnet, SD3MultiControlNetModel) else 1
+ control_guidance_start, control_guidance_end = (
+ mult * [control_guidance_start],
+ mult * [control_guidance_end],
+ )
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ prompt_2,
+ prompt_3,
+ height,
+ width,
+ negative_prompt=negative_prompt,
+ negative_prompt_2=negative_prompt_2,
+ negative_prompt_3=negative_prompt_3,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ pooled_prompt_embeds=pooled_prompt_embeds,
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+ )
+
+ self._guidance_scale = guidance_scale
+ self._clip_skip = clip_skip
+ self._joint_attention_kwargs = joint_attention_kwargs
+ self._interrupt = False
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+ dtype = self.transformer.dtype
+
+ (
+ prompt_embeds,
+ negative_prompt_embeds,
+ pooled_prompt_embeds,
+ negative_pooled_prompt_embeds,
+ ) = self.encode_prompt(
+ prompt=prompt,
+ prompt_2=prompt_2,
+ prompt_3=prompt_3,
+ negative_prompt=negative_prompt,
+ negative_prompt_2=negative_prompt_2,
+ negative_prompt_3=negative_prompt_3,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ prompt_embeds=prompt_embeds,
+ negative_prompt_embeds=negative_prompt_embeds,
+ pooled_prompt_embeds=pooled_prompt_embeds,
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
+ device=device,
+ clip_skip=self.clip_skip,
+ num_images_per_prompt=num_images_per_prompt,
+ )
+
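+ # For classifier-free guidance, concatenate the negative and positive embeddings so both branches run in a single forward pass.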
+ if self.do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
+ pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
+
+ # 3. Prepare control image
+ if isinstance(self.controlnet, SD3ControlNetModel):
+ control_image = self.prepare_image(
+ image=control_image,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=dtype,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=False,
+ )
+ height, width = control_image.shape[-2:]
+
+ control_image = self.vae.encode(control_image).latent_dist.sample()
+ control_image = control_image * self.vae.config.scaling_factor
+
+ elif isinstance(self.controlnet, SD3MultiControlNetModel):
+ control_images = []
+
+ for control_image_ in control_image:
+ control_image_ = self.prepare_image(
+ image=control_image_,
+ width=width,
+ height=height,
+ batch_size=batch_size * num_images_per_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ device=device,
+ dtype=dtype,
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
+ guess_mode=False,
+ )
+
+ control_image_ = self.vae.encode(control_image_).latent_dist.sample()
+ control_image_ = control_image_ * self.vae.config.scaling_factor
+
+ control_images.append(control_image_)
+
+ control_image = control_images
+ else:
+ raise ValueError(f"Unexpected `controlnet` type: {type(self.controlnet)}. Expected `SD3ControlNetModel` or `SD3MultiControlNetModel`.")
+
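+ # The SD3 ControlNet consumes pooled text projections; default to zeros when the caller does not supply them.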
+ if controlnet_pooled_projections is None:
+ controlnet_pooled_projections = torch.zeros_like(pooled_prompt_embeds)
+ else:
+ controlnet_pooled_projections = controlnet_pooled_projections or pooled_prompt_embeds
+
+ # 4. Prepare timesteps
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+ self._num_timesteps = len(timesteps)
+
+ # 5. Prepare latent variables
+ num_channels_latents = self.transformer.config.in_channels
+ latents = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ )
+
+ # 6. Create tensor stating which controlnets to keep
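+ # Each entry is 1.0 while the current step falls inside the ControlNet's [control_guidance_start, control_guidance_end] window (as a fraction of total steps) and 0.0 otherwise.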
+ controlnet_keep = []
+ for i in range(len(timesteps)):
+ keeps = [
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
+ for s, e in zip(control_guidance_start, control_guidance_end)
+ ]
+ controlnet_keep.append(keeps[0] if isinstance(self.controlnet, SD3ControlNetModel) else keeps)
+
+ # 7. Denoising loop
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ if self.interrupt:
+ continue
+
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+ timestep = t.expand(latent_model_input.shape[0])
+
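+ # Combine the user-provided conditioning scale with the per-step keep mask computed above.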
+ if isinstance(controlnet_keep[i], list):
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
+ else:
+ controlnet_cond_scale = controlnet_conditioning_scale
+ if isinstance(controlnet_cond_scale, list):
+ controlnet_cond_scale = controlnet_cond_scale[0]
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
+
+ # controlnet(s) inference
+ control_block_samples = self.controlnet(
+ hidden_states=latent_model_input,
+ timestep=timestep,
+ encoder_hidden_states=prompt_embeds,
+ pooled_projections=controlnet_pooled_projections,
+ joint_attention_kwargs=self.joint_attention_kwargs,
+ controlnet_cond=control_image,
+ conditioning_scale=cond_scale,
+ return_dict=False,
+ )[0]
+
+ noise_pred = self.transformer(
+ hidden_states=latent_model_input,
+ timestep=timestep,
+ encoder_hidden_states=prompt_embeds,
+ pooled_projections=pooled_prompt_embeds,
+ block_controlnet_hidden_states=control_block_samples,
+ joint_attention_kwargs=self.joint_attention_kwargs,
+ return_dict=False,
+ )[0]
+
+ # perform guidance
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents_dtype = latents.dtype
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+
+ if latents.dtype != latents_dtype:
+ if torch.backends.mps.is_available():
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+ latents = latents.to(latents_dtype)
+
+ if callback_on_step_end is not None:
+ callback_kwargs = {}
+ for k in callback_on_step_end_tensor_inputs:
+ callback_kwargs[k] = locals()[k]
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+ latents = callback_outputs.pop("latents", latents)
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+ negative_pooled_prompt_embeds = callback_outputs.pop(
+ "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
+ )
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+
+ if XLA_AVAILABLE:
+ xm.mark_step()
+
+ if output_type == "latent":
+ image = latents
+
+ else:
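+ # Undo the SD3 VAE latent scaling and shift before decoding back to pixel space.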
+ latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+
+ image = self.vae.decode(latents, return_dict=False)[0]
+ image = self.image_processor.postprocess(image, output_type=output_type)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image,)
+
+ return StableDiffusion3PipelineOutput(images=image)
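Reviewer note: a minimal usage sketch of the new `StableDiffusion3ControlNetPipeline`. The repo IDs and the conditioning-image source are illustrative placeholders, not pinned by this PR; the call arguments mirror the `__call__` signature above.

```python
import torch
from diffusers import SD3ControlNetModel, StableDiffusion3ControlNetPipeline
from diffusers.utils import load_image

# Placeholder repo IDs; substitute real SD3 base and ControlNet checkpoints.
controlnet = SD3ControlNetModel.from_pretrained("<sd3-controlnet-repo-id>", torch_dtype=torch.float16)
pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
    "<sd3-base-repo-id>", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

control_image = load_image("<path-or-url-to-conditioning-image>")  # placeholder
image = pipe(
    prompt="a photo of an astronaut riding a horse on mars",
    control_image=control_image,
    controlnet_conditioning_scale=0.7,
    num_inference_steps=28,
    guidance_scale=7.0,
).images[0]
image.save("sd3_controlnet.png")
```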
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
index 3f694f6e6ed4..2d023f005436 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
@@ -15,7 +15,6 @@
import inspect
from typing import Any, Callable, Dict, List, Optional, Union
-import numpy as np
import PIL.Image
import torch
from packaging import version
@@ -38,128 +37,6 @@
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
- """
- Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
- converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
- ``image`` and ``1`` for the ``mask``.
-
- The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
- binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
-
- Args:
- image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
- It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
- ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
- mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
- It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
- ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
-
-
- Raises:
- ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
- should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
- TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
- (ot the other way around).
-
- Returns:
- tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
- dimensions: ``batch x channels x height x width``.
- """
- deprecation_message = "The prepare_mask_and_masked_image method is deprecated and will be removed in a future version. Please use VaeImageProcessor.preprocess instead"
- deprecate(
- "prepare_mask_and_masked_image",
- "0.30.0",
- deprecation_message,
- )
- if image is None:
- raise ValueError("`image` input cannot be undefined.")
-
- if mask is None:
- raise ValueError("`mask_image` input cannot be undefined.")
-
- if isinstance(image, torch.Tensor):
- if not isinstance(mask, torch.Tensor):
- raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not")
-
- # Batch single image
- if image.ndim == 3:
- assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
- image = image.unsqueeze(0)
-
- # Batch and add channel dim for single mask
- if mask.ndim == 2:
- mask = mask.unsqueeze(0).unsqueeze(0)
-
- # Batch single mask or add channel dim
- if mask.ndim == 3:
- # Single batched mask, no channel dim or single mask not batched but channel dim
- if mask.shape[0] == 1:
- mask = mask.unsqueeze(0)
-
- # Batched masks no channel dim
- else:
- mask = mask.unsqueeze(1)
-
- assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
- assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
- assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
-
- # Check image is in [-1, 1]
- if image.min() < -1 or image.max() > 1:
- raise ValueError("Image should be in [-1, 1] range")
-
- # Check mask is in [0, 1]
- if mask.min() < 0 or mask.max() > 1:
- raise ValueError("Mask should be in [0, 1] range")
-
- # Binarize mask
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
-
- # Image as float32
- image = image.to(dtype=torch.float32)
- elif isinstance(mask, torch.Tensor):
- raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
- else:
- # preprocess image
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
- image = [image]
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
- # resize all images w.r.t passed height an width
- image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
- image = np.concatenate(image, axis=0)
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
- image = np.concatenate([i[None, :] for i in image], axis=0)
-
- image = image.transpose(0, 3, 1, 2)
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
-
- # preprocess mask
- if isinstance(mask, (PIL.Image.Image, np.ndarray)):
- mask = [mask]
-
- if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
- mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
- mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
- mask = mask.astype(np.float32) / 255.0
- elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
- mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
-
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
- mask = torch.from_numpy(mask)
-
- masked_image = image * (mask < 0.5)
-
- # n.b. ensure backwards compatibility as old function does not return image
- if return_image:
- return mask, masked_image, image
-
- return mask, masked_image
-
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
diff --git a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py
index 8e951db68cc8..b8fd0b907684 100644
--- a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py
+++ b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py
@@ -205,6 +205,7 @@ def _get_t5_prompt_embeds(
self,
prompt: Union[str, List[str]] = None,
num_images_per_prompt: int = 1,
+ max_sequence_length: int = 256,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
):
@@ -216,7 +217,11 @@ def _get_t5_prompt_embeds(
if self.text_encoder_3 is None:
return torch.zeros(
- (batch_size, self.tokenizer_max_length, self.transformer.config.joint_attention_dim),
+ (
+ batch_size * num_images_per_prompt,
+ self.tokenizer_max_length,
+ self.transformer.config.joint_attention_dim,
+ ),
device=device,
dtype=dtype,
)
@@ -224,7 +229,7 @@ def _get_t5_prompt_embeds(
text_inputs = self.tokenizer_3(
prompt,
padding="max_length",
- max_length=self.tokenizer_max_length,
+ max_length=max_sequence_length,
truncation=True,
add_special_tokens=True,
return_tensors="pt",
@@ -235,8 +240,8 @@ def _get_t5_prompt_embeds(
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer_max_length} tokens: {removed_text}"
+ "The following part of your input was truncated because `max_sequence_length` is set to "
+ f" {max_sequence_length} tokens: {removed_text}"
)
prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0]
@@ -323,6 +328,7 @@ def encode_prompt(
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
clip_skip: Optional[int] = None,
+ max_sequence_length: int = 256,
):
r"""
@@ -403,6 +409,7 @@ def encode_prompt(
t5_prompt_embed = self._get_t5_prompt_embeds(
prompt=prompt_3,
num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
device=device,
)
@@ -456,7 +463,10 @@ def encode_prompt(
negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1)
t5_negative_prompt_embed = self._get_t5_prompt_embeds(
- prompt=negative_prompt_3, num_images_per_prompt=num_images_per_prompt, device=device
+ prompt=negative_prompt_3,
+ num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
+ device=device,
)
negative_clip_prompt_embeds = torch.nn.functional.pad(
@@ -486,6 +496,7 @@ def check_inputs(
pooled_prompt_embeds=None,
negative_pooled_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
+ max_sequence_length=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
@@ -557,6 +568,9 @@ def check_inputs(
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
)
+ if max_sequence_length is not None and max_sequence_length > 512:
+ raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
+
def prepare_latents(
self,
batch_size,
@@ -643,6 +657,7 @@ def __call__(
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ max_sequence_length: int = 256,
):
r"""
Function invoked when calling the pipeline for generation.
@@ -726,6 +741,7 @@ def __call__(
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
+ max_sequence_length (`int`, defaults to 256): Maximum sequence length to use with the `prompt`.
Examples:
@@ -753,6 +769,7 @@ def __call__(
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+ max_sequence_length=max_sequence_length,
)
self._guidance_scale = guidance_scale
@@ -790,6 +807,7 @@ def __call__(
device=device,
clip_skip=self.clip_skip,
num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
)
if self.do_classifier_free_guidance:
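For context, a short hedged sketch of how the new `max_sequence_length` argument is exercised at call time (the repo ID is a placeholder; values above 512 now raise a `ValueError` in `check_inputs`):

```python
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_pretrained("<sd3-repo-id>", torch_dtype=torch.float16).to("cuda")
# Allow longer T5 prompts than the default 256 tokens; 512 is the maximum accepted by check_inputs.
image = pipe(
    prompt="a very long, detailed prompt ...",
    max_sequence_length=512,
    num_inference_steps=28,
).images[0]
```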
diff --git a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py
index e7b5ebc92a55..fda363fc2978 100644
--- a/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py
+++ b/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py
@@ -220,6 +220,7 @@ def _get_t5_prompt_embeds(
self,
prompt: Union[str, List[str]] = None,
num_images_per_prompt: int = 1,
+ max_sequence_length: int = 256,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
):
@@ -231,7 +232,11 @@ def _get_t5_prompt_embeds(
if self.text_encoder_3 is None:
return torch.zeros(
- (batch_size, self.tokenizer_max_length, self.transformer.config.joint_attention_dim),
+ (
+ batch_size * num_images_per_prompt,
+ self.tokenizer_max_length,
+ self.transformer.config.joint_attention_dim,
+ ),
device=device,
dtype=dtype,
)
@@ -239,7 +244,7 @@ def _get_t5_prompt_embeds(
text_inputs = self.tokenizer_3(
prompt,
padding="max_length",
- max_length=self.tokenizer_max_length,
+ max_length=max_sequence_length,
truncation=True,
add_special_tokens=True,
return_tensors="pt",
@@ -250,8 +255,8 @@ def _get_t5_prompt_embeds(
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer_max_length} tokens: {removed_text}"
+ "The following part of your input was truncated because `max_sequence_length` is set to "
+ f" {max_sequence_length} tokens: {removed_text}"
)
prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0]
@@ -340,6 +345,7 @@ def encode_prompt(
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
clip_skip: Optional[int] = None,
+ max_sequence_length: int = 256,
):
r"""
@@ -420,6 +426,7 @@ def encode_prompt(
t5_prompt_embed = self._get_t5_prompt_embeds(
prompt=prompt_3,
num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
device=device,
)
@@ -473,7 +480,10 @@ def encode_prompt(
negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1)
t5_negative_prompt_embed = self._get_t5_prompt_embeds(
- prompt=negative_prompt_3, num_images_per_prompt=num_images_per_prompt, device=device
+ prompt=negative_prompt_3,
+ num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
+ device=device,
)
negative_clip_prompt_embeds = torch.nn.functional.pad(
@@ -502,6 +512,7 @@ def check_inputs(
pooled_prompt_embeds=None,
negative_pooled_prompt_embeds=None,
callback_on_step_end_tensor_inputs=None,
+ max_sequence_length=None,
):
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
@@ -573,6 +584,9 @@ def check_inputs(
"If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
)
+ if max_sequence_length is not None and max_sequence_length > 512:
+ raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
+
def get_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
init_timestep = min(num_inference_steps * strength, num_inference_steps)
@@ -591,8 +605,6 @@ def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dt
)
image = image.to(device=device, dtype=dtype)
- if image.shape[1] == self.vae.config.latent_channels:
- init_latents = image
batch_size = batch_size * num_images_per_prompt
if image.shape[1] == self.vae.config.latent_channels:
@@ -686,6 +698,7 @@ def __call__(
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ max_sequence_length: int = 256,
):
r"""
Function invoked when calling the pipeline for generation.
@@ -765,6 +778,7 @@ def __call__(
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
+ max_sequence_length (`int`, defaults to 256): Maximum sequence length to use with the `prompt`.
Examples:
@@ -788,6 +802,7 @@ def __call__(
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+ max_sequence_length=max_sequence_length,
)
self._guidance_scale = guidance_scale
@@ -824,6 +839,7 @@ def __call__(
device=device,
clip_skip=self.clip_skip,
num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
)
if self.do_classifier_free_guidance:
diff --git a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py
index 631e309993b1..6fe9b4670ce8 100644
--- a/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py
+++ b/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py
@@ -132,124 +132,6 @@ def mask_pil_to_torch(mask, height, width):
return mask
-def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
- """
- Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
- converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
- ``image`` and ``1`` for the ``mask``.
-
- The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
- binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
-
- Args:
- image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
- It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
- ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
- mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
- It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
- ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
-
-
- Raises:
- ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
- should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
- TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
- (ot the other way around).
-
- Returns:
- tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
- dimensions: ``batch x channels x height x width``.
- """
-
- # checkpoint. TOD(Yiyi) - need to clean this up later
- deprecation_message = "The prepare_mask_and_masked_image method is deprecated and will be removed in a future version. Please use VaeImageProcessor.preprocess instead"
- deprecate(
- "prepare_mask_and_masked_image",
- "0.30.0",
- deprecation_message,
- )
- if image is None:
- raise ValueError("`image` input cannot be undefined.")
-
- if mask is None:
- raise ValueError("`mask_image` input cannot be undefined.")
-
- if isinstance(image, torch.Tensor):
- if not isinstance(mask, torch.Tensor):
- mask = mask_pil_to_torch(mask, height, width)
-
- if image.ndim == 3:
- image = image.unsqueeze(0)
-
- # Batch and add channel dim for single mask
- if mask.ndim == 2:
- mask = mask.unsqueeze(0).unsqueeze(0)
-
- # Batch single mask or add channel dim
- if mask.ndim == 3:
- # Single batched mask, no channel dim or single mask not batched but channel dim
- if mask.shape[0] == 1:
- mask = mask.unsqueeze(0)
-
- # Batched masks no channel dim
- else:
- mask = mask.unsqueeze(1)
-
- assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
- # assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
- assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
-
- # Check image is in [-1, 1]
- # if image.min() < -1 or image.max() > 1:
- # raise ValueError("Image should be in [-1, 1] range")
-
- # Check mask is in [0, 1]
- if mask.min() < 0 or mask.max() > 1:
- raise ValueError("Mask should be in [0, 1] range")
-
- # Binarize mask
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
-
- # Image as float32
- image = image.to(dtype=torch.float32)
- elif isinstance(mask, torch.Tensor):
- raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
- else:
- # preprocess image
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
- image = [image]
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
- # resize all images w.r.t passed height an width
- image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
- image = np.concatenate(image, axis=0)
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
- image = np.concatenate([i[None, :] for i in image], axis=0)
-
- image = image.transpose(0, 3, 1, 2)
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
-
- mask = mask_pil_to_torch(mask, height, width)
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
-
- if image.shape[1] == 4:
- # images are in latent space and thus can't
- # be masked set masked_image to None
- # we assume that the checkpoint is not an inpainting
- # checkpoint. TOD(Yiyi) - need to clean this up later
- masked_image = None
- else:
- masked_image = image * (mask < 0.5)
-
- # n.b. ensure backwards compatibility as old function does not return image
- if return_image:
- return mask, masked_image, image
-
- return mask, masked_image
-
-
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
diff --git a/src/diffusers/training_utils.py b/src/diffusers/training_utils.py
index b2f561632d91..d3ff926eac8a 100644
--- a/src/diffusers/training_utils.py
+++ b/src/diffusers/training_utils.py
@@ -1,5 +1,6 @@
import contextlib
import copy
+import math
import random
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
@@ -220,6 +221,44 @@ def _set_state_dict_into_text_encoder(
set_peft_model_state_dict(text_encoder, text_encoder_state_dict, adapter_name="default")
+def compute_density_for_timestep_sampling(
+ weighting_scheme: str, batch_size: int, logit_mean: float = None, logit_std: float = None, mode_scale: float = None
+):
+ """Compute the density for sampling the timesteps when doing SD3 training.
+
+ Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528.
+
+ SD3 paper reference: https://arxiv.org/abs/2403.03206v1.
+ """
+ if weighting_scheme == "logit_normal":
+ # See 3.1 in the SD3 paper ($rf/lognorm(0.00,1.00)$).
+ u = torch.normal(mean=logit_mean, std=logit_std, size=(batch_size,), device="cpu")
+ u = torch.nn.functional.sigmoid(u)
+ elif weighting_scheme == "mode":
+ u = torch.rand(size=(batch_size,), device="cpu")
+ u = 1 - u - mode_scale * (torch.cos(math.pi * u / 2) ** 2 - 1 + u)
+ else:
+ u = torch.rand(size=(batch_size,), device="cpu")
+ return u
+
+
+def compute_loss_weighting_for_sd3(weighting_scheme: str, sigmas=None):
+ """Computes loss weighting scheme for SD3 training.
+
+ Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528.
+
+ SD3 paper reference: https://arxiv.org/abs/2403.03206v1.
+ """
+ if weighting_scheme == "sigma_sqrt":
+ weighting = (sigmas**-2.0).float()
+ elif weighting_scheme == "cosmap":
+ bot = 1 - 2 * sigmas + 2 * sigmas**2
+ weighting = 2 / (math.pi * bot)
+ else:
+ weighting = torch.ones_like(sigmas)
+ return weighting
+
+
# Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14
class EMAModel:
"""
diff --git a/src/diffusers/utils/dummy_pt_objects.py b/src/diffusers/utils/dummy_pt_objects.py
index 64c642a1d130..007570d81dae 100644
--- a/src/diffusers/utils/dummy_pt_objects.py
+++ b/src/diffusers/utils/dummy_pt_objects.py
@@ -242,6 +242,36 @@ def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
+class SD3ControlNetModel(metaclass=DummyObject):
+ _backends = ["torch"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch"])
+
+
+class SD3MultiControlNetModel(metaclass=DummyObject):
+ _backends = ["torch"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch"])
+
+
class SD3Transformer2DModel(metaclass=DummyObject):
_backends = ["torch"]
diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py
index 9b3512a82519..b5d17f3fce65 100644
--- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py
+++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py
@@ -902,6 +902,21 @@ def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
+class StableDiffusion3ControlNetPipeline(metaclass=DummyObject):
+ _backends = ["torch", "transformers"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["torch", "transformers"])
+
+ @classmethod
+ def from_config(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+ @classmethod
+ def from_pretrained(cls, *args, **kwargs):
+ requires_backends(cls, ["torch", "transformers"])
+
+
class StableDiffusion3Img2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
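The placeholder classes added to the two dummy-object modules above exist so that, in an environment without the required backends, the public names still import but fail loudly on use. A rough sketch of the resulting behavior, assuming torch/transformers are not installed (the exact error text comes from `requires_backends` and is not reproduced here):

```python
# Only meaningful in an environment where torch/transformers are NOT installed:
# the import below then resolves to the DummyObject placeholders added above.
from diffusers import SD3ControlNetModel

try:
    SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny")
except ImportError as err:
    # requires_backends raises an ImportError telling the user which backends to install.
    print(err)
```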
diff --git a/tests/lora/utils.py b/tests/lora/utils.py
index d08a26645602..9a07727db931 100644
--- a/tests/lora/utils.py
+++ b/tests/lora/utils.py
@@ -395,6 +395,69 @@ def test_simple_inference_with_text_lora_save_load(self):
"Loading from saved checkpoints should give same results.",
)
+ def test_simple_inference_with_partial_text_lora(self):
+ """
+ Tests a simple inference with a LoRA adapter attached to the text encoder,
+ using different ranks per module and with some adapter weights removed,
+ and makes sure the pipeline still works as expected.
+ """
+ for scheduler_cls in [DDIMScheduler, LCMScheduler]:
+ components, _, _ = self.get_dummy_components(scheduler_cls)
+ # Verify `LoraLoaderMixin.load_lora_into_text_encoder` handles different ranks per module (PR#8324).
+ text_lora_config = LoraConfig(
+ r=4,
+ rank_pattern={"q_proj": 1, "k_proj": 2, "v_proj": 3},
+ lora_alpha=4,
+ target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
+ init_lora_weights=False,
+ use_dora=False,
+ )
+ pipe = self.pipeline_class(**components)
+ pipe = pipe.to(torch_device)
+ pipe.set_progress_bar_config(disable=None)
+ _, _, inputs = self.get_dummy_inputs(with_generator=False)
+
+ output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
+ self.assertTrue(output_no_lora.shape == (1, 64, 64, 3))
+
+ pipe.text_encoder.add_adapter(text_lora_config)
+ self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
+ # Gather the state dict for the PEFT model, excluding `layers.4`, to ensure `load_lora_into_text_encoder`
+ # supports missing layers (PR#8324).
+ state_dict = {
+ f"text_encoder.{module_name}": param
+ for module_name, param in get_peft_model_state_dict(pipe.text_encoder).items()
+ if "text_model.encoder.layers.4" not in module_name
+ }
+
+ if self.has_two_text_encoders:
+ pipe.text_encoder_2.add_adapter(text_lora_config)
+ self.assertTrue(
+ check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
+ )
+ state_dict.update(
+ {
+ f"text_encoder_2.{module_name}": param
+ for module_name, param in get_peft_model_state_dict(pipe.text_encoder_2).items()
+ if "text_model.encoder.layers.4" not in module_name
+ }
+ )
+
+ output_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
+ self.assertTrue(
+ not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output"
+ )
+
+ # Unload lora and load it back using the pipe.load_lora_weights machinery
+ pipe.unload_lora_weights()
+ pipe.load_lora_weights(state_dict)
+
+ output_partial_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
+ self.assertTrue(
+ not np.allclose(output_partial_lora, output_lora, atol=1e-3, rtol=1e-3),
+ "Removing adapters should change the output",
+ )
+
def test_simple_inference_save_pretrained(self):
"""
Tests a simple usecase where users could use saving utilities for LoRA through save_pretrained
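Outside the test harness, the new `test_simple_inference_with_partial_text_lora` corresponds roughly to the user-level flow below: attach a text-encoder LoRA with per-module ranks, keep only part of its state dict, and reload it through `load_lora_weights`. This is a hedged sketch, not code from this diff; the base checkpoint and the dropped layer name are assumptions for illustration.

```python
import torch
from peft import LoraConfig, get_peft_model_state_dict

from diffusers import StableDiffusionPipeline

# Base checkpoint assumed for illustration.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)

# Different LoRA ranks per projection, as exercised by the test above.
text_lora_config = LoraConfig(
    r=4,
    rank_pattern={"q_proj": 1, "k_proj": 2, "v_proj": 3},
    lora_alpha=4,
    target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
    init_lora_weights=False,
)
pipe.text_encoder.add_adapter(text_lora_config)

# Keep only part of the adapter (drop one encoder layer), then reload it through the
# regular load_lora_weights entry point; missing layers should now be tolerated (PR#8324).
partial_state_dict = {
    f"text_encoder.{name}": param
    for name, param in get_peft_model_state_dict(pipe.text_encoder).items()
    if "text_model.encoder.layers.4" not in name
}
pipe.unload_lora_weights()
pipe.load_lora_weights(partial_state_dict)
```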
diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py
index a8564e0baf7b..5a1901d49a7d 100644
--- a/tests/models/test_modeling_common.py
+++ b/tests/models/test_modeling_common.py
@@ -872,6 +872,39 @@ def test_model_parallelism(self):
@require_torch_gpu
def test_sharded_checkpoints(self):
+ config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+ model = self.model_class(**config).eval()
+ model = model.to(torch_device)
+
+ torch.manual_seed(0)
+ base_output = model(**inputs_dict)
+
+ model_size = compute_module_sizes(model)[""]
+ max_shard_size = int((model_size * 0.75) / (2**10)) # Convert to KB as these test models are small.
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB")
+ self.assertTrue(os.path.exists(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)))
+
+ # Now check if the right number of shards exists. First, let's get the number of shards.
+ # Since this number can be dependent on the model being tested, it's important that we calculate it
+ # instead of hardcoding it.
+ with open(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)) as f:
+ weight_map_dict = json.load(f)["weight_map"]
+ first_key = list(weight_map_dict.keys())[0]
+ weight_loc = weight_map_dict[first_key] # e.g., diffusion_pytorch_model-00001-of-00002.safetensors
+ expected_num_shards = int(weight_loc.split("-")[-1].split(".")[0])
+
+ actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")])
+ self.assertTrue(actual_num_shards == expected_num_shards)
+
+ new_model = self.model_class.from_pretrained(tmp_dir)
+
+ torch.manual_seed(0)
+ new_output = new_model(**inputs_dict)
+ self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
+
+ @require_torch_gpu
+ def test_sharded_checkpoints_device_map(self):
config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.model_class(**config).eval()
if model._no_split_modules is None:
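As a user-level counterpart to the sharding tests above, saving a model with `max_shard_size` and reloading it looks roughly like the sketch below. The checkpoint name is the sharded dummy used by the UNet tests that follow; the hard-coded index filename is the value `SAFE_WEIGHTS_INDEX_NAME` is assumed to resolve to.

```python
import json
import os
import tempfile

from diffusers import UNet2DConditionModel

# Small sharded dummy checkpoint, the same one used by the UNet tests that follow.
model = UNet2DConditionModel.from_pretrained("hf-internal-testing/unet2d-sharded-dummy")

with tempfile.TemporaryDirectory() as tmp_dir:
    # Pick a max shard size smaller than the model so sharding actually kicks in.
    model.save_pretrained(tmp_dir, max_shard_size="10KB")

    # The index maps every weight to its shard file, e.g.
    # "diffusion_pytorch_model-00001-of-00002.safetensors".
    index_name = "diffusion_pytorch_model.safetensors.index.json"  # assumed value of SAFE_WEIGHTS_INDEX_NAME
    with open(os.path.join(tmp_dir, index_name)) as f:
        weight_map = json.load(f)["weight_map"]
    print(sorted(set(weight_map.values())))

    # from_pretrained stitches the shards back together; device_map="auto" (as in
    # test_sharded_checkpoints_device_map above) additionally dispatches them with accelerate.
    reloaded = UNet2DConditionModel.from_pretrained(tmp_dir)
```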
diff --git a/tests/models/unets/test_models_unet_2d_condition.py b/tests/models/unets/test_models_unet_2d_condition.py
index ca5d964f7609..dd524c5b8684 100644
--- a/tests/models/unets/test_models_unet_2d_condition.py
+++ b/tests/models/unets/test_models_unet_2d_condition.py
@@ -1038,7 +1038,7 @@ def test_ip_adapter_plus(self):
@require_torch_gpu
def test_load_sharded_checkpoint_from_hub(self):
_, inputs_dict = self.prepare_init_args_and_inputs_for_common()
- loaded_model = self.model_class.from_pretrained("hf-internal-testing/unet2d-sharded-dummy", device_map="auto")
+ loaded_model = self.model_class.from_pretrained("hf-internal-testing/unet2d-sharded-dummy")
new_output = loaded_model(**inputs_dict)
assert loaded_model
@@ -1046,6 +1046,25 @@ def test_load_sharded_checkpoint_from_hub(self):
@require_torch_gpu
def test_load_sharded_checkpoint_from_hub_local(self):
+ _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+ ckpt_path = snapshot_download("hf-internal-testing/unet2d-sharded-dummy")
+ loaded_model = self.model_class.from_pretrained(ckpt_path, local_files_only=True)
+ new_output = loaded_model(**inputs_dict)
+
+ assert loaded_model
+ assert new_output.sample.shape == (4, 4, 16, 16)
+
+ @require_torch_gpu
+ def test_load_sharded_checkpoint_device_map_from_hub(self):
+ _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
+ loaded_model = self.model_class.from_pretrained("hf-internal-testing/unet2d-sharded-dummy", device_map="auto")
+ new_output = loaded_model(**inputs_dict)
+
+ assert loaded_model
+ assert new_output.sample.shape == (4, 4, 16, 16)
+
+ @require_torch_gpu
+ def test_load_sharded_checkpoint_device_map_from_hub_local(self):
_, inputs_dict = self.prepare_init_args_and_inputs_for_common()
ckpt_path = snapshot_download("hf-internal-testing/unet2d-sharded-dummy")
loaded_model = self.model_class.from_pretrained(ckpt_path, local_files_only=True, device_map="auto")
diff --git a/tests/pipelines/controlnet_sd3/__init__.py b/tests/pipelines/controlnet_sd3/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py b/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py
new file mode 100644
index 000000000000..824c1de1b9a5
--- /dev/null
+++ b/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py
@@ -0,0 +1,348 @@
+# coding=utf-8
+# Copyright 2024 HuggingFace Inc and The InstantX Team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import gc
+import unittest
+
+import numpy as np
+import torch
+from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel
+
+from diffusers import (
+ AutoencoderKL,
+ FlowMatchEulerDiscreteScheduler,
+ SD3Transformer2DModel,
+ StableDiffusion3ControlNetPipeline,
+)
+from diffusers.models import SD3ControlNetModel, SD3MultiControlNetModel
+from diffusers.utils import load_image
+from diffusers.utils.testing_utils import (
+ enable_full_determinism,
+ require_torch_gpu,
+ slow,
+ torch_device,
+)
+from diffusers.utils.torch_utils import randn_tensor
+
+from ..test_pipelines_common import PipelineTesterMixin
+
+
+enable_full_determinism()
+
+
+class StableDiffusion3ControlNetPipelineFastTests(unittest.TestCase, PipelineTesterMixin):
+ pipeline_class = StableDiffusion3ControlNetPipeline
+ params = frozenset(
+ [
+ "prompt",
+ "height",
+ "width",
+ "guidance_scale",
+ "negative_prompt",
+ "prompt_embeds",
+ "negative_prompt_embeds",
+ ]
+ )
+ batch_params = frozenset(["prompt", "negative_prompt"])
+
+ def get_dummy_components(self):
+ torch.manual_seed(0)
+ transformer = SD3Transformer2DModel(
+ sample_size=32,
+ patch_size=1,
+ in_channels=8,
+ num_layers=4,
+ attention_head_dim=8,
+ num_attention_heads=4,
+ joint_attention_dim=32,
+ caption_projection_dim=32,
+ pooled_projection_dim=64,
+ out_channels=8,
+ )
+
+ torch.manual_seed(0)
+ controlnet = SD3ControlNetModel(
+ sample_size=32,
+ patch_size=1,
+ in_channels=8,
+ num_layers=1,
+ attention_head_dim=8,
+ num_attention_heads=4,
+ joint_attention_dim=32,
+ caption_projection_dim=32,
+ pooled_projection_dim=64,
+ out_channels=8,
+ )
+ clip_text_encoder_config = CLIPTextConfig(
+ bos_token_id=0,
+ eos_token_id=2,
+ hidden_size=32,
+ intermediate_size=37,
+ layer_norm_eps=1e-05,
+ num_attention_heads=4,
+ num_hidden_layers=5,
+ pad_token_id=1,
+ vocab_size=1000,
+ hidden_act="gelu",
+ projection_dim=32,
+ )
+
+ torch.manual_seed(0)
+ text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config)
+
+ torch.manual_seed(0)
+ text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config)
+
+ torch.manual_seed(0)
+ text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
+
+ tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
+ tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
+ tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
+
+ torch.manual_seed(0)
+ vae = AutoencoderKL(
+ sample_size=32,
+ in_channels=3,
+ out_channels=3,
+ block_out_channels=(4,),
+ layers_per_block=1,
+ latent_channels=8,
+ norm_num_groups=1,
+ use_quant_conv=False,
+ use_post_quant_conv=False,
+ shift_factor=0.0609,
+ scaling_factor=1.5035,
+ )
+
+ scheduler = FlowMatchEulerDiscreteScheduler()
+
+ return {
+ "scheduler": scheduler,
+ "text_encoder": text_encoder,
+ "text_encoder_2": text_encoder_2,
+ "text_encoder_3": text_encoder_3,
+ "tokenizer": tokenizer,
+ "tokenizer_2": tokenizer_2,
+ "tokenizer_3": tokenizer_3,
+ "transformer": transformer,
+ "vae": vae,
+ "controlnet": controlnet,
+ }
+
+ def get_dummy_inputs(self, device, seed=0):
+ if str(device).startswith("mps"):
+ generator = torch.manual_seed(seed)
+ else:
+ generator = torch.Generator(device="cpu").manual_seed(seed)
+
+ control_image = randn_tensor(
+ (1, 3, 32, 32),
+ generator=generator,
+ device=torch.device(device),
+ dtype=torch.float16,
+ )
+
+ controlnet_conditioning_scale = 0.5
+
+ inputs = {
+ "prompt": "A painting of a squirrel eating a burger",
+ "generator": generator,
+ "num_inference_steps": 2,
+ "guidance_scale": 5.0,
+ "output_type": "np",
+ "control_image": control_image,
+ "controlnet_conditioning_scale": controlnet_conditioning_scale,
+ }
+
+ return inputs
+
+ def test_controlnet_sd3(self):
+ components = self.get_dummy_components()
+ sd_pipe = StableDiffusion3ControlNetPipeline(**components)
+ sd_pipe = sd_pipe.to(torch_device, dtype=torch.float16)
+ sd_pipe.set_progress_bar_config(disable=None)
+
+ inputs = self.get_dummy_inputs(torch_device)
+ output = sd_pipe(**inputs)
+ image = output.images
+
+ image_slice = image[0, -3:, -3:, -1]
+ assert image.shape == (1, 32, 32, 3)
+
+ expected_slice = np.array(
+ [0.5761719, 0.71777344, 0.59228516, 0.578125, 0.6020508, 0.39453125, 0.46728516, 0.51708984, 0.58984375]
+ )
+
+ assert (
+ np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
+ ), f"Expected: {expected_slice}, got: {image_slice.flatten()}"
+
+
+@slow
+@require_torch_gpu
+class StableDiffusion3ControlNetPipelineSlowTests(unittest.TestCase):
+ pipeline_class = StableDiffusion3ControlNetPipeline
+
+ def setUp(self):
+ super().setUp()
+ gc.collect()
+ torch.cuda.empty_cache()
+
+ def tearDown(self):
+ super().tearDown()
+ gc.collect()
+ torch.cuda.empty_cache()
+
+ def test_canny(self):
+ controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
+ pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
+ "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
+ )
+ pipe.enable_model_cpu_offload()
+ pipe.set_progress_bar_config(disable=None)
+
+ generator = torch.Generator(device="cpu").manual_seed(0)
+ prompt = "Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. text 'InstantX' on image"
+ n_prompt = "NSFW, nude, naked, porn, ugly"
+ control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")
+
+ output = pipe(
+ prompt,
+ negative_prompt=n_prompt,
+ control_image=control_image,
+ controlnet_conditioning_scale=0.5,
+ guidance_scale=5.0,
+ num_inference_steps=2,
+ output_type="np",
+ generator=generator,
+ )
+ image = output.images[0]
+
+ assert image.shape == (1024, 1024, 3)
+
+ original_image = image[-3:, -3:, -1].flatten()
+
+ expected_image = np.array(
+ [0.20947266, 0.1574707, 0.19897461, 0.15063477, 0.1418457, 0.17285156, 0.14160156, 0.13989258, 0.30810547]
+ )
+
+ assert np.abs(original_image.flatten() - expected_image).max() < 1e-2
+
+ def test_pose(self):
+ controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Pose", torch_dtype=torch.float16)
+ pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
+ "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
+ )
+ pipe.enable_model_cpu_offload()
+ pipe.set_progress_bar_config(disable=None)
+
+ generator = torch.Generator(device="cpu").manual_seed(0)
+ prompt = 'Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. text "InstantX" on image'
+ n_prompt = "NSFW, nude, naked, porn, ugly"
+ control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Pose/resolve/main/pose.jpg")
+
+ output = pipe(
+ prompt,
+ negative_prompt=n_prompt,
+ control_image=control_image,
+ controlnet_conditioning_scale=0.5,
+ guidance_scale=5.0,
+ num_inference_steps=2,
+ output_type="np",
+ generator=generator,
+ )
+ image = output.images[0]
+
+ assert image.shape == (1024, 1024, 3)
+
+ original_image = image[-3:, -3:, -1].flatten()
+
+ expected_image = np.array(
+ [0.8671875, 0.86621094, 0.91015625, 0.8491211, 0.87890625, 0.9140625, 0.8300781, 0.8334961, 0.8623047]
+ )
+
+ assert np.abs(original_image.flatten() - expected_image).max() < 1e-2
+
+ def test_tile(self):
+ controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Tile", torch_dtype=torch.float16)
+ pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
+ "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
+ )
+ pipe.enable_model_cpu_offload()
+ pipe.set_progress_bar_config(disable=None)
+
+ generator = torch.Generator(device="cpu").manual_seed(0)
+ prompt = 'Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. text "InstantX" on image'
+ n_prompt = "NSFW, nude, naked, porn, ugly"
+ control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Tile/resolve/main/tile.jpg")
+
+ output = pipe(
+ prompt,
+ negative_prompt=n_prompt,
+ control_image=control_image,
+ controlnet_conditioning_scale=0.5,
+ guidance_scale=5.0,
+ num_inference_steps=2,
+ output_type="np",
+ generator=generator,
+ )
+ image = output.images[0]
+
+ assert image.shape == (1024, 1024, 3)
+
+ original_image = image[-3:, -3:, -1].flatten()
+
+ expected_image = np.array(
+ [0.6982422, 0.7011719, 0.65771484, 0.6904297, 0.7416992, 0.6904297, 0.6977539, 0.7080078, 0.6386719]
+ )
+
+ assert np.abs(original_image.flatten() - expected_image).max() < 1e-2
+
+ def test_multi_controlnet(self):
+ controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
+ controlnet = SD3MultiControlNetModel([controlnet, controlnet])
+
+ pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
+ "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16
+ )
+ pipe.enable_model_cpu_offload()
+ pipe.set_progress_bar_config(disable=None)
+
+ generator = torch.Generator(device="cpu").manual_seed(0)
+ prompt = "Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. text 'InstantX' on image"
+ n_prompt = "NSFW, nude, naked, porn, ugly"
+ control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")
+
+ output = pipe(
+ prompt,
+ negative_prompt=n_prompt,
+ control_image=[control_image, control_image],
+ controlnet_conditioning_scale=[0.25, 0.25],
+ guidance_scale=5.0,
+ num_inference_steps=2,
+ output_type="np",
+ generator=generator,
+ )
+ image = output.images[0]
+
+ assert image.shape == (1024, 1024, 3)
+
+ original_image = image[-3:, -3:, -1].flatten()
+ expected_image = np.array(
+ [0.7451172, 0.7416992, 0.7158203, 0.7792969, 0.7607422, 0.7089844, 0.6855469, 0.71777344, 0.7314453]
+ )
+
+ assert np.abs(original_image.flatten() - expected_image).max() < 1e-2
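Stripped of the test scaffolding, the slow tests above correspond to roughly the following end-user usage of the new SD3 ControlNet pipeline. The model ids and control image URL are the ones used in the tests; the step count is illustrative rather than the 2 steps the tests use for speed.

```python
import torch

from diffusers import SD3ControlNetModel, StableDiffusion3ControlNetPipeline
from diffusers.utils import load_image

controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16)
pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    controlnet=controlnet,
    torch_dtype=torch.float16,
)
pipe.enable_model_cpu_offload()

control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg")

image = pipe(
    "Anime style illustration of a girl wearing a suit. A moon in sky.",
    negative_prompt="NSFW, nude, naked, porn, ugly",
    control_image=control_image,
    controlnet_conditioning_scale=0.5,
    guidance_scale=5.0,
    num_inference_steps=28,  # illustrative; the tests above use 2 for speed
    generator=torch.Generator(device="cpu").manual_seed(0),
).images[0]
image.save("sd3_controlnet_canny.png")
```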
diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
index 3e4477324f7b..f000a957e12f 100644
--- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
+++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
@@ -36,7 +36,6 @@
StableDiffusionInpaintPipeline,
UNet2DConditionModel,
)
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
@@ -1151,530 +1150,3 @@ def test_inpaint_dpm(self):
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
-
-
-class StableDiffusionInpaintingPrepareMaskAndMaskedImageTests(unittest.TestCase):
- def test_pil_inputs(self):
- height, width = 32, 32
- im = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8)
- im = Image.fromarray(im)
- mask = np.random.randint(0, 255, (height, width), dtype=np.uint8) > 127.5
- mask = Image.fromarray((mask * 255).astype(np.uint8))
-
- t_mask, t_masked, t_image = prepare_mask_and_masked_image(im, mask, height, width, return_image=True)
-
- self.assertTrue(isinstance(t_mask, torch.Tensor))
- self.assertTrue(isinstance(t_masked, torch.Tensor))
- self.assertTrue(isinstance(t_image, torch.Tensor))
-
- self.assertEqual(t_mask.ndim, 4)
- self.assertEqual(t_masked.ndim, 4)
- self.assertEqual(t_image.ndim, 4)
-
- self.assertEqual(t_mask.shape, (1, 1, height, width))
- self.assertEqual(t_masked.shape, (1, 3, height, width))
- self.assertEqual(t_image.shape, (1, 3, height, width))
-
- self.assertTrue(t_mask.dtype == torch.float32)
- self.assertTrue(t_masked.dtype == torch.float32)
- self.assertTrue(t_image.dtype == torch.float32)
-
- self.assertTrue(t_mask.min() >= 0.0)
- self.assertTrue(t_mask.max() <= 1.0)
- self.assertTrue(t_masked.min() >= -1.0)
- self.assertTrue(t_masked.min() <= 1.0)
- self.assertTrue(t_image.min() >= -1.0)
- self.assertTrue(t_image.min() >= -1.0)
-
- self.assertTrue(t_mask.sum() > 0.0)
-
- def test_np_inputs(self):
- height, width = 32, 32
-
- im_np = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8)
- im_pil = Image.fromarray(im_np)
- mask_np = (
- np.random.randint(
- 0,
- 255,
- (
- height,
- width,
- ),
- dtype=np.uint8,
- )
- > 127.5
- )
- mask_pil = Image.fromarray((mask_np * 255).astype(np.uint8))
-
- t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image(
- im_np, mask_np, height, width, return_image=True
- )
- t_mask_pil, t_masked_pil, t_image_pil = prepare_mask_and_masked_image(
- im_pil, mask_pil, height, width, return_image=True
- )
-
- self.assertTrue((t_mask_np == t_mask_pil).all())
- self.assertTrue((t_masked_np == t_masked_pil).all())
- self.assertTrue((t_image_np == t_image_pil).all())
-
- def test_torch_3D_2D_inputs(self):
- height, width = 32, 32
-
- im_tensor = torch.randint(
- 0,
- 255,
- (
- 3,
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- mask_tensor = (
- torch.randint(
- 0,
- 255,
- (
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- > 127.5
- )
- im_np = im_tensor.numpy().transpose(1, 2, 0)
- mask_np = mask_tensor.numpy()
-
- t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
- im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
- )
- t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image(
- im_np, mask_np, height, width, return_image=True
- )
-
- self.assertTrue((t_mask_tensor == t_mask_np).all())
- self.assertTrue((t_masked_tensor == t_masked_np).all())
- self.assertTrue((t_image_tensor == t_image_np).all())
-
- def test_torch_3D_3D_inputs(self):
- height, width = 32, 32
-
- im_tensor = torch.randint(
- 0,
- 255,
- (
- 3,
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- mask_tensor = (
- torch.randint(
- 0,
- 255,
- (
- 1,
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- > 127.5
- )
- im_np = im_tensor.numpy().transpose(1, 2, 0)
- mask_np = mask_tensor.numpy()[0]
-
- t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
- im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
- )
- t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image(
- im_np, mask_np, height, width, return_image=True
- )
-
- self.assertTrue((t_mask_tensor == t_mask_np).all())
- self.assertTrue((t_masked_tensor == t_masked_np).all())
- self.assertTrue((t_image_tensor == t_image_np).all())
-
- def test_torch_4D_2D_inputs(self):
- height, width = 32, 32
-
- im_tensor = torch.randint(
- 0,
- 255,
- (
- 1,
- 3,
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- mask_tensor = (
- torch.randint(
- 0,
- 255,
- (
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- > 127.5
- )
- im_np = im_tensor.numpy()[0].transpose(1, 2, 0)
- mask_np = mask_tensor.numpy()
-
- t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
- im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
- )
- t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image(
- im_np, mask_np, height, width, return_image=True
- )
-
- self.assertTrue((t_mask_tensor == t_mask_np).all())
- self.assertTrue((t_masked_tensor == t_masked_np).all())
- self.assertTrue((t_image_tensor == t_image_np).all())
-
- def test_torch_4D_3D_inputs(self):
- height, width = 32, 32
-
- im_tensor = torch.randint(
- 0,
- 255,
- (
- 1,
- 3,
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- mask_tensor = (
- torch.randint(
- 0,
- 255,
- (
- 1,
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- > 127.5
- )
- im_np = im_tensor.numpy()[0].transpose(1, 2, 0)
- mask_np = mask_tensor.numpy()[0]
-
- t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
- im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
- )
- t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image(
- im_np, mask_np, height, width, return_image=True
- )
-
- self.assertTrue((t_mask_tensor == t_mask_np).all())
- self.assertTrue((t_masked_tensor == t_masked_np).all())
- self.assertTrue((t_image_tensor == t_image_np).all())
-
- def test_torch_4D_4D_inputs(self):
- height, width = 32, 32
-
- im_tensor = torch.randint(
- 0,
- 255,
- (
- 1,
- 3,
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- mask_tensor = (
- torch.randint(
- 0,
- 255,
- (
- 1,
- 1,
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- > 127.5
- )
- im_np = im_tensor.numpy()[0].transpose(1, 2, 0)
- mask_np = mask_tensor.numpy()[0][0]
-
- t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
- im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
- )
- t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image(
- im_np, mask_np, height, width, return_image=True
- )
-
- self.assertTrue((t_mask_tensor == t_mask_np).all())
- self.assertTrue((t_masked_tensor == t_masked_np).all())
- self.assertTrue((t_image_tensor == t_image_np).all())
-
- def test_torch_batch_4D_3D(self):
- height, width = 32, 32
-
- im_tensor = torch.randint(
- 0,
- 255,
- (
- 2,
- 3,
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- mask_tensor = (
- torch.randint(
- 0,
- 255,
- (
- 2,
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- > 127.5
- )
-
- im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor]
- mask_nps = [mask.numpy() for mask in mask_tensor]
-
- t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
- im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
- )
- nps = [prepare_mask_and_masked_image(i, m, height, width, return_image=True) for i, m in zip(im_nps, mask_nps)]
- t_mask_np = torch.cat([n[0] for n in nps])
- t_masked_np = torch.cat([n[1] for n in nps])
- t_image_np = torch.cat([n[2] for n in nps])
-
- self.assertTrue((t_mask_tensor == t_mask_np).all())
- self.assertTrue((t_masked_tensor == t_masked_np).all())
- self.assertTrue((t_image_tensor == t_image_np).all())
-
- def test_torch_batch_4D_4D(self):
- height, width = 32, 32
-
- im_tensor = torch.randint(
- 0,
- 255,
- (
- 2,
- 3,
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- mask_tensor = (
- torch.randint(
- 0,
- 255,
- (
- 2,
- 1,
- height,
- width,
- ),
- dtype=torch.uint8,
- )
- > 127.5
- )
-
- im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor]
- mask_nps = [mask.numpy()[0] for mask in mask_tensor]
-
- t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
- im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
- )
- nps = [prepare_mask_and_masked_image(i, m, height, width, return_image=True) for i, m in zip(im_nps, mask_nps)]
- t_mask_np = torch.cat([n[0] for n in nps])
- t_masked_np = torch.cat([n[1] for n in nps])
- t_image_np = torch.cat([n[2] for n in nps])
-
- self.assertTrue((t_mask_tensor == t_mask_np).all())
- self.assertTrue((t_masked_tensor == t_masked_np).all())
- self.assertTrue((t_image_tensor == t_image_np).all())
-
- def test_shape_mismatch(self):
- height, width = 32, 32
-
- # test height and width
- with self.assertRaises(AssertionError):
- prepare_mask_and_masked_image(
- torch.randn(
- 3,
- height,
- width,
- ),
- torch.randn(64, 64),
- height,
- width,
- return_image=True,
- )
- # test batch dim
- with self.assertRaises(AssertionError):
- prepare_mask_and_masked_image(
- torch.randn(
- 2,
- 3,
- height,
- width,
- ),
- torch.randn(4, 64, 64),
- height,
- width,
- return_image=True,
- )
- # test batch dim
- with self.assertRaises(AssertionError):
- prepare_mask_and_masked_image(
- torch.randn(
- 2,
- 3,
- height,
- width,
- ),
- torch.randn(4, 1, 64, 64),
- height,
- width,
- return_image=True,
- )
-
- def test_type_mismatch(self):
- height, width = 32, 32
-
- # test tensors-only
- with self.assertRaises(TypeError):
- prepare_mask_and_masked_image(
- torch.rand(
- 3,
- height,
- width,
- ),
- torch.rand(
- 3,
- height,
- width,
- ).numpy(),
- height,
- width,
- return_image=True,
- )
- # test tensors-only
- with self.assertRaises(TypeError):
- prepare_mask_and_masked_image(
- torch.rand(
- 3,
- height,
- width,
- ).numpy(),
- torch.rand(
- 3,
- height,
- width,
- ),
- height,
- width,
- return_image=True,
- )
-
- def test_channels_first(self):
- height, width = 32, 32
-
- # test channels first for 3D tensors
- with self.assertRaises(AssertionError):
- prepare_mask_and_masked_image(
- torch.rand(height, width, 3),
- torch.rand(
- 3,
- height,
- width,
- ),
- height,
- width,
- return_image=True,
- )
-
- def test_tensor_range(self):
- height, width = 32, 32
-
- # test im <= 1
- with self.assertRaises(ValueError):
- prepare_mask_and_masked_image(
- torch.ones(
- 3,
- height,
- width,
- )
- * 2,
- torch.rand(
- height,
- width,
- ),
- height,
- width,
- return_image=True,
- )
- # test im >= -1
- with self.assertRaises(ValueError):
- prepare_mask_and_masked_image(
- torch.ones(
- 3,
- height,
- width,
- )
- * (-2),
- torch.rand(
- height,
- width,
- ),
- height,
- width,
- return_image=True,
- )
- # test mask <= 1
- with self.assertRaises(ValueError):
- prepare_mask_and_masked_image(
- torch.rand(
- 3,
- height,
- width,
- ),
- torch.ones(
- height,
- width,
- )
- * 2,
- height,
- width,
- return_image=True,
- )
- # test mask >= 0
- with self.assertRaises(ValueError):
- prepare_mask_and_masked_image(
- torch.rand(
- 3,
- height,
- width,
- ),
- torch.ones(
- height,
- width,
- )
- * -1,
- height,
- width,
- return_image=True,
- )