diffusers-benchmarking-bot committed (verified) · Commit 279ba5a · 1 Parent(s): 899f5a8

Upload folder using huggingface_hub
main/pipeline_controlnet_xl_kolors.py ADDED
@@ -0,0 +1,1355 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import inspect
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+ import torch
22
+ import torch.nn.functional as F
23
+ from transformers import (
24
+ CLIPImageProcessor,
25
+ CLIPVisionModelWithProjection,
26
+ )
27
+
28
+ from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
29
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
30
+ from diffusers.loaders import (
31
+ FromSingleFileMixin,
32
+ IPAdapterMixin,
33
+ StableDiffusionXLLoraLoaderMixin,
34
+ TextualInversionLoaderMixin,
35
+ )
36
+ from diffusers.models import (
37
+ AutoencoderKL,
38
+ ControlNetModel,
39
+ ImageProjection,
40
+ MultiControlNetModel,
41
+ UNet2DConditionModel,
42
+ )
43
+ from diffusers.models.attention_processor import (
44
+ AttnProcessor2_0,
45
+ XFormersAttnProcessor,
46
+ )
47
+ from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
48
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
49
+ from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
50
+ from diffusers.schedulers import KarrasDiffusionSchedulers
51
+ from diffusers.utils import (
52
+ deprecate,
53
+ is_invisible_watermark_available,
54
+ logging,
55
+ replace_example_docstring,
56
+ )
57
+ from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
58
+
59
+
60
+ if is_invisible_watermark_available():
61
+ from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
62
+
63
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
64
+
65
+
66
+ EXAMPLE_DOC_STRING = """
67
+ Examples:
68
+ ```py
69
+ >>> import torch
70
+ >>> from diffusers import KolorsControlNetPipeline, ControlNetModel
71
+ >>> from diffusers.utils import load_image
72
+ >>> import numpy as np
73
+ >>> import cv2
74
+ >>> from PIL import Image
75
+
76
+ >>> prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
77
+ >>> negative_prompt = "low quality, bad quality, sketches"
78
+
79
+ >>> # download an image
80
+ >>> image = load_image(
81
+ ... "https://hf.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png"
82
+ ... )
83
+
84
+ >>> # initialize the models and pipeline
85
+ >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization
86
+ >>> controlnet = ControlNetModel.from_pretrained(
87
+ ... "Kwai-Kolors/Kolors-ControlNet-Canny",
88
+ ... use_safetensors=True,
89
+ ... torch_dtype=torch.float16
90
+ ... )
91
+
92
+ >>> pipe = KolorsControlNetPipeline.from_pretrained(
93
+ ... "Kwai-Kolors/Kolors-diffusers",
94
+ ... controlnet=controlnet,
95
+ ... variant="fp16",
96
+ ... use_safetensors=True,
97
+ ... torch_dtype=torch.float16
98
+ ... )
99
+ >>> pipe.enable_model_cpu_offload()
100
+
101
+ >>> # get canny image
102
+ >>> image = np.array(image)
103
+ >>> image = cv2.Canny(image, 100, 200)
104
+ >>> image = image[:, :, None]
105
+ >>> image = np.concatenate([image, image, image], axis=2)
106
+ >>> canny_image = Image.fromarray(image)
107
+
108
+ >>> # generate image
109
+ >>> image = pipe(
110
+ ... prompt, controlnet_conditioning_scale=controlnet_conditioning_scale, image=canny_image
111
+ ... ).images[0]
112
+ ```
113
+ """
114
+
115
+
116
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
117
+ def retrieve_latents(
118
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
119
+ ):
120
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
121
+ return encoder_output.latent_dist.sample(generator)
122
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
123
+ return encoder_output.latent_dist.mode()
124
+ elif hasattr(encoder_output, "latents"):
125
+ return encoder_output.latents
126
+ else:
127
+ raise AttributeError("Could not access latents of provided encoder_output")
128
+
129
+
130
+ class KolorsControlNetPipeline(
131
+ DiffusionPipeline,
132
+ StableDiffusionMixin,
133
+ StableDiffusionXLLoraLoaderMixin,
134
+ FromSingleFileMixin,
135
+ IPAdapterMixin,
136
+ ):
137
+ r"""
138
+ Pipeline for text-to-image generation using Kolors with ControlNet guidance.
139
+
140
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
141
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
142
+
143
+ The pipeline also inherits the following loading methods:
144
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.safetensors` files
145
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
146
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
147
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
148
+
149
+ Args:
150
+ vae ([`AutoencoderKL`]):
151
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
152
+ text_encoder ([`ChatGLMModel`]):
153
+ Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b).
154
+ tokenizer (`ChatGLMTokenizer`):
155
+ Tokenizer of class
156
+ [ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py).
157
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
158
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
159
+ Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
160
+ as a list, the outputs from each ControlNet are added together to create one combined additional
161
+ conditioning.
162
+ scheduler ([`SchedulerMixin`]):
163
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
164
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
165
+ requires_aesthetics_score (`bool`, *optional*, defaults to `False`):
166
+ Whether the `unet` requires an `aesthetic_score` condition to be passed during inference.
167
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `True`):
168
+ Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
169
+ `Kwai-Kolors/Kolors-diffusers`.
170
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
171
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
172
+ """
173
+
174
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
175
+
176
+ _optional_components = [
177
+ "tokenizer",
178
+ "text_encoder",
179
+ "feature_extractor",
180
+ "image_encoder",
181
+ ]
182
+ _callback_tensor_inputs = [
183
+ "latents",
184
+ "prompt_embeds",
185
+ "negative_prompt_embeds",
186
+ "add_text_embeds",
187
+ "add_time_ids",
188
+ "negative_pooled_prompt_embeds",
189
+ "negative_add_time_ids",
190
+ "image",
191
+ ]
192
+
193
+ def __init__(
194
+ self,
195
+ vae: AutoencoderKL,
196
+ text_encoder: ChatGLMModel,
197
+ tokenizer: ChatGLMTokenizer,
198
+ unet: UNet2DConditionModel,
199
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
200
+ scheduler: KarrasDiffusionSchedulers,
201
+ requires_aesthetics_score: bool = False,
202
+ force_zeros_for_empty_prompt: bool = True,
203
+ feature_extractor: CLIPImageProcessor = None,
204
+ image_encoder: CLIPVisionModelWithProjection = None,
205
+ add_watermarker: Optional[bool] = None,
206
+ ):
207
+ super().__init__()
208
+
209
+ if isinstance(controlnet, (list, tuple)):
210
+ controlnet = MultiControlNetModel(controlnet)
211
+
212
+ self.register_modules(
213
+ vae=vae,
214
+ text_encoder=text_encoder,
215
+ tokenizer=tokenizer,
216
+ unet=unet,
217
+ controlnet=controlnet,
218
+ scheduler=scheduler,
219
+ feature_extractor=feature_extractor,
220
+ image_encoder=image_encoder,
221
+ )
222
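+ # with the default 4-level VAE (len(block_out_channels) == 4) this yields a scale factor of 8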
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
223
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
224
+ self.control_image_processor = VaeImageProcessor(
225
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
226
+ )
227
+
228
+ if add_watermarker:
229
+ self.watermark = StableDiffusionXLWatermarker()
230
+ else:
231
+ self.watermark = None
232
+
233
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
234
+ self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
235
+
236
+ def encode_prompt(
237
+ self,
238
+ prompt,
239
+ device: Optional[torch.device] = None,
240
+ num_images_per_prompt: int = 1,
241
+ do_classifier_free_guidance: bool = True,
242
+ negative_prompt=None,
243
+ prompt_embeds: Optional[torch.FloatTensor] = None,
244
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
245
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
246
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
247
+ lora_scale: Optional[float] = None,
248
+ ):
249
+ r"""
250
+ Encodes the prompt into text encoder hidden states.
251
+
252
+ Args:
253
+ prompt (`str` or `List[str]`, *optional*):
254
+ prompt to be encoded
255
+ device: (`torch.device`):
256
+ torch device
257
+ num_images_per_prompt (`int`):
258
+ number of images that should be generated per prompt
259
+ do_classifier_free_guidance (`bool`):
260
+ whether to use classifier free guidance or not
261
+ negative_prompt (`str` or `List[str]`, *optional*):
262
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
263
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
264
+ less than `1`).
265
+ prompt_embeds (`torch.FloatTensor`, *optional*):
266
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
267
+ provided, text embeddings will be generated from `prompt` input argument.
268
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
269
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
270
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
271
+ argument.
272
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
273
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
274
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
275
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
276
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
277
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
278
+ input argument.
279
+ lora_scale (`float`, *optional*):
280
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
281
+ """
282
+ device = device or self._execution_device
283
+
284
+ # set lora scale so that monkey patched LoRA
285
+ # function of text encoder can correctly access it
286
+ if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
287
+ self._lora_scale = lora_scale
288
+
289
+ if prompt is not None and isinstance(prompt, str):
290
+ batch_size = 1
291
+ elif prompt is not None and isinstance(prompt, list):
292
+ batch_size = len(prompt)
293
+ else:
294
+ batch_size = prompt_embeds.shape[0]
295
+
296
+ # Define tokenizers and text encoders
297
+ tokenizers = [self.tokenizer]
298
+ text_encoders = [self.text_encoder]
299
+
300
+ if prompt_embeds is None:
301
+ # textual inversion: process multi-vector tokens if necessary
302
+ prompt_embeds_list = []
303
+ for tokenizer, text_encoder in zip(tokenizers, text_encoders):
304
+ if isinstance(self, TextualInversionLoaderMixin):
305
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
306
+
307
+ text_inputs = tokenizer(
308
+ prompt,
309
+ padding="max_length",
310
+ max_length=256,
311
+ truncation=True,
312
+ return_tensors="pt",
313
+ ).to(self._execution_device)
314
+ output = text_encoder(
315
+ input_ids=text_inputs["input_ids"],
316
+ attention_mask=text_inputs["attention_mask"],
317
+ position_ids=text_inputs["position_ids"],
318
+ output_hidden_states=True,
319
+ )
320
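+ # ChatGLM3 returns hidden states as [seq_len, batch, dim]; permute to [batch, seq_len, dim] and take the last token of the final hidden state as the pooled embedding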
+ prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
321
+ pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096]
322
+ bs_embed, seq_len, _ = prompt_embeds.shape
323
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
324
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
325
+
326
+ prompt_embeds_list.append(prompt_embeds)
327
+
328
+ prompt_embeds = prompt_embeds_list[0]
329
+
330
+ # get unconditional embeddings for classifier free guidance
331
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
332
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
333
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
334
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
335
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
336
+ uncond_tokens: List[str]
337
+ if negative_prompt is None:
338
+ uncond_tokens = [""] * batch_size
339
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
340
+ raise TypeError(
341
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
342
+ f" {type(prompt)}."
343
+ )
344
+ elif isinstance(negative_prompt, str):
345
+ uncond_tokens = [negative_prompt]
346
+ elif batch_size != len(negative_prompt):
347
+ raise ValueError(
348
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
349
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
350
+ " the batch size of `prompt`."
351
+ )
352
+ else:
353
+ uncond_tokens = negative_prompt
354
+
355
+ negative_prompt_embeds_list = []
356
+ for tokenizer, text_encoder in zip(tokenizers, text_encoders):
357
+ # textual inversion: process multi-vector tokens if necessary
358
+ if isinstance(self, TextualInversionLoaderMixin):
359
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, tokenizer)
360
+
361
+ max_length = prompt_embeds.shape[1]
362
+ uncond_input = tokenizer(
363
+ uncond_tokens,
364
+ padding="max_length",
365
+ max_length=max_length,
366
+ truncation=True,
367
+ return_tensors="pt",
368
+ ).to(self._execution_device)
369
+ output = text_encoder(
370
+ input_ids=uncond_input["input_ids"],
371
+ attention_mask=uncond_input["attention_mask"],
372
+ position_ids=uncond_input["position_ids"],
373
+ output_hidden_states=True,
374
+ )
375
+ negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
376
+ negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096]
377
+
378
+ if do_classifier_free_guidance:
379
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
380
+ seq_len = negative_prompt_embeds.shape[1]
381
+
382
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)
383
+
384
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
385
+ negative_prompt_embeds = negative_prompt_embeds.view(
386
+ batch_size * num_images_per_prompt, seq_len, -1
387
+ )
388
+
389
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
390
+
391
+ negative_prompt_embeds = negative_prompt_embeds_list[0]
392
+
393
+ bs_embed = pooled_prompt_embeds.shape[0]
394
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
395
+ bs_embed * num_images_per_prompt, -1
396
+ )
397
+ if do_classifier_free_guidance:
398
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
399
+ bs_embed * num_images_per_prompt, -1
400
+ )
401
+
402
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
403
+
404
+ def prepare_ip_adapter_image_embeds(
405
+ self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
406
+ ):
407
+ image_embeds = []
408
+ if do_classifier_free_guidance:
409
+ negative_image_embeds = []
410
+ if ip_adapter_image_embeds is None:
411
+ if not isinstance(ip_adapter_image, list):
412
+ ip_adapter_image = [ip_adapter_image]
413
+
414
+ if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
415
+ raise ValueError(
416
+ f"`ip_adapter_image` must have same length as the number of IP Adapters. "
417
+ f"Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
418
+ )
419
+
420
+ for single_ip_adapter_image, image_proj_layer in zip(
421
+ ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
422
+ ):
423
+ output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
424
+ single_image_embeds, single_negative_image_embeds = self.encode_image(
425
+ single_ip_adapter_image, device, 1, output_hidden_state
426
+ )
427
+
428
+ image_embeds.append(single_image_embeds[None, :])
429
+ if do_classifier_free_guidance:
430
+ negative_image_embeds.append(single_negative_image_embeds[None, :])
431
+ else:
432
+ for single_image_embeds in ip_adapter_image_embeds:
433
+ if do_classifier_free_guidance:
434
+ single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
+ negative_image_embeds.append(single_negative_image_embeds)
+ image_embeds.append(single_image_embeds)
+
+ # repeat the embeddings per prompt, prepend the negative embeddings for CFG, and move to device
+ ip_adapter_image_embeds = []
+ for i, single_image_embeds in enumerate(image_embeds):
+ single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
+ if do_classifier_free_guidance:
+ single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)
+ single_image_embeds = single_image_embeds.to(device=device)
+ ip_adapter_image_embeds.append(single_image_embeds)
+
+ return ip_adapter_image_embeds
+
436
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
437
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
438
+ dtype = next(self.image_encoder.parameters()).dtype
439
+
440
+ if not isinstance(image, torch.Tensor):
441
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
442
+
443
+ image = image.to(device=device, dtype=dtype)
444
+ if output_hidden_states:
445
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
446
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
447
+ uncond_image_enc_hidden_states = self.image_encoder(
448
+ torch.zeros_like(image), output_hidden_states=True
449
+ ).hidden_states[-2]
450
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
451
+ num_images_per_prompt, dim=0
452
+ )
453
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
454
+ else:
455
+ image_embeds = self.image_encoder(image).image_embeds
456
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
457
+ uncond_image_embeds = torch.zeros_like(image_embeds)
458
+
459
+ return image_embeds, uncond_image_embeds
460
+
461
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
462
+ def prepare_extra_step_kwargs(self, generator, eta):
463
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
464
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
465
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
466
+ # and should be between [0, 1]
467
+
468
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
469
+ extra_step_kwargs = {}
470
+ if accepts_eta:
471
+ extra_step_kwargs["eta"] = eta
472
+
473
+ # check if the scheduler accepts generator
474
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
475
+ if accepts_generator:
476
+ extra_step_kwargs["generator"] = generator
477
+ return extra_step_kwargs
478
+
479
+ def check_inputs(
480
+ self,
481
+ prompt,
482
+ image,
483
+ num_inference_steps,
484
+ callback_steps,
485
+ negative_prompt=None,
486
+ prompt_embeds=None,
487
+ negative_prompt_embeds=None,
488
+ pooled_prompt_embeds=None,
489
+ negative_pooled_prompt_embeds=None,
490
+ ip_adapter_image=None,
491
+ ip_adapter_image_embeds=None,
492
+ controlnet_conditioning_scale=1.0,
493
+ control_guidance_start=0.0,
494
+ control_guidance_end=1.0,
495
+ callback_on_step_end_tensor_inputs=None,
496
+ ):
497
+ if num_inference_steps is None:
498
+ raise ValueError("`num_inference_steps` cannot be None.")
499
+ elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
500
+ raise ValueError(
501
+ f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
502
+ f" {type(num_inference_steps)}."
503
+ )
504
+
505
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
506
+ raise ValueError(
507
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
508
+ f" {type(callback_steps)}."
509
+ )
510
+
511
+ if callback_on_step_end_tensor_inputs is not None and not all(
512
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
513
+ ):
514
+ raise ValueError(
515
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
516
+ )
517
+
518
+ if prompt is not None and prompt_embeds is not None:
519
+ raise ValueError(
520
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
521
+ " only forward one of the two."
522
+ )
523
+ elif prompt is None and prompt_embeds is None:
524
+ raise ValueError(
525
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
526
+ )
527
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
528
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
529
+
530
+ if negative_prompt is not None and negative_prompt_embeds is not None:
531
+ raise ValueError(
532
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
533
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
534
+ )
535
+
536
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
537
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
538
+ raise ValueError(
539
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
540
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
541
+ f" {negative_prompt_embeds.shape}."
542
+ )
543
+
544
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
545
+ raise ValueError(
546
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
547
+ )
548
+
549
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
550
+ raise ValueError(
551
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
552
+ )
553
+
554
+ # `prompt` needs more sophisticated handling when there are multiple
555
+ # conditionings.
556
+ if isinstance(self.controlnet, MultiControlNetModel):
557
+ if isinstance(prompt, list):
558
+ logger.warning(
559
+ f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
560
+ " prompts. The conditionings will be fixed across the prompts."
561
+ )
562
+
563
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
564
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
565
+ )
566
+
567
+ # Check `controlnet_conditioning_scale`
568
+ if (
569
+ isinstance(self.controlnet, ControlNetModel)
570
+ or is_compiled
571
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
572
+ ):
573
+ if not isinstance(controlnet_conditioning_scale, float):
574
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
575
+ elif (
576
+ isinstance(self.controlnet, MultiControlNetModel)
577
+ or is_compiled
578
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
579
+ ):
580
+ if isinstance(controlnet_conditioning_scale, list):
581
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
582
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
583
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
584
+ self.controlnet.nets
585
+ ):
586
+ raise ValueError(
587
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
588
+ " the same length as the number of controlnets"
589
+ )
590
+ else:
591
+ assert False
592
+
593
+ if not isinstance(control_guidance_start, (tuple, list)):
594
+ control_guidance_start = [control_guidance_start]
595
+
596
+ if not isinstance(control_guidance_end, (tuple, list)):
597
+ control_guidance_end = [control_guidance_end]
598
+
599
+ if len(control_guidance_start) != len(control_guidance_end):
600
+ raise ValueError(
601
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
602
+ )
603
+
604
+ if isinstance(self.controlnet, MultiControlNetModel):
605
+ if len(control_guidance_start) != len(self.controlnet.nets):
606
+ raise ValueError(
607
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
608
+ )
609
+
610
+ for start, end in zip(control_guidance_start, control_guidance_end):
611
+ if start >= end:
612
+ raise ValueError(
613
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
614
+ )
615
+ if start < 0.0:
616
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
617
+ if end > 1.0:
618
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
619
+
620
+ if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
621
+ raise ValueError(
622
+ "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
623
+ )
624
+
625
+ if ip_adapter_image_embeds is not None:
626
+ if not isinstance(ip_adapter_image_embeds, list):
627
+ raise ValueError(
628
+ f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
629
+ )
630
+ elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
631
+ raise ValueError(
632
+ f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
633
+ )
634
+
635
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image
636
+ def check_image(self, image, prompt, prompt_embeds):
637
+ image_is_pil = isinstance(image, PIL.Image.Image)
638
+ image_is_tensor = isinstance(image, torch.Tensor)
639
+ image_is_np = isinstance(image, np.ndarray)
640
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
641
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
642
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
643
+
644
+ if (
645
+ not image_is_pil
646
+ and not image_is_tensor
647
+ and not image_is_np
648
+ and not image_is_pil_list
649
+ and not image_is_tensor_list
650
+ and not image_is_np_list
651
+ ):
652
+ raise TypeError(
653
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
654
+ )
655
+
656
+ if image_is_pil:
657
+ image_batch_size = 1
658
+ else:
659
+ image_batch_size = len(image)
660
+
661
+ if prompt is not None and isinstance(prompt, str):
662
+ prompt_batch_size = 1
663
+ elif prompt is not None and isinstance(prompt, list):
664
+ prompt_batch_size = len(prompt)
665
+ elif prompt_embeds is not None:
666
+ prompt_batch_size = prompt_embeds.shape[0]
667
+
668
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
669
+ raise ValueError(
670
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
671
+ )
672
+
673
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image
674
+ def prepare_control_image(
675
+ self,
676
+ image,
677
+ width,
678
+ height,
679
+ batch_size,
680
+ num_images_per_prompt,
681
+ device,
682
+ dtype,
683
+ do_classifier_free_guidance=False,
684
+ guess_mode=False,
685
+ ):
686
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
687
+ image_batch_size = image.shape[0]
688
+
689
+ if image_batch_size == 1:
690
+ repeat_by = batch_size
691
+ else:
692
+ # image batch size is the same as prompt batch size
693
+ repeat_by = num_images_per_prompt
694
+
695
+ image = image.repeat_interleave(repeat_by, dim=0)
696
+
697
+ image = image.to(device=device, dtype=dtype)
698
+
699
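+ # duplicate the control image for the unconditional CFG branch (skipped in guess mode, where ControlNet only sees the conditional batch)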
+ if do_classifier_free_guidance and not guess_mode:
700
+ image = torch.cat([image] * 2)
701
+
702
+ return image
703
+
704
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
705
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
706
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
707
+ if isinstance(generator, list) and len(generator) != batch_size:
708
+ raise ValueError(
709
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
710
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
711
+ )
712
+
713
+ if latents is None:
714
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
715
+ else:
716
+ latents = latents.to(device)
717
+
718
+ # scale the initial noise by the standard deviation required by the scheduler
719
+ latents = latents * self.scheduler.init_noise_sigma
720
+ return latents
721
+
722
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
723
+ def prepare_latents_t2i(
724
+ self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None
725
+ ):
726
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
727
+ if isinstance(generator, list) and len(generator) != batch_size:
728
+ raise ValueError(
729
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
730
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
731
+ )
732
+
733
+ if latents is None:
734
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
735
+ else:
736
+ latents = latents.to(device)
737
+
738
+ # scale the initial noise by the standard deviation required by the scheduler
739
+ latents = latents * self.scheduler.init_noise_sigma
740
+ return latents
741
+
742
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids
743
+ def _get_add_time_ids(
744
+ self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
745
+ ):
746
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
747
+
748
+ passed_add_embed_dim = (
749
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
750
+ )
751
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
752
+
753
+ if expected_add_embed_dim != passed_add_embed_dim:
754
+ raise ValueError(
755
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
756
+ )
757
+
758
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
759
+ return add_time_ids
760
+
761
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
762
+ def upcast_vae(self):
763
+ dtype = self.vae.dtype
764
+ self.vae.to(dtype=torch.float32)
765
+ use_torch_2_0_or_xformers = isinstance(
766
+ self.vae.decoder.mid_block.attentions[0].processor,
767
+ (
768
+ AttnProcessor2_0,
769
+ XFormersAttnProcessor,
770
+ ),
771
+ )
772
+ # if xformers or torch_2_0 is used attention block does not need
773
+ # to be in float32 which can save lots of memory
774
+ if use_torch_2_0_or_xformers:
775
+ self.vae.post_quant_conv.to(dtype)
776
+ self.vae.decoder.conv_in.to(dtype)
777
+ self.vae.decoder.mid_block.to(dtype)
778
+
779
+ @property
780
+ def guidance_scale(self):
781
+ return self._guidance_scale
782
+
783
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
784
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
785
+ # corresponds to doing no classifier free guidance.
786
+ @property
787
+ def do_classifier_free_guidance(self):
788
+ return self._guidance_scale > 1
789
+
790
+ @property
791
+ def cross_attention_kwargs(self):
792
+ return self._cross_attention_kwargs
793
+
794
+ @property
795
+ def num_timesteps(self):
796
+ return self._num_timesteps
797
+
798
+ @torch.no_grad()
799
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
800
+ def __call__(
801
+ self,
802
+ prompt: Union[str, List[str]] = None,
803
+ image: PipelineImageInput = None,
804
+ height: Optional[int] = None,
805
+ width: Optional[int] = None,
806
+ num_inference_steps: int = 50,
807
+ guidance_scale: float = 5.0,
808
+ negative_prompt: Optional[Union[str, List[str]]] = None,
809
+ num_images_per_prompt: Optional[int] = 1,
810
+ eta: float = 0.0,
811
+ guess_mode: bool = False,
812
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
813
+ latents: Optional[torch.Tensor] = None,
814
+ prompt_embeds: Optional[torch.Tensor] = None,
815
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
816
+ pooled_prompt_embeds: Optional[torch.Tensor] = None,
817
+ negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
818
+ ip_adapter_image: Optional[PipelineImageInput] = None,
819
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
820
+ output_type: Optional[str] = "pil",
821
+ return_dict: bool = True,
822
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
823
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
824
+ control_guidance_start: Union[float, List[float]] = 0.0,
825
+ control_guidance_end: Union[float, List[float]] = 1.0,
826
+ original_size: Tuple[int, int] = None,
827
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
828
+ target_size: Tuple[int, int] = None,
829
+ negative_original_size: Optional[Tuple[int, int]] = None,
830
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
831
+ negative_target_size: Optional[Tuple[int, int]] = None,
832
+ callback_on_step_end: Optional[
833
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
834
+ ] = None,
835
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
836
+ **kwargs,
837
+ ):
838
+ r"""
839
+ Function invoked when calling the pipeline for generation.
840
+
841
+ Args:
842
+ prompt (`str` or `List[str]`, *optional*):
843
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
844
+ instead.
845
+ image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
846
+ `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
847
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
848
+ the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also
849
+ be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
850
+ and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in
851
+ init, images must be passed as a list such that each element of the list can be correctly batched for
852
+ input to a single controlnet.
853
+ height (`int`, *optional*, defaults to the size of image):
854
+ The height in pixels of the generated image. Anything below 512 pixels won't work well for
855
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
856
+ and checkpoints that are not specifically fine-tuned on low resolutions.
857
+ width (`int`, *optional*, defaults to the size of image):
858
+ The width in pixels of the generated image. Anything below 512 pixels won't work well for
859
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
860
+ and checkpoints that are not specifically fine-tuned on low resolutions.
867
+ num_inference_steps (`int`, *optional*, defaults to 50):
868
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
869
+ expense of slower inference.
870
+ guidance_scale (`float`, *optional*, defaults to 5.0):
871
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
872
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
873
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
874
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
875
+ usually at the expense of lower image quality.
876
+ negative_prompt (`str` or `List[str]`, *optional*):
877
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
878
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
879
+ less than `1`).
880
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
881
+ The number of images to generate per prompt.
882
+ eta (`float`, *optional*, defaults to 0.0):
883
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
884
+ [`schedulers.DDIMScheduler`], will be ignored for others.
885
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
886
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
887
+ to make generation deterministic.
888
+ latents (`torch.Tensor`, *optional*):
889
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
890
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
891
+ tensor will be generated by sampling using the supplied random `generator`.
892
+ prompt_embeds (`torch.Tensor`, *optional*):
893
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
894
+ provided, text embeddings will be generated from `prompt` input argument.
895
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
896
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
897
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
898
+ argument.
899
+ pooled_prompt_embeds (`torch.Tensor`, *optional*):
900
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
901
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
902
+ negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
903
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
904
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
905
+ input argument.
906
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
907
+ ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
908
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
909
+ IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
910
+ contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
911
+ provided, embeddings are computed from the `ip_adapter_image` input argument.
912
+ output_type (`str`, *optional*, defaults to `"pil"`):
913
+ The output format of the generated image. Choose between
914
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
915
+ return_dict (`bool`, *optional*, defaults to `True`):
916
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a
917
+ plain tuple.
918
+ cross_attention_kwargs (`dict`, *optional*):
919
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
920
+ `self.processor` in
921
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
922
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
923
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
924
+ to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
925
+ corresponding scale as a list.
926
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
927
+ The percentage of total steps at which the controlnet starts applying.
928
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
929
+ The percentage of total steps at which the controlnet stops applying.
930
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
931
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
932
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
933
+ explained in section 2.2 of
934
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
935
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
936
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
937
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
938
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
939
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
940
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
941
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
942
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
943
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
944
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
945
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
946
+ micro-conditioning as explained in section 2.2 of
947
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
948
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
949
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
950
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
951
+ micro-conditioning as explained in section 2.2 of
952
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
953
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
954
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
955
+ To negatively condition the generation process based on a target image resolution. It should be the same
956
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
957
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
958
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
959
+ callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
960
+ A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
961
+ each denoising step during inference, with the following arguments: `callback_on_step_end(self:
962
+ DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
963
+ list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
964
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
965
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
966
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
967
+ `._callback_tensor_inputs` attribute of your pipeline class.
968
+
969
+ Examples:
970
+
971
+ Returns:
972
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
973
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple`
974
+ containing the output images.
975
+ """
976
+
977
+ callback = kwargs.pop("callback", None)
978
+ callback_steps = kwargs.pop("callback_steps", None)
979
+
980
+ if callback is not None:
981
+ deprecate(
982
+ "callback",
983
+ "1.0.0",
984
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
985
+ )
986
+ if callback_steps is not None:
987
+ deprecate(
988
+ "callback_steps",
989
+ "1.0.0",
990
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
991
+ )
992
+
993
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
994
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
995
+
996
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
997
+
998
+ # align format for control guidance
999
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1000
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1001
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1002
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1003
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1004
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1005
+ control_guidance_start, control_guidance_end = (
1006
+ mult * [control_guidance_start],
1007
+ mult * [control_guidance_end],
1008
+ )
1009
+
1011
+ # 1. Check inputs. Raise error if not correct
1012
+ self.check_inputs(
1013
+ prompt,
1014
+ image,
1015
+ num_inference_steps,
1016
+ callback_steps,
1017
+ negative_prompt,
1018
+ prompt_embeds,
1019
+ negative_prompt_embeds,
1020
+ pooled_prompt_embeds,
1021
+ negative_pooled_prompt_embeds,
1022
+ ip_adapter_image,
1023
+ ip_adapter_image_embeds,
1024
+ controlnet_conditioning_scale,
1025
+ control_guidance_start,
1026
+ control_guidance_end,
1027
+ callback_on_step_end_tensor_inputs,
1028
+ )
1029
+
1030
+ self._guidance_scale = guidance_scale
1031
+ self._cross_attention_kwargs = cross_attention_kwargs
1032
+
1033
+ # 2. Define call parameters
1034
+ if prompt is not None and isinstance(prompt, str):
1035
+ batch_size = 1
1036
+ elif prompt is not None and isinstance(prompt, list):
1037
+ batch_size = len(prompt)
1038
+ else:
1039
+ batch_size = prompt_embeds.shape[0]
1040
+
1041
+ device = self._execution_device
1042
+
1043
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1044
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1045
+
1046
+ # 3.1. Encode input prompt
1047
+ text_encoder_lora_scale = (
1048
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1049
+ )
1050
+ (
1051
+ prompt_embeds,
1052
+ negative_prompt_embeds,
1053
+ pooled_prompt_embeds,
1054
+ negative_pooled_prompt_embeds,
1055
+ ) = self.encode_prompt(
1056
+ prompt,
1057
+ device,
1058
+ num_images_per_prompt,
1059
+ self.do_classifier_free_guidance,
1060
+ negative_prompt,
1061
+ prompt_embeds=prompt_embeds,
1062
+ negative_prompt_embeds=negative_prompt_embeds,
1063
+ pooled_prompt_embeds=pooled_prompt_embeds,
1064
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1065
+ lora_scale=text_encoder_lora_scale,
1066
+ )
1067
+
1068
+ # 3.2 Encode ip_adapter_image
1069
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1070
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1071
+ ip_adapter_image,
1072
+ ip_adapter_image_embeds,
1073
+ device,
1074
+ batch_size * num_images_per_prompt,
1075
+ self.do_classifier_free_guidance,
1076
+ )
1077
+
1078
+ if isinstance(controlnet, ControlNetModel):
1079
+ image = self.prepare_control_image(
1080
+ image=image,
1081
+ width=width,
1082
+ height=height,
1083
+ batch_size=batch_size * num_images_per_prompt,
1084
+ num_images_per_prompt=num_images_per_prompt,
1085
+ device=device,
1086
+ dtype=controlnet.dtype,
1087
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1088
+ guess_mode=guess_mode,
1089
+ )
1090
+ height, width = image.shape[-2:]
1091
+ elif isinstance(controlnet, MultiControlNetModel):
1092
+ control_images = []
1093
+
1094
+ for control_image_ in image:
1095
+ control_image_ = self.prepare_control_image(
1096
+ image=control_image_,
1097
+ width=width,
1098
+ height=height,
1099
+ batch_size=batch_size * num_images_per_prompt,
1100
+ num_images_per_prompt=num_images_per_prompt,
1101
+ device=device,
1102
+ dtype=controlnet.dtype,
1103
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1104
+ guess_mode=guess_mode,
1105
+ )
1106
+
1107
+ control_images.append(control_image_)
1108
+
1109
+ image = control_images
1110
+ height, width = image[0].shape[-2:]
1111
+ else:
1112
+ assert False
1113
+
1114
+ # 4. Prepare timesteps
1115
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1116
+
1117
+ timesteps = self.scheduler.timesteps
1118
+
1119
+ # 5. Prepare latent variables
1120
+ num_channels_latents = self.unet.config.in_channels
1121
+ latents = self.prepare_latents(
1122
+ batch_size * num_images_per_prompt,
1123
+ num_channels_latents,
1124
+ height,
1125
+ width,
1126
+ prompt_embeds.dtype,
1127
+ device,
1128
+ generator,
1129
+ latents,
1130
+ )
1131
+
1132
+ # 6.5 Optionally get Guidance Scale Embedding
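+ # time_cond_proj_dim is only set for guidance-distilled (e.g. LCM) UNets; if present, embed (guidance_scale - 1) as an extra timestep condition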
1133
+ timestep_cond = None
1134
+ if self.unet.config.time_cond_proj_dim is not None:
1135
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1136
+ timestep_cond = self.get_guidance_scale_embedding(
1137
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1138
+ ).to(device=device, dtype=latents.dtype)
1139
+
1140
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1141
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1142
+
1143
+ # 7.1 Create tensor stating which controlnets to keep
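+ # e.g. with 10 steps and (control_guidance_start, control_guidance_end) = (0.0, 0.5),
+ # steps 0-4 get keep = 1.0 and steps 5-9 get keep = 0.0, so the ControlNet residuals are
+ # dropped for the second half of sampling.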
1144
+ controlnet_keep = []
1145
+ for i in range(len(timesteps)):
1146
+ keeps = [
1147
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1148
+ for s, e in zip(control_guidance_start, control_guidance_end)
1149
+ ]
1150
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1151
+
1152
+ # 7.2 Prepare added time ids & embeddings
1153
+ if isinstance(image, list):
1154
+ original_size = original_size or image[0].shape[-2:]
1155
+ else:
1156
+ original_size = original_size or image.shape[-2:]
1157
+ target_size = target_size or (height, width)
1158
+
1159
+ # 7.3 Prepare added text embeddings & time ids
1160
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1161
+
1162
+ add_text_embeds = pooled_prompt_embeds
1163
+ add_time_ids = self._get_add_time_ids(
1164
+ original_size,
1165
+ crops_coords_top_left,
1166
+ target_size,
1167
+ dtype=prompt_embeds.dtype,
1168
+ text_encoder_projection_dim=text_encoder_projection_dim,
1169
+ )
1170
+
1171
+ if negative_original_size is not None and negative_target_size is not None:
1172
+ negative_add_time_ids = self._get_add_time_ids(
1173
+ negative_original_size,
1174
+ negative_crops_coords_top_left,
1175
+ negative_target_size,
1176
+ dtype=prompt_embeds.dtype,
1177
+ text_encoder_projection_dim=text_encoder_projection_dim,
1178
+ )
1179
+ else:
1180
+ negative_add_time_ids = add_time_ids
1181
+
1182
+ if self.do_classifier_free_guidance:
1183
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1184
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1185
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
1186
+
1187
+ prompt_embeds = prompt_embeds.to(device)
1188
+ add_text_embeds = add_text_embeds.to(device)
1189
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
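+ # With classifier-free guidance the batch layout is [negative, positive]: prompt_embeds is
+ # [2 * B * N, seq_len, 4096], add_text_embeds is [2 * B * N, 4096] and add_time_ids is
+ # [2 * B * N, 6], where B = batch_size, N = num_images_per_prompt and 4096 is the ChatGLM hidden size.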
1190
+
1191
+ # Patch each ControlNet's forward so the ChatGLM prompt embeddings are first projected by
+ # `encoder_hid_proj`; the original forward is restored after the denoising loop.
1193
+
1194
+ patched_cn_models = []
1195
+ if isinstance(self.controlnet, MultiControlNetModel):
1196
+ cn_models_to_patch = self.controlnet.nets
1197
+ else:
1198
+ cn_models_to_patch = [self.controlnet]
1199
+
1200
+ for cn_model in cn_models_to_patch:
1201
+ cn_og_forward = cn_model.forward
1202
+
1203
+ # Bind the current loop variables as defaults so each patched forward keeps its own model
+ # and original forward (avoids Python's late binding of closure variables across iterations).
+ def _cn_patch_forward(*args, cn_model=cn_model, cn_og_forward=cn_og_forward, **kwargs):
1204
+ encoder_hidden_states = kwargs["encoder_hidden_states"]
1205
+ if cn_model.encoder_hid_proj is not None and cn_model.config.encoder_hid_dim_type == "text_proj":
1206
+ # Ensure encoder_hidden_states is on the same device as the projection layer
1207
+ encoder_hidden_states = encoder_hidden_states.to(cn_model.encoder_hid_proj.weight.device)
1208
+ encoder_hidden_states = cn_model.encoder_hid_proj(encoder_hidden_states)
1209
+ kwargs.pop("encoder_hidden_states")
1210
+ return cn_og_forward(*args, encoder_hidden_states=encoder_hidden_states, **kwargs)
1211
+
1212
+ cn_model.forward = _cn_patch_forward
1213
+ patched_cn_models.append((cn_model, cn_og_forward))
1214
+
1215
+ # 8. Denoising loop
1216
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1217
+ try:
1218
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1219
+ for i, t in enumerate(timesteps):
1220
+ # expand the latents if we are doing classifier free guidance
1221
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1222
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1223
+
1224
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1225
+
1226
+ # controlnet(s) inference
1227
+ if guess_mode and self.do_classifier_free_guidance:
1228
+ # Infer ControlNet only for the conditional batch.
1229
+ control_model_input = latents
1230
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1231
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1232
+ controlnet_added_cond_kwargs = {
1233
+ "text_embeds": add_text_embeds.chunk(2)[1],
1234
+ "time_ids": add_time_ids.chunk(2)[1],
1235
+ }
1236
+ else:
1237
+ control_model_input = latent_model_input
1238
+ controlnet_prompt_embeds = prompt_embeds
1239
+ controlnet_added_cond_kwargs = added_cond_kwargs
1240
+
1241
+ if isinstance(controlnet_keep[i], list):
1242
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1243
+ else:
1244
+ controlnet_cond_scale = controlnet_conditioning_scale
1245
+ if isinstance(controlnet_cond_scale, list):
1246
+ controlnet_cond_scale = controlnet_cond_scale[0]
1247
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1248
+
1249
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1250
+ control_model_input,
1251
+ t,
1252
+ encoder_hidden_states=controlnet_prompt_embeds,
1253
+ controlnet_cond=image,
1254
+ conditioning_scale=cond_scale,
1255
+ guess_mode=guess_mode,
1256
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1257
+ return_dict=False,
1258
+ )
1259
+
1260
+ if guess_mode and self.do_classifier_free_guidance:
1261
+ # Inferred ControlNet only for the conditional batch.
1262
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1263
+ # add 0 to the unconditional batch to keep it unchanged.
1264
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1265
+ mid_block_res_sample = torch.cat(
1266
+ [torch.zeros_like(mid_block_res_sample), mid_block_res_sample]
1267
+ )
1268
+
1269
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1270
+ added_cond_kwargs["image_embeds"] = image_embeds
1271
+
1272
+ # predict the noise residual
1273
+ noise_pred = self.unet(
1274
+ latent_model_input,
1275
+ t,
1276
+ encoder_hidden_states=prompt_embeds,
1277
+ timestep_cond=timestep_cond,
1278
+ cross_attention_kwargs=self.cross_attention_kwargs,
1279
+ down_block_additional_residuals=down_block_res_samples,
1280
+ mid_block_additional_residual=mid_block_res_sample,
1281
+ added_cond_kwargs=added_cond_kwargs,
1282
+ return_dict=False,
1283
+ )[0]
1284
+
1285
+ # perform guidance
1286
+ if self.do_classifier_free_guidance:
1287
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1288
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1289
+
1290
+ # compute the previous noisy sample x_t -> x_t-1
1291
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1292
+
1293
+ if callback_on_step_end is not None:
1294
+ callback_kwargs = {}
1295
+ for k in callback_on_step_end_tensor_inputs:
1296
+ callback_kwargs[k] = locals()[k]
1297
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1298
+
1299
+ latents = callback_outputs.pop("latents", latents)
1300
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1301
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1302
+ add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
1303
+ negative_pooled_prompt_embeds = callback_outputs.pop(
1304
+ "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
1305
+ )
1306
+ add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
1307
+ negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)
1308
+ image = callback_outputs.pop("image", image)
1309
+
1310
+ # call the callback, if provided
1311
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1312
+ progress_bar.update()
1313
+ if callback is not None and i % callback_steps == 0:
1314
+ step_idx = i // getattr(self.scheduler, "order", 1)
1315
+ callback(step_idx, t, latents)
1316
+
1317
+ finally:
1318
+ for cn_and_og in patched_cn_models:
1319
+ cn_and_og[0].forward = cn_and_og[1]
1320
+
1321
+ # If we do sequential model offloading, let's offload unet and controlnet
1322
+ # manually for max memory savings
1323
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1324
+ self.unet.to("cpu")
1325
+ self.controlnet.to("cpu")
1326
+ torch.cuda.empty_cache()
1327
+ torch.cuda.ipc_collect()
1328
+
1329
+ if not output_type == "latent":
1330
+ # make sure the VAE is in float32 mode, as it overflows in float16
1331
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1332
+
1333
+ if needs_upcasting:
1334
+ self.upcast_vae()
1335
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1336
+
1337
+ latents = latents / self.vae.config.scaling_factor
1338
+ image = self.vae.decode(latents, return_dict=False)[0]
1339
+
1340
+ # cast back to fp16 if needed
1341
+ if needs_upcasting:
1342
+ self.vae.to(dtype=torch.float16)
1343
+ else:
1344
+ image = latents
1345
+ return StableDiffusionXLPipelineOutput(images=image)
1346
+
1347
+ image = self.image_processor.postprocess(image, output_type=output_type)
1348
+
1349
+ # Offload all models
1350
+ self.maybe_free_model_hooks()
1351
+
1352
+ if not return_dict:
1353
+ return (image,)
1354
+
1355
+ return StableDiffusionXLPipelineOutput(images=image)
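
For reference, a minimal usage sketch for the text-to-image pipeline added above, assuming the file is consumed as a `diffusers` community pipeline via the `custom_pipeline` argument; the checkpoints are the ones referenced in the img2img example docstring below, and the conditioning image is only a placeholder that should be replaced with a real depth map:

```py
import torch
from diffusers import ControlNetModel, DiffusionPipeline
from diffusers.utils import load_image

# Kolors depth ControlNet + Kolors base weights (same checkpoints as the example docstring below).
controlnet = ControlNetModel.from_pretrained(
    "Kwai-Kolors/Kolors-ControlNet-Depth", use_safetensors=True, torch_dtype=torch.float16
)
pipe = DiffusionPipeline.from_pretrained(
    "Kwai-Kolors/Kolors-diffusers",
    controlnet=controlnet,
    custom_pipeline="pipeline_controlnet_xl_kolors",  # the file added above
    variant="fp16",
    use_safetensors=True,
    torch_dtype=torch.float16,
)
pipe.enable_model_cpu_offload()

# `image` is the ControlNet conditioning input; for the depth ControlNet it should be a depth
# map (see the `get_depth_map` helper in the img2img example docstring below). The raw photo
# used here is only a stand-in to keep the sketch self-contained.
control_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
).resize((1024, 1024))

image = pipe(
    "A robot, 4k photo",
    image=control_image,
    num_inference_steps=50,
    guidance_scale=5.0,
    controlnet_conditioning_scale=0.5,
).images[0]
image.save("kolors_controlnet_t2i.png")
```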
main/pipeline_controlnet_xl_kolors_img2img.py ADDED
@@ -0,0 +1,1557 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import inspect
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+ import torch
22
+ import torch.nn.functional as F
23
+ from transformers import (
24
+ CLIPImageProcessor,
25
+ CLIPVisionModelWithProjection,
26
+ )
27
+
28
+ from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
29
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
30
+ from diffusers.loaders import (
31
+ FromSingleFileMixin,
32
+ IPAdapterMixin,
33
+ StableDiffusionXLLoraLoaderMixin,
34
+ TextualInversionLoaderMixin,
35
+ )
36
+ from diffusers.models import (
37
+ AutoencoderKL,
38
+ ControlNetModel,
39
+ ImageProjection,
40
+ MultiControlNetModel,
41
+ UNet2DConditionModel,
42
+ )
43
+ from diffusers.models.attention_processor import (
44
+ AttnProcessor2_0,
45
+ XFormersAttnProcessor,
46
+ )
47
+ from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
48
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
49
+ from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
50
+ from diffusers.schedulers import KarrasDiffusionSchedulers
51
+ from diffusers.utils import (
52
+ deprecate,
53
+ is_invisible_watermark_available,
54
+ logging,
55
+ replace_example_docstring,
56
+ )
57
+ from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
58
+
59
+
60
+ if is_invisible_watermark_available():
61
+ from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
62
+
63
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
64
+
65
+
66
+ EXAMPLE_DOC_STRING = """
67
+ Examples:
68
+ ```py
69
+ >>> import torch
70
+ >>> import numpy as np
71
+ >>> from PIL import Image
72
+
73
+ >>> from transformers import DPTImageProcessor, DPTForDepthEstimation
74
+ >>> from diffusers import ControlNetModel, KolorsControlNetImg2ImgPipeline
75
+ >>> from diffusers.utils import load_image
76
+
77
+ >>> depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to("cuda")
78
+ >>> feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
79
+ >>> controlnet = ControlNetModel.from_pretrained(
80
+ ... "Kwai-Kolors/Kolors-ControlNet-Depth",
81
+ ... use_safetensors=True,
82
+ ... torch_dtype=torch.float16
83
+ ... )
84
+ >>> pipe = KolorsControlNetImg2ImgPipeline.from_pretrained(
85
+ ... "Kwai-Kolors/Kolors-diffusers",
86
+ ... controlnet=controlnet,
87
+ ... variant="fp16",
88
+ ... use_safetensors=True,
89
+ ... torch_dtype=torch.float16
90
+ ... )
91
+ >>> pipe.enable_model_cpu_offload()
92
+
93
+
94
+ >>> def get_depth_map(image):
95
+ ... image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda")
96
+ ...
97
+ ... with torch.no_grad(), torch.autocast("cuda"):
98
+ ... depth_map = depth_estimator(image).predicted_depth
99
+ ...
100
+ ... depth_map = torch.nn.functional.interpolate(
101
+ ... depth_map.unsqueeze(1),
102
+ ... size=(1024, 1024),
103
+ ... mode="bicubic",
104
+ ... align_corners=False,
105
+ ... )
106
+ ... depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)
107
+ ... depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)
108
+ ... depth_map = (depth_map - depth_min) / (depth_max - depth_min)
109
+ ... image = torch.cat([depth_map] * 3, dim=1)
110
+ ... image = image.permute(0, 2, 3, 1).cpu().numpy()[0]
111
+ ... image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))
112
+ ... return image
113
+
114
+
115
+ >>> prompt = "A robot, 4k photo"
116
+ >>> image = load_image(
117
+ ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
118
+ ... "/kandinsky/cat.png"
119
+ ... ).resize((1024, 1024))
120
+ >>> controlnet_conditioning_scale = 0.5 # recommended for good generalization
121
+ >>> depth_image = get_depth_map(image)
122
+
123
+ >>> images = pipe(
124
+ ... prompt,
125
+ ... image=image,
126
+ ... control_image=depth_image,
127
+ ... strength=0.80,
128
+ ... num_inference_steps=50,
129
+ ... controlnet_conditioning_scale=controlnet_conditioning_scale,
130
+ ... ).images
131
+ >>> images[0].save("robot_cat.png")
132
+ ```
133
+ """
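+ # Note (assumption): as a community pipeline file, this class is typically loaded with
+ # `DiffusionPipeline.from_pretrained(..., custom_pipeline="pipeline_controlnet_xl_kolors_img2img")`
+ # rather than imported from `diffusers` as in the example above.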
134
+
135
+
136
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
137
+ def retrieve_latents(
138
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
139
+ ):
140
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
141
+ return encoder_output.latent_dist.sample(generator)
142
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
143
+ return encoder_output.latent_dist.mode()
144
+ elif hasattr(encoder_output, "latents"):
145
+ return encoder_output.latents
146
+ else:
147
+ raise AttributeError("Could not access latents of provided encoder_output")
148
+
149
+
150
+ class KolorsControlNetImg2ImgPipeline(
151
+ DiffusionPipeline,
152
+ StableDiffusionMixin,
153
+ StableDiffusionXLLoraLoaderMixin,
154
+ FromSingleFileMixin,
155
+ IPAdapterMixin,
156
+ ):
157
+ r"""
158
+ Pipeline for image-to-image generation using Kolors with ControlNet guidance.
159
+
160
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
161
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
162
+
163
+ The pipeline also inherits the following loading methods:
164
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.safetensors` files
165
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
166
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
167
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
168
+
169
+ Args:
170
+ vae ([`AutoencoderKL`]):
171
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
172
+ text_encoder ([`ChatGLMModel`]):
173
+ Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b).
174
+ tokenizer (`ChatGLMTokenizer`):
175
+ Tokenizer of class
176
+ [ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py).
177
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
178
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
179
+ Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
180
+ as a list, the outputs from each ControlNet are added together to create one combined additional
181
+ conditioning.
182
+ scheduler ([`SchedulerMixin`]):
183
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
184
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
185
+ requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
186
+ Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the
187
+ config of `stabilityai/stable-diffusion-xl-refiner-1-0`.
188
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
189
+ Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
190
+ `Kwai-Kolors/Kolors-diffusers`.
191
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
192
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
193
+ """
194
+
195
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
196
+
197
+ _optional_components = [
198
+ "tokenizer",
199
+ "text_encoder",
200
+ "feature_extractor",
201
+ "image_encoder",
202
+ ]
203
+ _callback_tensor_inputs = [
204
+ "latents",
205
+ "prompt_embeds",
206
+ "negative_prompt_embeds",
207
+ "add_text_embeds",
208
+ "add_time_ids",
209
+ "negative_pooled_prompt_embeds",
210
+ "add_neg_time_ids",
211
+ "control_image",
212
+ ]
213
+
214
+ def __init__(
215
+ self,
216
+ vae: AutoencoderKL,
217
+ text_encoder: ChatGLMModel,
218
+ tokenizer: ChatGLMTokenizer,
219
+ unet: UNet2DConditionModel,
220
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
221
+ scheduler: KarrasDiffusionSchedulers,
222
+ requires_aesthetics_score: bool = False,
223
+ force_zeros_for_empty_prompt: bool = True,
224
+ feature_extractor: CLIPImageProcessor = None,
225
+ image_encoder: CLIPVisionModelWithProjection = None,
226
+ add_watermarker: Optional[bool] = None,
227
+ ):
228
+ super().__init__()
229
+
230
+ if isinstance(controlnet, (list, tuple)):
231
+ controlnet = MultiControlNetModel(controlnet)
232
+
233
+ self.register_modules(
234
+ vae=vae,
235
+ text_encoder=text_encoder,
236
+ tokenizer=tokenizer,
237
+ unet=unet,
238
+ controlnet=controlnet,
239
+ scheduler=scheduler,
240
+ feature_extractor=feature_extractor,
241
+ image_encoder=image_encoder,
242
+ )
243
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
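+ # e.g. the SDXL-style VAE used by Kolors has 4 block_out_channels, so vae_scale_factor = 8
+ # and a 1024x1024 image maps to 128x128 latents.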
244
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
245
+ self.control_image_processor = VaeImageProcessor(
246
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
247
+ )
248
+
249
+ if add_watermarker:
250
+ self.watermark = StableDiffusionXLWatermarker()
251
+ else:
252
+ self.watermark = None
253
+
254
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
255
+ self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
256
+
257
+ def encode_prompt(
258
+ self,
259
+ prompt,
260
+ device: Optional[torch.device] = None,
261
+ num_images_per_prompt: int = 1,
262
+ do_classifier_free_guidance: bool = True,
263
+ negative_prompt=None,
264
+ prompt_embeds: Optional[torch.FloatTensor] = None,
265
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
266
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
267
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
268
+ lora_scale: Optional[float] = None,
269
+ ):
270
+ r"""
271
+ Encodes the prompt into text encoder hidden states.
272
+
273
+ Args:
274
+ prompt (`str` or `List[str]`, *optional*):
275
+ prompt to be encoded
276
+ device: (`torch.device`):
277
+ torch device
278
+ num_images_per_prompt (`int`):
279
+ number of images that should be generated per prompt
280
+ do_classifier_free_guidance (`bool`):
281
+ whether to use classifier free guidance or not
282
+ negative_prompt (`str` or `List[str]`, *optional*):
283
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
284
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
285
+ less than `1`).
286
+ prompt_embeds (`torch.FloatTensor`, *optional*):
287
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
288
+ provided, text embeddings will be generated from `prompt` input argument.
289
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
290
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
291
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
292
+ argument.
293
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
294
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
295
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
296
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
297
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
298
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
299
+ input argument.
300
+ lora_scale (`float`, *optional*):
301
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
302
+ """
303
304
+ device = device or self._execution_device
305
+
306
+ # set lora scale so that monkey patched LoRA
307
+ # function of text encoder can correctly access it
308
+ if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
309
+ self._lora_scale = lora_scale
310
+
311
+ if prompt is not None and isinstance(prompt, str):
312
+ batch_size = 1
313
+ elif prompt is not None and isinstance(prompt, list):
314
+ batch_size = len(prompt)
315
+ else:
316
+ batch_size = prompt_embeds.shape[0]
317
+
318
+ # Define tokenizers and text encoders
319
+ tokenizers = [self.tokenizer]
320
+ text_encoders = [self.text_encoder]
321
+
322
+ if prompt_embeds is None:
323
+ # textual inversion: process multi-vector tokens if necessary
324
+ prompt_embeds_list = []
325
+ for tokenizer, text_encoder in zip(tokenizers, text_encoders):
326
+ if isinstance(self, TextualInversionLoaderMixin):
327
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
328
+
329
+ text_inputs = tokenizer(
330
+ prompt,
331
+ padding="max_length",
332
+ max_length=256,
333
+ truncation=True,
334
+ return_tensors="pt",
335
+ ).to(self._execution_device)
336
+ output = text_encoder(
337
+ input_ids=text_inputs["input_ids"],
338
+ attention_mask=text_inputs["attention_mask"],
339
+ position_ids=text_inputs["position_ids"],
340
+ output_hidden_states=True,
341
+ )
342
+ prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
343
+ pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096]
344
+ bs_embed, seq_len, _ = prompt_embeds.shape
345
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
346
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
347
+
348
+ prompt_embeds_list.append(prompt_embeds)
349
+
350
+ # prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
351
+ prompt_embeds = prompt_embeds_list[0]
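+ # Kolors uses a single ChatGLM text encoder, so the list above holds exactly one entry and
+ # no per-encoder concatenation is needed (hence the commented-out torch.concat).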
352
+
353
+ # get unconditional embeddings for classifier free guidance
354
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
355
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
356
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
357
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
358
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
359
+ # negative_prompt = negative_prompt or ""
360
+ uncond_tokens: List[str]
361
+ if negative_prompt is None:
362
+ uncond_tokens = [""] * batch_size
363
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
364
+ raise TypeError(
365
+ f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
366
+ f" {type(prompt)}."
367
+ )
368
+ elif isinstance(negative_prompt, str):
369
+ uncond_tokens = [negative_prompt]
370
+ elif batch_size != len(negative_prompt):
371
+ raise ValueError(
372
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
373
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
374
+ " the batch size of `prompt`."
375
+ )
376
+ else:
377
+ uncond_tokens = negative_prompt
378
+
379
+ negative_prompt_embeds_list = []
380
+ for tokenizer, text_encoder in zip(tokenizers, text_encoders):
381
+ # textual inversion: process multi-vector tokens if necessary
382
+ if isinstance(self, TextualInversionLoaderMixin):
383
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, tokenizer)
384
+
385
+ max_length = prompt_embeds.shape[1]
386
+ uncond_input = tokenizer(
387
+ uncond_tokens,
388
+ padding="max_length",
389
+ max_length=max_length,
390
+ truncation=True,
391
+ return_tensors="pt",
392
+ ).to(self._execution_device)
393
+ output = text_encoder(
394
+ input_ids=uncond_input["input_ids"],
395
+ attention_mask=uncond_input["attention_mask"],
396
+ position_ids=uncond_input["position_ids"],
397
+ output_hidden_states=True,
398
+ )
399
+ negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
400
+ negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096]
401
+
402
+ if do_classifier_free_guidance:
403
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
404
+ seq_len = negative_prompt_embeds.shape[1]
405
+
406
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)
407
+
408
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
409
+ negative_prompt_embeds = negative_prompt_embeds.view(
410
+ batch_size * num_images_per_prompt, seq_len, -1
411
+ )
412
+
413
+ # For classifier free guidance, we need to do two forward passes.
414
+ # Here we concatenate the unconditional and text embeddings into a single batch
415
+ # to avoid doing two forward passes
416
+
417
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
418
+
419
+ # negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
420
+ negative_prompt_embeds = negative_prompt_embeds_list[0]
421
+
422
+ bs_embed = pooled_prompt_embeds.shape[0]
423
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
424
+ bs_embed * num_images_per_prompt, -1
425
+ )
426
+ if do_classifier_free_guidance:
427
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
428
+ bs_embed * num_images_per_prompt, -1
429
+ )
430
+
431
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
432
+
433
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
434
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
435
+ dtype = next(self.image_encoder.parameters()).dtype
436
+
437
+ if not isinstance(image, torch.Tensor):
438
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
439
+
440
+ image = image.to(device=device, dtype=dtype)
441
+ if output_hidden_states:
442
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
443
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
444
+ uncond_image_enc_hidden_states = self.image_encoder(
445
+ torch.zeros_like(image), output_hidden_states=True
446
+ ).hidden_states[-2]
447
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
448
+ num_images_per_prompt, dim=0
449
+ )
450
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
451
+ else:
452
+ image_embeds = self.image_encoder(image).image_embeds
453
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
454
+ uncond_image_embeds = torch.zeros_like(image_embeds)
455
+
456
+ return image_embeds, uncond_image_embeds
457
+
458
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
459
+ def prepare_ip_adapter_image_embeds(
460
+ self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
461
+ ):
462
+ image_embeds = []
463
+ if do_classifier_free_guidance:
464
+ negative_image_embeds = []
465
+ if ip_adapter_image_embeds is None:
466
+ if not isinstance(ip_adapter_image, list):
467
+ ip_adapter_image = [ip_adapter_image]
468
+
469
+ if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
470
+ raise ValueError(
471
+ f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
472
+ )
473
+
474
+ for single_ip_adapter_image, image_proj_layer in zip(
475
+ ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
476
+ ):
477
+ output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
478
+ single_image_embeds, single_negative_image_embeds = self.encode_image(
479
+ single_ip_adapter_image, device, 1, output_hidden_state
480
+ )
481
+
482
+ image_embeds.append(single_image_embeds[None, :])
483
+ if do_classifier_free_guidance:
484
+ negative_image_embeds.append(single_negative_image_embeds[None, :])
485
+ else:
486
+ for single_image_embeds in ip_adapter_image_embeds:
487
+ if do_classifier_free_guidance:
488
+ single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
489
+ negative_image_embeds.append(single_negative_image_embeds)
490
+ image_embeds.append(single_image_embeds)
491
+
492
+ ip_adapter_image_embeds = []
493
+ for i, single_image_embeds in enumerate(image_embeds):
494
+ single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
495
+ if do_classifier_free_guidance:
496
+ single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
497
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)
498
+
499
+ single_image_embeds = single_image_embeds.to(device=device)
500
+ ip_adapter_image_embeds.append(single_image_embeds)
501
+
502
+ return ip_adapter_image_embeds
503
+
504
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
505
+ def prepare_extra_step_kwargs(self, generator, eta):
506
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
507
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for others.
508
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
509
+ # and should be between [0, 1]
510
+
511
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
512
+ extra_step_kwargs = {}
513
+ if accepts_eta:
514
+ extra_step_kwargs["eta"] = eta
515
+
516
+ # check if the scheduler accepts generator
517
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
518
+ if accepts_generator:
519
+ extra_step_kwargs["generator"] = generator
520
+ return extra_step_kwargs
521
+
522
+ def check_inputs(
523
+ self,
524
+ prompt,
525
+ image,
526
+ strength,
527
+ num_inference_steps,
528
+ callback_steps,
529
+ negative_prompt=None,
530
+ prompt_embeds=None,
531
+ negative_prompt_embeds=None,
532
+ pooled_prompt_embeds=None,
533
+ negative_pooled_prompt_embeds=None,
534
+ ip_adapter_image=None,
535
+ ip_adapter_image_embeds=None,
536
+ controlnet_conditioning_scale=1.0,
537
+ control_guidance_start=0.0,
538
+ control_guidance_end=1.0,
539
+ callback_on_step_end_tensor_inputs=None,
540
+ ):
541
+ if strength < 0 or strength > 1:
542
+ raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
543
+ if num_inference_steps is None:
544
+ raise ValueError("`num_inference_steps` cannot be None.")
545
+ elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
546
+ raise ValueError(
547
+ f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
548
+ f" {type(num_inference_steps)}."
549
+ )
550
+
551
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
552
+ raise ValueError(
553
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
554
+ f" {type(callback_steps)}."
555
+ )
556
+
557
+ if callback_on_step_end_tensor_inputs is not None and not all(
558
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
559
+ ):
560
+ raise ValueError(
561
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
562
+ )
563
+
564
+ if prompt is not None and prompt_embeds is not None:
565
+ raise ValueError(
566
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
567
+ " only forward one of the two."
568
+ )
569
+ elif prompt is None and prompt_embeds is None:
570
+ raise ValueError(
571
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
572
+ )
573
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
574
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
575
+
576
+ if negative_prompt is not None and negative_prompt_embeds is not None:
577
+ raise ValueError(
578
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
579
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
580
+ )
581
+
582
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
583
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
584
+ raise ValueError(
585
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
586
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
587
+ f" {negative_prompt_embeds.shape}."
588
+ )
589
+
590
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
591
+ raise ValueError(
592
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
593
+ )
594
+
595
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
596
+ raise ValueError(
597
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
598
+ )
599
+
600
+ # `prompt` needs more sophisticated handling when there are multiple
601
+ # conditionings.
602
+ if isinstance(self.controlnet, MultiControlNetModel):
603
+ if isinstance(prompt, list):
604
+ logger.warning(
605
+ f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
606
+ " prompts. The conditionings will be fixed across the prompts."
607
+ )
608
+
609
+ # Check `image`
610
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
611
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
612
+ )
613
+
614
+ if (
615
+ isinstance(self.controlnet, ControlNetModel)
616
+ or is_compiled
617
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
618
+ ):
619
+ self.check_image(image, prompt, prompt_embeds)
620
+ elif (
621
+ isinstance(self.controlnet, MultiControlNetModel)
622
+ or is_compiled
623
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
624
+ ):
625
+ if not isinstance(image, list):
626
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
627
+
628
+ # When `image` is a nested list:
629
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
630
+ elif any(isinstance(i, list) for i in image):
631
+ raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
632
+ elif len(image) != len(self.controlnet.nets):
633
+ raise ValueError(
634
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
635
+ )
636
+
637
+ for image_ in image:
638
+ self.check_image(image_, prompt, prompt_embeds)
639
+ else:
640
+ assert False
641
+
642
+ # Check `controlnet_conditioning_scale`
643
+ if (
644
+ isinstance(self.controlnet, ControlNetModel)
645
+ or is_compiled
646
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
647
+ ):
648
+ if not isinstance(controlnet_conditioning_scale, float):
649
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
650
+ elif (
651
+ isinstance(self.controlnet, MultiControlNetModel)
652
+ or is_compiled
653
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
654
+ ):
655
+ if isinstance(controlnet_conditioning_scale, list):
656
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
657
+ raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
658
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
659
+ self.controlnet.nets
660
+ ):
661
+ raise ValueError(
662
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
663
+ " the same length as the number of controlnets"
664
+ )
665
+ else:
666
+ assert False
667
+
668
+ if not isinstance(control_guidance_start, (tuple, list)):
669
+ control_guidance_start = [control_guidance_start]
670
+
671
+ if not isinstance(control_guidance_end, (tuple, list)):
672
+ control_guidance_end = [control_guidance_end]
673
+
674
+ if len(control_guidance_start) != len(control_guidance_end):
675
+ raise ValueError(
676
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
677
+ )
678
+
679
+ if isinstance(self.controlnet, MultiControlNetModel):
680
+ if len(control_guidance_start) != len(self.controlnet.nets):
681
+ raise ValueError(
682
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
683
+ )
684
+
685
+ for start, end in zip(control_guidance_start, control_guidance_end):
686
+ if start >= end:
687
+ raise ValueError(
688
+ f"control guidance start: {start} cannot be larger than or equal to control guidance end: {end}."
689
+ )
690
+ if start < 0.0:
691
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
692
+ if end > 1.0:
693
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
694
+
695
+ if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
696
+ raise ValueError(
697
+ "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
698
+ )
699
+
700
+ if ip_adapter_image_embeds is not None:
701
+ if not isinstance(ip_adapter_image_embeds, list):
702
+ raise ValueError(
703
+ f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
704
+ )
705
+ elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
706
+ raise ValueError(
707
+ f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
708
+ )
709
+
710
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image
711
+ def check_image(self, image, prompt, prompt_embeds):
712
+ image_is_pil = isinstance(image, PIL.Image.Image)
713
+ image_is_tensor = isinstance(image, torch.Tensor)
714
+ image_is_np = isinstance(image, np.ndarray)
715
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
716
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
717
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
718
+
719
+ if (
720
+ not image_is_pil
721
+ and not image_is_tensor
722
+ and not image_is_np
723
+ and not image_is_pil_list
724
+ and not image_is_tensor_list
725
+ and not image_is_np_list
726
+ ):
727
+ raise TypeError(
728
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
729
+ )
730
+
731
+ if image_is_pil:
732
+ image_batch_size = 1
733
+ else:
734
+ image_batch_size = len(image)
735
+
736
+ if prompt is not None and isinstance(prompt, str):
737
+ prompt_batch_size = 1
738
+ elif prompt is not None and isinstance(prompt, list):
739
+ prompt_batch_size = len(prompt)
740
+ elif prompt_embeds is not None:
741
+ prompt_batch_size = prompt_embeds.shape[0]
742
+
743
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
744
+ raise ValueError(
745
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
746
+ )
747
+
748
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image
749
+ def prepare_control_image(
750
+ self,
751
+ image,
752
+ width,
753
+ height,
754
+ batch_size,
755
+ num_images_per_prompt,
756
+ device,
757
+ dtype,
758
+ do_classifier_free_guidance=False,
759
+ guess_mode=False,
760
+ ):
761
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
762
+ image_batch_size = image.shape[0]
763
+
764
+ if image_batch_size == 1:
765
+ repeat_by = batch_size
766
+ else:
767
+ # image batch size is the same as prompt batch size
768
+ repeat_by = num_images_per_prompt
769
+
770
+ image = image.repeat_interleave(repeat_by, dim=0)
771
+
772
+ image = image.to(device=device, dtype=dtype)
773
+
774
+ if do_classifier_free_guidance and not guess_mode:
775
+ image = torch.cat([image] * 2)
776
+
777
+ return image
778
+
779
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
780
+ def get_timesteps(self, num_inference_steps, strength, device):
781
+ # get the original timestep using init_timestep
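+ # e.g. num_inference_steps=50 with strength=0.8 gives init_timestep=40 and t_start=10,
+ # so denoising skips the first 10 steps and runs the remaining 40.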
782
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
783
+
784
+ t_start = max(num_inference_steps - init_timestep, 0)
785
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
786
+ if hasattr(self.scheduler, "set_begin_index"):
787
+ self.scheduler.set_begin_index(t_start * self.scheduler.order)
788
+
789
+ return timesteps, num_inference_steps - t_start
790
+
791
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents
792
+ def prepare_latents(
793
+ self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
794
+ ):
795
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
796
+ raise ValueError(
797
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
798
+ )
799
+
800
+ # Offload text encoder if `enable_model_cpu_offload` was enabled
801
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
802
+ torch.cuda.empty_cache()
803
+ torch.cuda.ipc_collect()
804
+
805
+ image = image.to(device=device, dtype=dtype)
806
+
807
+ batch_size = batch_size * num_images_per_prompt
808
+
809
+ if image.shape[1] == 4:
810
+ init_latents = image
811
+
812
+ else:
813
+ # make sure the VAE is in float32 mode, as it overflows in float16
814
+ if self.vae.config.force_upcast:
815
+ image = image.float()
816
+ self.vae.to(dtype=torch.float32)
817
+
818
+ if isinstance(generator, list) and len(generator) != batch_size:
819
+ raise ValueError(
820
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
821
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
822
+ )
823
+
824
+ elif isinstance(generator, list):
825
+ init_latents = [
826
+ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
827
+ for i in range(batch_size)
828
+ ]
829
+ init_latents = torch.cat(init_latents, dim=0)
830
+ else:
831
+ init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
832
+
833
+ if self.vae.config.force_upcast:
834
+ self.vae.to(dtype)
835
+
836
+ init_latents = init_latents.to(dtype)
837
+
838
+ init_latents = self.vae.config.scaling_factor * init_latents
839
+
840
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
841
+ # expand init_latents for batch_size
842
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
843
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
844
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
845
+ raise ValueError(
846
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
847
+ )
848
+ else:
849
+ init_latents = torch.cat([init_latents], dim=0)
850
+
851
+ if add_noise:
852
+ shape = init_latents.shape
853
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
854
+ # get latents
855
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
856
+
857
+ latents = init_latents
858
+
859
+ return latents
860
+
861
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
862
+ def prepare_latents_t2i(
863
+ self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None
864
+ ):
865
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
866
+ if isinstance(generator, list) and len(generator) != batch_size:
867
+ raise ValueError(
868
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
869
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
870
+ )
871
+
872
+ if latents is None:
873
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
874
+ else:
875
+ latents = latents.to(device)
876
+
877
+ # scale the initial noise by the standard deviation required by the scheduler
878
+ latents = latents * self.scheduler.init_noise_sigma
879
+ return latents
880
+
881
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
882
+ def _get_add_time_ids(
883
+ self,
884
+ original_size,
885
+ crops_coords_top_left,
886
+ target_size,
887
+ aesthetic_score,
888
+ negative_aesthetic_score,
889
+ negative_original_size,
890
+ negative_crops_coords_top_left,
891
+ negative_target_size,
892
+ dtype,
893
+ text_encoder_projection_dim=None,
894
+ ):
895
+ if self.config.requires_aesthetics_score:
896
+ add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
897
+ add_neg_time_ids = list(
898
+ negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
899
+ )
900
+ else:
901
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
902
+ add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)
903
+
904
+ passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + 4096
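+ # 4096 here is the pooled ChatGLM embedding size that Kolors feeds into `add_embedding`,
+ # replacing the CLIP text-projection dim used by vanilla SDXL.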
905
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
906
+
907
+ if (
908
+ expected_add_embed_dim > passed_add_embed_dim
909
+ and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
910
+ ):
911
+ raise ValueError(
912
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
913
+ )
914
+ elif (
915
+ expected_add_embed_dim < passed_add_embed_dim
916
+ and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
917
+ ):
918
+ raise ValueError(
919
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
920
+ )
921
+ elif expected_add_embed_dim != passed_add_embed_dim:
922
+ raise ValueError(
923
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder.config.projection_dim`."
924
+ )
925
+
926
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
927
+ add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
928
+
929
+ return add_time_ids, add_neg_time_ids
930
+
931
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
932
+ def upcast_vae(self):
933
+ dtype = self.vae.dtype
934
+ self.vae.to(dtype=torch.float32)
935
+ use_torch_2_0_or_xformers = isinstance(
936
+ self.vae.decoder.mid_block.attentions[0].processor,
937
+ (
938
+ AttnProcessor2_0,
939
+ XFormersAttnProcessor,
940
+ ),
941
+ )
942
+ # if xformers or torch_2_0 is used attention block does not need
943
+ # to be in float32 which can save lots of memory
944
+ if use_torch_2_0_or_xformers:
945
+ self.vae.post_quant_conv.to(dtype)
946
+ self.vae.decoder.conv_in.to(dtype)
947
+ self.vae.decoder.mid_block.to(dtype)
948
+
949
+ @property
950
+ def guidance_scale(self):
951
+ return self._guidance_scale
952
+
953
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
954
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
955
+ # corresponds to doing no classifier free guidance.
956
+ @property
957
+ def do_classifier_free_guidance(self):
958
+ return self._guidance_scale > 1
959
+
960
+ @property
961
+ def cross_attention_kwargs(self):
962
+ return self._cross_attention_kwargs
963
+
964
+ @property
965
+ def num_timesteps(self):
966
+ return self._num_timesteps
967
+
968
+ @torch.no_grad()
969
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
970
+ def __call__(
971
+ self,
972
+ prompt: Union[str, List[str]] = None,
973
+ image: PipelineImageInput = None,
974
+ control_image: PipelineImageInput = None,
975
+ height: Optional[int] = None,
976
+ width: Optional[int] = None,
977
+ strength: float = 0.8,
978
+ num_inference_steps: int = 50,
979
+ guidance_scale: float = 5.0,
980
+ negative_prompt: Optional[Union[str, List[str]]] = None,
981
+ num_images_per_prompt: Optional[int] = 1,
982
+ eta: float = 0.0,
983
+ guess_mode: bool = False,
984
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
985
+ latents: Optional[torch.Tensor] = None,
986
+ prompt_embeds: Optional[torch.Tensor] = None,
987
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
988
+ pooled_prompt_embeds: Optional[torch.Tensor] = None,
989
+ negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
990
+ ip_adapter_image: Optional[PipelineImageInput] = None,
991
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
992
+ output_type: Optional[str] = "pil",
993
+ return_dict: bool = True,
994
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
995
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
996
+ control_guidance_start: Union[float, List[float]] = 0.0,
997
+ control_guidance_end: Union[float, List[float]] = 1.0,
998
+ original_size: Tuple[int, int] = None,
999
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
1000
+ target_size: Tuple[int, int] = None,
1001
+ negative_original_size: Optional[Tuple[int, int]] = None,
1002
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
1003
+ negative_target_size: Optional[Tuple[int, int]] = None,
1004
+ aesthetic_score: float = 6.0,
1005
+ negative_aesthetic_score: float = 2.5,
1006
+ callback_on_step_end: Optional[
1007
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
1008
+ ] = None,
1009
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
1010
+ **kwargs,
1011
+ ):
1012
+ r"""
1013
+ Function invoked when calling the pipeline for generation.
1014
+
1015
+ Args:
1016
+ prompt (`str` or `List[str]`, *optional*):
1017
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
1018
+ instead.
1019
+ image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
1020
+ `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
1021
+ The initial image will be used as the starting point for the image generation process. Can also accept
1022
+ image latents as `image`; if passing latents directly, they will not be encoded again.
1023
+ control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
1024
+ `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
1025
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance for the UNet. If
1026
+ the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also
1027
+ be accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height
1028
+ and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in
1029
+ init, images must be passed as a list such that each element of the list can be correctly batched for
1030
+ input to a single controlnet.
1031
+ height (`int`, *optional*, defaults to the size of control_image):
1032
+ The height in pixels of the generated image. Anything below 512 pixels won't work well for
1033
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
1034
+ and checkpoints that are not specifically fine-tuned on low resolutions.
1035
+ width (`int`, *optional*, defaults to the size of control_image):
1036
+ The width in pixels of the generated image. Anything below 512 pixels won't work well for
1037
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
1038
+ and checkpoints that are not specifically fine-tuned on low resolutions.
1039
+ strength (`float`, *optional*, defaults to 0.8):
1040
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
1041
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
1042
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
1043
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
1044
+ essentially ignores `image`.
1045
+ num_inference_steps (`int`, *optional*, defaults to 50):
1046
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1047
+ expense of slower inference.
1048
+ guidance_scale (`float`, *optional*, defaults to 5.0):
1049
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1050
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1051
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1052
+ 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
1053
+ usually at the expense of lower image quality.
1054
+ negative_prompt (`str` or `List[str]`, *optional*):
1055
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
1056
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1057
+ less than `1`).
1058
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1059
+ The number of images to generate per prompt.
1060
+ eta (`float`, *optional*, defaults to 0.0):
1061
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1062
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1063
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1064
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1065
+ to make generation deterministic.
1066
+ latents (`torch.Tensor`, *optional*):
1067
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1068
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1069
+ tensor will be generated by sampling using the supplied random `generator`.
1070
+ prompt_embeds (`torch.Tensor`, *optional*):
1071
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1072
+ provided, text embeddings will be generated from `prompt` input argument.
1073
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
1074
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1075
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1076
+ argument.
1077
+ pooled_prompt_embeds (`torch.Tensor`, *optional*):
1078
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1079
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
1080
+ negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
1081
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1082
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1083
+ input argument.
1084
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
1085
+ ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
1086
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
1087
+ IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
1088
+ contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
1089
+ provided, embeddings are computed from the `ip_adapter_image` input argument.
1090
+ output_type (`str`, *optional*, defaults to `"pil"`):
1091
+ The output format of the generated image. Choose between
1092
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1093
+ return_dict (`bool`, *optional*, defaults to `True`):
1094
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a
1095
+ plain tuple.
1096
+ cross_attention_kwargs (`dict`, *optional*):
1097
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1098
+ `self.processor` in
1099
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1100
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
1101
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
1102
+ to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
1103
+ corresponding scale as a list.
1104
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
1105
+ The percentage of total steps at which the controlnet starts applying.
1106
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
1107
+ The percentage of total steps at which the controlnet stops applying.
1108
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1109
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1110
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
1111
+ explained in section 2.2 of
1112
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1113
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1114
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1115
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1116
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1117
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1118
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1119
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
1120
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
1121
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1122
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1123
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
1124
+ micro-conditioning as explained in section 2.2 of
1125
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1126
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1127
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1128
+ To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's
1129
+ micro-conditioning as explained in section 2.2 of
1130
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1131
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1132
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1133
+ To negatively condition the generation process based on a target image resolution. It should be the same
1134
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
1135
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1136
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1137
+ aesthetic_score (`float`, *optional*, defaults to 6.0):
1138
+ Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
1139
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1140
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1141
+ negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
1142
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1143
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
1144
+ simulate an aesthetic score of the generated image by influencing the negative text condition.
1145
+ callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
1146
+ A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
1147
+ each denoising step during inference with the following arguments: `callback_on_step_end(self:
1148
+ DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
1149
+ list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
1150
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
1151
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1152
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1153
+ `._callback_tensor_inputs` attribute of your pipeline class.
1154
+
1155
+ Examples:
1156
+
1157
+ Returns:
1158
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
1159
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple`
1160
+ containing the output images.
1161
+ """
1162
+
1163
+ callback = kwargs.pop("callback", None)
1164
+ callback_steps = kwargs.pop("callback_steps", None)
1165
+
1166
+ if callback is not None:
1167
+ deprecate(
1168
+ "callback",
1169
+ "1.0.0",
1170
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1171
+ )
1172
+ if callback_steps is not None:
1173
+ deprecate(
1174
+ "callback_steps",
1175
+ "1.0.0",
1176
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1177
+ )
1178
+
1179
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
1180
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
1181
+
1182
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1183
+
1184
+ # align format for control guidance
1185
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1186
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1187
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1188
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1189
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1190
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1191
+ control_guidance_start, control_guidance_end = (
1192
+ mult * [control_guidance_start],
1193
+ mult * [control_guidance_end],
1194
+ )
1195
+
1196
+ # 1. Check inputs. Raise error if not correct
1198
+ self.check_inputs(
1199
+ prompt,
1200
+ control_image,
1201
+ strength,
1202
+ num_inference_steps,
1203
+ callback_steps,
1204
+ negative_prompt,
1205
+ prompt_embeds,
1206
+ negative_prompt_embeds,
1207
+ pooled_prompt_embeds,
1208
+ negative_pooled_prompt_embeds,
1209
+ ip_adapter_image,
1210
+ ip_adapter_image_embeds,
1211
+ controlnet_conditioning_scale,
1212
+ control_guidance_start,
1213
+ control_guidance_end,
1214
+ callback_on_step_end_tensor_inputs,
1215
+ )
1216
+
1217
+ self._guidance_scale = guidance_scale
1218
+ self._cross_attention_kwargs = cross_attention_kwargs
1219
+
1220
+ # 2. Define call parameters
1221
+ if prompt is not None and isinstance(prompt, str):
1222
+ batch_size = 1
1223
+ elif prompt is not None and isinstance(prompt, list):
1224
+ batch_size = len(prompt)
1225
+ else:
1226
+ batch_size = prompt_embeds.shape[0]
1227
+
1228
+ device = self._execution_device
1229
+
1230
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1231
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1232
+
1233
+ # 3.1. Encode input prompt
1234
+ text_encoder_lora_scale = (
1235
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1236
+ )
1237
+ (
1238
+ prompt_embeds,
1239
+ negative_prompt_embeds,
1240
+ pooled_prompt_embeds,
1241
+ negative_pooled_prompt_embeds,
1242
+ ) = self.encode_prompt(
1243
+ prompt,
1244
+ device,
1245
+ num_images_per_prompt,
1246
+ self.do_classifier_free_guidance,
1247
+ negative_prompt,
1248
+ prompt_embeds=prompt_embeds,
1249
+ negative_prompt_embeds=negative_prompt_embeds,
1250
+ pooled_prompt_embeds=pooled_prompt_embeds,
1251
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1252
+ lora_scale=text_encoder_lora_scale,
1253
+ )
1254
+
1255
+ # 3.2 Encode ip_adapter_image
1256
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1257
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1258
+ ip_adapter_image,
1259
+ ip_adapter_image_embeds,
1260
+ device,
1261
+ batch_size * num_images_per_prompt,
1262
+ self.do_classifier_free_guidance,
1263
+ )
1264
+
1265
+ # 4. Prepare image and controlnet_conditioning_image
1266
+ image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
1267
+
1268
+ if isinstance(controlnet, ControlNetModel):
1269
+ control_image = self.prepare_control_image(
1270
+ image=control_image,
1271
+ width=width,
1272
+ height=height,
1273
+ batch_size=batch_size * num_images_per_prompt,
1274
+ num_images_per_prompt=num_images_per_prompt,
1275
+ device=device,
1276
+ dtype=controlnet.dtype,
1277
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1278
+ guess_mode=guess_mode,
1279
+ )
1280
+ height, width = control_image.shape[-2:]
1281
+ elif isinstance(controlnet, MultiControlNetModel):
1282
+ control_images = []
1283
+
1284
+ for control_image_ in control_image:
1285
+ control_image_ = self.prepare_control_image(
1286
+ image=control_image_,
1287
+ width=width,
1288
+ height=height,
1289
+ batch_size=batch_size * num_images_per_prompt,
1290
+ num_images_per_prompt=num_images_per_prompt,
1291
+ device=device,
1292
+ dtype=controlnet.dtype,
1293
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1294
+ guess_mode=guess_mode,
1295
+ )
1296
+
1297
+ control_images.append(control_image_)
1298
+
1299
+ control_image = control_images
1300
+ height, width = control_image[0].shape[-2:]
1301
+ else:
1302
+ raise ValueError(f"`controlnet` must be of type `ControlNetModel` or `MultiControlNetModel` but is {type(controlnet)}")
1303
+
1304
+ # 5. Prepare timesteps
1305
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1306
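+ # `get_timesteps` keeps only the portion of the schedule implied by `strength`, so a higher strength runs more denoising steps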
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
1307
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1308
+ self._num_timesteps = len(timesteps)
1309
+
1310
+ # 6. Prepare latent variables
1311
+
1312
+ num_channels_latents = self.unet.config.in_channels
1313
+ if latents is None:
1314
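+ # with strength >= 1.0 the reference `image` is effectively ignored (see the `strength` docs above), so fresh text-to-image latents are prepared; otherwise latents are built from `image` at `latent_timestep`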
+ if strength >= 1.0:
1315
+ latents = self.prepare_latents_t2i(
1316
+ batch_size * num_images_per_prompt,
1317
+ num_channels_latents,
1318
+ height,
1319
+ width,
1320
+ prompt_embeds.dtype,
1321
+ device,
1322
+ generator,
1323
+ latents,
1324
+ )
1325
+ else:
1326
+ latents = self.prepare_latents(
1327
+ image,
1328
+ latent_timestep,
1329
+ batch_size,
1330
+ num_images_per_prompt,
1331
+ prompt_embeds.dtype,
1332
+ device,
1333
+ generator,
1334
+ True,
1335
+ )
1336
+
1337
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1338
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1339
+
1340
+ # 7.1 Create tensor stating which controlnets to keep
1341
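+ # keeps[j] is 1.0 while the current step fraction lies inside [control_guidance_start[j], control_guidance_end[j]] and 0.0 otherwise, so the matching ControlNet's conditioning scale is zeroed outside its window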
+ controlnet_keep = []
1342
+ for i in range(len(timesteps)):
1343
+ keeps = [
1344
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1345
+ for s, e in zip(control_guidance_start, control_guidance_end)
1346
+ ]
1347
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1348
+
1349
+ # 7.2 Prepare added time ids & embeddings
1350
+ if isinstance(control_image, list):
1351
+ original_size = original_size or control_image[0].shape[-2:]
1352
+ else:
1353
+ original_size = original_size or control_image.shape[-2:]
1354
+ target_size = target_size or (height, width)
1355
+
1356
+ # 7.3 Default the negative micro-conditioning to the positive values when not provided
1357
+ if negative_original_size is None:
1358
+ negative_original_size = original_size
1359
+ if negative_target_size is None:
1360
+ negative_target_size = target_size
1361
+
1362
+ add_text_embeds = pooled_prompt_embeds
1363
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1364
+
1365
+ add_time_ids, add_neg_time_ids = self._get_add_time_ids(
1366
+ original_size,
1367
+ crops_coords_top_left,
1368
+ target_size,
1369
+ aesthetic_score,
1370
+ negative_aesthetic_score,
1371
+ negative_original_size,
1372
+ negative_crops_coords_top_left,
1373
+ negative_target_size,
1374
+ dtype=prompt_embeds.dtype,
1375
+ text_encoder_projection_dim=text_encoder_projection_dim,
1376
+ )
1377
+
1378
+ if self.do_classifier_free_guidance:
1379
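+ # stack the negative (unconditional) and positive conditioning so both classifier-free guidance branches run in a single UNet forward pass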
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1380
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1381
+ add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
1382
+ add_neg_time_ids = torch.cat([add_neg_time_ids, add_neg_time_ids], dim=0)
1383
+
1384
+ prompt_embeds = prompt_embeds.to(device)
1385
+ add_text_embeds = add_text_embeds.to(device)
1386
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1387
+ add_neg_time_ids = add_neg_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1388
+
1389
+ # patch diffusers controlnet instance forward, undo
1390
+ # after denoising loop
1391
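+ # the patched forward projects the ChatGLM prompt embeddings with the ControlNet's own `encoder_hid_proj` (when `encoder_hid_dim_type == "text_proj"`) before delegating to the original forward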
+
1392
+ patched_cn_models = []
1393
+ if isinstance(self.controlnet, MultiControlNetModel):
1394
+ cn_models_to_patch = self.controlnet.nets
1395
+ else:
1396
+ cn_models_to_patch = [self.controlnet]
1397
+
1398
+ for cn_model in cn_models_to_patch:
1399
+ cn_og_forward = cn_model.forward
1400
+
1401
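+ # bind the loop variables as default arguments so each patched forward keeps its own `cn_model` and `cn_og_forward` (avoids late binding when several ControlNets are patched)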
+ def _cn_patch_forward(*args, cn_model=cn_model, cn_og_forward=cn_og_forward, **kwargs):
1402
+ encoder_hidden_states = kwargs["encoder_hidden_states"]
1403
+ if cn_model.encoder_hid_proj is not None and cn_model.config.encoder_hid_dim_type == "text_proj":
1404
+ # Ensure encoder_hidden_states is on the same device as the projection layer
1405
+ encoder_hidden_states = encoder_hidden_states.to(cn_model.encoder_hid_proj.weight.device)
1406
+ encoder_hidden_states = cn_model.encoder_hid_proj(encoder_hidden_states)
1407
+ kwargs.pop("encoder_hidden_states")
1408
+ return cn_og_forward(*args, encoder_hidden_states=encoder_hidden_states, **kwargs)
1409
+
1410
+ cn_model.forward = _cn_patch_forward
1411
+ patched_cn_models.append((cn_model, cn_og_forward))
1412
+
1413
+ # 8. Denoising loop
1414
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1415
+
1416
+ try:
1417
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1418
+ for i, t in enumerate(timesteps):
1419
+ # expand the latents if we are doing classifier free guidance
1420
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1421
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1422
+
1423
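+ # SDXL-style micro-conditioning: pooled text embeddings plus the size/crop/aesthetic time ids prepared above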
+ added_cond_kwargs = {
1424
+ "text_embeds": add_text_embeds,
1425
+ "time_ids": add_time_ids,
1426
+ "neg_time_ids": add_neg_time_ids,
1427
+ }
1428
+
1429
+ # controlnet(s) inference
1430
+ if guess_mode and self.do_classifier_free_guidance:
1431
+ # Infer ControlNet only for the conditional batch.
1432
+ control_model_input = latents
1433
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1434
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1435
+ controlnet_added_cond_kwargs = {
1436
+ "text_embeds": add_text_embeds.chunk(2)[1],
1437
+ "time_ids": add_time_ids.chunk(2)[1],
1438
+ "neg_time_ids": add_neg_time_ids.chunk(2)[1],
1439
+ }
1440
+ else:
1441
+ control_model_input = latent_model_input
1442
+ controlnet_prompt_embeds = prompt_embeds
1443
+ controlnet_added_cond_kwargs = added_cond_kwargs
1444
+
1445
+ if isinstance(controlnet_keep[i], list):
1446
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1447
+ else:
1448
+ controlnet_cond_scale = controlnet_conditioning_scale
1449
+ if isinstance(controlnet_cond_scale, list):
1450
+ controlnet_cond_scale = controlnet_cond_scale[0]
1451
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1452
+
1453
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1454
+ control_model_input,
1455
+ t,
1456
+ encoder_hidden_states=controlnet_prompt_embeds,
1457
+ controlnet_cond=control_image,
1458
+ conditioning_scale=cond_scale,
1459
+ guess_mode=guess_mode,
1460
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1461
+ return_dict=False,
1462
+ )
1463
+
1464
+ if guess_mode and self.do_classifier_free_guidance:
1465
+ # Inferred ControlNet only for the conditional batch.
1466
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1467
+ # add 0 to the unconditional batch to keep it unchanged.
1468
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1469
+ mid_block_res_sample = torch.cat(
1470
+ [torch.zeros_like(mid_block_res_sample), mid_block_res_sample]
1471
+ )
1472
+
1473
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1474
+ added_cond_kwargs["image_embeds"] = image_embeds
1475
+
1476
+ # predict the noise residual
1477
+ noise_pred = self.unet(
1478
+ latent_model_input,
1479
+ t,
1480
+ encoder_hidden_states=prompt_embeds,
1481
+ cross_attention_kwargs=self.cross_attention_kwargs,
1482
+ down_block_additional_residuals=down_block_res_samples,
1483
+ mid_block_additional_residual=mid_block_res_sample,
1484
+ added_cond_kwargs=added_cond_kwargs,
1485
+ return_dict=False,
1486
+ )[0]
1487
+
1488
+ # perform guidance
1489
+ if self.do_classifier_free_guidance:
1490
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1491
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1492
+
1493
+ # compute the previous noisy sample x_t -> x_t-1
1494
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1495
+
1496
+ if callback_on_step_end is not None:
1497
+ callback_kwargs = {}
1498
+ for k in callback_on_step_end_tensor_inputs:
1499
+ callback_kwargs[k] = locals()[k]
1500
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1501
+
1502
+ latents = callback_outputs.pop("latents", latents)
1503
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1504
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1505
+ add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
1506
+ negative_pooled_prompt_embeds = callback_outputs.pop(
1507
+ "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
1508
+ )
1509
+ add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
1510
+ add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids)
1511
+ control_image = callback_outputs.pop("control_image", control_image)
1512
+
1513
+ # call the callback, if provided
1514
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1515
+ progress_bar.update()
1516
+ if callback is not None and i % callback_steps == 0:
1517
+ step_idx = i // getattr(self.scheduler, "order", 1)
1518
+ callback(step_idx, t, latents)
1519
+ finally:
1520
+ for cn_and_og in patched_cn_models:
1521
+ cn_and_og[0].forward = cn_and_og[1]
1522
+
1523
+ # If we do sequential model offloading, let's offload unet and controlnet
1524
+ # manually for max memory savings
1525
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1526
+ self.unet.to("cpu")
1527
+ self.controlnet.to("cpu")
1528
+ torch.cuda.empty_cache()
1529
+ torch.cuda.ipc_collect()
1530
+
1531
+ if not output_type == "latent":
1532
+ # make sure the VAE is in float32 mode, as it overflows in float16
1533
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1534
+
1535
+ if needs_upcasting:
1536
+ self.upcast_vae()
1537
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1538
+
1539
+ latents = latents / self.vae.config.scaling_factor
1540
+ image = self.vae.decode(latents, return_dict=False)[0]
1541
+
1542
+ # cast back to fp16 if needed
1543
+ if needs_upcasting:
1544
+ self.vae.to(dtype=torch.float16)
1545
+ else:
1546
+ image = latents
1547
+ return StableDiffusionXLPipelineOutput(images=image)
1548
+
1549
+ image = self.image_processor.postprocess(image, output_type=output_type)
1550
+
1551
+ # Offload all models
1552
+ self.maybe_free_model_hooks()
1553
+
1554
+ if not return_dict:
1555
+ return (image,)
1556
+
1557
+ return StableDiffusionXLPipelineOutput(images=image)
main/pipeline_controlnet_xl_kolors_inpaint.py ADDED
@@ -0,0 +1,1871 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import PIL.Image
20
+ import torch
21
+ import torch.nn.functional as F
22
+ from transformers import (
23
+ CLIPImageProcessor,
24
+ CLIPVisionModelWithProjection,
25
+ )
26
+
27
+ from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
28
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
29
+ from diffusers.loaders import (
30
+ FromSingleFileMixin,
31
+ IPAdapterMixin,
32
+ StableDiffusionXLLoraLoaderMixin,
33
+ TextualInversionLoaderMixin,
34
+ )
35
+ from diffusers.models import (
36
+ AutoencoderKL,
37
+ ControlNetModel,
38
+ ImageProjection,
39
+ MultiControlNetModel,
40
+ UNet2DConditionModel,
41
+ )
42
+ from diffusers.models.attention_processor import (
43
+ AttnProcessor2_0,
44
+ XFormersAttnProcessor,
45
+ )
46
+ from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
47
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
48
+ from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
49
+ from diffusers.schedulers import KarrasDiffusionSchedulers
50
+ from diffusers.utils import deprecate, is_invisible_watermark_available, logging, replace_example_docstring
51
+ from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
52
+
53
+
54
+ if is_invisible_watermark_available():
55
+ from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
56
+
57
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
58
+
59
+
60
+ EXAMPLE_DOC_STRING = """
61
+ Examples:
62
+ ```py
63
+ >>> from diffusers import KolorsControlNetInpaintPipeline, ControlNetModel
64
+ >>> from diffusers.utils import load_image
65
+ >>> from PIL import Image
66
+ >>> import numpy as np
67
+ >>> import torch
68
+ >>> import cv2
69
+
70
+ >>> init_image = load_image(
71
+ ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"
72
+ ... )
73
+ >>> init_image = init_image.resize((1024, 1024))
74
+
75
+ >>> generator = torch.Generator(device="cpu").manual_seed(1)
76
+
77
+ >>> mask_image = load_image(
78
+ ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"
79
+ ... )
80
+ >>> mask_image = mask_image.resize((1024, 1024))
81
+
82
+
83
+ >>> def make_canny_condition(image):
84
+ ... image = np.array(image)
85
+ ... image = cv2.Canny(image, 100, 200)
86
+ ... image = image[:, :, None]
87
+ ... image = np.concatenate([image, image, image], axis=2)
88
+ ... image = Image.fromarray(image)
89
+ ... return image
90
+
91
+
92
+ >>> control_image = make_canny_condition(init_image)
93
+
94
+ >>> controlnet = ControlNetModel.from_pretrained(
95
+ ... "Kwai-Kolors/Kolors-ControlNet-Canny",
96
+ ... use_safetensors=True,
97
+ ... torch_dtype=torch.float16
98
+ ... )
99
+ >>> pipe = KolorsControlNetInpaintPipeline.from_pretrained(
100
+ ... "Kwai-Kolors/Kolors-diffusers",
101
+ ... controlnet=controlnet,
102
+ ... variant="fp16",
103
+ ... use_safetensors=True,
104
+ ... torch_dtype=torch.float16
105
+ ... )
106
+
107
+ >>> pipe.enable_model_cpu_offload()
108
+
109
+ >>> # generate image
110
+ >>> image = pipe(
111
+ ... "a handsome man with ray-ban sunglasses",
112
+ ... num_inference_steps=20,
113
+ ... generator=generator,
114
+ ... eta=1.0,
115
+ ... image=init_image,
116
+ ... mask_image=mask_image,
117
+ ... control_image=control_image,
118
+ ... ).images[0]
119
+ ```
120
+ """
121
+
122
+
123
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
124
+ def retrieve_latents(
125
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
126
+ ):
127
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
128
+ return encoder_output.latent_dist.sample(generator)
129
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
130
+ return encoder_output.latent_dist.mode()
131
+ elif hasattr(encoder_output, "latents"):
132
+ return encoder_output.latents
133
+ else:
134
+ raise AttributeError("Could not access latents of provided encoder_output")
135
+
136
+
137
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
138
+ def retrieve_timesteps(
139
+ scheduler,
140
+ num_inference_steps: Optional[int] = None,
141
+ device: Optional[Union[str, torch.device]] = None,
142
+ timesteps: Optional[List[int]] = None,
143
+ sigmas: Optional[List[float]] = None,
144
+ **kwargs,
145
+ ):
146
+ """
147
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
148
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
149
+
150
+ Args:
151
+ scheduler (`SchedulerMixin`):
152
+ The scheduler to get timesteps from.
153
+ num_inference_steps (`int`):
154
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
155
+ must be `None`.
156
+ device (`str` or `torch.device`, *optional*):
157
+ The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
158
+ timesteps (`List[int]`, *optional*):
159
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
160
+ `num_inference_steps` and `sigmas` must be `None`.
161
+ sigmas (`List[float]`, *optional*):
162
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
163
+ `num_inference_steps` and `timesteps` must be `None`.
164
+
165
+ Returns:
166
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
167
+ second element is the number of inference steps.
168
+ """
169
+ if timesteps is not None and sigmas is not None:
170
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
171
+ if timesteps is not None:
172
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
173
+ if not accepts_timesteps:
174
+ raise ValueError(
175
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
176
+ f" timestep schedules. Please check whether you are using the correct scheduler."
177
+ )
178
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
179
+ timesteps = scheduler.timesteps
180
+ num_inference_steps = len(timesteps)
181
+ elif sigmas is not None:
182
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
183
+ if not accept_sigmas:
184
+ raise ValueError(
185
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
186
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
187
+ )
188
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
189
+ timesteps = scheduler.timesteps
190
+ num_inference_steps = len(timesteps)
191
+ else:
192
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
193
+ timesteps = scheduler.timesteps
194
+ return timesteps, num_inference_steps
195
+
196
+
197
+ class KolorsControlNetInpaintPipeline(
198
+ DiffusionPipeline,
199
+ StableDiffusionMixin,
200
+ StableDiffusionXLLoraLoaderMixin,
201
+ FromSingleFileMixin,
202
+ IPAdapterMixin,
203
+ ):
204
+ r"""
205
+ Pipeline for inpainting using Kolors with ControlNet guidance.
206
+
207
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
208
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
209
+
210
+ The pipeline also inherits the following loading methods:
211
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.safetensors` files
212
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
213
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
214
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
215
+
216
+ Args:
217
+ vae ([`AutoencoderKL`]):
218
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
219
+ text_encoder ([`ChatGLMModel`]):
220
+ Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b).
221
+ tokenizer (`ChatGLMTokenizer`):
222
+ Tokenizer of class
223
+ [ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py).
224
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
225
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
226
+ Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
227
+ as a list, the outputs from each ControlNet are added together to create one combined additional
228
+ conditioning.
229
+ scheduler ([`SchedulerMixin`]):
230
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
231
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
232
+ requires_aesthetics_score (`bool`, *optional*, defaults to `False`):
233
+ Whether the `unet` requires an `aesthetic_score` condition to be passed during inference. Also see the
234
+ config of `stabilityai/stable-diffusion-xl-refiner-1-0`.
235
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `True`):
236
+ Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
237
+ `Kwai-Kolors/Kolors-diffusers`.
238
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
239
+ A `CLIPImageProcessor` to preprocess image inputs for the `image_encoder` when using IP-Adapters.
240
+ """
241
+
242
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
243
+
244
+ _optional_components = [
245
+ "tokenizer",
246
+ "text_encoder",
247
+ "feature_extractor",
248
+ "image_encoder",
249
+ ]
250
+ _callback_tensor_inputs = [
251
+ "latents",
252
+ "prompt_embeds",
253
+ "negative_prompt_embeds",
254
+ "add_text_embeds",
255
+ "add_time_ids",
256
+ "negative_pooled_prompt_embeds",
257
+ "add_neg_time_ids",
258
+ "mask",
259
+ "masked_image_latents",
260
+ "control_image",
261
+ ]
262
+
263
+ def __init__(
264
+ self,
265
+ vae: AutoencoderKL,
266
+ text_encoder: ChatGLMModel,
267
+ tokenizer: ChatGLMTokenizer,
268
+ unet: UNet2DConditionModel,
269
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
270
+ scheduler: KarrasDiffusionSchedulers,
271
+ requires_aesthetics_score: bool = False,
272
+ force_zeros_for_empty_prompt: bool = True,
273
+ feature_extractor: CLIPImageProcessor = None,
274
+ image_encoder: CLIPVisionModelWithProjection = None,
275
+ add_watermarker: Optional[bool] = None,
276
+ ):
277
+ super().__init__()
278
+
279
+ if isinstance(controlnet, (list, tuple)):
280
+ controlnet = MultiControlNetModel(controlnet)
281
+
282
+ self.register_modules(
283
+ vae=vae,
284
+ text_encoder=text_encoder,
285
+ tokenizer=tokenizer,
286
+ unet=unet,
287
+ controlnet=controlnet,
288
+ scheduler=scheduler,
289
+ feature_extractor=feature_extractor,
290
+ image_encoder=image_encoder,
291
+ )
292
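+ # spatial downsampling factor of the VAE: 2 ** (num_blocks - 1), i.e. 8 for a standard 4-block SDXL-style VAE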
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
293
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
294
+ self.control_image_processor = VaeImageProcessor(
295
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
296
+ )
297
+ self.mask_processor = VaeImageProcessor(
298
+ vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
299
+ )
300
+
301
+ if add_watermarker:
302
+ self.watermark = StableDiffusionXLWatermarker()
303
+ else:
304
+ self.watermark = None
305
+
306
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
307
+ self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
308
+
309
+ def encode_prompt(
310
+ self,
311
+ prompt,
312
+ device: Optional[torch.device] = None,
313
+ num_images_per_prompt: int = 1,
314
+ do_classifier_free_guidance: bool = True,
315
+ negative_prompt=None,
316
+ prompt_embeds: Optional[torch.FloatTensor] = None,
317
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
318
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
319
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
320
+ lora_scale: Optional[float] = None,
321
+ ):
322
+ r"""
323
+ Encodes the prompt into text encoder hidden states.
324
+
325
+ Args:
326
+ prompt (`str` or `List[str]`, *optional*):
327
+ prompt to be encoded
328
+ device: (`torch.device`):
329
+ torch device
330
+ num_images_per_prompt (`int`):
331
+ number of images that should be generated per prompt
332
+ do_classifier_free_guidance (`bool`):
333
+ whether to use classifier free guidance or not
334
+ negative_prompt (`str` or `List[str]`, *optional*):
335
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
336
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
337
+ less than `1`).
338
+ prompt_embeds (`torch.FloatTensor`, *optional*):
339
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
340
+ provided, text embeddings will be generated from `prompt` input argument.
341
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
342
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
343
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
344
+ argument.
345
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
346
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
347
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
348
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
349
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
350
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
351
+ input argument.
352
+ lora_scale (`float`, *optional*):
353
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
354
+ """
355
+ device = device or self._execution_device
356
+
357
+ # set lora scale so that monkey patched LoRA
358
+ # function of text encoder can correctly access it
359
+ if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
360
+ self._lora_scale = lora_scale
361
+
362
+ if prompt is not None and isinstance(prompt, str):
363
+ batch_size = 1
364
+ elif prompt is not None and isinstance(prompt, list):
365
+ batch_size = len(prompt)
366
+ else:
367
+ batch_size = prompt_embeds.shape[0]
368
+
369
+ # Define tokenizers and text encoders
370
+ tokenizers = [self.tokenizer]
371
+ text_encoders = [self.text_encoder]
372
+
373
+ if prompt_embeds is None:
374
+ # textual inversion: process multi-vector tokens if necessary
375
+ prompt_embeds_list = []
376
+ for tokenizer, text_encoder in zip(tokenizers, text_encoders):
377
+ if isinstance(self, TextualInversionLoaderMixin):
378
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
379
+
380
+ text_inputs = tokenizer(
381
+ prompt,
382
+ padding="max_length",
383
+ max_length=256,
384
+ truncation=True,
385
+ return_tensors="pt",
386
+ ).to(self._execution_device)
387
+ output = text_encoder(
388
+ input_ids=text_inputs["input_ids"],
389
+ attention_mask=text_inputs["attention_mask"],
390
+ position_ids=text_inputs["position_ids"],
391
+ output_hidden_states=True,
392
+ )
393
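+ # ChatGLM hidden states are (seq_len, batch, hidden); use the penultimate layer permuted to (batch, seq_len, hidden) as token embeddings and the final layer's last token as the pooled embedding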
+ prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
394
+ pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096]
395
+ bs_embed, seq_len, _ = prompt_embeds.shape
396
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
397
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
398
+ prompt_embeds_list.append(prompt_embeds)
399
+
400
+ # prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
401
+ prompt_embeds = prompt_embeds_list[0]
402
+
403
+ # get unconditional embeddings for classifier free guidance
404
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
405
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
406
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
407
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
408
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
409
+ # negative_prompt = negative_prompt or ""
410
+ uncond_tokens: List[str]
411
+ if negative_prompt is None:
412
+ uncond_tokens = [""] * batch_size
413
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
414
+ raise TypeError(
415
+ f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
416
+ f" {type(prompt)}."
417
+ )
418
+ elif isinstance(negative_prompt, str):
419
+ uncond_tokens = [negative_prompt]
420
+ elif batch_size != len(negative_prompt):
421
+ raise ValueError(
422
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
423
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
424
+ " the batch size of `prompt`."
425
+ )
426
+ else:
427
+ uncond_tokens = negative_prompt
428
+
429
+ negative_prompt_embeds_list = []
430
+ for tokenizer, text_encoder in zip(tokenizers, text_encoders):
431
+ # textual inversion: process multi-vector tokens if necessary
432
+ if isinstance(self, TextualInversionLoaderMixin):
433
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, tokenizer)
434
+
435
+ max_length = prompt_embeds.shape[1]
436
+ uncond_input = tokenizer(
437
+ uncond_tokens,
438
+ padding="max_length",
439
+ max_length=max_length,
440
+ truncation=True,
441
+ return_tensors="pt",
442
+ ).to(self._execution_device)
443
+ output = text_encoder(
444
+ input_ids=uncond_input["input_ids"],
445
+ attention_mask=uncond_input["attention_mask"],
446
+ position_ids=uncond_input["position_ids"],
447
+ output_hidden_states=True,
448
+ )
449
+ negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
450
+ negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096]
451
+
452
+ if do_classifier_free_guidance:
453
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
454
+ seq_len = negative_prompt_embeds.shape[1]
455
+
456
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)
457
+
458
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
459
+ negative_prompt_embeds = negative_prompt_embeds.view(
460
+ batch_size * num_images_per_prompt, seq_len, -1
461
+ )
462
+
463
+ # For classifier free guidance, we need to do two forward passes.
464
+ # Here we concatenate the unconditional and text embeddings into a single batch
465
+ # to avoid doing two forward passes
466
+
467
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
468
+
469
+ # negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
470
+ negative_prompt_embeds = negative_prompt_embeds_list[0]
471
+
472
+ bs_embed = pooled_prompt_embeds.shape[0]
473
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
474
+ bs_embed * num_images_per_prompt, -1
475
+ )
476
+ if do_classifier_free_guidance:
477
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
478
+ bs_embed * num_images_per_prompt, -1
479
+ )
480
+
481
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
482
+
483
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
484
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
485
+ dtype = next(self.image_encoder.parameters()).dtype
486
+
487
+ if not isinstance(image, torch.Tensor):
488
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
489
+
490
+ image = image.to(device=device, dtype=dtype)
491
+ if output_hidden_states:
492
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
493
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
494
+ uncond_image_enc_hidden_states = self.image_encoder(
495
+ torch.zeros_like(image), output_hidden_states=True
496
+ ).hidden_states[-2]
497
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
498
+ num_images_per_prompt, dim=0
499
+ )
500
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
501
+ else:
502
+ image_embeds = self.image_encoder(image).image_embeds
503
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
504
+ uncond_image_embeds = torch.zeros_like(image_embeds)
505
+
506
+ return image_embeds, uncond_image_embeds
507
+
508
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
509
+ def prepare_ip_adapter_image_embeds(
510
+ self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
511
+ ):
512
+ image_embeds = []
513
+ if do_classifier_free_guidance:
514
+ negative_image_embeds = []
515
+ if ip_adapter_image_embeds is None:
516
+ if not isinstance(ip_adapter_image, list):
517
+ ip_adapter_image = [ip_adapter_image]
518
+
519
+ if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
520
+ raise ValueError(
521
+ f"`ip_adapter_image` must have same length as the number of IP Adapters. Got "
522
+ f"{len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
523
+ )
524
+
525
+ for single_ip_adapter_image, image_proj_layer in zip(
526
+ ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
527
+ ):
528
+ output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
529
+ single_image_embeds, single_negative_image_embeds = self.encode_image(
530
+ single_ip_adapter_image, device, 1, output_hidden_state
531
+ )
532
+
533
+ image_embeds.append(single_image_embeds[None, :])
534
+ if do_classifier_free_guidance:
535
+ negative_image_embeds.append(single_negative_image_embeds[None, :])
536
+ else:
537
+ for single_image_embeds in ip_adapter_image_embeds:
538
+ if do_classifier_free_guidance:
539
+ single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
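+ # precomputed embeds are expected to already contain the negative and positive halves stacked along the batch dimension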
540
+ negative_image_embeds.append(single_negative_image_embeds)
541
+ image_embeds.append(single_image_embeds)
542
+
543
+ ip_adapter_image_embeds = []
544
+ for i, single_image_embeds in enumerate(image_embeds):
545
+ single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
546
+ if do_classifier_free_guidance:
547
+ single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
548
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)
549
+
550
+ single_image_embeds = single_image_embeds.to(device=device)
551
+ ip_adapter_image_embeds.append(single_image_embeds)
552
+
553
+ return ip_adapter_image_embeds
554
+
555
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
556
+ def prepare_extra_step_kwargs(self, generator, eta):
557
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
558
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
559
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
560
+ # and should be between [0, 1]
561
+
562
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
563
+ extra_step_kwargs = {}
564
+ if accepts_eta:
565
+ extra_step_kwargs["eta"] = eta
566
+
567
+ # check if the scheduler accepts generator
568
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
569
+ if accepts_generator:
570
+ extra_step_kwargs["generator"] = generator
571
+ return extra_step_kwargs
572
+
573
+ def check_inputs(
574
+ self,
575
+ prompt,
576
+ image,
577
+ strength,
578
+ num_inference_steps,
579
+ callback_steps,
580
+ negative_prompt=None,
581
+ prompt_embeds=None,
582
+ negative_prompt_embeds=None,
583
+ pooled_prompt_embeds=None,
584
+ negative_pooled_prompt_embeds=None,
585
+ ip_adapter_image=None,
586
+ ip_adapter_image_embeds=None,
587
+ controlnet_conditioning_scale=1.0,
588
+ control_guidance_start=0.0,
589
+ control_guidance_end=1.0,
590
+ callback_on_step_end_tensor_inputs=None,
591
+ ):
592
+ if strength < 0 or strength > 1:
593
+ raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
594
+ if num_inference_steps is None:
595
+ raise ValueError("`num_inference_steps` cannot be None.")
596
+ elif not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
597
+ raise ValueError(
598
+ f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
599
+ f" {type(num_inference_steps)}."
600
+ )
601
+
602
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
603
+ raise ValueError(
604
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
605
+ f" {type(callback_steps)}."
606
+ )
607
+
608
+ if callback_on_step_end_tensor_inputs is not None and not all(
609
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
610
+ ):
611
+ raise ValueError(
612
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
613
+ )
614
+
615
+ if prompt is not None and prompt_embeds is not None:
616
+ raise ValueError(
617
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
618
+ " only forward one of the two."
619
+ )
620
+ elif prompt is None and prompt_embeds is None:
621
+ raise ValueError(
622
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
623
+ )
624
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
625
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
626
+
627
+ if negative_prompt is not None and negative_prompt_embeds is not None:
628
+ raise ValueError(
629
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
630
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
631
+ )
632
+
633
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
634
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
635
+ raise ValueError(
636
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
637
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
638
+ f" {negative_prompt_embeds.shape}."
639
+ )
640
+
641
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
642
+ raise ValueError(
643
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
644
+ )
645
+
646
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
647
+ raise ValueError(
648
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
649
+ )
650
+
651
+ # `prompt` needs more sophisticated handling when there are multiple
652
+ # conditionings.
653
+ if isinstance(self.controlnet, MultiControlNetModel):
654
+ if isinstance(prompt, list):
655
+ logger.warning(
656
+ f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
657
+ " prompts. The conditionings will be fixed across the prompts."
658
+ )
659
+
660
+ # Check `image`
661
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
662
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
663
+ )
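+ # torch.compile wraps the model in an OptimizedModule; the original module is kept in `_orig_mod`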
664
+
665
+ if (
666
+ isinstance(self.controlnet, ControlNetModel)
667
+ or is_compiled
668
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
669
+ ):
670
+ self.check_image(image, prompt, prompt_embeds)
671
+ elif (
672
+ isinstance(self.controlnet, MultiControlNetModel)
673
+ or is_compiled
674
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
675
+ ):
676
+ if not isinstance(image, list):
677
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
678
+
679
+ # When `image` is a nested list:
680
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
681
+ elif any(isinstance(i, list) for i in image):
682
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
683
+ elif len(image) != len(self.controlnet.nets):
684
+ raise ValueError(
685
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
686
+ )
687
+
688
+ for image_ in image:
689
+ self.check_image(image_, prompt, prompt_embeds)
690
+ else:
691
+ assert False
692
+
693
+ # Check `controlnet_conditioning_scale`
694
+ if (
695
+ isinstance(self.controlnet, ControlNetModel)
696
+ or is_compiled
697
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
698
+ ):
699
+ if not isinstance(controlnet_conditioning_scale, float):
700
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
701
+ elif (
702
+ isinstance(self.controlnet, MultiControlNetModel)
703
+ or is_compiled
704
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
705
+ ):
706
+ if isinstance(controlnet_conditioning_scale, list):
707
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
708
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
709
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
710
+ self.controlnet.nets
711
+ ):
712
+ raise ValueError(
713
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
714
+ " the same length as the number of controlnets"
715
+ )
716
+ else:
717
+ assert False
718
+
719
+ if not isinstance(control_guidance_start, (tuple, list)):
720
+ control_guidance_start = [control_guidance_start]
721
+
722
+ if not isinstance(control_guidance_end, (tuple, list)):
723
+ control_guidance_end = [control_guidance_end]
724
+
725
+ if len(control_guidance_start) != len(control_guidance_end):
726
+ raise ValueError(
727
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
728
+ )
729
+
730
+ if isinstance(self.controlnet, MultiControlNetModel):
731
+ if len(control_guidance_start) != len(self.controlnet.nets):
732
+ raise ValueError(
733
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
734
+ )
735
+
736
+ for start, end in zip(control_guidance_start, control_guidance_end):
737
+ if start >= end:
738
+ raise ValueError(
739
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
740
+ )
741
+ if start < 0.0:
742
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
743
+ if end > 1.0:
744
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
745
+
746
+ if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
747
+ raise ValueError(
748
+ "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
749
+ )
750
+
751
+ if ip_adapter_image_embeds is not None:
752
+ if not isinstance(ip_adapter_image_embeds, list):
753
+ raise ValueError(
754
+ f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
755
+ )
756
+ elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
757
+ raise ValueError(
758
+ f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
759
+ )
760
+
761
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.check_image
762
+ def check_image(self, image, prompt, prompt_embeds):
763
+ image_is_pil = isinstance(image, PIL.Image.Image)
764
+ image_is_tensor = isinstance(image, torch.Tensor)
765
+ image_is_np = isinstance(image, np.ndarray)
766
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
767
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
768
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
769
+
770
+ if (
771
+ not image_is_pil
772
+ and not image_is_tensor
773
+ and not image_is_np
774
+ and not image_is_pil_list
775
+ and not image_is_tensor_list
776
+ and not image_is_np_list
777
+ ):
778
+ raise TypeError(
779
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
780
+ )
781
+
782
+ if image_is_pil:
783
+ image_batch_size = 1
784
+ else:
785
+ image_batch_size = len(image)
786
+
787
+ if prompt is not None and isinstance(prompt, str):
788
+ prompt_batch_size = 1
789
+ elif prompt is not None and isinstance(prompt, list):
790
+ prompt_batch_size = len(prompt)
791
+ elif prompt_embeds is not None:
792
+ prompt_batch_size = prompt_embeds.shape[0]
793
+
794
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
795
+ raise ValueError(
796
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
797
+ )
798
+
799
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.StableDiffusionXLControlNetPipeline.prepare_image
800
+ def prepare_control_image(
801
+ self,
802
+ image,
803
+ width,
804
+ height,
805
+ batch_size,
806
+ num_images_per_prompt,
807
+ device,
808
+ dtype,
809
+ do_classifier_free_guidance=False,
810
+ guess_mode=False,
811
+ ):
812
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
813
+ image_batch_size = image.shape[0]
814
+
815
+ if image_batch_size == 1:
816
+ repeat_by = batch_size
817
+ else:
818
+ # image batch size is the same as prompt batch size
819
+ repeat_by = num_images_per_prompt
820
+
821
+ image = image.repeat_interleave(repeat_by, dim=0)
822
+
823
+ image = image.to(device=device, dtype=dtype)
824
+
825
+ if do_classifier_free_guidance and not guess_mode:
826
+ image = torch.cat([image] * 2)
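+ # duplicate the control image so both the unconditional and conditional halves of the CFG batch receive it; in guess mode only the conditional branch is conditioned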
827
+
828
+ return image
829
+
830
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps
831
+ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
832
+ # get the original timestep using init_timestep
833
+ if denoising_start is None:
834
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
835
+ t_start = max(num_inference_steps - init_timestep, 0)
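+ # e.g. num_inference_steps=50 and strength=0.6 give init_timestep=30, so denoising starts at step index t_start=20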
836
+ else:
837
+ t_start = 0
838
+
839
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
840
+
841
+ # Strength is irrelevant if we directly request a timestep to start at;
842
+ # that is, strength is determined by the denoising_start instead.
843
+ if denoising_start is not None:
844
+ discrete_timestep_cutoff = int(
845
+ round(
846
+ self.scheduler.config.num_train_timesteps
847
+ - (denoising_start * self.scheduler.config.num_train_timesteps)
848
+ )
849
+ )
850
+
851
+ num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
852
+ if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
853
+ # if the scheduler is a 2nd order scheduler we might have to do +1
854
+ # because `num_inference_steps` might be even given that every timestep
855
+ # (except the highest one) is duplicated. If `num_inference_steps` is even it would
856
+ # mean that we cut the timesteps in the middle of the denoising step
857
+ # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
858
+ # we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
859
+ num_inference_steps = num_inference_steps + 1
860
+
861
+ # because t_n+1 >= t_n, we slice the timesteps starting from the end
862
+ timesteps = timesteps[-num_inference_steps:]
863
+ return timesteps, num_inference_steps
864
+
865
+ return timesteps, num_inference_steps - t_start
866
+
867
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents
868
+ def prepare_latents(
869
+ self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
870
+ ):
871
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
872
+ raise ValueError(
873
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
874
+ )
875
+
876
+ # Offload text encoder if `enable_model_cpu_offload` was enabled
877
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
878
+ torch.cuda.empty_cache()
879
+ torch.cuda.ipc_collect()
880
+
881
+ image = image.to(device=device, dtype=dtype)
882
+
883
+ batch_size = batch_size * num_images_per_prompt
884
+
885
+ if image.shape[1] == 4:
886
+ init_latents = image
887
+
888
+ else:
889
+ # make sure the VAE is in float32 mode, as it overflows in float16
890
+ if self.vae.config.force_upcast:
891
+ image = image.float()
892
+ self.vae.to(dtype=torch.float32)
893
+
894
+ if isinstance(generator, list) and len(generator) != batch_size:
895
+ raise ValueError(
896
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
897
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
898
+ )
899
+
900
+ elif isinstance(generator, list):
901
+ init_latents = [
902
+ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
903
+ for i in range(batch_size)
904
+ ]
905
+ init_latents = torch.cat(init_latents, dim=0)
906
+ else:
907
+ init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
908
+
909
+ if self.vae.config.force_upcast:
910
+ self.vae.to(dtype)
911
+
912
+ init_latents = init_latents.to(dtype)
913
+
914
+ init_latents = self.vae.config.scaling_factor * init_latents
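+ # scale by the VAE scaling factor so the latents match the distribution the UNet was trained on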
915
+
916
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
917
+ # expand init_latents for batch_size
918
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
919
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
920
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
921
+ raise ValueError(
922
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
923
+ )
924
+ else:
925
+ init_latents = torch.cat([init_latents], dim=0)
926
+
927
+ if add_noise:
928
+ shape = init_latents.shape
929
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
930
+ # get latents
931
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
932
+
933
+ latents = init_latents
934
+
935
+ return latents
936
+
937
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
938
+ def prepare_latents_t2i(
939
+ self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None
940
+ ):
941
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
942
+ if isinstance(generator, list) and len(generator) != batch_size:
943
+ raise ValueError(
944
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
945
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
946
+ )
947
+
948
+ if latents is None:
949
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
950
+ else:
951
+ latents = latents.to(device)
952
+
953
+ # scale the initial noise by the standard deviation required by the scheduler
954
+ latents = latents * self.scheduler.init_noise_sigma
955
+ return latents
956
+
957
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
958
+ def _get_add_time_ids(
959
+ self,
960
+ original_size,
961
+ crops_coords_top_left,
962
+ target_size,
963
+ aesthetic_score,
964
+ negative_aesthetic_score,
965
+ negative_original_size,
966
+ negative_crops_coords_top_left,
967
+ negative_target_size,
968
+ dtype,
969
+ text_encoder_projection_dim=None,
970
+ ):
971
+ if self.config.requires_aesthetics_score:
972
+ add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
973
+ add_neg_time_ids = list(
974
+ negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
975
+ )
976
+ else:
977
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
978
+ add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)
979
+
980
+ passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + 4096
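+ # the hard-coded 4096 corresponds to the pooled ChatGLM text-embedding size used by Kolors (instead of `text_encoder_projection_dim`)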
981
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
982
+
983
+ if (
984
+ expected_add_embed_dim > passed_add_embed_dim
985
+ and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
986
+ ):
987
+ raise ValueError(
988
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
989
+ )
990
+ elif (
991
+ expected_add_embed_dim < passed_add_embed_dim
992
+ and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
993
+ ):
994
+ raise ValueError(
995
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
996
+ )
997
+ elif expected_add_embed_dim != passed_add_embed_dim:
998
+ raise ValueError(
999
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder.config.projection_dim`."
1000
+ )
1001
+
1002
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
1003
+ add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
1004
+
1005
+ return add_time_ids, add_neg_time_ids
1006
+
1007
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
1008
+ def upcast_vae(self):
1009
+ dtype = self.vae.dtype
1010
+ self.vae.to(dtype=torch.float32)
1011
+ use_torch_2_0_or_xformers = isinstance(
1012
+ self.vae.decoder.mid_block.attentions[0].processor,
1013
+ (
1014
+ AttnProcessor2_0,
1015
+ XFormersAttnProcessor,
1016
+ ),
1017
+ )
1018
+ # if xformers or torch_2_0 is used attention block does not need
1019
+ # to be in float32 which can save lots of memory
1020
+ if use_torch_2_0_or_xformers:
1021
+ self.vae.post_quant_conv.to(dtype)
1022
+ self.vae.decoder.conv_in.to(dtype)
1023
+ self.vae.decoder.mid_block.to(dtype)
1024
+
1025
+ @property
1026
+ def denoising_end(self):
1027
+ return self._denoising_end
1028
+
1029
+ @property
1030
+ def denoising_start(self):
1031
+ return self._denoising_start
1032
+
1033
+ @property
1034
+ def guidance_scale(self):
1035
+ return self._guidance_scale
1036
+
1037
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
1038
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1039
+ # corresponds to doing no classifier free guidance.
1040
+ @property
1041
+ def do_classifier_free_guidance(self):
1042
+ return self._guidance_scale > 1
1043
+
1044
+ @property
1045
+ def cross_attention_kwargs(self):
1046
+ return self._cross_attention_kwargs
1047
+
1048
+ @property
1049
+ def num_timesteps(self):
1050
+ return self._num_timesteps
1051
+
1052
+ def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
1053
+ dtype = image.dtype
1054
+ if self.vae.config.force_upcast:
1055
+ image = image.float()
1056
+ self.vae.to(dtype=torch.float32)
1057
+
1058
+ if isinstance(generator, list):
1059
+ image_latents = [
1060
+ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
1061
+ for i in range(image.shape[0])
1062
+ ]
1063
+ image_latents = torch.cat(image_latents, dim=0)
1064
+ else:
1065
+ image_latents = retrieve_latents(self.vae.encode(image), generator=generator)
1066
+
1067
+ if self.vae.config.force_upcast:
1068
+ self.vae.to(dtype)
1069
+
1070
+ image_latents = image_latents.to(dtype)
1071
+ image_latents = self.vae.config.scaling_factor * image_latents
1072
+
1073
+ return image_latents
1074
+
1075
+ def prepare_mask_latents(
1076
+ self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
1077
+ ):
1078
+ # resize the mask to latents shape as we concatenate the mask to the latents
1079
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
1080
+ # and half precision
1081
+ mask = torch.nn.functional.interpolate(
1082
+ mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
1083
+ )
1084
+ mask = mask.to(device=device, dtype=dtype)
1085
+
1086
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
1087
+ if mask.shape[0] < batch_size:
1088
+ if not batch_size % mask.shape[0] == 0:
1089
+ raise ValueError(
1090
+ "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
1091
+ f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
1092
+ " of masks that you pass is divisible by the total requested batch size."
1093
+ )
1094
+ mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
1095
+
1096
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
1097
+
1098
+ if masked_image is not None and masked_image.shape[1] == 4:
1099
+ masked_image_latents = masked_image
1100
+ else:
1101
+ masked_image_latents = None
1102
+
1103
+ if masked_image is not None:
1104
+ if masked_image_latents is None:
1105
+ masked_image = masked_image.to(device=device, dtype=dtype)
1106
+ masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
1107
+
1108
+ if masked_image_latents.shape[0] < batch_size:
1109
+ if not batch_size % masked_image_latents.shape[0] == 0:
1110
+ raise ValueError(
1111
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
1112
+ f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
1113
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
1114
+ )
1115
+ masked_image_latents = masked_image_latents.repeat(
1116
+ batch_size // masked_image_latents.shape[0], 1, 1, 1
1117
+ )
1118
+
1119
+ masked_image_latents = (
1120
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
1121
+ )
1122
+
1123
+ # aligning device to prevent device errors when concatenating it with the latent model input
1124
+ masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
1125
+
1126
+ return mask, masked_image_latents
1127
+
1128
+ @torch.no_grad()
1129
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
1130
+ def __call__(
1131
+ self,
1132
+ prompt: Union[str, List[str]] = None,
1133
+ image: PipelineImageInput = None,
1134
+ mask_image: PipelineImageInput = None,
1135
+ control_image: PipelineImageInput = None,
1136
+ masked_image_latents: torch.Tensor = None,
1137
+ height: Optional[int] = None,
1138
+ width: Optional[int] = None,
1139
+ padding_mask_crop: Optional[int] = None,
1140
+ strength: float = 0.9999,
1141
+ num_inference_steps: int = 50,
1142
+ timesteps: List[int] = None,
1143
+ sigmas: List[float] = None,
1144
+ denoising_start: Optional[float] = None,
1145
+ denoising_end: Optional[float] = None,
1146
+ guidance_scale: float = 7.5,
1147
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1148
+ num_images_per_prompt: Optional[int] = 1,
1149
+ eta: float = 0.0,
1150
+ guess_mode: bool = False,
1151
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1152
+ latents: Optional[torch.Tensor] = None,
1153
+ prompt_embeds: Optional[torch.Tensor] = None,
1154
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
1155
+ pooled_prompt_embeds: Optional[torch.Tensor] = None,
1156
+ negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
1157
+ ip_adapter_image: Optional[PipelineImageInput] = None,
1158
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
1159
+ output_type: Optional[str] = "pil",
1160
+ return_dict: bool = True,
1161
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1162
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
1163
+ control_guidance_start: Union[float, List[float]] = 0.0,
1164
+ control_guidance_end: Union[float, List[float]] = 1.0,
1165
+ guidance_rescale: float = 0.0,
1166
+ original_size: Tuple[int, int] = None,
1167
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
1168
+ target_size: Tuple[int, int] = None,
1169
+ negative_original_size: Optional[Tuple[int, int]] = None,
1170
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
1171
+ negative_target_size: Optional[Tuple[int, int]] = None,
1172
+ aesthetic_score: float = 6.0,
1173
+ negative_aesthetic_score: float = 2.5,
1174
+ callback_on_step_end: Optional[
1175
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
1176
+ ] = None,
1177
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
1178
+ **kwargs,
1179
+ ):
1180
+ r"""
1181
+ Function invoked when calling the pipeline for generation.
1182
+
1183
+ Args:
1184
+ prompt (`str` or `List[str]`, *optional*):
1185
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
1186
+ instead.
1190
+ image (`PIL.Image.Image`):
1191
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
1192
+ be masked out with `mask_image` and repainted according to `prompt`.
1193
+ mask_image (`PIL.Image.Image`):
1194
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1195
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
1196
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
1197
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
1198
+ control_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
1199
+ `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
1200
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance for the UNet. If
1201
+ the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also
1202
+ be accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height
1203
+ and/or width are passed, `image` is resized according to them. If multiple ControlNets are specified in
1204
+ init, images must be passed as a list such that each element of the list can be correctly batched for
1205
+ input to a single controlnet.
1206
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1207
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
1208
+ Anything below 512 pixels won't work well for
1209
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
1210
+ and checkpoints that are not specifically fine-tuned on low resolutions.
1211
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1212
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
1213
+ Anything below 512 pixels won't work well for
1214
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
1215
+ and checkpoints that are not specifically fine-tuned on low resolutions.
1216
+ padding_mask_crop (`int`, *optional*, defaults to `None`):
1217
+ The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to
1218
+ image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region
1219
+ with the same aspect ratio as the image that contains all of the masked area, and then expand that area based
1220
+ on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before
1221
+ resizing to the original image size for inpainting. This is useful when the masked area is small while
1222
+ the image is large and contains information irrelevant for inpainting, such as background.
1223
+ strength (`float`, *optional*, defaults to 0.9999):
1224
+ Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be
1225
+ between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the
1226
+ `strength`. The number of denoising steps depends on the amount of noise initially added. When
1227
+ `strength` is 1, added noise will be maximum and the denoising process will run for the full number of
1228
+ iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked
1229
+ portion of the reference `image`. Note that in the case of `denoising_start` being declared as an
1230
+ integer, the value of `strength` will be ignored.
1231
+ num_inference_steps (`int`, *optional*, defaults to 50):
1232
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1233
+ expense of slower inference.
1234
+ timesteps (`List[int]`, *optional*):
1235
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
1236
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
1237
+ passed will be used. Must be in descending order.
1238
+ sigmas (`List[float]`, *optional*):
1239
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
1240
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
1241
+ will be used.
1242
+ denoising_start (`float`, *optional*):
1243
+ When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
1244
+ bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
1245
+ it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
1246
+ strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
1247
+ is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
1248
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
1249
+ denoising_end (`float`, *optional*):
1250
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
1251
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
1252
+ still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
1253
+ denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
1254
+ final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
1255
+ forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
1256
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
1257
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1258
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1259
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1260
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1261
+ 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
1262
+ usually at the expense of lower image quality.
1263
+ negative_prompt (`str` or `List[str]`, *optional*):
1264
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
1265
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1266
+ less than `1`).
1270
+ prompt_embeds (`torch.Tensor`, *optional*):
1271
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1272
+ provided, text embeddings will be generated from `prompt` input argument.
1273
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
1274
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1275
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1276
+ argument.
1277
+ pooled_prompt_embeds (`torch.Tensor`, *optional*):
1278
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1279
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
1280
+ negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
1281
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1282
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1283
+ input argument.
1284
+ ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
1285
+ ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
1286
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
1287
+ IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
1288
+ contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
1289
+ provided, embeddings are computed from the `ip_adapter_image` input argument.
1290
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1291
+ The number of images to generate per prompt.
1292
+ eta (`float`, *optional*, defaults to 0.0):
1293
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1294
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1295
+ generator (`torch.Generator`, *optional*):
1296
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1297
+ to make generation deterministic.
1298
+ latents (`torch.Tensor`, *optional*):
1299
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1300
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1301
+ tensor will be generated by sampling using the supplied random `generator`.
1302
+ output_type (`str`, *optional*, defaults to `"pil"`):
1303
+ The output format of the generate image. Choose between
1304
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1305
+ return_dict (`bool`, *optional*, defaults to `True`):
1306
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1307
+ plain tuple.
1308
+ cross_attention_kwargs (`dict`, *optional*):
1309
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1310
+ `self.processor` in
1311
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1312
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1313
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
1314
+ to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
1315
+ corresponding scale as a list.
1316
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
1317
+ The percentage of total steps at which the controlnet starts applying.
1318
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
1319
+ The percentage of total steps at which the controlnet stops applying.
1320
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1321
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1322
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
1323
+ explained in section 2.2 of
1324
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1325
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1326
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1327
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1328
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1329
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1330
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1331
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
1332
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
1333
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1334
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1335
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
1336
+ micro-conditioning as explained in section 2.2 of
1337
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1338
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1339
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1340
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
1341
+ micro-conditioning as explained in section 2.2 of
1342
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1343
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1344
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1345
+ To negatively condition the generation process based on a target image resolution. It should be the same
1346
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
1347
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1348
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1349
+ aesthetic_score (`float`, *optional*, defaults to 6.0):
1350
+ Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
1351
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1352
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1353
+ negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
1354
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1355
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
1356
+ simulate an aesthetic score of the generated image by influencing the negative text condition.
1357
+ callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
1358
+ A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
1359
+ each denoising step during inference with the following arguments: `callback_on_step_end(self:
1360
+ DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
1361
+ list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
1362
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
1363
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1364
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1365
+ `._callback_tensor_inputs` attribute of your pipeline class.
1366
+
1367
+ Examples:
1368
+
1369
+ Returns:
1370
+ [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
1371
+ [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
1372
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
1373
+ """
1374
+
1375
+ callback = kwargs.pop("callback", None)
1376
+ callback_steps = kwargs.pop("callback_steps", None)
1377
+
1378
+ if callback is not None:
1379
+ deprecate(
1380
+ "callback",
1381
+ "1.0.0",
1382
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1383
+ )
1384
+ if callback_steps is not None:
1385
+ deprecate(
1386
+ "callback_steps",
1387
+ "1.0.0",
1388
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1389
+ )
1390
+
1391
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
1392
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
1393
+
1394
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1395
+
1396
+ # align format for control guidance
1397
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1398
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1399
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1400
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1401
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1402
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1403
+ control_guidance_start, control_guidance_end = (
1404
+ mult * [control_guidance_start],
1405
+ mult * [control_guidance_end],
1406
+ )
1407
+
1409
+ # 1. Check inputs. Raise error if not correct
1410
+ self.check_inputs(
1411
+ prompt,
1412
+ control_image,
1413
+ strength,
1414
+ num_inference_steps,
1415
+ callback_steps,
1416
+ negative_prompt,
1417
+ prompt_embeds,
1418
+ negative_prompt_embeds,
1419
+ pooled_prompt_embeds,
1420
+ negative_pooled_prompt_embeds,
1421
+ ip_adapter_image,
1422
+ ip_adapter_image_embeds,
1423
+ controlnet_conditioning_scale,
1424
+ control_guidance_start,
1425
+ control_guidance_end,
1426
+ callback_on_step_end_tensor_inputs,
1427
+ )
1428
+
1429
+ self._guidance_scale = guidance_scale
1430
+ self._cross_attention_kwargs = cross_attention_kwargs
1431
+ self._denoising_end = denoising_end
1432
+ self._denoising_start = denoising_start
1433
+
1434
+ # 2. Define call parameters
1435
+ if prompt is not None and isinstance(prompt, str):
1436
+ batch_size = 1
1437
+ elif prompt is not None and isinstance(prompt, list):
1438
+ batch_size = len(prompt)
1439
+ else:
1440
+ batch_size = prompt_embeds.shape[0]
1441
+
1442
+ device = self._execution_device
1443
+
1444
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1445
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1446
+
1447
+ # 3.1. Encode input prompt
1448
+ text_encoder_lora_scale = (
1449
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1450
+ )
1451
+ (
1452
+ prompt_embeds,
1453
+ negative_prompt_embeds,
1454
+ pooled_prompt_embeds,
1455
+ negative_pooled_prompt_embeds,
1456
+ ) = self.encode_prompt(
1457
+ prompt,
1458
+ device,
1459
+ num_images_per_prompt,
1460
+ self.do_classifier_free_guidance,
1461
+ negative_prompt,
1462
+ prompt_embeds=prompt_embeds,
1463
+ negative_prompt_embeds=negative_prompt_embeds,
1464
+ pooled_prompt_embeds=pooled_prompt_embeds,
1465
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1466
+ lora_scale=text_encoder_lora_scale,
1467
+ )
1468
+
1469
+ # 3.2 Encode ip_adapter_image
1470
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1471
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1472
+ ip_adapter_image,
1473
+ ip_adapter_image_embeds,
1474
+ device,
1475
+ batch_size * num_images_per_prompt,
1476
+ self.do_classifier_free_guidance,
1477
+ )
1478
+
1479
+ # 4. Prepare image, mask, and controlnet_conditioning_image
1480
+ if isinstance(controlnet, ControlNetModel):
1481
+ control_image = self.prepare_control_image(
1482
+ image=control_image,
1483
+ width=width,
1484
+ height=height,
1485
+ batch_size=batch_size * num_images_per_prompt,
1486
+ num_images_per_prompt=num_images_per_prompt,
1487
+ device=device,
1488
+ dtype=controlnet.dtype,
1489
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1490
+ guess_mode=guess_mode,
1491
+ )
1492
+ height, width = control_image.shape[-2:]
1493
+ elif isinstance(controlnet, MultiControlNetModel):
1494
+ control_images = []
1495
+
1496
+ for control_image_ in control_image:
1497
+ control_image_ = self.prepare_control_image(
1498
+ image=control_image_,
1499
+ width=width,
1500
+ height=height,
1501
+ batch_size=batch_size * num_images_per_prompt,
1502
+ num_images_per_prompt=num_images_per_prompt,
1503
+ device=device,
1504
+ dtype=controlnet.dtype,
1505
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1506
+ guess_mode=guess_mode,
1507
+ )
1508
+
1509
+ control_images.append(control_image_)
1510
+
1511
+ control_image = control_images
1512
+ height, width = control_image[0].shape[-2:]
1513
+ else:
1514
+ assert False
1515
+
1516
+ # 5. set timesteps
1517
+ def denoising_value_valid(dnv):
1518
+ return isinstance(dnv, float) and 0 < dnv < 1
1519
+
1520
+ timesteps, num_inference_steps = retrieve_timesteps(
1521
+ self.scheduler, num_inference_steps, device, timesteps, sigmas
1522
+ )
1523
+ timesteps, num_inference_steps = self.get_timesteps(
1524
+ num_inference_steps,
1525
+ strength,
1526
+ device,
1527
+ denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None,
1528
+ )
1529
+ # check that number of inference steps is not < 1 - as this doesn't make sense
1530
+ if num_inference_steps < 1:
1531
+ raise ValueError(
1532
+ f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
1533
+ f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
1534
+ )
1535
+ # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
1536
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1537
+ # create a boolean to check if the strength is set to 1; if so, initialise the latents with pure noise
1538
+ is_strength_max = strength == 1.0
1539
+
1540
+ # 6. Preprocess mask and image
1541
+ if padding_mask_crop is not None:
1542
+ crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
1543
+ resize_mode = "fill"
1544
+ else:
1545
+ crops_coords = None
1546
+ resize_mode = "default"
1547
+
1548
+ original_image = image
1549
+ init_image = self.image_processor.preprocess(
1550
+ image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
1551
+ )
1552
+ init_image = init_image.to(dtype=torch.float32)
1553
+
1554
+ mask = self.mask_processor.preprocess(
1555
+ mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
1556
+ )
1557
+
1558
+ if masked_image_latents is not None:
1559
+ masked_image = masked_image_latents
1560
+ elif init_image.shape[1] == 4:
1561
+ # if the image is already in latent space, we can't mask it
1562
+ masked_image = None
1563
+ else:
1564
+ masked_image = init_image * (mask < 0.5)
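+ # zero out the pixels to be repainted (mask >= 0.5) before VAE encoding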
1565
+
1566
+ # 7. Prepare latent variables
1567
+ num_channels_latents = self.vae.config.latent_channels
1568
+ num_channels_unet = self.unet.config.in_channels
1569
+ return_image_latents = num_channels_unet == 4
1570
+
1571
+ if latents is None:
1572
+ if strength >= 1.0:
1573
+ latents = self.prepare_latents_t2i(
1574
+ batch_size * num_images_per_prompt,
1575
+ num_channels_latents,
1576
+ height,
1577
+ width,
1578
+ prompt_embeds.dtype,
1579
+ device,
1580
+ generator,
1581
+ latents,
1582
+ )
1583
+ else:
1584
+ latents = self.prepare_latents(
1585
+ init_image,
1586
+ latent_timestep,
1587
+ batch_size,
1588
+ num_images_per_prompt,
1589
+ prompt_embeds.dtype,
1590
+ device,
1591
+ generator,
1592
+ True,
1593
+ )
1594
+
1595
+ # 8. Prepare mask latent variables
1596
+ mask, masked_image_latents = self.prepare_mask_latents(
1597
+ mask,
1598
+ masked_image,
1599
+ batch_size * num_images_per_prompt,
1600
+ height,
1601
+ width,
1602
+ prompt_embeds.dtype,
1603
+ device,
1604
+ generator,
1605
+ self.do_classifier_free_guidance,
1606
+ )
1607
+
1608
+ # 9. Check that sizes of mask, masked image and latents match
1609
+ if num_channels_unet == 9:
1610
+ # default case for runwayml/stable-diffusion-inpainting
1611
+ num_channels_mask = mask.shape[1]
1612
+ num_channels_masked_image = masked_image_latents.shape[1]
1613
+ if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
1614
+ raise ValueError(
1615
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
1616
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
1617
+ f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
1618
+ f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of"
1619
+ " `pipeline.unet` or your `mask_image` or `image` input."
1620
+ )
1621
+ elif num_channels_unet != 4:
1622
+ raise ValueError(
1623
+ f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
1624
+ )
1625
+
1626
+ # 9.1 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1627
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1628
+
1629
+ # 9.2 Create tensor stating which controlnets to keep
1630
+ controlnet_keep = []
1631
+ for i in range(len(timesteps)):
1632
+ keeps = [
1633
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1634
+ for s, e in zip(control_guidance_start, control_guidance_end)
1635
+ ]
1636
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
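+ # e.g. control_guidance_start=0.0 and control_guidance_end=0.5 keep the ControlNet residuals only for the first half of the denoising steps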
1637
+
1638
+ # 9.3 Prepare added time ids & embeddings
1639
+ if isinstance(control_image, list):
1640
+ original_size = original_size or control_image[0].shape[-2:]
1641
+ else:
1642
+ original_size = original_size or control_image.shape[-2:]
1643
+ target_size = target_size or (height, width)
1644
+
1645
+ if negative_original_size is None:
1646
+ negative_original_size = original_size
1647
+ if negative_target_size is None:
1648
+ negative_target_size = target_size
1649
+
1650
+ add_text_embeds = pooled_prompt_embeds
1651
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1652
+
1653
+ add_time_ids, add_neg_time_ids = self._get_add_time_ids(
1654
+ original_size,
1655
+ crops_coords_top_left,
1656
+ target_size,
1657
+ aesthetic_score,
1658
+ negative_aesthetic_score,
1659
+ negative_original_size,
1660
+ negative_crops_coords_top_left,
1661
+ negative_target_size,
1662
+ dtype=prompt_embeds.dtype,
1663
+ text_encoder_projection_dim=text_encoder_projection_dim,
1664
+ )
1665
+
1666
+ if self.do_classifier_free_guidance:
1667
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1668
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1669
+ add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)
1670
+ add_neg_time_ids = torch.cat([add_neg_time_ids, add_neg_time_ids], dim=0)
1671
+
1672
+ prompt_embeds = prompt_embeds.to(device)
1673
+ add_text_embeds = add_text_embeds.to(device)
1674
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1675
+ add_neg_time_ids = add_neg_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1676
+
1677
+ # 10. Denoising loop
1678
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1679
+
1680
+ if (
1681
+ self.denoising_end is not None
1682
+ and self.denoising_start is not None
1683
+ and denoising_value_valid(self.denoising_end)
1684
+ and denoising_value_valid(self.denoising_start)
1685
+ and self.denoising_start >= self.denoising_end
1686
+ ):
1687
+ raise ValueError(
1688
+ f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`:"
+ f" {self.denoising_end} when using type float."
1690
+ )
1691
+ elif self.denoising_end is not None and denoising_value_valid(self.denoising_end):
1692
+ discrete_timestep_cutoff = int(
1693
+ round(
1694
+ self.scheduler.config.num_train_timesteps
1695
+ - (self.denoising_end * self.scheduler.config.num_train_timesteps)
1696
+ )
1697
+ )
1698
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1699
+ timesteps = timesteps[:num_inference_steps]
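+ # Illustrative: with denoising_end=0.8 and 1000 training timesteps the cutoff is 200, so only
+ # timesteps >= 200 are kept and the partially denoised latents can be handed to a second,
+ # refiner-style pipeline via `denoising_start`.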
1700
+
1701
+ # 11.1 Optionally get Guidance Scale Embedding
1702
+ timestep_cond = None
1703
+ if self.unet.config.time_cond_proj_dim is not None:
1704
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1705
+ timestep_cond = self.get_guidance_scale_embedding(
1706
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1707
+ ).to(device=device, dtype=latents.dtype)
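+ # `timestep_cond` is only populated for UNets that expose `time_cond_proj_dim`
+ # (e.g. guidance-distilled, LCM-style checkpoints); for a standard Kolors UNet it stays None.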
1708
+
1709
+ # Patch the forward of the diffusers ControlNet instance(s) so the ChatGLM hidden states are
+ # projected through `encoder_hid_proj` before the original forward runs; the patch is undone
+ # after the denoising loop.
1711
+
1712
+ patched_cn_models = []
1713
+ if isinstance(self.controlnet, MultiControlNetModel):
1714
+ cn_models_to_patch = self.controlnet.nets
1715
+ else:
1716
+ cn_models_to_patch = [self.controlnet]
1717
+
1718
+ for cn_model in cn_models_to_patch:
1719
+ cn_og_forward = cn_model.forward
1720
+
1721
+ # Bind the current model and its original forward as keyword-only defaults so each patched
+ # forward keeps its own references (avoids late-binding problems when several ControlNets
+ # are patched in this loop).
+ def _cn_patch_forward(*args, _cn_model=cn_model, _cn_og_forward=cn_og_forward, **kwargs):
+ encoder_hidden_states = kwargs.pop("encoder_hidden_states")
+ if _cn_model.encoder_hid_proj is not None and _cn_model.config.encoder_hid_dim_type == "text_proj":
+ # Ensure encoder_hidden_states is on the same device as the projection layer
+ encoder_hidden_states = encoder_hidden_states.to(_cn_model.encoder_hid_proj.weight.device)
+ encoder_hidden_states = _cn_model.encoder_hid_proj(encoder_hidden_states)
+ return _cn_og_forward(*args, encoder_hidden_states=encoder_hidden_states, **kwargs)
1729
+
1730
+ cn_model.forward = _cn_patch_forward
1731
+ patched_cn_models.append((cn_model, cn_og_forward))
1732
+
1733
+ try:
1734
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1735
+ for i, t in enumerate(timesteps):
1736
+ # expand the latents if we are doing classifier free guidance
1737
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1738
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1739
+
1740
+ if num_channels_unet == 9:
1741
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
1742
+
1743
+ added_cond_kwargs = {
1744
+ "text_embeds": add_text_embeds,
1745
+ "time_ids": add_time_ids,
1746
+ "neg_time_ids": add_neg_time_ids,
1747
+ }
1748
+
1749
+ # controlnet(s) inference
1750
+ if guess_mode and self.do_classifier_free_guidance:
1751
+ # Infer ControlNet only for the conditional batch.
1752
+ control_model_input = latents
1753
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1754
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1755
+ controlnet_added_cond_kwargs = {
1756
+ "text_embeds": add_text_embeds.chunk(2)[1],
1757
+ "time_ids": add_time_ids.chunk(2)[1],
1758
+ "neg_time_ids": add_neg_time_ids.chunk(2)[1],
1759
+ }
1760
+ else:
1761
+ control_model_input = latent_model_input
1762
+ controlnet_prompt_embeds = prompt_embeds
1763
+ controlnet_added_cond_kwargs = added_cond_kwargs
1764
+
1765
+ if isinstance(controlnet_keep[i], list):
1766
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1767
+ else:
1768
+ controlnet_cond_scale = controlnet_conditioning_scale
1769
+ if isinstance(controlnet_cond_scale, list):
1770
+ controlnet_cond_scale = controlnet_cond_scale[0]
1771
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1772
+
1773
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1774
+ control_model_input,
1775
+ t,
1776
+ encoder_hidden_states=controlnet_prompt_embeds,
1777
+ controlnet_cond=control_image,
1778
+ conditioning_scale=cond_scale,
1779
+ guess_mode=guess_mode,
1780
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1781
+ return_dict=False,
1782
+ )
1783
+
1784
+ if guess_mode and self.do_classifier_free_guidance:
1785
+ # Inferred ControlNet only for the conditional batch.
1786
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1787
+ # add 0 to the unconditional batch to keep it unchanged.
1788
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
1789
+ mid_block_res_sample = torch.cat(
1790
+ [torch.zeros_like(mid_block_res_sample), mid_block_res_sample]
1791
+ )
1792
+
1793
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1794
+ added_cond_kwargs["image_embeds"] = image_embeds
1795
+
1796
+ # predict the noise residual
1797
+ noise_pred = self.unet(
1798
+ latent_model_input,
1799
+ t,
1800
+ encoder_hidden_states=prompt_embeds,
1801
+ cross_attention_kwargs=self.cross_attention_kwargs,
1802
+ down_block_additional_residuals=down_block_res_samples,
1803
+ mid_block_additional_residual=mid_block_res_sample,
1804
+ added_cond_kwargs=added_cond_kwargs,
1805
+ return_dict=False,
1806
+ )[0]
1807
+
1808
+ # perform guidance
1809
+ if self.do_classifier_free_guidance:
1810
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1811
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1812
+
1813
+ # compute the previous noisy sample x_t -> x_t-1
1814
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1815
+
1816
+ if callback_on_step_end is not None:
1817
+ callback_kwargs = {}
1818
+ for k in callback_on_step_end_tensor_inputs:
1819
+ callback_kwargs[k] = locals()[k]
1820
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1821
+
1822
+ latents = callback_outputs.pop("latents", latents)
1823
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1824
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1825
+ control_image = callback_outputs.pop("control_image", control_image)
1826
+
1827
+ # call the callback, if provided
1828
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1829
+ progress_bar.update()
1830
+ if callback is not None and i % callback_steps == 0:
1831
+ step_idx = i // getattr(self.scheduler, "order", 1)
1832
+ callback(step_idx, t, latents)
1833
+ finally:
1834
+ for cn_and_og in patched_cn_models:
1835
+ cn_and_og[0].forward = cn_and_og[1]
1836
+
1837
+ # If we do sequential model offloading, let's offload unet and controlnet
1838
+ # manually for max memory savings
1839
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1840
+ self.unet.to("cpu")
1841
+ self.controlnet.to("cpu")
1842
+ torch.cuda.empty_cache()
1843
+ torch.cuda.ipc_collect()
1844
+
1845
+ if not output_type == "latent":
1846
+ # make sure the VAE is in float32 mode, as it overflows in float16
1847
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1848
+
1849
+ if needs_upcasting:
1850
+ self.upcast_vae()
1851
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1852
+
1853
+ latents = latents / self.vae.config.scaling_factor
1854
+ image = self.vae.decode(latents, return_dict=False)[0]
1855
+
1856
+ # cast back to fp16 if needed
1857
+ if needs_upcasting:
1858
+ self.vae.to(dtype=torch.float16)
1859
+ else:
1860
+ image = latents
1861
+ return StableDiffusionXLPipelineOutput(images=image)
1862
+
1863
+ image = self.image_processor.postprocess(image, output_type=output_type)
1864
+
1865
+ # Offload all models
1866
+ self.maybe_free_model_hooks()
1867
+
1868
+ if not return_dict:
1869
+ return (image,)
1870
+
1871
+ return StableDiffusionXLPipelineOutput(images=image)
main/pipeline_kolors_inpainting.py ADDED
@@ -0,0 +1,1728 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import PIL.Image
20
+ import torch
21
+ from transformers import (
22
+ CLIPImageProcessor,
23
+ CLIPVisionModelWithProjection,
24
+ )
25
+
26
+ from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
27
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
28
+ from diffusers.loaders import (
29
+ FromSingleFileMixin,
30
+ IPAdapterMixin,
31
+ StableDiffusionXLLoraLoaderMixin,
32
+ TextualInversionLoaderMixin,
33
+ )
34
+ from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
35
+ from diffusers.models.attention_processor import (
36
+ AttnProcessor2_0,
37
+ LoRAAttnProcessor2_0,
38
+ LoRAXFormersAttnProcessor,
39
+ XFormersAttnProcessor,
40
+ )
41
+ from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
42
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
43
+ from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
44
+ from diffusers.schedulers import KarrasDiffusionSchedulers
45
+ from diffusers.utils import (
46
+ deprecate,
47
+ is_invisible_watermark_available,
48
+ is_torch_xla_available,
49
+ logging,
50
+ replace_example_docstring,
51
+ )
52
+ from diffusers.utils.torch_utils import randn_tensor
53
+
54
+
55
+ if is_invisible_watermark_available():
56
+ from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker
57
+
58
+ if is_torch_xla_available():
59
+ import torch_xla.core.xla_model as xm
60
+
61
+ XLA_AVAILABLE = True
62
+ else:
63
+ XLA_AVAILABLE = False
64
+
65
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
66
+
67
+
68
+ EXAMPLE_DOC_STRING = """
69
+ Examples:
70
+ ```py
71
+ >>> import torch
72
+ >>> from diffusers import KolorsInpaintPipeline
73
+ >>> from diffusers.utils import load_image
74
+
75
+ >>> pipe = KolorsInpaintPipeline.from_pretrained(
76
+ ... "Kwai-Kolors/Kolors-diffusers",
77
+ ... torch_dtype=torch.float16,
78
+ ... variant="fp16"
79
+ ... use_safetensors=True
80
+ ... )
81
+ >>> pipe.enable_model_cpu_offload()
82
+
83
+ >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
84
+ >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
85
+
86
+ >>> init_image = load_image(img_url).convert("RGB")
87
+ >>> mask_image = load_image(mask_url).convert("RGB")
88
+
89
+ >>> prompt = "A majestic tiger sitting on a bench"
90
+ >>> image = pipe(
91
+ ... prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80
92
+ ... ).images[0]
93
+ ```
94
+ """
95
+
96
+
97
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
98
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
99
+ """
100
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
101
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
102
+ """
103
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
104
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
105
+ # rescale the results from guidance (fixes overexposure)
106
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
107
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
108
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
109
+ return noise_cfg
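+ # Illustrative: with guidance_rescale=0.7 the returned prediction is
+ # 0.7 * (CFG output rescaled to the text prediction's std) + 0.3 * (raw CFG output),
+ # which counteracts the over-exposure introduced by large guidance scales.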
110
+
111
+
112
+ def mask_pil_to_torch(mask, height, width):
113
+ # preprocess mask
114
+ if isinstance(mask, (PIL.Image.Image, np.ndarray)):
115
+ mask = [mask]
116
+
117
+ if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
118
+ mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
119
+ mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
120
+ mask = mask.astype(np.float32) / 255.0
121
+ elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
122
+ mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
123
+
124
+ mask = torch.from_numpy(mask)
125
+ return mask
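+ # e.g. a single 512x512 PIL "L"-mode mask becomes a float tensor of shape (1, 1, 512, 512)
+ # with values in [0, 1]; binarization is applied later in prepare_mask_and_masked_image.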
126
+
127
+
128
+ def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
129
+ """
130
+ Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
131
+ converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
132
+ ``image`` and ``1`` for the ``mask``.
133
+
134
+ The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
135
+ binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
136
+
137
+ Args:
138
+ image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
139
+ It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
140
+ ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
141
+ mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
142
+ It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
143
+ ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
144
+
145
+
146
+ Raises:
147
+ ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
148
+ should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
149
+ TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
150
+ (ot the other way around).
151
+
152
+ Returns:
153
+ tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
154
+ dimensions: ``batch x channels x height x width``.
155
+ """
156
+
157
+ # TODO(Yiyi) - need to clean this up later
158
+ deprecation_message = "The prepare_mask_and_masked_image method is deprecated and will be removed in a future version. Please use VaeImageProcessor.preprocess instead"
159
+ deprecate(
160
+ "prepare_mask_and_masked_image",
161
+ "0.30.0",
162
+ deprecation_message,
163
+ )
164
+ if image is None:
165
+ raise ValueError("`image` input cannot be undefined.")
166
+
167
+ if mask is None:
168
+ raise ValueError("`mask_image` input cannot be undefined.")
169
+
170
+ if isinstance(image, torch.Tensor):
171
+ if not isinstance(mask, torch.Tensor):
172
+ mask = mask_pil_to_torch(mask, height, width)
173
+
174
+ if image.ndim == 3:
175
+ image = image.unsqueeze(0)
176
+
177
+ # Batch and add channel dim for single mask
178
+ if mask.ndim == 2:
179
+ mask = mask.unsqueeze(0).unsqueeze(0)
180
+
181
+ # Batch single mask or add channel dim
182
+ if mask.ndim == 3:
183
+ # Single batched mask, no channel dim or single mask not batched but channel dim
184
+ if mask.shape[0] == 1:
185
+ mask = mask.unsqueeze(0)
186
+
187
+ # Batched masks no channel dim
188
+ else:
189
+ mask = mask.unsqueeze(1)
190
+
191
+ assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
192
+ # assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
193
+ assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
194
+
195
+ # Check image is in [-1, 1]
196
+ # if image.min() < -1 or image.max() > 1:
197
+ # raise ValueError("Image should be in [-1, 1] range")
198
+
199
+ # Check mask is in [0, 1]
200
+ if mask.min() < 0 or mask.max() > 1:
201
+ raise ValueError("Mask should be in [0, 1] range")
202
+
203
+ # Binarize mask
204
+ mask[mask < 0.5] = 0
205
+ mask[mask >= 0.5] = 1
206
+
207
+ # Image as float32
208
+ image = image.to(dtype=torch.float32)
209
+ elif isinstance(mask, torch.Tensor):
210
+ raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
211
+ else:
212
+ # preprocess image
213
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
214
+ image = [image]
215
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
216
+ # resize all images w.r.t passed height an width
217
+ image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
218
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
219
+ image = np.concatenate(image, axis=0)
220
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
221
+ image = np.concatenate([i[None, :] for i in image], axis=0)
222
+
223
+ image = image.transpose(0, 3, 1, 2)
224
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
225
+
226
+ mask = mask_pil_to_torch(mask, height, width)
227
+ mask[mask < 0.5] = 0
228
+ mask[mask >= 0.5] = 1
229
+
230
+ if image.shape[1] == 4:
231
+ # images are in latent space and thus can't
232
+ # be masked set masked_image to None
233
+ # we assume that the checkpoint is not an inpainting
234
+ # checkpoint. TOD(Yiyi) - need to clean this up later
235
+ masked_image = None
236
+ else:
237
+ masked_image = image * (mask < 0.5)
238
+
239
+ # n.b. ensure backwards compatibility as old function does not return image
240
+ if return_image:
241
+ return mask, masked_image, image
242
+
243
+ return mask, masked_image
244
+
245
+
246
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
247
+ def retrieve_latents(
248
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
249
+ ):
250
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
251
+ return encoder_output.latent_dist.sample(generator)
252
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
253
+ return encoder_output.latent_dist.mode()
254
+ elif hasattr(encoder_output, "latents"):
255
+ return encoder_output.latents
256
+ else:
257
+ raise AttributeError("Could not access latents of provided encoder_output")
258
+
259
+
260
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
261
+ def retrieve_timesteps(
262
+ scheduler,
263
+ num_inference_steps: Optional[int] = None,
264
+ device: Optional[Union[str, torch.device]] = None,
265
+ timesteps: Optional[List[int]] = None,
266
+ sigmas: Optional[List[float]] = None,
267
+ **kwargs,
268
+ ):
269
+ """
270
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
271
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
272
+
273
+ Args:
274
+ scheduler (`SchedulerMixin`):
275
+ The scheduler to get timesteps from.
276
+ num_inference_steps (`int`):
277
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
278
+ must be `None`.
279
+ device (`str` or `torch.device`, *optional*):
280
+ The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
281
+ timesteps (`List[int]`, *optional*):
282
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
283
+ `num_inference_steps` and `sigmas` must be `None`.
284
+ sigmas (`List[float]`, *optional*):
285
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
286
+ `num_inference_steps` and `timesteps` must be `None`.
287
+
288
+ Returns:
289
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
290
+ second element is the number of inference steps.
291
+ """
292
+ if timesteps is not None and sigmas is not None:
293
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
294
+ if timesteps is not None:
295
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
296
+ if not accepts_timesteps:
297
+ raise ValueError(
298
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
299
+ f" timestep schedules. Please check whether you are using the correct scheduler."
300
+ )
301
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
302
+ timesteps = scheduler.timesteps
303
+ num_inference_steps = len(timesteps)
304
+ elif sigmas is not None:
305
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
306
+ if not accept_sigmas:
307
+ raise ValueError(
308
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
309
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
310
+ )
311
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
312
+ timesteps = scheduler.timesteps
313
+ num_inference_steps = len(timesteps)
314
+ else:
315
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
316
+ timesteps = scheduler.timesteps
317
+ return timesteps, num_inference_steps
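+ # Illustrative usage (assuming the scheduler supports custom timesteps):
+ # timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=30, device="cuda")
+ # timesteps, num_inference_steps = retrieve_timesteps(scheduler, timesteps=[999, 749, 499, 249], device="cuda")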
318
+
319
+
320
+ class KolorsInpaintPipeline(
321
+ DiffusionPipeline,
322
+ StableDiffusionMixin,
323
+ StableDiffusionXLLoraLoaderMixin,
324
+ FromSingleFileMixin,
325
+ IPAdapterMixin,
326
+ ):
327
+ r"""
328
+ Pipeline for text-to-image generation using Kolors.
329
+
330
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
331
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
332
+
333
+ The pipeline also inherits the following loading methods:
334
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.safetensors` files
335
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
336
+ - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
337
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
338
+
339
+ Args:
340
+ vae ([`AutoencoderKL`]):
341
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
342
+ text_encoder ([`ChatGLMModel`]):
343
+ Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b).
344
+ tokenizer (`ChatGLMTokenizer`):
345
+ Tokenizer of class
346
+ [ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py).
347
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
348
+ scheduler ([`SchedulerMixin`]):
349
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
350
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
351
+ requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
352
+ Whether the `unet` requires an `aesthetic_score` condition to be passed during inference.
353
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
354
+ Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
355
+ `Kwai-Kolors/Kolors-diffusers`.
356
+ add_watermarker (`bool`, *optional*):
357
+ Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
358
+ watermark output images. If not defined, it will default to True if the package is installed, otherwise no
359
+ watermarker will be used.
360
+ """
361
+
362
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
363
+
364
+ _optional_components = [
365
+ "tokenizer",
366
+ "text_encoder",
367
+ "image_encoder",
368
+ "feature_extractor",
369
+ ]
370
+ _callback_tensor_inputs = [
371
+ "latents",
372
+ "prompt_embeds",
373
+ "negative_prompt_embeds",
374
+ "add_text_embeds",
375
+ "add_time_ids",
376
+ "negative_pooled_prompt_embeds",
377
+ "add_neg_time_ids",
378
+ "mask",
379
+ "masked_image_latents",
380
+ ]
381
+
382
+ def __init__(
383
+ self,
384
+ vae: AutoencoderKL,
385
+ text_encoder: ChatGLMModel,
386
+ tokenizer: ChatGLMTokenizer,
387
+ unet: UNet2DConditionModel,
388
+ scheduler: KarrasDiffusionSchedulers,
389
+ image_encoder: CLIPVisionModelWithProjection = None,
390
+ feature_extractor: CLIPImageProcessor = None,
391
+ requires_aesthetics_score: bool = False,
392
+ force_zeros_for_empty_prompt: bool = True,
393
+ add_watermarker: Optional[bool] = None,
394
+ ):
395
+ super().__init__()
396
+
397
+ self.register_modules(
398
+ vae=vae,
399
+ text_encoder=text_encoder,
400
+ tokenizer=tokenizer,
401
+ unet=unet,
402
+ image_encoder=image_encoder,
403
+ feature_extractor=feature_extractor,
404
+ scheduler=scheduler,
405
+ )
406
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
407
+ self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
408
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
409
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
410
+ self.mask_processor = VaeImageProcessor(
411
+ vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
412
+ )
413
+
414
+ add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
415
+
416
+ if add_watermarker:
417
+ self.watermark = StableDiffusionXLWatermarker()
418
+ else:
419
+ self.watermark = None
420
+
421
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
422
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
423
+ dtype = next(self.image_encoder.parameters()).dtype
424
+
425
+ if not isinstance(image, torch.Tensor):
426
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
427
+
428
+ image = image.to(device=device, dtype=dtype)
429
+ if output_hidden_states:
430
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
431
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
432
+ uncond_image_enc_hidden_states = self.image_encoder(
433
+ torch.zeros_like(image), output_hidden_states=True
434
+ ).hidden_states[-2]
435
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
436
+ num_images_per_prompt, dim=0
437
+ )
438
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
439
+ else:
440
+ image_embeds = self.image_encoder(image).image_embeds
441
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
442
+ uncond_image_embeds = torch.zeros_like(image_embeds)
443
+
444
+ return image_embeds, uncond_image_embeds
445
+
446
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
447
+ def prepare_ip_adapter_image_embeds(
448
+ self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
449
+ ):
450
+ if ip_adapter_image_embeds is None:
451
+ if not isinstance(ip_adapter_image, list):
452
+ ip_adapter_image = [ip_adapter_image]
453
+
454
+ if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
455
+ raise ValueError(
456
+ f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
457
+ )
458
+
459
+ image_embeds = []
460
+ for single_ip_adapter_image, image_proj_layer in zip(
461
+ ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
462
+ ):
463
+ output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
464
+ single_image_embeds, single_negative_image_embeds = self.encode_image(
465
+ single_ip_adapter_image, device, 1, output_hidden_state
466
+ )
467
+ single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
468
+ single_negative_image_embeds = torch.stack(
469
+ [single_negative_image_embeds] * num_images_per_prompt, dim=0
470
+ )
471
+
472
+ if do_classifier_free_guidance:
473
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
474
+ single_image_embeds = single_image_embeds.to(device)
475
+
476
+ image_embeds.append(single_image_embeds)
477
+ else:
478
+ repeat_dims = [1]
479
+ image_embeds = []
480
+ for single_image_embeds in ip_adapter_image_embeds:
481
+ if do_classifier_free_guidance:
482
+ single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
483
+ single_image_embeds = single_image_embeds.repeat(
484
+ num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
485
+ )
486
+ single_negative_image_embeds = single_negative_image_embeds.repeat(
487
+ num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
488
+ )
489
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
490
+ else:
491
+ single_image_embeds = single_image_embeds.repeat(
492
+ num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
493
+ )
494
+ image_embeds.append(single_image_embeds)
495
+
496
+ return image_embeds
497
+
498
+ def encode_prompt(
499
+ self,
500
+ prompt,
501
+ device: Optional[torch.device] = None,
502
+ num_images_per_prompt: int = 1,
503
+ do_classifier_free_guidance: bool = True,
504
+ negative_prompt=None,
505
+ prompt_embeds: Optional[torch.FloatTensor] = None,
506
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
507
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
508
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
509
+ lora_scale: Optional[float] = None,
510
+ ):
511
+ r"""
512
+ Encodes the prompt into text encoder hidden states.
513
+
514
+ Args:
515
+ prompt (`str` or `List[str]`, *optional*):
516
+ prompt to be encoded
517
+ device: (`torch.device`):
518
+ torch device
519
+ num_images_per_prompt (`int`):
520
+ number of images that should be generated per prompt
521
+ do_classifier_free_guidance (`bool`):
522
+ whether to use classifier free guidance or not
523
+ negative_prompt (`str` or `List[str]`, *optional*):
524
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
525
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
526
+ less than `1`).
527
+ prompt_embeds (`torch.FloatTensor`, *optional*):
528
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
529
+ provided, text embeddings will be generated from `prompt` input argument.
530
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
531
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
532
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
533
+ argument.
534
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
535
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
536
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
537
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
538
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
539
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
540
+ input argument.
541
+ lora_scale (`float`, *optional*):
542
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
543
+ """
544
+ device = device or self._execution_device
545
+
546
+ # set lora scale so that monkey patched LoRA
547
+ # function of text encoder can correctly access it
548
+ if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
549
+ self._lora_scale = lora_scale
550
+
551
+ if prompt is not None and isinstance(prompt, str):
552
+ batch_size = 1
553
+ elif prompt is not None and isinstance(prompt, list):
554
+ batch_size = len(prompt)
555
+ else:
556
+ batch_size = prompt_embeds.shape[0]
557
+
558
+ # Define tokenizers and text encoders
559
+ tokenizers = [self.tokenizer]
560
+ text_encoders = [self.text_encoder]
561
+
562
+ if prompt_embeds is None:
563
+ # textual inversion: procecss multi-vector tokens if necessary
564
+ prompt_embeds_list = []
565
+ for tokenizer, text_encoder in zip(tokenizers, text_encoders):
566
+ if isinstance(self, TextualInversionLoaderMixin):
567
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
568
+
569
+ text_inputs = tokenizer(
570
+ prompt,
571
+ padding="max_length",
572
+ max_length=256,
573
+ truncation=True,
574
+ return_tensors="pt",
575
+ ).to(self._execution_device)
576
+ output = text_encoder(
577
+ input_ids=text_inputs["input_ids"],
578
+ attention_mask=text_inputs["attention_mask"],
579
+ position_ids=text_inputs["position_ids"],
580
+ output_hidden_states=True,
581
+ )
582
+ prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
583
+ pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096]
584
+ bs_embed, seq_len, _ = prompt_embeds.shape
585
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
586
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
587
+ prompt_embeds_list.append(prompt_embeds)
588
+
589
+ # prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
590
+ prompt_embeds = prompt_embeds_list[0]
591
+
592
+ # get unconditional embeddings for classifier free guidance
593
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
594
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
595
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
596
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
597
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
598
+ # negative_prompt = negative_prompt or ""
599
+ uncond_tokens: List[str]
600
+ if negative_prompt is None:
601
+ uncond_tokens = [""] * batch_size
602
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
603
+ raise TypeError(
604
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
605
+ f" {type(prompt)}."
606
+ )
607
+ elif isinstance(negative_prompt, str):
608
+ uncond_tokens = [negative_prompt]
609
+ elif batch_size != len(negative_prompt):
610
+ raise ValueError(
611
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
612
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
613
+ " the batch size of `prompt`."
614
+ )
615
+ else:
616
+ uncond_tokens = negative_prompt
617
+
618
+ negative_prompt_embeds_list = []
619
+ for tokenizer, text_encoder in zip(tokenizers, text_encoders):
620
+ # textual inversion: procecss multi-vector tokens if necessary
621
+ if isinstance(self, TextualInversionLoaderMixin):
622
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, tokenizer)
623
+
624
+ max_length = prompt_embeds.shape[1]
625
+ uncond_input = tokenizer(
626
+ uncond_tokens,
627
+ padding="max_length",
628
+ max_length=max_length,
629
+ truncation=True,
630
+ return_tensors="pt",
631
+ ).to(self._execution_device)
632
+ output = text_encoder(
633
+ input_ids=uncond_input["input_ids"],
634
+ attention_mask=uncond_input["attention_mask"],
635
+ position_ids=uncond_input["position_ids"],
636
+ output_hidden_states=True,
637
+ )
638
+ negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
639
+ negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096]
640
+
641
+ if do_classifier_free_guidance:
642
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
643
+ seq_len = negative_prompt_embeds.shape[1]
644
+
645
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)
646
+
647
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
648
+ negative_prompt_embeds = negative_prompt_embeds.view(
649
+ batch_size * num_images_per_prompt, seq_len, -1
650
+ )
651
+
652
+ # For classifier free guidance, we need to do two forward passes.
653
+ # Here we concatenate the unconditional and text embeddings into a single batch
654
+ # to avoid doing two forward passes
655
+
656
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
657
+
658
+ # negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
659
+ negative_prompt_embeds = negative_prompt_embeds_list[0]
660
+
661
+ bs_embed = pooled_prompt_embeds.shape[0]
662
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
663
+ bs_embed * num_images_per_prompt, -1
664
+ )
665
+ if do_classifier_free_guidance:
666
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
667
+ bs_embed * num_images_per_prompt, -1
668
+ )
669
+
670
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
671
+
672
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
673
+ def prepare_extra_step_kwargs(self, generator, eta):
674
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
675
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
676
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
677
+ # and should be between [0, 1]
678
+
679
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
680
+ extra_step_kwargs = {}
681
+ if accepts_eta:
682
+ extra_step_kwargs["eta"] = eta
683
+
684
+ # check if the scheduler accepts generator
685
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
686
+ if accepts_generator:
687
+ extra_step_kwargs["generator"] = generator
688
+ return extra_step_kwargs
689
+
690
+ def check_inputs(
691
+ self,
692
+ prompt,
693
+ image,
694
+ mask_image,
695
+ height,
696
+ width,
697
+ strength,
698
+ callback_steps,
699
+ output_type,
700
+ negative_prompt=None,
701
+ prompt_embeds=None,
702
+ negative_prompt_embeds=None,
703
+ ip_adapter_image=None,
704
+ ip_adapter_image_embeds=None,
705
+ callback_on_step_end_tensor_inputs=None,
706
+ padding_mask_crop=None,
707
+ ):
708
+ if strength < 0 or strength > 1:
709
+ raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
710
+
711
+ if height % 8 != 0 or width % 8 != 0:
712
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
713
+
714
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
715
+ raise ValueError(
716
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
717
+ f" {type(callback_steps)}."
718
+ )
719
+
720
+ if callback_on_step_end_tensor_inputs is not None and not all(
721
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
722
+ ):
723
+ raise ValueError(
724
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
725
+ )
726
+
727
+ if prompt is not None and prompt_embeds is not None:
728
+ raise ValueError(
729
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
730
+ " only forward one of the two."
731
+ )
732
+ elif prompt is None and prompt_embeds is None:
733
+ raise ValueError(
734
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
735
+ )
736
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
737
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
738
+
739
+ if negative_prompt is not None and negative_prompt_embeds is not None:
740
+ raise ValueError(
741
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
742
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
743
+ )
744
+
745
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
746
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
747
+ raise ValueError(
748
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
749
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
750
+ f" {negative_prompt_embeds.shape}."
751
+ )
752
+ if padding_mask_crop is not None:
753
+ if not isinstance(image, PIL.Image.Image):
754
+ raise ValueError(
755
+ f"The image should be a PIL image when inpainting mask crop, but is of type {type(image)}."
756
+ )
757
+ if not isinstance(mask_image, PIL.Image.Image):
758
+ raise ValueError(
759
+ f"The mask image should be a PIL image when inpainting mask crop, but is of type"
760
+ f" {type(mask_image)}."
761
+ )
762
+ if output_type != "pil":
763
+ raise ValueError(f"The output type should be PIL when inpainting mask crop, but is {output_type}.")
764
+
765
+ if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
766
+ raise ValueError(
767
+ "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
768
+ )
769
+
770
+ if ip_adapter_image_embeds is not None:
771
+ if not isinstance(ip_adapter_image_embeds, list):
772
+ raise ValueError(
773
+ f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
774
+ )
775
+ elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
776
+ raise ValueError(
777
+ f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
778
+ )
779
+
780
+ def prepare_latents(
781
+ self,
782
+ batch_size,
783
+ num_channels_latents,
784
+ height,
785
+ width,
786
+ dtype,
787
+ device,
788
+ generator,
789
+ latents=None,
790
+ image=None,
791
+ timestep=None,
792
+ is_strength_max=True,
793
+ add_noise=True,
794
+ return_noise=False,
795
+ return_image_latents=False,
796
+ ):
797
+ shape = (
798
+ batch_size,
799
+ num_channels_latents,
800
+ int(height) // self.vae_scale_factor,
801
+ int(width) // self.vae_scale_factor,
802
+ )
803
+ if isinstance(generator, list) and len(generator) != batch_size:
804
+ raise ValueError(
805
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
806
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
807
+ )
808
+
809
+ if (image is None or timestep is None) and not is_strength_max:
810
+ raise ValueError(
811
+ "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
812
+ "However, either the image or the noise timestep has not been provided."
813
+ )
814
+
815
+ if image.shape[1] == 4:
816
+ image_latents = image.to(device=device, dtype=dtype)
817
+ image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
818
+ elif return_image_latents or (latents is None and not is_strength_max):
819
+ image = image.to(device=device, dtype=dtype)
820
+ image_latents = self._encode_vae_image(image=image, generator=generator)
821
+ image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
822
+
823
+ if latents is None and add_noise:
824
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
825
+ # if strength is 1.0 then initialise the latents to noise, else initialise to image + noise
826
+ latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
827
+ # if pure noise then scale the initial latents by the Scheduler's init sigma
828
+ latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
829
+ elif add_noise:
830
+ noise = latents.to(device)
831
+ latents = noise * self.scheduler.init_noise_sigma
832
+ else:
833
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
834
+ latents = image_latents.to(device)
835
+
836
+ outputs = (latents,)
837
+
838
+ if return_noise:
839
+ outputs += (noise,)
840
+
841
+ if return_image_latents:
842
+ outputs += (image_latents,)
843
+
844
+ return outputs
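+ # Branch summary: strength == 1 -> pure noise scaled by init_noise_sigma; 0 < strength < 1 ->
+ # VAE-encoded image latents noised to `timestep`; a user-supplied `latents` tensor is only
+ # rescaled by init_noise_sigma; add_noise=False (i.e. `denoising_start` is set) -> the clean
+ # image latents are returned unchanged.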
845
+
846
+ def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
847
+ dtype = image.dtype
848
+ if self.vae.config.force_upcast:
849
+ image = image.float()
850
+ self.vae.to(dtype=torch.float32)
851
+
852
+ if isinstance(generator, list):
853
+ image_latents = [
854
+ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
855
+ for i in range(image.shape[0])
856
+ ]
857
+ image_latents = torch.cat(image_latents, dim=0)
858
+ else:
859
+ image_latents = retrieve_latents(self.vae.encode(image), generator=generator)
860
+
861
+ if self.vae.config.force_upcast:
862
+ self.vae.to(dtype)
863
+
864
+ image_latents = image_latents.to(dtype)
865
+ image_latents = self.vae.config.scaling_factor * image_latents
866
+
867
+ return image_latents
868
+
869
+ def prepare_mask_latents(
870
+ self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
871
+ ):
872
+ # resize the mask to latents shape as we concatenate the mask to the latents
873
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
874
+ # and half precision
875
+ mask = torch.nn.functional.interpolate(
876
+ mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
877
+ )
878
+ mask = mask.to(device=device, dtype=dtype)
879
+
880
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
881
+ if mask.shape[0] < batch_size:
882
+ if not batch_size % mask.shape[0] == 0:
883
+ raise ValueError(
884
+ "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
885
+ f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
886
+ " of masks that you pass is divisible by the total requested batch size."
887
+ )
888
+ mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
889
+
890
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
891
+
892
+ if masked_image is not None and masked_image.shape[1] == 4:
893
+ masked_image_latents = masked_image
894
+ else:
895
+ masked_image_latents = None
896
+
897
+ if masked_image is not None:
898
+ if masked_image_latents is None:
899
+ masked_image = masked_image.to(device=device, dtype=dtype)
900
+ masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
901
+
902
+ if masked_image_latents.shape[0] < batch_size:
903
+ if not batch_size % masked_image_latents.shape[0] == 0:
904
+ raise ValueError(
905
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
906
+ f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
907
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
908
+ )
909
+ masked_image_latents = masked_image_latents.repeat(
910
+ batch_size // masked_image_latents.shape[0], 1, 1, 1
911
+ )
912
+
913
+ masked_image_latents = (
914
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
915
+ )
916
+
917
+ # aligning device to prevent device errors when concating it with the latent model input
918
+ masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
919
+
920
+ return mask, masked_image_latents
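+ # Under classifier-free guidance the returned mask has shape
+ # (2 * batch_size, 1, height // vae_scale_factor, width // vae_scale_factor); it is later
+ # concatenated channel-wise with the latents when the UNet expects 9 input channels.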
921
+
922
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps
923
+ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
924
+ # get the original timestep using init_timestep
925
+ if denoising_start is None:
926
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
927
+ t_start = max(num_inference_steps - init_timestep, 0)
928
+ else:
929
+ t_start = 0
930
+
931
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
932
+
933
+ # Strength is irrelevant if we directly request a timestep to start at;
934
+ # that is, strength is determined by the denoising_start instead.
935
+ if denoising_start is not None:
936
+ discrete_timestep_cutoff = int(
937
+ round(
938
+ self.scheduler.config.num_train_timesteps
939
+ - (denoising_start * self.scheduler.config.num_train_timesteps)
940
+ )
941
+ )
942
+
943
+ num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
944
+ if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
945
+ # if the scheduler is a 2nd order scheduler we might have to do +1
946
+ # because `num_inference_steps` might be even given that every timestep
947
+ # (except the highest one) is duplicated. If `num_inference_steps` is even it would
948
+ # mean that we cut the timesteps in the middle of the denoising step
949
+ # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
950
+ # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler
951
+ num_inference_steps = num_inference_steps + 1
952
+
953
+ # because t_n+1 >= t_n, we slice the timesteps starting from the end
954
+ timesteps = timesteps[-num_inference_steps:]
955
+ return timesteps, num_inference_steps
956
+
957
+ return timesteps, num_inference_steps - t_start
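+ # e.g. num_inference_steps=50 and strength=0.8 give init_timestep=40 and t_start=10, so the
+ # loop denoises over the final 40 scheduler timesteps.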
958
+
959
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline._get_add_time_ids
960
+ def _get_add_time_ids(
961
+ self,
962
+ original_size,
963
+ crops_coords_top_left,
964
+ target_size,
965
+ aesthetic_score,
966
+ negative_aesthetic_score,
967
+ negative_original_size,
968
+ negative_crops_coords_top_left,
969
+ negative_target_size,
970
+ dtype,
971
+ text_encoder_projection_dim=None,
972
+ ):
973
+ if self.config.requires_aesthetics_score:
974
+ add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
975
+ add_neg_time_ids = list(
976
+ negative_original_size + negative_crops_coords_top_left + (negative_aesthetic_score,)
977
+ )
978
+ else:
979
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
980
+ add_neg_time_ids = list(negative_original_size + crops_coords_top_left + negative_target_size)
981
+
982
+ passed_add_embed_dim = self.unet.config.addition_time_embed_dim * len(add_time_ids) + 4096
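+ # The constant 4096 is the width of the ChatGLM pooled embedding, standing in for the pooled
+ # CLIP projection dim that stock SDXL adds at this point.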
983
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
984
+
985
+ if (
986
+ expected_add_embed_dim > passed_add_embed_dim
987
+ and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
988
+ ):
989
+ raise ValueError(
990
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
991
+ )
992
+ elif (
993
+ expected_add_embed_dim < passed_add_embed_dim
994
+ and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
995
+ ):
996
+ raise ValueError(
997
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
998
+ )
999
+ elif expected_add_embed_dim != passed_add_embed_dim:
1000
+ raise ValueError(
1001
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder.config.projection_dim`."
1002
+ )
1003
+
1004
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
1005
+ add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
1006
+
1007
+ return add_time_ids, add_neg_time_ids
1008
+
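+ # Illustration (comment only): with `requires_aesthetics_score=False` the time ids are the six
+ # integers `original_size + crops_coords_top_left + target_size`, e.g. (1024, 1024, 0, 0, 1024, 1024).
+ # The hard-coded 4096 above stands in for the usual `text_encoder_projection_dim` and matches the
+ # pooled-embedding width of the Kolors text encoder, so with `addition_time_embed_dim=256` the
+ # expected input width of `unet.add_embedding.linear_1` would be 256 * 6 + 4096 = 5632.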
1009
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
1010
+ def upcast_vae(self):
1011
+ dtype = self.vae.dtype
1012
+ self.vae.to(dtype=torch.float32)
1013
+ use_torch_2_0_or_xformers = isinstance(
1014
+ self.vae.decoder.mid_block.attentions[0].processor,
1015
+ (
1016
+ AttnProcessor2_0,
1017
+ XFormersAttnProcessor,
1018
+ LoRAXFormersAttnProcessor,
1019
+ LoRAAttnProcessor2_0,
1020
+ ),
1021
+ )
1022
+ # if xformers or torch 2.0 attention is used, the attention block does not need
1023
+ # to be in float32, which can save lots of memory
1024
+ if use_torch_2_0_or_xformers:
1025
+ self.vae.post_quant_conv.to(dtype)
1026
+ self.vae.decoder.conv_in.to(dtype)
1027
+ self.vae.decoder.mid_block.to(dtype)
1028
+
1029
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
1030
+ def get_guidance_scale_embedding(
1031
+ self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
1032
+ ) -> torch.Tensor:
1033
+ """
1034
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
1035
+
1036
+ Args:
1037
+ w (`torch.Tensor`):
1038
+ Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
1039
+ embedding_dim (`int`, *optional*, defaults to 512):
1040
+ Dimension of the embeddings to generate.
1041
+ dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
1042
+ Data type of the generated embeddings.
1043
+
1044
+ Returns:
1045
+ `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
1046
+ """
1047
+ assert len(w.shape) == 1
1048
+ w = w * 1000.0
1049
+
1050
+ half_dim = embedding_dim // 2
1051
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
1052
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
1053
+ emb = w.to(dtype)[:, None] * emb[None, :]
1054
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
1055
+ if embedding_dim % 2 == 1: # zero pad
1056
+ emb = torch.nn.functional.pad(emb, (0, 1))
1057
+ assert emb.shape == (w.shape[0], embedding_dim)
1058
+ return emb
1059
+
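+ # Shape sketch (comment only): for `w = torch.tensor([7.5])` and the default `embedding_dim=512`,
+ # `w` is scaled to 7500.0 and expanded into 256 sine and 256 cosine features, giving a tensor of
+ # shape (1, 512) that is later passed to the UNet as `timestep_cond` when
+ # `unet.config.time_cond_proj_dim` is set.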
1060
+ @property
1061
+ def guidance_scale(self):
1062
+ return self._guidance_scale
1063
+
1064
+ @property
1065
+ def guidance_rescale(self):
1066
+ return self._guidance_rescale
1067
+
1068
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
1069
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1070
+ # corresponds to doing no classifier free guidance.
1071
+ @property
1072
+ def do_classifier_free_guidance(self):
1073
+ return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
1074
+
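+ # Concretely, when this returns True the denoising loop below evaluates
+ # `noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)`; with
+ # `guidance_scale <= 1`, or when the UNet takes a guidance embedding via `time_cond_proj_dim`
+ # (latent-consistency-style distillation), classifier-free guidance is skipped entirely.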
1075
+ @property
1076
+ def cross_attention_kwargs(self):
1077
+ return self._cross_attention_kwargs
1078
+
1079
+ @property
1080
+ def denoising_end(self):
1081
+ return self._denoising_end
1082
+
1083
+ @property
1084
+ def denoising_start(self):
1085
+ return self._denoising_start
1086
+
1087
+ @property
1088
+ def num_timesteps(self):
1089
+ return self._num_timesteps
1090
+
1091
+ @property
1092
+ def interrupt(self):
1093
+ return self._interrupt
1094
+
1095
+ @torch.no_grad()
1096
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
1097
+ def __call__(
1098
+ self,
1099
+ prompt: Union[str, List[str]] = None,
1100
+ image: PipelineImageInput = None,
1101
+ mask_image: PipelineImageInput = None,
1102
+ masked_image_latents: torch.Tensor = None,
1103
+ height: Optional[int] = None,
1104
+ width: Optional[int] = None,
1105
+ padding_mask_crop: Optional[int] = None,
1106
+ strength: float = 0.9999,
1107
+ num_inference_steps: int = 50,
1108
+ timesteps: List[int] = None,
1109
+ sigmas: List[float] = None,
1110
+ denoising_start: Optional[float] = None,
1111
+ denoising_end: Optional[float] = None,
1112
+ guidance_scale: float = 7.5,
1113
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1114
+ num_images_per_prompt: Optional[int] = 1,
1115
+ eta: float = 0.0,
1116
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1117
+ latents: Optional[torch.Tensor] = None,
1118
+ prompt_embeds: Optional[torch.Tensor] = None,
1119
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
1120
+ pooled_prompt_embeds: Optional[torch.Tensor] = None,
1121
+ negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
1122
+ ip_adapter_image: Optional[PipelineImageInput] = None,
1123
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
1124
+ output_type: Optional[str] = "pil",
1125
+ return_dict: bool = True,
1126
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1127
+ guidance_rescale: float = 0.0,
1128
+ original_size: Tuple[int, int] = None,
1129
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
1130
+ target_size: Tuple[int, int] = None,
1131
+ negative_original_size: Optional[Tuple[int, int]] = None,
1132
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
1133
+ negative_target_size: Optional[Tuple[int, int]] = None,
1134
+ aesthetic_score: float = 6.0,
1135
+ negative_aesthetic_score: float = 2.5,
1136
+ callback_on_step_end: Optional[
1137
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
1138
+ ] = None,
1139
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
1140
+ **kwargs,
1141
+ ):
1142
+ r"""
1143
+ Function invoked when calling the pipeline for generation.
1144
+
1145
+ Args:
1146
+ prompt (`str` or `List[str]`, *optional*):
1147
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
1148
+ instead.
1149
+ image (`PIL.Image.Image`):
1150
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
1151
+ be masked out with `mask_image` and repainted according to `prompt`.
1152
+ mask_image (`PIL.Image.Image`):
1153
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1154
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
1155
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
1156
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
1157
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1158
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
1159
+ Anything below 512 pixels won't work well for
1160
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
1161
+ and checkpoints that are not specifically fine-tuned on low resolutions.
1162
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1163
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
1164
+ Anything below 512 pixels won't work well for
1165
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
1166
+ and checkpoints that are not specifically fine-tuned on low resolutions.
1167
+ padding_mask_crop (`int`, *optional*, defaults to `None`):
1168
+ The size of margin in the crop to be applied to the image and masking. If `None`, no crop is applied to
1169
+ image and mask_image. If `padding_mask_crop` is not `None`, it will first find a rectangular region
1170
+ with the same aspect ratio as the image that contains all of the masked area, and then expand that area based
1171
+ on `padding_mask_crop`. The image and mask_image will then be cropped based on the expanded area before
1172
+ resizing to the original image size for inpainting. This is useful when the masked area is small while
1173
+ the image is large and contains information irrelevant to inpainting, such as the background.
1174
+ strength (`float`, *optional*, defaults to 0.9999):
1175
+ Conceptually, indicates how much to transform the masked portion of the reference `image`. Must be
1176
+ between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the
1177
+ `strength`. The number of denoising steps depends on the amount of noise initially added. When
1178
+ `strength` is 1, added noise will be maximum and the denoising process will run for the full number of
1179
+ iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores the masked
1180
+ portion of the reference `image`. Note that when `denoising_start` is specified, the value of
1181
+ `strength` will be ignored.
1182
+ num_inference_steps (`int`, *optional*, defaults to 50):
1183
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1184
+ expense of slower inference.
1185
+ timesteps (`List[int]`, *optional*):
1186
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
1187
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
1188
+ passed will be used. Must be in descending order.
1189
+ sigmas (`List[float]`, *optional*):
1190
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
1191
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
1192
+ will be used.
1193
+ denoising_start (`float`, *optional*):
1194
+ When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
1195
+ bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
1196
+ it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
1197
+ strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
1198
+ is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
1199
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
1200
+ denoising_end (`float`, *optional*):
1201
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
1202
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
1203
+ still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be
1204
+ denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the
1205
+ final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline
1206
+ forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
1207
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
1208
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1209
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1210
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1211
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1212
+ 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
1213
+ usually at the expense of lower image quality.
1214
+ negative_prompt (`str` or `List[str]`, *optional*):
1215
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
1216
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1217
+ less than `1`).
1218
+ prompt_embeds (`torch.Tensor`, *optional*):
1219
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1220
+ provided, text embeddings will be generated from `prompt` input argument.
1221
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
1222
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1223
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1224
+ argument.
1225
+ pooled_prompt_embeds (`torch.Tensor`, *optional*):
1226
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1227
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
1228
+ negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
1229
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1230
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1231
+ input argument.
1232
+ ip_adapter_image (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
1233
+ ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
1234
+ Pre-generated image embeddings for IP-Adapter. It should be a list whose length equals the number of
1235
+ IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
1236
+ contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
1237
+ provided, embeddings are computed from the `ip_adapter_image` input argument.
1238
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1239
+ The number of images to generate per prompt.
1240
+ eta (`float`, *optional*, defaults to 0.0):
1241
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1242
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1243
+ generator (`torch.Generator`, *optional*):
1244
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1245
+ to make generation deterministic.
1246
+ latents (`torch.Tensor`, *optional*):
1247
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1248
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1249
+ tensor will be generated by sampling using the supplied random `generator`.
1250
+ output_type (`str`, *optional*, defaults to `"pil"`):
1251
+ The output format of the generated image. Choose between
1252
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1253
+ return_dict (`bool`, *optional*, defaults to `True`):
1254
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1255
+ plain tuple.
1256
+ cross_attention_kwargs (`dict`, *optional*):
1257
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1258
+ `self.processor` in
1259
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1260
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1261
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1262
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
1263
+ explained in section 2.2 of
1264
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1265
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1266
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1267
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1268
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1269
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1270
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1271
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
1272
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
1273
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1274
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1275
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
1276
+ micro-conditioning as explained in section 2.2 of
1277
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1278
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1279
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1280
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
1281
+ micro-conditioning as explained in section 2.2 of
1282
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1283
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1284
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1285
+ To negatively condition the generation process based on a target image resolution. It should be the same
1286
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
1287
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1288
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1289
+ aesthetic_score (`float`, *optional*, defaults to 6.0):
1290
+ Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
1291
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1292
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1293
+ negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
1294
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1295
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
1296
+ simulate an aesthetic score of the generated image by influencing the negative text condition.
1297
+ callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
1298
+ A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
1299
+ each denoising step during inference with the following arguments: `callback_on_step_end(self:
1300
+ DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
1301
+ list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
1302
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
1303
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1304
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1305
+ `._callback_tensor_inputs` attribute of your pipeline class.
1306
+
1307
+ Examples:
1308
+
1309
+ Returns:
1310
+ [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] or `tuple`:
1311
+ [`~pipelines.stable_diffusion.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
1312
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
1313
+ """
1314
+
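+ # Rough "Mixture of Denoisers" usage sketch for `denoising_end`/`denoising_start` (comment only;
+ # `base_pipe` and `refiner_pipe` are hypothetical pipeline instances):
+ #   latents = base_pipe(prompt=prompt, image=image, mask_image=mask, denoising_end=0.8,
+ #                       output_type="latent").images
+ #   image = refiner_pipe(prompt=prompt, image=latents, mask_image=mask, denoising_start=0.8).images[0]
+ # i.e. the first call stops at 80% of the schedule and hands its noisy latents to a second call
+ # that only denoises the remaining 20%.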
1315
+ callback = kwargs.pop("callback", None)
1316
+ callback_steps = kwargs.pop("callback_steps", None)
1317
+
1318
+ if callback is not None:
1319
+ deprecate(
1320
+ "callback",
1321
+ "1.0.0",
1322
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
1323
+ )
1324
+ if callback_steps is not None:
1325
+ deprecate(
1326
+ "callback_steps",
1327
+ "1.0.0",
1328
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
1329
+ )
1330
+
1331
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
1332
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
1333
+
1334
+ # 0. Default height and width to unet
1335
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
1336
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
1337
+
1338
+ # 1. Check inputs
1339
+ self.check_inputs(
1340
+ prompt,
1341
+ image,
1342
+ mask_image,
1343
+ height,
1344
+ width,
1345
+ strength,
1346
+ callback_steps,
1347
+ output_type,
1348
+ negative_prompt,
1349
+ prompt_embeds,
1350
+ negative_prompt_embeds,
1351
+ ip_adapter_image,
1352
+ ip_adapter_image_embeds,
1353
+ callback_on_step_end_tensor_inputs,
1354
+ padding_mask_crop,
1355
+ )
1356
+
1357
+ self._guidance_scale = guidance_scale
1358
+ self._guidance_rescale = guidance_rescale
1359
+ self._cross_attention_kwargs = cross_attention_kwargs
1360
+ self._denoising_end = denoising_end
1361
+ self._denoising_start = denoising_start
1362
+ self._interrupt = False
1363
+
1364
+ # 2. Define call parameters
1365
+ if prompt is not None and isinstance(prompt, str):
1366
+ batch_size = 1
1367
+ elif prompt is not None and isinstance(prompt, list):
1368
+ batch_size = len(prompt)
1369
+ else:
1370
+ batch_size = prompt_embeds.shape[0]
1371
+
1372
+ device = self._execution_device
1373
+
1374
+ # 3. Encode input prompt
1375
+ text_encoder_lora_scale = (
1376
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1377
+ )
1378
+
1379
+ (
1380
+ prompt_embeds,
1381
+ negative_prompt_embeds,
1382
+ pooled_prompt_embeds,
1383
+ negative_pooled_prompt_embeds,
1384
+ ) = self.encode_prompt(
1385
+ prompt=prompt,
1386
+ device=device,
1387
+ num_images_per_prompt=num_images_per_prompt,
1388
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1389
+ negative_prompt=negative_prompt,
1390
+ prompt_embeds=prompt_embeds,
1391
+ negative_prompt_embeds=negative_prompt_embeds,
1392
+ pooled_prompt_embeds=pooled_prompt_embeds,
1393
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1394
+ lora_scale=text_encoder_lora_scale,
1395
+ )
1396
+
1397
+ # 4. set timesteps
1398
+ def denoising_value_valid(dnv):
1399
+ return isinstance(dnv, float) and 0 < dnv < 1
1400
+
1401
+ timesteps, num_inference_steps = retrieve_timesteps(
1402
+ self.scheduler, num_inference_steps, device, timesteps, sigmas
1403
+ )
1404
+ timesteps, num_inference_steps = self.get_timesteps(
1405
+ num_inference_steps,
1406
+ strength,
1407
+ device,
1408
+ denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None,
1409
+ )
1410
+ # check that the number of inference steps is not < 1, as this doesn't make sense
1411
+ if num_inference_steps < 1:
1412
+ raise ValueError(
1413
+ f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
1414
+ f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
1415
+ )
1416
+ # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
1417
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1418
+ # create a boolean to check if the strength is set to 1; if so, initialise the latents with pure noise
1419
+ is_strength_max = strength == 1.0
1420
+
1421
+ # 5. Preprocess mask and image
1422
+ if padding_mask_crop is not None:
1423
+ crops_coords = self.mask_processor.get_crop_region(mask_image, width, height, pad=padding_mask_crop)
1424
+ resize_mode = "fill"
1425
+ else:
1426
+ crops_coords = None
1427
+ resize_mode = "default"
1428
+
1429
+ original_image = image
1430
+ init_image = self.image_processor.preprocess(
1431
+ image, height=height, width=width, crops_coords=crops_coords, resize_mode=resize_mode
1432
+ )
1433
+ init_image = init_image.to(dtype=torch.float32)
1434
+
1435
+ mask = self.mask_processor.preprocess(
1436
+ mask_image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
1437
+ )
1438
+
1439
+ if masked_image_latents is not None:
1440
+ masked_image = masked_image_latents
1441
+ elif init_image.shape[1] == 4:
1442
+ # if the image is already in latent space, we can't mask it
1443
+ masked_image = None
1444
+ else:
1445
+ masked_image = init_image * (mask < 0.5)
1446
+
1447
+ # 6. Prepare latent variables
1448
+ num_channels_latents = self.vae.config.latent_channels
1449
+ num_channels_unet = self.unet.config.in_channels
1450
+ return_image_latents = num_channels_unet == 4
1451
+
1452
+ add_noise = True if self.denoising_start is None else False
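+ # When resuming from a partially denoised image (`denoising_start` set), the incoming latents are
+ # used as-is; otherwise `prepare_latents` adds noise matching `latent_timestep` computed above.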
1453
+ latents_outputs = self.prepare_latents(
1454
+ batch_size * num_images_per_prompt,
1455
+ num_channels_latents,
1456
+ height,
1457
+ width,
1458
+ prompt_embeds.dtype,
1459
+ device,
1460
+ generator,
1461
+ latents,
1462
+ image=init_image,
1463
+ timestep=latent_timestep,
1464
+ is_strength_max=is_strength_max,
1465
+ add_noise=add_noise,
1466
+ return_noise=True,
1467
+ return_image_latents=return_image_latents,
1468
+ )
1469
+
1470
+ if return_image_latents:
1471
+ latents, noise, image_latents = latents_outputs
1472
+ else:
1473
+ latents, noise = latents_outputs
1474
+
1475
+ # 7. Prepare mask latent variables
1476
+ mask, masked_image_latents = self.prepare_mask_latents(
1477
+ mask,
1478
+ masked_image,
1479
+ batch_size * num_images_per_prompt,
1480
+ height,
1481
+ width,
1482
+ prompt_embeds.dtype,
1483
+ device,
1484
+ generator,
1485
+ self.do_classifier_free_guidance,
1486
+ )
1487
+
1488
+ # 8. Check that sizes of mask, masked image and latents match
1489
+ if num_channels_unet == 9:
1490
+ # default case for inpainting-specialised checkpoints with a 9-channel UNet (e.g. runwayml/stable-diffusion-inpainting)
1491
+ num_channels_mask = mask.shape[1]
1492
+ num_channels_masked_image = masked_image_latents.shape[1]
1493
+ if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
1494
+ raise ValueError(
1495
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
1496
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
1497
+ f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
1498
+ f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of"
1499
+ " `pipeline.unet` or your `mask_image` or `image` input."
1500
+ )
1501
+ elif num_channels_unet != 4:
1502
+ raise ValueError(
1503
+ f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
1504
+ )
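+ # In short: a 9-channel (inpainting-specialised) UNet expects the mask and the masked-image latents
+ # concatenated to its input, while a 4-channel UNet receives only the latents and relies on the
+ # per-step blending with `image_latents` further down in the denoising loop.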
1505
+ # 8.1 Prepare extra step kwargs.
1506
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1507
+
1508
+ # 9. Recompute height and width from the latent shape. TODO: logic should ideally just be moved out of the pipeline
1509
+ height, width = latents.shape[-2:]
1510
+ height = height * self.vae_scale_factor
1511
+ width = width * self.vae_scale_factor
1512
+
1513
+ original_size = original_size or (height, width)
1514
+ target_size = target_size or (height, width)
1515
+
1516
+ # 10. Prepare added time ids & embeddings
1517
+ if negative_original_size is None:
1518
+ negative_original_size = original_size
1519
+ if negative_target_size is None:
1520
+ negative_target_size = target_size
1521
+
1522
+ add_text_embeds = pooled_prompt_embeds
1523
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1524
+
1525
+ add_time_ids, add_neg_time_ids = self._get_add_time_ids(
1526
+ original_size,
1527
+ crops_coords_top_left,
1528
+ target_size,
1529
+ aesthetic_score,
1530
+ negative_aesthetic_score,
1531
+ negative_original_size,
1532
+ negative_crops_coords_top_left,
1533
+ negative_target_size,
1534
+ dtype=prompt_embeds.dtype,
1535
+ text_encoder_projection_dim=text_encoder_projection_dim,
1536
+ )
1537
+ add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1538
+
1539
+ if self.do_classifier_free_guidance:
1540
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1541
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1542
+ add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1543
+ add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
1544
+
1545
+ prompt_embeds = prompt_embeds.to(device)
1546
+ add_text_embeds = add_text_embeds.to(device)
1547
+ add_time_ids = add_time_ids.to(device)
1548
+
1549
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1550
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1551
+ ip_adapter_image,
1552
+ ip_adapter_image_embeds,
1553
+ device,
1554
+ batch_size * num_images_per_prompt,
1555
+ self.do_classifier_free_guidance,
1556
+ )
1557
+
1558
+ # 11. Denoising loop
1559
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1560
+
1561
+ if (
1562
+ self.denoising_end is not None
1563
+ and self.denoising_start is not None
1564
+ and denoising_value_valid(self.denoising_end)
1565
+ and denoising_value_valid(self.denoising_start)
1566
+ and self.denoising_start >= self.denoising_end
1567
+ ):
1568
+ raise ValueError(
1569
+ f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: "
1570
+ + f" {self.denoising_end} when using type float."
1571
+ )
1572
+ elif self.denoising_end is not None and denoising_value_valid(self.denoising_end):
1573
+ discrete_timestep_cutoff = int(
1574
+ round(
1575
+ self.scheduler.config.num_train_timesteps
1576
+ - (self.denoising_end * self.scheduler.config.num_train_timesteps)
1577
+ )
1578
+ )
1579
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1580
+ timesteps = timesteps[:num_inference_steps]
1581
+
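+ # Worked example (comment only): with the default 1000 training timesteps and `denoising_end=0.8`,
+ # the cutoff is round(1000 - 0.8 * 1000) = 200, so only scheduler timesteps >= 200 are kept and the
+ # final ~20% of the schedule is left for a successor pipeline that sets `denoising_start=0.8`.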
1582
+ # 11.1 Optionally get Guidance Scale Embedding
1583
+ timestep_cond = None
1584
+ if self.unet.config.time_cond_proj_dim is not None:
1585
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1586
+ timestep_cond = self.get_guidance_scale_embedding(
1587
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1588
+ ).to(device=device, dtype=latents.dtype)
1589
+
1590
+ self._num_timesteps = len(timesteps)
1591
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1592
+ for i, t in enumerate(timesteps):
1593
+ if self.interrupt:
1594
+ continue
1595
+ # expand the latents if we are doing classifier free guidance
1596
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1597
+
1598
+ # concat latents, mask, masked_image_latents in the channel dimension
1599
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1600
+
1601
+ if num_channels_unet == 9:
1602
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
1603
+
1604
+ # predict the noise residual
1605
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1606
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1607
+ added_cond_kwargs["image_embeds"] = image_embeds
1608
+ noise_pred = self.unet(
1609
+ latent_model_input,
1610
+ t,
1611
+ encoder_hidden_states=prompt_embeds,
1612
+ timestep_cond=timestep_cond,
1613
+ cross_attention_kwargs=self.cross_attention_kwargs,
1614
+ added_cond_kwargs=added_cond_kwargs,
1615
+ return_dict=False,
1616
+ )[0]
1617
+
1618
+ # perform guidance
1619
+ if self.do_classifier_free_guidance:
1620
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1621
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
1622
+
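+ # Note on the rescaling below: `rescale_noise_cfg` matches the standard deviation of the guided
+ # prediction to that of the text-conditioned prediction and interpolates between the two with
+ # `guidance_rescale`, mitigating the over-exposure that large guidance scales can cause.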
1623
+ if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
1624
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1625
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
1626
+
1627
+ # compute the previous noisy sample x_t -> x_t-1
1628
+ latents_dtype = latents.dtype
1629
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1630
+ if latents.dtype != latents_dtype:
1631
+ if torch.backends.mps.is_available():
1632
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
1633
+ latents = latents.to(latents_dtype)
1634
+
1635
+ if num_channels_unet == 4:
1636
+ init_latents_proper = image_latents
1637
+ if self.do_classifier_free_guidance:
1638
+ init_mask, _ = mask.chunk(2)
1639
+ else:
1640
+ init_mask = mask
1641
+
1642
+ if i < len(timesteps) - 1:
1643
+ noise_timestep = timesteps[i + 1]
1644
+ init_latents_proper = self.scheduler.add_noise(
1645
+ init_latents_proper, noise, torch.tensor([noise_timestep])
1646
+ )
1647
+
1648
+ latents = (1 - init_mask) * init_latents_proper + init_mask * latents
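+ # For the 4-channel UNet, the known (unmasked) region is taken from the original image latents,
+ # re-noised to the *next* timestep so it stays on the scheduler's noise level, and pasted back
+ # around the freshly denoised masked region at every step.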
1649
+
1650
+ if callback_on_step_end is not None:
1651
+ callback_kwargs = {}
1652
+ for k in callback_on_step_end_tensor_inputs:
1653
+ callback_kwargs[k] = locals()[k]
1654
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1655
+
1656
+ latents = callback_outputs.pop("latents", latents)
1657
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1658
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1659
+ add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
1660
+ negative_pooled_prompt_embeds = callback_outputs.pop(
1661
+ "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
1662
+ )
1663
+ add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
1664
+ add_neg_time_ids = callback_outputs.pop("add_neg_time_ids", add_neg_time_ids)
1665
+ mask = callback_outputs.pop("mask", mask)
1666
+ masked_image_latents = callback_outputs.pop("masked_image_latents", masked_image_latents)
1667
+
1668
+ # call the callback, if provided
1669
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1670
+ progress_bar.update()
1671
+ if callback is not None and i % callback_steps == 0:
1672
+ step_idx = i // getattr(self.scheduler, "order", 1)
1673
+ callback(step_idx, t, latents)
1674
+
1675
+ if XLA_AVAILABLE:
1676
+ xm.mark_step()
1677
+
1678
+ if not output_type == "latent":
1679
+ # make sure the VAE is in float32 mode, as it overflows in float16
1680
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1681
+
1682
+ if needs_upcasting:
1683
+ self.upcast_vae()
1684
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1685
+ elif latents.dtype != self.vae.dtype:
1686
+ if torch.backends.mps.is_available():
1687
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
1688
+ self.vae = self.vae.to(latents.dtype)
1689
+
1690
+ # unscale/denormalize the latents
1691
+ # denormalize with the mean and std if available and not None
1692
+ has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
1693
+ has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
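+ # If the VAE config stores latent statistics, the latents were normalised roughly as
+ # z_norm = (z - latents_mean) * scaling_factor / latents_std at encoding time, which the branch
+ # below inverts; otherwise the latents are simply divided by `scaling_factor`.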
1694
+ if has_latents_mean and has_latents_std:
1695
+ latents_mean = (
1696
+ torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
1697
+ )
1698
+ latents_std = (
1699
+ torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
1700
+ )
1701
+ latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
1702
+ else:
1703
+ latents = latents / self.vae.config.scaling_factor
1704
+
1705
+ image = self.vae.decode(latents, return_dict=False)[0]
1706
+
1707
+ # cast back to fp16 if needed
1708
+ if needs_upcasting:
1709
+ self.vae.to(dtype=torch.float16)
1710
+ else:
1711
+ return StableDiffusionXLPipelineOutput(images=latents)
1712
+
1713
+ # apply watermark if available
1714
+ if self.watermark is not None:
1715
+ image = self.watermark.apply_watermark(image)
1716
+
1717
+ image = self.image_processor.postprocess(image, output_type=output_type)
1718
+
1719
+ if padding_mask_crop is not None:
1720
+ image = [self.image_processor.apply_overlay(mask_image, original_image, i, crops_coords) for i in image]
1721
+
1722
+ # Offload all models
1723
+ self.maybe_free_model_hooks()
1724
+
1725
+ if not return_dict:
1726
+ return (image,)
1727
+
1728
+ return StableDiffusionXLPipelineOutput(images=image)