MingZhong committed
Commit 51056d0
1 Parent(s): d25816c

Upload pipeline.py

Files changed (1):
  pipeline.py (+1122, -0)

pipeline.py ADDED
@@ -0,0 +1,1122 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# for diffusers version 0.26.3

import inspect
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
from diffusers.models.attention_processor import FusedAttnProcessor2_0
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    USE_PEFT_BACKEND,
    deprecate,
    logging,
    replace_example_docstring,
    scale_lora_layers,
    unscale_lora_layers,
)
from diffusers.utils.torch_utils import randn_tensor
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import StableDiffusionPipeline

        >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
        >>> pipe = pipe.to("cuda")

        >>> prompt = "a photo of an astronaut riding a horse on mars"
        >>> image = pipe(prompt).images[0]
        ```
"""


def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg

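
# The following self-contained sketch (editor's addition, not part of the upstream diffusers
# file) illustrates what `rescale_noise_cfg` does: it shifts the per-sample standard deviation
# of the guided noise prediction back toward that of the text-conditioned prediction,
# interpolated by `guidance_rescale`. Names and values here are illustrative only.
def _example_rescale_noise_cfg():
    noise_pred_text = torch.randn(2, 4, 64, 64)
    # simulate an over-amplified CFG result by scaling up the text prediction
    noise_cfg = noise_pred_text * 3.0
    rescaled = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0)
    # with guidance_rescale=1.0 the std is matched exactly to the text prediction
    assert torch.allclose(
        rescaled.std(dim=[1, 2, 3]), noise_pred_text.std(dim=[1, 2, 3]), atol=1e-4
    )
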
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    **kwargs,
):
    """
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used,
            `timesteps` must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
            timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
            must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and
        the second element is the number of inference steps.
    """
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps

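
# Illustrative sketch (editor's addition, not part of the upstream file): `retrieve_timesteps`
# first asks the scheduler to build a schedule, then reads it back. A default-constructed
# `DDPMScheduler` is used here purely as an assumed example; any scheduler works the same way
# for the `num_inference_steps` path. Passing `timesteps=` instead requires a scheduler whose
# `set_timesteps` accepts a `timesteps` argument.
def _example_retrieve_timesteps():
    from diffusers import DDPMScheduler

    scheduler = DDPMScheduler()
    timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=10)
    # the schedule is returned in descending order, one entry per inference step
    assert num_inference_steps == 10 and len(timesteps) == 10
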
class StableDiffusionPipeline(
    DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    The pipeline also inherits the following loading methods:
        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
        - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
        - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
            about a model's potential harms.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
    """

    model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
    _exclude_from_cpu_offload = ["safety_checker"]
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        image_encoder: CLIPVisionModelWithProjection = None,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly, as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `safety_checker=None` instead."
            )

        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly, as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()

    def enable_vae_tiling(self):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to
        allow processing larger images.
        """
        self.vae.enable_tiling()

    def disable_vae_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_tiling()

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        **kwargs,
    ):
        deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
        deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)

        prompt_embeds_tuple = self.encode_prompt(
            prompt=prompt,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=lora_scale,
            **kwargs,
        )

        # concatenate for backwards comp
        prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])

        return prompt_embeds

    def encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier-free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means
                that the output of the pre-final layer will be used for computing the prompt embeddings.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if not USE_PEFT_BACKEND:
                adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
            else:
                scale_lora_layers(self.text_encoder, lora_scale)

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            if clip_skip is None:
                prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
                prompt_embeds = prompt_embeds[0]
            else:
                prompt_embeds = self.text_encoder(
                    text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
                )
                # Access the `hidden_states` first, that contains a tuple of
                # all the hidden states from the encoder layers. Then index into
                # the tuple to access the hidden states from the desired layer.
                prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
                # We also need to apply the final LayerNorm here to not mess with the
                # representations. The `last_hidden_states` that we typically use for
                # obtaining the final prompt representations passes through the LayerNorm
                # layer.
                prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
            # Retrieve the original scale by scaling back the LoRA layers
            unscale_lora_layers(self.text_encoder, lora_scale)

        return prompt_embeds, negative_prompt_embeds

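    # Illustrative usage sketch (editor's addition, not in the upstream file): `encode_prompt`
    # returns a (prompt_embeds, negative_prompt_embeds) tuple, e.g. for SD v1.5 each of shape
    # [batch_size * num_images_per_prompt, 77, 768]; `__call__` later concatenates them as
    # torch.cat([negative_prompt_embeds, prompt_embeds]) for classifier-free guidance:
    #
    #     cond, uncond = pipe.encode_prompt(
    #         "a photo of a cat", device, num_images_per_prompt=1,
    #         do_classifier_free_guidance=True, negative_prompt="blurry",
    #     )
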
    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
        dtype = next(self.image_encoder.parameters()).dtype

        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor(image, return_tensors="pt").pixel_values

        image = image.to(device=device, dtype=dtype)
        if output_hidden_states:
            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_enc_hidden_states = self.image_encoder(
                torch.zeros_like(image), output_hidden_states=True
            ).hidden_states[-2]
            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
                num_images_per_prompt, dim=0
            )
            return image_enc_hidden_states, uncond_image_enc_hidden_states
        else:
            image_embeds = self.image_encoder(image).image_embeds
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_embeds = torch.zeros_like(image_embeds)

            return image_embeds, uncond_image_embeds

    def prepare_ip_adapter_image_embeds(self, ip_adapter_image, device, num_images_per_prompt):
        if not isinstance(ip_adapter_image, list):
            ip_adapter_image = [ip_adapter_image]

        if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
            raise ValueError(
                f"`ip_adapter_image` must have the same length as the number of IP Adapters. Got"
                f" {len(ip_adapter_image)} images and"
                f" {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
            )

        image_embeds = []
        for single_ip_adapter_image, image_proj_layer in zip(
            ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
        ):
            output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
            single_image_embeds, single_negative_image_embeds = self.encode_image(
                single_ip_adapter_image, device, 1, output_hidden_state
            )
            single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
            single_negative_image_embeds = torch.stack([single_negative_image_embeds] * num_images_per_prompt, dim=0)

            if self.do_classifier_free_guidance:
                single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
                single_image_embeds = single_image_embeds.to(device)

            image_embeds.append(single_image_embeds)

        return image_embeds

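    # Illustrative usage sketch (editor's addition, not in the upstream file): image embeddings
    # are only prepared when `ip_adapter_image` is passed to `__call__`, after loading an
    # adapter via the `IPAdapterMixin` API, e.g.:
    #
    #     pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
    #     image = pipe("best quality", ip_adapter_image=reference_image).images[0]
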
    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
        deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)

        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
        r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.

        The suffixes after the scaling factors represent the stages where they are being applied.

        Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
        that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.

        Args:
            s1 (`float`):
                Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
                mitigate the "oversmoothing effect" in the enhanced denoising process.
            s2 (`float`):
                Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
                mitigate the "oversmoothing effect" in the enhanced denoising process.
            b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
            b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
        """
        if not hasattr(self, "unet"):
            raise ValueError("The pipeline must have `unet` for using FreeU.")
        self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)

    def disable_freeu(self):
        """Disables the FreeU mechanism if enabled."""
        self.unet.disable_freeu()

    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.fuse_qkv_projections
    def fuse_qkv_projections(self, unet: bool = True, vae: bool = True):
        """
        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>

        Args:
            unet (`bool`, defaults to `True`): To apply fusion on the UNet.
            vae (`bool`, defaults to `True`): To apply fusion on the VAE.
        """
        self.fusing_unet = False
        self.fusing_vae = False

        if unet:
            self.fusing_unet = True
            self.unet.fuse_qkv_projections()
            self.unet.set_attn_processor(FusedAttnProcessor2_0())

        if vae:
            if not isinstance(self.vae, AutoencoderKL):
                raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.")

            self.fusing_vae = True
            self.vae.fuse_qkv_projections()
            self.vae.set_attn_processor(FusedAttnProcessor2_0())

    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
    def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
        """Disable QKV projection fusion if enabled.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>

        Args:
            unet (`bool`, defaults to `True`): To apply fusion on the UNet.
            vae (`bool`, defaults to `True`): To apply fusion on the VAE.

        """
        if unet:
            if not self.fusing_unet:
                logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.")
            else:
                self.unet.unfuse_qkv_projections()
                self.fusing_unet = False

        if vae:
            if not self.fusing_vae:
                logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.")
            else:
                self.vae.unfuse_qkv_projections()
                self.fusing_vae = False

    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            w (`torch.Tensor`):
                Guidance scale values (one per sample) used to generate the embedding vectors.
            embedding_dim (`int`, *optional*, defaults to 512):
                dimension of the embeddings to generate
            dtype:
                data type of the generated embeddings

        Returns:
            `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

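    # Illustrative usage sketch (editor's addition, not in the upstream file): this sinusoidal
    # embedding is only consumed by UNets that expose `time_cond_proj_dim` (e.g. guidance-
    # distilled models), see step 6.2 in `__call__` below. For a batch of two guidance weights:
    #
    #     w = torch.tensor([7.5, 7.5]) - 1.0
    #     emb = pipe.get_guidance_scale_embedding(w, embedding_dim=256)
    #     assert emb.shape == (2, 256)
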
    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def guidance_rescale(self):
        return self._guidance_rescale

    @property
    def clip_skip(self):
        return self._clip_skip

    # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guidance_rescale: float = 0.0,
        clip_skip: Optional[int] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        lora_composite: bool = False,
        **kwargs,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what not to include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            ip_adapter_image (`PipelineImageInput`, *optional*):
                Optional image input to work with IP Adapters.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
                using zero terminal SNR.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means
                that the output of the pre-final layer will be used for computing the prompt embeddings.
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            lora_composite (`bool`, *optional*, defaults to `False`):
                Whether to use the LoRA Composite method from the paper "Multi-LoRA Composition for Image Generation"
                to generate the image given multiple active LoRAs.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
                otherwise a `tuple` is returned where the first element is a list with the generated images and the
                second element is a list of `bool`s indicating whether the corresponding generated image contains
                "not-safe-for-work" (nsfw) content.
        """

        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)

        if callback is not None:
            deprecate(
                "callback",
                "1.0.0",
                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )
        if callback_steps is not None:
            deprecate(
                "callback_steps",
                "1.0.0",
                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )

        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor
        # to deal with lora scaling and other possible forward hooks

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            callback_on_step_end_tensor_inputs,
        )

        self._guidance_scale = guidance_scale
        self._guidance_rescale = guidance_rescale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs
        self._interrupt = False

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Encode input prompt
        lora_scale = (
            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
        )

        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            self.do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=lora_scale,
            clip_skip=self.clip_skip,
        )

        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        if ip_adapter_image is not None:
            image_embeds = self.prepare_ip_adapter_image_embeds(
                ip_adapter_image, device, batch_size * num_images_per_prompt
            )

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 6.1 Add image embeds for IP-Adapter
        added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None

        # 6.2 Optionally get Guidance Scale Embedding
        timestep_cond = None
        if self.unet.config.time_cond_proj_dim is not None:
            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
            timestep_cond = self.get_guidance_scale_embedding(
                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
            ).to(device=device, dtype=latents.dtype)

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order

        if lora_composite:
            adapters = self.get_active_adapters()

        self._num_timesteps = len(timesteps)
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                if lora_composite:
                    noise_preds = []
                    # get noise_pred conditioned on each lora
                    self.enable_lora()
                    for adapter in adapters:
                        self.set_adapters(adapter)
                        noise_pred = self.unet(
                            latent_model_input,
                            t,
                            encoder_hidden_states=prompt_embeds,
                            timestep_cond=timestep_cond,
                            cross_attention_kwargs=self.cross_attention_kwargs,
                            added_cond_kwargs=added_cond_kwargs,
                            return_dict=False,
                        )[0]
                        noise_preds.append(noise_pred)
                else:
                    noise_pred = self.unet(
                        latent_model_input,
                        t,
                        encoder_hidden_states=prompt_embeds,
                        timestep_cond=timestep_cond,
                        cross_attention_kwargs=self.cross_attention_kwargs,
                        added_cond_kwargs=added_cond_kwargs,
                        return_dict=False,
                    )[0]

                # perform guidance
                if self.do_classifier_free_guidance:
                    if lora_composite:
                        # average the unconditional and text-conditioned predictions across LoRAs
                        noise_preds = torch.stack(noise_preds, dim=0)
                        noise_pred_uncond, noise_pred_text = noise_preds.chunk(2, dim=1)
                        noise_pred_uncond = noise_pred_uncond.mean(dim=0)
                        noise_pred_text = noise_pred_text.mean(dim=0)
                        noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
                    else:
                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                        noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

                if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
                    # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        if output_type != "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
                0
            ]
            image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
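

# ---------------------------------------------------------------------------
# Illustrative usage sketch for `lora_composite` (editor's addition, not part of the uploaded
# file). The LoRA paths and adapter names are placeholders; any SD v1.5-compatible LoRAs work.
# With the PEFT backend, `set_adapters` activates both adapters, and the denoising loop above
# then averages the noise predictions obtained under each one (LoRA Composite decoding from
# "Multi-LoRA Composition for Image Generation").
if __name__ == "__main__":
    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")
    pipe.load_lora_weights("path/to/lora_a", adapter_name="lora_a")  # placeholder paths
    pipe.load_lora_weights("path/to/lora_b", adapter_name="lora_b")
    pipe.set_adapters(["lora_a", "lora_b"])

    image = pipe(
        "a photo of an astronaut riding a horse on mars",
        num_inference_steps=50,
        guidance_scale=7.5,
        lora_composite=True,  # average per-LoRA noise predictions at every step
    ).images[0]
    image.save("composite.png")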