Precompute is_sdxl_inpaint flag
huchenlei committed May 15, 2024
1 parent 1c0a0c4 commit 9eb2f78
Showing 3 changed files with 22 additions and 22 deletions.
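
In short: instead of probing the checkpoint's state_dict for a 9-channel first conv on every call to txt2img_image_conditioning, img2img_image_conditioning, and apply_model, the check is now done once in load_model_weights and cached as model.is_sdxl_inpaint; the hot paths then read a plain boolean attribute.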
28 changes: 11 additions & 17 deletions modules/processing.py
@@ -115,20 +115,17 @@ def txt2img_image_conditioning(sd_model, x, width, height):
         return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)

     else:
-        sd = sd_model.model.state_dict()
-        diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-        if diffusion_model_input is not None:
-            if diffusion_model_input.shape[1] == 9:
-                # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
-                image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
-                image_conditioning = images_tensor_to_samples(image_conditioning,
-                                                              approximation_indexes.get(opts.sd_vae_encode_method))
+        if sd_model.model.is_sdxl_inpaint:
+            # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+            image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
+            image_conditioning = images_tensor_to_samples(image_conditioning,
+                                                          approximation_indexes.get(opts.sd_vae_encode_method))

-                # Add the fake full 1s mask to the first dimension.
-                image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
-                image_conditioning = image_conditioning.to(x.dtype)
+            # Add the fake full 1s mask to the first dimension.
+            image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
+            image_conditioning = image_conditioning.to(x.dtype)

-                return image_conditioning
+            return image_conditioning

         # Dummy zero conditioning if we're not using inpainting or unclip models.
         # Still takes up a bit of memory, but no encoder call.
@@ -390,11 +387,8 @@ def img2img_image_conditioning(self, source_image, latent_image, image_mask=None
         if self.sampler.conditioning_key == "crossattn-adm":
             return self.unclip_image_conditioning(source_image)

-        sd = self.sampler.model_wrap.inner_model.model.state_dict()
-        diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-        if diffusion_model_input is not None:
-            if diffusion_model_input.shape[1] == 9:
-                return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+        if self.sampler.model_wrap.inner_model.model.is_sdxl_inpaint:
+            return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)

         # Dummy zero conditioning if we're not using inpainting or depth model.
         return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
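For context on the txt2img hunk above: the torch.nn.functional.pad call with spec (0, 0, 0, 0, 1, 0) prepends one channel filled with 1.0 (the "everything is masked" mask) to the 4-channel encoded masked image, giving the 5-channel conditioning that is later concatenated with the 4-channel latent via c_concat (hence the 9 input channels the new flag tests for). A minimal sketch with a made-up latent shape, not webui code:

import torch

# Hypothetical stand-in for the VAE-encoded all-0.5 "masked image": batch of 2, 4 latent channels.
latent = torch.zeros(2, 4, 8, 8)

# Pad spec (0, 0, 0, 0, 1, 0) pads (W_left, W_right, H_top, H_bottom, C_front, C_back),
# so a single channel of ones is inserted in front of the latent channels.
cond = torch.nn.functional.pad(latent, (0, 0, 0, 0, 1, 0), value=1.0)

print(cond.shape)           # torch.Size([2, 5, 8, 8])
print(cond[:, 0].unique())  # tensor([1.]) -- the all-ones mask channel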
7 changes: 7 additions & 0 deletions modules/sd_models.py
@@ -380,6 +380,13 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
     model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model')
     model.is_sd1 = not model.is_sdxl and not model.is_sd2
     model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys()
+    # Set is_sdxl_inpaint flag.
+    diffusion_model_input = state_dict.get('diffusion_model.input_blocks.0.0.weight', None)
+    model.is_sdxl_inpaint = (
+        model.is_sdxl and
+        diffusion_model_input is not None and
+        diffusion_model_input.shape[1] == 9
+    )
     if model.is_sdxl:
         sd_models_xl.extend_sdxl(model)

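The new block in sd_models.py boils the inpaint detection down to a single shape check on the first UNet conv weight: a regular SDXL UNet takes 4 latent input channels, while an inpainting UNet takes 9 (4 noisy latent + 4 masked-image latent + 1 mask). A rough, self-contained sketch of that check, with made-up tensors standing in for real checkpoint weights:

import torch

def looks_like_inpaint_unet(state_dict: dict) -> bool:
    # Same key and shape test as the diff above; True only for 9-channel input convs.
    w = state_dict.get('diffusion_model.input_blocks.0.0.weight')
    return w is not None and w.shape[1] == 9

# Hypothetical weight tensors (out_channels, in_channels, kH, kW), not real checkpoints.
base_sd = {'diffusion_model.input_blocks.0.0.weight': torch.zeros(320, 4, 3, 3)}
inpaint_sd = {'diffusion_model.input_blocks.0.0.weight': torch.zeros(320, 9, 3, 3)}

print(looks_like_inpaint_unet(base_sd))     # False
print(looks_like_inpaint_unet(inpaint_sd))  # True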
9 changes: 4 additions & 5 deletions modules/sd_models_xl.py
@@ -35,11 +35,10 @@ def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch:


 def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond):
-    sd = self.model.state_dict()
-    diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-    if diffusion_model_input is not None:
-        if diffusion_model_input.shape[1] == 9:
-            x = torch.cat([x] + cond['c_concat'], dim=1)
+    """WARNING: This function is called once per denoising iteration. DO NOT add
+    expensive function calls such as `model.state_dict`."""
+    if self.model.is_sdxl_inpaint:
+        x = torch.cat([x] + cond['c_concat'], dim=1)

     return self.model(x, t, cond)

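The docstring added to apply_model states the motivation: it runs once per denoising step, and Module.state_dict() rebuilds a mapping over every weight tensor on each call, while the precomputed flag is just an attribute read. A rough micro-benchmark on a small stand-in module (not the real SDXL UNet), meant to illustrate the shape of the difference rather than exact numbers:

import timeit

import torch.nn as nn

# Small stand-in for a large UNet: 50 conv layers, so state_dict() has some work to do.
module = nn.Sequential(*[nn.Conv2d(64, 64, 3, padding=1) for _ in range(50)])
module.is_sdxl_inpaint = False  # flag computed once, e.g. at weight-load time

# Old pattern: rebuild the state dict and probe it on every "denoising step".
per_step_scan = timeit.timeit(
    lambda: 'diffusion_model.input_blocks.0.0.weight' in module.state_dict(),
    number=1000,
)
# New pattern: read the cached boolean.
cached_flag = timeit.timeit(lambda: module.is_sdxl_inpaint, number=1000)

print(f"state_dict() probe x1000: {per_step_scan:.4f}s, cached flag x1000: {cached_flag:.6f}s")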
