Can't generate any photos? #2691
edwardk1234 started this conversation in General
Not sure what the issue is, but I can't seem to generate any photos. Could you please let me know how to fix it?
Thanks!
```
*** Error completing request
*** Arguments: ('task(djlibqzxlfpltib)', 'realistic portrait photo of beautiful woman ', '', [], 20, 'DPM++ 2M Karras', 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], <gradio.routes.Request object at 0x7bb6a0ead4e0>, 0, False, '', 0.8, -1, False, -1, 0, 0, 0, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, False) {}
Traceback (most recent call last):
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/txt2img.py", line 55, in txt2img
processed = processing.process_images(p)
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/processing.py", line 734, in process_images
res = process_images_inner(p)
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/processing.py", line 868, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/processing.py", line 1142, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_samplers_kdiffusion.py", line 235, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_samplers_common.py", line 261, in launch_sampling
return func()
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_samplers_kdiffusion.py", line 235, in
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion/sampling.py", line 594, in sample_dpmpp_2m
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_samplers_cfg_denoiser.py", line 169, in forward
x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion/external.py", line 112, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "/content/gdrive/MyDrive/sd/stablediffusion/src/k-diffusion/k_diffusion/external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_hijack_utils.py", line 17, in
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_hijack_utils.py", line 26, in call
return self.__sub_func(self.__orig_func, *args, **kwargs)
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_hijack_unet.py", line 48, in apply_model
return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float()
File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/models/diffusion/ddpm.py", line 858, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/models/diffusion/ddpm.py", line 1329, in forward
out = self.diffusion_model(x, t, context=cc)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_unet.py", line 91, in UNetModel_forward
return original_forward(self, x, timesteps, context, *args, **kwargs)
File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/diffusionmodules/openaimodel.py", line 776, in forward
h = module(h, emb, context)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/diffusionmodules/openaimodel.py", line 84, in forward
x = layer(x, context)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/attention.py", line 334, in forward
x = block(x, context=context[i])
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/attention.py", line 269, in forward
return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/diffusionmodules/util.py", line 114, in checkpoint
return CheckpointFunction.apply(func, len(inputs), *args)
File "/usr/local/lib/python3.10/dist-packages/torch/autograd/function.py", line 539, in apply
return super().apply(*args, **kwargs) # type: ignore[misc]
File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/diffusionmodules/util.py", line 129, in forward
output_tensors = ctx.run_function(*ctx.input_tensors)
File "/content/gdrive/MyDrive/sd/stablediffusion/ldm/modules/attention.py", line 272, in _forward
x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "/content/gdrive/MyDrive/sd/stable-diffusion-webui/modules/sd_hijack_optimizations.py", line 496, in xformers_attention_forward
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=get_xformers_flash_attention_op(q, k, v))
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/init.py", line 223, in memory_efficient_attention
return _memory_efficient_attention(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/init.py", line 321, in _memory_efficient_attention
return _memory_efficient_attention_forward(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/init.py", line 337, in _memory_efficient_attention_forward
op = _dispatch_fw(inp, False)
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/dispatch.py", line 120, in _dispatch_fw
return _run_priority_list(
File "/usr/local/lib/python3.10/dist-packages/xformers/ops/fmha/dispatch.py", line 63, in _run_priority_list
raise NotImplementedError(msg)
NotImplementedError: No operator found for `memory_efficient_attention_forward` with inputs:
     query       : shape=(2, 4096, 8, 40) (torch.float16)
     key         : shape=(2, 4096, 8, 40) (torch.float16)
     value       : shape=(2, 4096, 8, 40) (torch.float16)
     attn_bias   : <class 'NoneType'>
     p           : 0.0
`decoderF` is not supported because:
    xFormers wasn't build with CUDA support
    attn_bias type is <class 'NoneType'>
    operator wasn't built - see `python -m xformers.info` for more info
`flshattF` is not supported because:
    xFormers wasn't build with CUDA support
    requires device with capability > (8, 0) but your GPU has capability (7, 5) (too old)
    operator wasn't built - see `python -m xformers.info` for more info
`tritonflashattF` is not supported because:
    xFormers wasn't build with CUDA support
    requires device with capability > (8, 0) but your GPU has capability (7, 5) (too old)
    operator wasn't built - see `python -m xformers.info` for more info
    triton is not available
    requires GPU with sm80 minimum compute capacity, e.g., A100/H100/L4
    Only work on pre-MLIR triton for now
`cutlassF` is not supported because:
    xFormers wasn't build with CUDA support
    operator wasn't built - see `python -m xformers.info` for more info
`smallkF` is not supported because:
    max(query.shape[-1] != value.shape[-1]) > 32
    xFormers wasn't build with CUDA support
    dtype=torch.float16 (supported: {torch.float32})
    operator wasn't built - see `python -m xformers.info` for more info
    unsupported embed per head: 40
```
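Every candidate operator above is rejected with "xFormers wasn't build with CUDA support", which points at a CPU-only (or torch-mismatched) xformers wheel; the flash-attention paths are additionally ruled out because they need compute capability (8, 0) or higher, while this GPU reports (7, 5) (typical of a Colab T4). A minimal diagnostic sketch, assuming torch and xformers import in the same environment the webui launches with:

```python
# Confirms the two failure causes named in the error above: a CPU-only
# xformers build, and a GPU below the (8, 0) capability the flash ops need.
import torch
import xformers

print("xformers version:", xformers.__version__)
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("Device:", torch.cuda.get_device_name(0))
    # flshattF / tritonflashattF require >= (8, 0); a T4 reports (7, 5).
    print("Compute capability:", torch.cuda.get_device_capability(0))
```

Running `python -m xformers.info` (the command the error message itself suggests) prints the same per-operator availability table and shows which build of xformers is actually installed.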
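The failing call itself can be reproduced outside the webui with tensors of the logged shapes; this is a sketch for isolating the problem, not part of the webui code:

```python
# Minimal repro of the call that fails inside sd_hijack_optimizations.py,
# using the shapes and dtype from the traceback above.
import torch
import xformers.ops

q = torch.randn(2, 4096, 8, 40, dtype=torch.float16, device="cuda")
k = torch.randn_like(q)
v = torch.randn_like(q)

# Raises the same NotImplementedError on a CPU-only xformers build; succeeds
# once a CUDA-enabled wheel matching the installed torch version is in place.
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
print(out.shape)  # torch.Size([2, 4096, 8, 40])
```

Two usual ways out: reinstall an xformers wheel built for the torch/CUDA combination in the Colab, or launch the webui without the `--xformers` flag (on torch 2.x builds, `--opt-sdp-attention` is a common substitute if your webui version supports it), which avoids `memory_efficient_attention` entirely.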