- Check the output before "Error completing request": the last traceback is usually just an extension's hook failing because of an earlier error.
- What happens when you start up the webui and try it? It might be related to switching from a module, but try generating again (or even a third time afterward).
- Removing mmcv-full fixed this for me; see #70. I didn't figure out which of the extensions I was using it was related to.
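  For anyone else trying that fix: a minimal sketch of the cleanup, assuming the webui's bundled venv at the path shown in the traceback below (adjust to your own install):

  ```
  D:\stable-diffusion-webui> venv\Scripts\activate
  (venv) D:\stable-diffusion-webui> pip uninstall -y mmcv-full
  ```

  Restart the webui afterwards so the change takes effect.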
- I got it to work partially, but not really. It won't make a segment map, even with the preprocessor; I get this error, but it still makes an image:
  30%|████████████ | 6/20 [00:07<00:18, 1.30s/it]
- I'm getting this error when trying to use it:
0%| | 0/20 [00:02<?, ?it/s]
Error completing request
Arguments: ('task(g6fede3wbf14x7h)', 'x marks the spot, pirate map', '', [], 20, 0, False, False, 1, 1, 7, -1.0, -1.0, 0, 0, 0, False, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, [], 0, 0, 0, 0, 0, 0.25, False, True, False, 0, -1, False, 'keyword prompt', 'keyword1, keyword2', 'None', 'textual inversion first', True, False, 1, False, False, False, 1.1, 1.5, 100, 0.7, False, False, True, False, False, 0, 'Gustavosta/MagicPrompt-Stable-Diffusion', '', False, 7, 100, 'Constant', 0, 'Constant', 0, 4, False, 'x264', 'mci', 10, 0, False, True, True, True, 'intermediate', 'animation', False, False, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'Refresh models', True, 'segmentation', 'control_sd15_seg [fef5e48e]', 1, {'image': array([[[33, 21, 20],
[35, 22, 22],
[34, 21, 22],
...,
[24, 10, 8],
[23, 9, 8],
[24, 11, 9]],
Traceback (most recent call last):
File "D:\stable-diffusion-webui\modules\call_queue.py", line 56, in f
res = list(func(*args, **kwargs))
File "D:\stable-diffusion-webui\modules\call_queue.py", line 37, in f
res = func(*args, **kwargs)
File "D:\stable-diffusion-webui\modules\txt2img.py", line 56, in txt2img
processed = process_images(p)
File "D:\stable-diffusion-webui\modules\processing.py", line 486, in process_images
res = process_images_inner(p)
File "D:\stable-diffusion-webui\modules\processing.py", line 628, in process_images_inner
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
File "D:\stable-diffusion-webui\modules\processing.py", line 828, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "D:\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 323, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "D:\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 221, in launch_sampling
return func()
File "D:\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 323, in
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "D:\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 145, in sample_euler_ancestral
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "D:\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 122, in forward
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [cond_in[a:b]], "c_concat": [image_cond_in[a:b]]})
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "D:\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "D:\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "D:\stable-diffusion-webui\modules\sd_hijack_utils.py", line 17, in
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "D:\stable-diffusion-webui\modules\sd_hijack_utils.py", line 28, in call
return self.__orig_func(*args, **kwargs)
File "D:\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1212, in _call_impl
result = forward_call(*input, **kwargs)
File "D:\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1329, in forward
out = self.diffusion_model(x, t, context=cc)
File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "D:\stable-diffusion-webui\extensions\sd-webui-controlnet\scripts\cldm.py", line 150, in forward2
return forward(*args, **kwargs)
File "D:\stable-diffusion-webui\extensions\sd-webui-controlnet\scripts\cldm.py", line 111, in forward
if abs(x.shape[-1] - outer.hint_cond.shape[-1] // 8) > 8:
AttributeError: 'NoneType' object has no attribute 'shape'
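  For what it's worth, the last frame is the giveaway: `outer.hint_cond` is still `None` when the sampler calls into the ControlNet hook, so the shape comparison has nothing to inspect, which usually means no control image ever reached the ControlNet unit. A small, hypothetical illustration of that failure mode (the name `hint_cond` and the `// 8` pixel-to-latent scaling come from the traceback; the guard itself is not the extension's actual code):

  ```python
  import numpy as np


  def check_hint(latent_width: int, hint_cond) -> None:
      """Hypothetical guard mirroring the comparison at cldm.py line 111."""
      if hint_cond is None:
          # This is the state the traceback ends in: no control image ever
          # reached the ControlNet unit, so hint_cond was never assigned.
          raise RuntimeError("ControlNet got no control image (hint_cond is None)")
      # The hint is in pixel space and the latent is 8x smaller, hence the // 8.
      if abs(latent_width - hint_cond.shape[-1] // 8) > 8:
          raise RuntimeError("control image resolution does not match the generation size")


  check_hint(64, np.zeros((3, 512, 512)))  # fine: 512 // 8 == 64 matches a 512px txt2img

  try:
      check_hint(64, None)                 # the situation in the traceback above
  except RuntimeError as exc:
      print(exc)                           # readable message instead of the bare AttributeError
  ```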