RuntimeError: Tensors must have same number of dimensions: got 4 and 3 error #1530
Replies: 2 comments 1 reply
-
From the log, the error is not from the latest version of CN. Please update and make sure you see at least 1.1.214. If the problem persists, this will be reopened. |
Beta Was this translation helpful? Give feedback.
-
MacBook Pro M1. I have a similar problem. I pulled the newest code with git (v1.1.224), and then I tried to run the "depth_leres++" preprocessor. Can anyone help me? Thanks. |
Beta Was this translation helpful? Give feedback.
-
When I use some of the t2i Adapter scripts I keep running into these types of tensor dimension errors.
Here is the error I get:
Loaded state_dict from [C:\ai\stable-diffusion-webui-ux\extensions\sd-webui-controlnet\annotator\downloads\pidinet\table5_pidinet.pth]
0%| | 0/40 [00:00<?, ?it/s]
Error completing request
Traceback (most recent call last):
File "C:\ai\stable-diffusion-webui-ux\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "C:\ai\stable-diffusion-webui-ux\modules\call_queue.py", line 37, in f
res = func(*args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\modules\txt2img.py", line 57, in txt2img
processed = processing.process_images(p)
File "C:\ai\stable-diffusion-webui-ux\modules\processing.py", line 611, in process_images
res = process_images_inner(p)
File "C:\ai\stable-diffusion-webui-ux\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\modules\processing.py", line 729, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "C:\ai\stable-diffusion-webui-ux\extensions\sd-webui-controlnet\scripts\hook.py", line 291, in process_sample
return process.sample_before_CN_hack(*args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\modules\processing.py", line 977, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "C:\ai\stable-diffusion-webui-ux\modules\sd_samplers_kdiffusion.py", line 383, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "C:\ai\stable-diffusion-webui-ux\modules\sd_samplers_kdiffusion.py", line 257, in launch_sampling
return func()
File "C:\ai\stable-diffusion-webui-ux\modules\sd_samplers_kdiffusion.py", line 383, in
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "C:\ai\stable-diffusion-webui-ux\venv\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\repositories\k-diffusion\k_diffusion\sampling.py", line 145, in sample_euler_ancestral
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "C:\ai\stable-diffusion-webui-ux\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\modules\sd_samplers_kdiffusion.py", line 137, in forward
x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict([cond_in], image_cond_in))
File "C:\ai\stable-diffusion-webui-ux\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "C:\ai\stable-diffusion-webui-ux\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\modules\sd_hijack_utils.py", line 17, in
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "C:\ai\stable-diffusion-webui-ux\modules\sd_hijack_utils.py", line 28, in call
return self.__orig_func(*args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "C:\ai\stable-diffusion-webui-ux\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward
out = self.diffusion_model(x, t, context=cc)
File "C:\ai\stable-diffusion-webui-ux\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\extensions\sd-webui-controlnet\scripts\hook.py", line 587, in forward_webui
return forward(*args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\extensions\sd-webui-controlnet\scripts\hook.py", line 369, in forward
control = param.control_model(x=x, hint=param.used_hint_cond, timesteps=timesteps, context=context)
File "C:\ai\stable-diffusion-webui-ux\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\extensions\sd-webui-controlnet\scripts\adapter.py", line 104, in forward
self.control = self.control_model(hint_in)
File "C:\ai\stable-diffusion-webui-ux\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\ai\stable-diffusion-webui-ux\extensions\sd-webui-controlnet\scripts\adapter.py", line 324, in forward
x = torch.cat([x, style_embedding], dim=1)
RuntimeError: Tensors must have same number of dimensions: got 4 and 3
I also see other odd behavior: the preprocessor preview sometimes shows a static-noise image.
Beta Was this translation helpful? Give feedback.
All reactions