Is there an existing issue for this?
I have searched the existing issues and checked the recent builds/commits of both this extension and the webui.
Have you read FAQ on README?
I have updated WebUI and this extension to the latest version.
What happened?
The extension generates 16 separate images (the frame count) that do not resemble one another, then stitches them into a GIF; the motion module does not appear to be used at all. Afterwards the webUI is completely broken, and the following error occurs on every subsequent generation (full traceback under Console logs below):
RuntimeError: Expected weight to be a vector of size equal to the number of channels in input, but got weight of shape [2560] and input of shape [32, 5120, 12, 8]
Steps to reproduce the problem
1. Go to txt2img.
2. Enable AnimateDiff.
3. Press Generate.
4. The failure described above occurs.
What should have happened?
It should have generated a GIF using the motion module. Instead it produced 16 unrelated images that merely reflect my prompt, and then blocked any further use of the webUI until a restart.
Commit where the problem happens
webui: f865d3e11647dfd6c7b2cdf90dde24680e58acd8
extension: 88a04c3
What browsers do you use to access the UI ?
Google Chrome
Command Line Arguments
--deepdanbooru --no-half-vae --xformers
Note: also attempted without --xformers.
Console logs
Traceback (most recent call last):
File "B:\AiGen\stable-diffusion-webui\modules\call_queue.py", line 55, in f
res = list(func(*args, **kwargs))
File "B:\AiGen\stable-diffusion-webui\modules\call_queue.py", line 35, in f
res = func(*args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\modules\txt2img.py", line 57, in txt2img
processed = processing.process_images(p)
File "B:\AiGen\stable-diffusion-webui\modules\processing.py", line 620, in process_images
res = process_images_inner(p)
File "B:\AiGen\stable-diffusion-webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\modules\processing.py", line 739, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "B:\AiGen\stable-diffusion-webui\modules\processing.py", line 992, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "B:\AiGen\stable-diffusion-webui\modules\sd_samplers_compvis.py", line 222, in sample
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
File "B:\AiGen\stable-diffusion-webui\modules\sd_samplers_compvis.py", line 51, in launch_sampling
return func()
File "B:\AiGen\stable-diffusion-webui\modules\sd_samplers_compvis.py", line 222, in<lambda>
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddim.py", line 104, in sample
samples, intermediates = self.ddim_sampling(conditioning, size,
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddim.py", line 164, in ddim_sampling
outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
File "B:\AiGen\stable-diffusion-webui\modules\sd_samplers_compvis.py", line 58, in p_sample_ddim_hook
res = self.orig_p_sample_ddim(x_dec, cond, ts, *args, unconditional_conditioning=unconditional_conditioning, **kwargs)
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddim.py", line 212, in p_sample_ddim
model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
File "B:\AiGen\stable-diffusion-webui\modules\sd_hijack_utils.py", line 17, in<lambda>
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "B:\AiGen\stable-diffusion-webui\modules\sd_hijack_utils.py", line 28, in __call__
return self.__orig_func(*args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward
out = self.diffusion_model(x, t, context=cc)
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\modules\sd_unet.py", line 91, in UNetModel_forward
return ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui(self, x, timesteps, context, *args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 802, in forward
h = module(h, emb, context)
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\extensions\sd-webui-animatediff\scripts\animatediff.py", line 19, in mm_tes_forward
x = layer(x, emb)
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 249, in forward
return checkpoint(
File "B:\AiGen\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 121, in checkpoint
return CheckpointFunction.apply(func, len(inputs), *args)
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\autograd\function.py", line 506, in apply
return super().apply(*args, **kwargs)  # type: ignore[misc]
File "B:\AiGen\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 136, in forward
output_tensors = ctx.run_function(*ctx.input_tensors)
File "B:\AiGen\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 262, in _forward
h = self.in_layers(x)
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\container.py", line 217, in forward
input = module(input)
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\modules\sd_hijack_utils.py", line 17, in<lambda>
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "B:\AiGen\stable-diffusion-webui\modules\sd_hijack_utils.py", line 28, in __call__
return self.__orig_func(*args, **kwargs)
File "B:\AiGen\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 226, in forward
return super().forward(x.float()).type(x.dtype)
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\normalization.py", line 273, in forward
return F.group_norm(
File "B:\AiGen\stable-diffusion-webui\venv\lib\site-packages\torch\nn\functional.py", line 2530, in group_norm
return torch.group_norm(input, num_groups, weight, bias, eps, torch.backends.cudnn.enabled)
RuntimeError: Expected weight to be a vector of size equal to the number of channels in input, but got weight of shape [2560] and input of shape [32, 5120, 12, 8]
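For reference, the failing check is PyTorch's GroupNorm weight validation: the affine weight must have one entry per input channel, and here the input arrives with 5120 channels, exactly 2 x 2560, i.e. twice what the layer was built for, which suggests something in the hooked forward path is doubling the channel dimension. Below is a minimal, webui-independent sketch of the same failure mode using only standard PyTorch; the shapes are taken from the error above, everything else is illustrative:

import torch
import torch.nn as nn

# GroupNorm's affine weight has one entry per declared channel: shape [2560].
norm = nn.GroupNorm(num_groups=32, num_channels=2560)

ok = torch.randn(32, 2560, 12, 8)   # matches num_channels, normalizes fine
bad = torch.randn(32, 5120, 12, 8)  # doubled channel dim, as in the traceback

norm(ok)   # works
norm(bad)  # RuntimeError: Expected weight to be a vector of size equal to the
           # number of channels in input, but got weight of shape [2560] and
           # input of shape [32, 5120, 12, 8]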
---
Additional information
No response