It seems I can't get DWPose to work with webui Forge. Not even OpenPose works. I get a bunch of errors like:
Traceback (most recent call last):
  File "G:\StableDiffusion\Forge\webui\modules_forge\main_thread.py", line 37, in loop
    task.work()
  File "G:\StableDiffusion\Forge\webui\modules_forge\main_thread.py", line 26, in work
    self.result = self.func(*self.args, **self.kwargs)
  File "G:\StableDiffusion\Forge\webui\modules\txt2img.py", line 111, in txt2img_function
    processed = processing.process_images(p)
  File "G:\StableDiffusion\Forge\webui\modules\processing.py", line 752, in process_images
    res = process_images_inner(p)
  File "G:\StableDiffusion\Forge\webui\modules\processing.py", line 922, in process_images_inner
    samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
  File "G:\StableDiffusion\Forge\webui\extensions\sd-webui-comfyui\lib_comfyui\webui\patches.py", line 95, in p_sample_patch
    x = original_function(*args, **kwargs)
  File "G:\StableDiffusion\Forge\webui\modules\processing.py", line 1275, in sample
    samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
  File "G:\StableDiffusion\Forge\webui\modules\sd_samplers_kdiffusion.py", line 251, in sample
    samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "G:\StableDiffusion\Forge\webui\modules\sd_samplers_common.py", line 263, in launch_sampling
    return func()
  File "G:\StableDiffusion\Forge\webui\modules\sd_samplers_kdiffusion.py", line 251, in <lambda>
    samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "G:\StableDiffusion\Forge\webui\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
    denoised = model(x, sigmas[i] * s_in, **extra_args)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "G:\StableDiffusion\Forge\webui\modules\sd_samplers_cfg_denoiser.py", line 182, in forward
    denoised = forge_sampler.forge_sample(self, denoiser_params=denoiser_params,
  File "G:\StableDiffusion\Forge\webui\modules_forge\forge_sampler.py", line 88, in forge_sample
    denoised = sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options, seed)
  File "G:\StableDiffusion\Forge\webui\ldm_patched\modules\samplers.py", line 289, in sampling_function
    cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond, x, timestep, model_options)
  File "G:\StableDiffusion\Forge\webui\ldm_patched\modules\samplers.py", line 252, in calc_cond_uncond_batch
    c['control'] = control.get_control(input_x, timestep, control_cond, len(cond_or_uncond))
  File "G:\StableDiffusion\Forge\webui\ldm_patched\modules\controlnet.py", line 273, in get_control
    control = self.control_model(x=x_noisy.to(dtype), hint=self.cond_hint.to(self.device), timesteps=timestep.float(), context=context.to(dtype), y=y)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "G:\StableDiffusion\Forge\webui\ldm_patched\controlnet\cldm.py", line 305, in forward
    h = module(h, emb, context)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "G:\StableDiffusion\Forge\webui\ldm_patched\ldm\modules\diffusionmodules\openaimodel.py", line 74, in forward
    return forward_timestep_embed(self, *args, **kwargs)
  File "G:\StableDiffusion\Forge\webui\ldm_patched\ldm\modules\diffusionmodules\openaimodel.py", line 55, in forward_timestep_embed
    x = layer(x, context, transformer_options)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "G:\StableDiffusion\Forge\webui\ldm_patched\ldm\modules\attention.py", line 620, in forward
    x = block(x, context=context[i], transformer_options=transformer_options)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "G:\StableDiffusion\Forge\webui\ldm_patched\ldm\modules\attention.py", line 447, in forward
    return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
  File "G:\StableDiffusion\Forge\webui\ldm_patched\ldm\modules\diffusionmodules\util.py", line 194, in checkpoint
    return func(*inputs)
  File "G:\StableDiffusion\Forge\webui\ldm_patched\ldm\modules\attention.py", line 547, in _forward
    n = self.attn2(n, context=context_attn2, value=value_attn2, transformer_options=extra_options)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "G:\StableDiffusion\Forge\webui\ldm_patched\ldm\modules\attention.py", line 391, in forward
    k = self.to_k(context)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "G:\StableDiffusion\Forge\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "G:\StableDiffusion\Forge\webui\ldm_patched\modules\ops.py", line 96, in forward
    return self.forward_ldm_patched_cast_weights(*args, **kwargs)
  File "G:\StableDiffusion\Forge\webui\ldm_patched\modules\ops.py", line 92, in forward_ldm_patched_cast_weights
    return torch.nn.functional.linear(input, weight, bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (77x2048 and 768x320)
mat1 and mat2 shapes cannot be multiplied (77x2048 and 768x320)
*** Error completing request
*** Arguments: ('task(7iln8oruaqyegfz)', <gradio.routes.Request object at 0x0000023B98BD9E10>, 'Redhead girl, classy outfit, full body portrait', '', [], 6, 'DPM++ 2M Karras', 1, 1, 2, 1216, 832, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], 0, False, '', 0.8, -1, False, -1, 0, 0, 0, '5c801b21-0f0a-4985-86e4-863eeab17138', True, {'postprocess_latent_txt2img': False, 'postprocess_txt2img': False, 'postprocess_image_txt2img': False, 'before_save_image_txt2img': False}, ControlNetUnit(input_mode=<InputMode.SIMPLE: 'simple'>, use_preview_as_input=False, batch_image_dir='', batch_mask_dir='', batch_input_gallery=[], batch_mask_gallery=[], generated_image=array([[[0, 0, 0], ...]], dtype=uint8), mask_image=None, hr_option='Both', enabled=True, module='openpose_full', model='control_v11p_sd15_openpose [cab727d4]', weight=1, image={'image': array([[[92, 86, 26], ...]], dtype=uint8), 'mask': array([[[0, 0, 0], ...]], dtype=uint8)}, resize_mode='Crop and Resize', processor_res=832, threshold_a=0.5, threshold_b=0.5, guidance_start=0, guidance_end=1, pixel_perfect=True, control_mode='Balanced', save_detected_map=True), ControlNetUnit(input_mode=<InputMode.SIMPLE: 'simple'>, use_preview_as_input=False, batch_image_dir='', batch_mask_dir='', batch_input_gallery=[], batch_mask_gallery=[], generated_image=None, mask_image=None, hr_option='Both', enabled=False, module='None', model='None', weight=1, image=None, resize_mode='Crop and Resize', processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), ControlNetUnit(input_mode=<InputMode.SIMPLE: 'simple'>, use_preview_as_input=False, batch_image_dir='', batch_mask_dir='', batch_input_gallery=[], batch_mask_gallery=[], generated_image=None, mask_image=None, hr_option='Both', enabled=False, module='None', model='None', weight=1, image=None, resize_mode='Crop and Resize', processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), False, 7, 1, 'Constant', 0, 'Constant', 0, 1, 'enable', 'MEAN', 'AD', 1, False, 1.01, 1.02, 0.99, 0.95, False, 0.5, 2, False, 256, 2, 0, False, False, 3, 2, 0, 0.35, True, 'bicubic', 'bicubic', False, 0, 'anisotropic', 0, 'reinhard', 100, 0, 'subtract', 0, 0, 'gaussian', 'add', 0, 100, 127, 0, 'hard_clamp', 5, 0, 'None', 'None', False, 'MultiDiffusion', 768, 768, 64, 4, False, False, False, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {}
Traceback (most recent call last):
  File "G:\StableDiffusion\Forge\webui\modules\call_queue.py", line 57, in f
    res = list(func(*args, **kwargs))
TypeError: 'NoneType' object is not iterable
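For what it's worth, the core failure is the matmul inside the ControlNet's cross-attention: a 77-token conditioning tensor that is 2048-dimensional is being projected through a weight that expects 768-dimensional input. 2048 is the SDXL conditioning width and 768 the SD 1.5 one, so this looks like the SD 1.5 model control_v11p_sd15_openpose paired with an SDXL checkpoint (the 1216x832 resolution in the arguments points the same way, though that's an inference, not something the log states). A minimal sketch that reproduces just the matmul error, with shapes taken from the message rather than from Forge's actual code:

import torch

# 77 text tokens with a 2048-dim embedding, as SDXL produces (assumed).
context = torch.randn(77, 2048)
# An SD 1.5 ControlNet to_k projection: 768 in-features, 320 out-features (assumed).
# F.linear stores weights as (out_features, in_features).
to_k_weight = torch.randn(320, 768)

# F.linear computes context @ to_k_weight.T, i.e. (77x2048) @ (768x320):
torch.nn.functional.linear(context, to_k_weight)
# RuntimeError: mat1 and mat2 shapes cannot be multiplied (77x2048 and 768x320)

The trailing TypeError: 'NoneType' object is not iterable is just fallout: the txt2img task aborted and returned None, and the wrapper in call_queue.py then tried list(None).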
What is your Forge version? Mine is working fine.
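If the versions match, it may help to confirm which model family the ControlNet file actually belongs to before blaming Forge. A rough sketch, assuming a .safetensors copy of the model and the usual attn2.to_k key layout; the filename and key suffix here are illustrative, not verified against any particular file:

from safetensors import safe_open

# Peek at a cross-attention key projection: its in-features reveal the
# conditioning width the ControlNet expects (768 = SD 1.5, 2048 = SDXL).
path = "control_v11p_sd15_openpose.safetensors"  # illustrative filename
with safe_open(path, framework="pt", device="cpu") as f:
    for name in f.keys():
        if name.endswith("attn2.to_k.weight"):
            out_features, in_features = f.get_slice(name).get_shape()
            print(name, "-> in_features =", in_features)  # 768 for an SD 1.5 model
            break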