Merged
7 changes: 4 additions & 3 deletions __init__.py
@@ -1,4 +1,4 @@
-from .custom_samplers import SamplerDistanceAdvanced
+from .custom_samplers import SamplerDistance, SamplerDistanceAdvanced
 from .presets_to_add import extra_samplers

 def add_samplers():
@@ -21,5 +21,6 @@ def add_samplers():
 add_samplers()

 NODE_CLASS_MAPPINGS = {
-    "SamplerDistance": SamplerDistanceAdvanced,
-}
+    "SamplerDistance": SamplerDistance,
+    "SamplerDistanceAdvanced": SamplerDistanceAdvanced,
+}
240 changes: 191 additions & 49 deletions custom_samplers.py
@@ -2,6 +2,8 @@
 from comfy.k_diffusion.sampling import trange, to_d
 import comfy.model_patcher
 import comfy.samplers
+from comfy.k_diffusion import sampling
+from comfy import model_sampling
 from math import pi
 mmnorm = lambda x: (x - x.min()) / (x.max() - x.min())
 selfnorm = lambda x: x / x.norm()
@@ -70,12 +72,60 @@ def normalize_adjust(a,b,strength=1):
     a[~torch.isfinite(a)] = c[~torch.isfinite(a)]
     return a

+def get_ancestral_step_ext(sigma, sigma_next, eta=1.0, is_rf=False):
+    if sigma_next == 0 or eta == 0:
+        return sigma_next, sigma_next * 0.0, 1.0
+    if not is_rf:
+        return (*sampling.get_ancestral_step(sigma, sigma_next, eta=eta), 1.0)
+    # Referenced from ComfyUI.
+    downstep_ratio = 1.0 + (sigma_next / sigma - 1.0) * eta
+    sigma_down = sigma_next * downstep_ratio
+    alpha_ip1, alpha_down = 1.0 - sigma_next, 1.0 - sigma_down
+    sigma_up = (sigma_next**2 - sigma_down**2 * alpha_ip1**2 / alpha_down**2)**0.5
+    x_coeff = alpha_ip1 / alpha_down
+    return sigma_down, sigma_up, x_coeff
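+
+# Worked example for get_ancestral_step_ext (illustrative values, not from the
+# PR): for a rectified-flow model with sigma=1.0, sigma_next=0.5, eta=1.0 this
+# gives downstep_ratio = 1 + (0.5 - 1) * 1 = 0.5, so sigma_down = 0.25,
+# alpha_ip1 = 0.5, alpha_down = 0.75, hence x_coeff = 2/3 and
+# sigma_up = sqrt(0.5**2 - 0.25**2 * 0.5**2 / 0.75**2) ~= 0.4714.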

+def internal_step(x, d, dt, sigma, sigma_next, sigma_up, x_coeff, noise_sampler):
+    x = x + d * dt
+    if sigma_up == 0 or noise_sampler is None:
+        return x
+    noise = noise_sampler(sigma, sigma_next).mul_(sigma_up)
+    if x_coeff != 1:
+        # x gets scaled for flow models.
+        x *= x_coeff
+    return x.add_(noise)
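+
+# In short: internal_step is a plain Euler update when sigma_up == 0 or no
+# noise sampler is given; otherwise it is an ancestral step that rescales x
+# by x_coeff (flow models only) before adding the sigma_up-scaled noise.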

+def fix_step_range(steps, start, end):
+    if start < 0:
+        start = steps + start
+    if end < 0:
+        end = steps + end
+    start = max(0, min(steps - 1, start))
+    end = max(0, min(steps - 1, end))
+    return (end, start) if start > end else (start, end)
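+
+# Example (illustrative): fix_step_range(20, 0, -1) -> (0, 19). Negative
+# indices count from the end as in Python sequence indexing, values are
+# clamped to [0, steps - 1], and a reversed pair is swapped into order.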

 # Euler and CFGpp part taken from comfy_extras/nodes_advanced_samplers
-def distance_wrap(resample,resample_end=-1,cfgpp=False,sharpen=False,use_softmax=False,first_only=False,use_slerp=False,perp_step=False,smooth=False,use_negative=False):
+def distance_wrap(
+    resample, resample_end=-1, cfgpp=False, sharpen=False, use_softmax=False,
+    distance_first=0, distance_last=-1, eta_first=0, eta_last=-1, distance_eta_first=0, distance_eta_last=-1,
+    use_slerp=False, perp_step=False, smooth=False, use_negative=False, eta=0.0, s_noise=1.0,
+    distance_step_eta=0.0, distance_step_s_noise=1.0, distance_step_seed_offset=42,
+):
     @torch.no_grad()
-    def sample_distance_advanced(model, x, sigmas, extra_args=None, callback=None, disable=None):
+    def sample_distance_advanced(model, x, sigmas, eta=eta, s_noise=s_noise, noise_sampler=None, distance_step_noise_sampler=None, extra_args=None, callback=None, disable=None):
+        nonlocal distance_first, distance_last, eta_first, eta_last, distance_eta_first, distance_eta_last
+
         extra_args = {} if extra_args is None else extra_args
+        seed = extra_args.get("seed")
+        dstep_noise_sampler = None if distance_step_eta == 0 else distance_step_noise_sampler or noise_sampler or sampling.default_noise_sampler(x, seed=seed + distance_step_seed_offset if seed is not None else None)
+        noise_sampler = None if eta == 0 else noise_sampler or sampling.default_noise_sampler(x, seed=seed)
+        is_rf = isinstance(model.inner_model.inner_model.model_sampling, model_sampling.CONST)
+        uncond = None
+        steps = len(sigmas) - 1
+
+        distance_first, distance_last = fix_step_range(steps, distance_first, distance_last)
+        eta_first, eta_last = fix_step_range(steps, eta_first, eta_last)
+        distance_eta_first, distance_eta_last = fix_step_range(steps, distance_eta_first, distance_eta_last)
+
         if cfgpp or use_negative:
             uncond = None
@@ -96,58 +146,66 @@ def post_cfg_function(args):
         current_resample = resample
         total = 0
         s_in = x.new_ones([x.shape[0]])
-        for i in trange(len(sigmas) - 1, disable=disable):
-            sigma_hat = sigmas[i]
+        for i in trange(steps, disable=disable):
+            use_distance = distance_first <= i <= distance_last
+            use_eta = eta_first <= i <= eta_last
+            use_distance_eta = distance_eta_first <= i <= distance_eta_last
+            sigma, sigma_next = sigmas[i:i + 2]
+            sigma_down, sigma_up, x_coeff = get_ancestral_step_ext(sigma, sigma_next, eta=eta if use_eta else 0.0, is_rf=is_rf)
+            sigma_up *= s_noise
+            dstep_sigma_down, dstep_sigma_up, dstep_x_coeff = get_ancestral_step_ext(sigma, sigma_next, eta=distance_step_eta if use_distance_eta else 0.0, is_rf=is_rf)
+            dstep_sigma_up *= distance_step_s_noise

-            res_mul = progression(sigma_hat)
+            res_mul = progression(sigma)
             if resample_end >= 0:
                 resample_steps = max(min(current_resample,resample_end),min(max(current_resample,resample_end),int(current_resample * res_mul + resample_end * (1 - res_mul))))
             else:
                 resample_steps = current_resample

-            denoised = model(x, sigma_hat * s_in, **extra_args)
+            denoised = model(x, sigma * s_in, **extra_args)
             total += 1

             if cfgpp and torch.any(uncond):
-                d = to_d(x - denoised + uncond, sigmas[i], denoised)
+                d = to_d(x - denoised + uncond, sigma, denoised)
             else:
-                d = to_d(x, sigma_hat, denoised)
+                d = to_d(x, sigma, denoised)

             if callback is not None:
-                callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
-            dt = sigmas[i + 1] - sigma_hat
+                callback({'x': x, 'i': i, 'sigma': sigma, 'sigma_hat': sigma, 'denoised': denoised})
+            dt = sigma_down - sigma
+            dstep_dt = dstep_sigma_down - sigma

-            if sigmas[i + 1] == 0 or resample_steps == 0 or (i > 0 and first_only):
+            if sigma_next == 0 or resample_steps == 0 or not use_distance:
                 # Euler method
-                x = x + d * dt
-            else:
-                # not Euler method
-                x_n = [d]
-                for re_step in range(resample_steps):
-                    x_new = x + d * dt
-                    new_denoised = model(x_new, sigmas[i + 1] * s_in, **extra_args)
-                    if smooth:
-                        new_denoised = new_denoised.abs().pow(1 / new_denoised.std().sqrt()) * new_denoised.sign()
-                        new_denoised = new_denoised.div(new_denoised.std().sqrt())
-                    total += 1
-                    if cfgpp and torch.any(uncond):
-                        new_d = to_d(x_new - new_denoised + uncond, sigmas[i + 1], new_denoised)
-                    else:
-                        new_d = to_d(x_new, sigmas[i + 1] * s_in, new_denoised)
-                    x_n.append(new_d)
-                    if re_step == 0:
-                        d = (new_d + d) / 2
-                    else:
-                        u = uncond if (use_negative and uncond is not None and torch.any(uncond)) else None
-                        d = fast_distance_weights(torch.stack(x_n), use_softmax=use_softmax, use_slerp=use_slerp, uncond=u)
-                        if sharpen or perp_step:
-                            if sharpen and d_prev is not None:
-                                d = normalize_adjust(d, d_prev, 1)
-                            elif perp_step and d_prev is not None:
-                                d = diff_step(d, d_prev, 0.5)
-                            d_prev = d.clone()
-                        x_n.append(d)
-                x = x + d * dt
+                x = internal_step(x, d, dt, sigma, sigma_next, sigma_up, x_coeff, noise_sampler)
+                continue
+            # not Euler method
+            x_n = [d]
+            for re_step in trange(resample_steps, initial=1, disable=disable or resample_steps < 2, leave=False, desc=" Distance"):
+                x_new = internal_step(x, d, dstep_dt, sigma, sigma_next, dstep_sigma_up, dstep_x_coeff, dstep_noise_sampler)
+                new_denoised = model(x_new, sigma_next * s_in, **extra_args)
+                if smooth:
+                    new_denoised = new_denoised.abs().pow(1 / new_denoised.std().sqrt()) * new_denoised.sign()
+                    new_denoised = new_denoised.div(new_denoised.std().sqrt())
+                total += 1
+                if cfgpp and torch.any(uncond):
+                    new_d = to_d(x_new - new_denoised + uncond, sigma_next, new_denoised)
+                else:
+                    new_d = to_d(x_new, sigma_next * s_in, new_denoised)
+                x_n.append(new_d)
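+                # The first resample is a plain Heun-style average of the two
+                # slopes (matching the "1 gives Heun" tooltip); the distance
+                # weighting below only kicks in from the second resample on.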
+                if re_step == 0:
+                    d = (new_d + d) / 2
+                    continue
+                u = uncond if (use_negative and uncond is not None and torch.any(uncond)) else None
+                d = fast_distance_weights(torch.stack(x_n), use_softmax=use_softmax, use_slerp=use_slerp, uncond=u)
+                if sharpen or perp_step:
+                    if sharpen and d_prev is not None:
+                        d = normalize_adjust(d, d_prev, 1)
+                    elif perp_step and d_prev is not None:
+                        d = diff_step(d, d_prev, 0.5)
+                    d_prev = d.clone()
+                x_n.append(d)
+            x = internal_step(x, d, dt, sigma, sigma_next, sigma_up, x_coeff, noise_sampler)
         return x
     return sample_distance_advanced

@@ -202,19 +260,103 @@ def simplified_euler(model, x, sigmas, extra_args=None, callback=None, disable=None):
         x = x + d * dt
     return x

-class SamplerDistanceAdvanced:
+class SamplerDistanceBase:
+    _DISTANCE_OPTIONS = None  # All options by default.
+    _DISTANCE_PARAMS = {
+        "resample": ("INT", {
+            "default": 3, "min": -1, "max": 32, "step": 1,
+            "tooltip": "0 gives Euler and 1 gives Heun.\nValues of 2 or more use the distance method.\n-1 uses remaining steps + 1 as the resample value. This can be pretty slow.",
+        }),
+        "resample_end": ("INT", {
+            "default": -1, "min": -1, "max": 32, "step": 1,
+            "tooltip": "How many resamples for the final step. -1 means a constant resample count.",
+        }),
+        "cfgpp": ("BOOLEAN", {
+            "default": True,
+            "tooltip": "Controls whether to use CFG++ sampling. When enabled, you should set CFG to a fairly low value.",
+        }),
+        "eta": ("FLOAT", {
+            "default": 0.0, "min": 0.0, "max": 32.0, "step": 0.01,
+            "tooltip": "Controls the ancestralness of the main sampler steps. 0.0 means non-ancestral sampling. Note: may not work well with some of the other options.",
+        }),
+        "s_noise": ("FLOAT", {
+            "default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01,
+            "tooltip": "Scale factor for ancestral noise added during sampling. Generally should be left at 1.0; only has an effect when ancestral sampling is used.",
+        }),
+        "distance_step_eta": ("FLOAT", {
+            "default": 0.0, "min": 0.0, "max": 32.0, "step": 0.01,
+            "tooltip": "Experimental option that enables ancestral sampling for the internal distance steps. When used, should generally be a fairly low value such as 0.25. 0.0 means non-ancestral sampling for the internal distance steps.",
+        }),
+        "distance_step_s_noise": ("FLOAT", {
+            "default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01,
+            "tooltip": "Scale factor for ancestral noise added in the internal distance steps. Generally should be left at 1.0; only has an effect when distance_step_eta is non-zero.",
+        }),
"use_softmax": ("BOOLEAN", {
"default": False,
"tooltip": "Rather than using a min/max normalization and an exponent will use a softmax instead.",
}),
"use_slerp": ("BOOLEAN", {
"default": False,
"tooltip": "Will SLERP the predictions instead of doing a weighted average. The difference is more obvious when using use_negative.",
}),
"perp_step": ("BOOLEAN", {
"default": False,
"tooltip": "Experimental, not yet recommended.",
}),
"use_negative": ("BOOLEAN", {
"default": False,
"tooltip": "Will use the negative prediction to prepare the distance scores. This tends to give images with less errors from my testing.",
}),
"smooth": ("BOOLEAN", {
"default": False,
"tooltip": "Not recommended, will make everything brighter. Not smoother.",
}),
"sharpen": ("BOOLEAN", {
"default": False,
"tooltip": "Not recommended, attempts to sharpen the results but instead tends to make things fuzzy.",
}),
"distance_first": ("INT", {
"default": 0, "min": -10000, "max": 10000, "step": 1,
"tooltip": "First step to use distance sampling. You can use negative values to count from the end. Note: Steps are zero-based.",
}),
"distance_last": ("INT", {
"default": -1, "min": -10000, "max": 10000, "step": 1,
"tooltip": "Last step to use distance sampling. You can use negative values to count from the end. Note: Steps are zero-based.",
}),
"eta_first": ("INT", {
"default": 0, "min": -10000, "max": 10000, "step": 1,
"tooltip": "First step to use ancestral sampling. Only applies when ETA is non-zero. You can use negative values to count from the end. Note: Steps are zero-based.",
}),
"eta_last": ("INT", {
"default": -1, "min": -10000, "max": 10000, "step": 1,
"tooltip": "Last step to use ancestral sampling. Only applies when ETA is non-zero. You can use negative values to count from the end. Note: Steps are zero-based.",
}),
"distance_eta_first": ("INT", {
"default": 0, "min": -10000, "max": 10000, "step": 1,
"tooltip": "First step to use ancestral sampling for the distance steps. Only applies when distance ETA is non-zero. You can use negative values to count from the end. Note: Steps are zero-based.",
}),
"distance_eta_last": ("INT", {
"default": -1, "min": -10000, "max": 10000, "step": 1,
"tooltip": "Last step to use ancestral sampling for the distance steps. Only applies when distance ETA is non-zero. You can use negative values to count from the end. Note: Steps are zero-based.",
}),
}

     @classmethod
     def INPUT_TYPES(s):
-        return {"required": {"resample": ("INT", {"default": 3, "min": -1, "max": 32, "step": 1,
-            "tooltip":"0 all along gives Euler. 1 gives Heun.\nAnything starting from 2 will use the distance method.\n-1 will do remaining steps + 1 as the resample value. This can be pretty slow."}),
-            "resample_end": ("INT", {"default": -1, "min": -1, "max": 32, "step": 1, "tooltip":"How many resamples for the end. -1 means constant."}),
-            "cfgpp" : ("BOOLEAN", {"default": True}),
-        }}
+        if s._DISTANCE_OPTIONS is None:
+            return {"required": s._DISTANCE_PARAMS.copy()}
+        return {"required": {k: s._DISTANCE_PARAMS[k] for k in s._DISTANCE_OPTIONS}}

RETURN_TYPES = ("SAMPLER",)
CATEGORY = "sampling/custom_sampling/samplers"
FUNCTION = "get_sampler"

def get_sampler(self,resample,resample_end,cfgpp):
sampler = comfy.samplers.KSAMPLER(
distance_wrap(resample=resample,cfgpp=cfgpp,resample_end=resample_end))
def get_sampler(self, **kwargs):
sampler = comfy.samplers.KSAMPLER(distance_wrap(**kwargs))
return (sampler, )

+class SamplerDistance(SamplerDistanceBase):
+    _DISTANCE_OPTIONS = ("resample", "resample_end", "cfgpp")
+
+class SamplerDistanceAdvanced(SamplerDistanceBase):
+    pass  # Includes all options by default.
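
For reference, a minimal usage sketch (module path and argument values are assumed for illustration; this simply mirrors what SamplerDistanceBase.get_sampler does rather than documenting a separate API):

import comfy.samplers
from custom_samplers import distance_wrap

# Build a SAMPLER object the same way SamplerDistanceBase.get_sampler does;
# any keyword accepted by distance_wrap can be passed through.
sampler = comfy.samplers.KSAMPLER(distance_wrap(resample=3, cfgpp=True, eta=0.25))
# The resulting sampler plugs in wherever ComfyUI expects a SAMPLER input.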