Add custom nodes, Civitai loras (LFS), and vast.ai setup script
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled

Includes 30 custom nodes committed directly, 7 Civitai-exclusive
loras stored via Git LFS, and a setup script that installs all
dependencies and downloads HuggingFace-hosted models on vast.ai.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-09 00:55:26 +00:00
parent 2b70ab9ad0
commit f09734b0ee
2274 changed files with 748556 additions and 3 deletions

View File

@@ -0,0 +1,460 @@
import comfy
import torch
from .libs import utils
from einops import rearrange
import random
import math
from .libs import common
import logging
# Noise-generation device options shared by the sampler nodes below.
# "GPU(=A1111)" generates noise on the torch device (A1111-compatible);
# the "+internal_seed" variants make inspire_ksampler swap in `internal_seed`
# for intermediate-step noise (see the `'internal_seed' in noise_mode` check).
supported_noise_modes = ["GPU(=A1111)", "CPU", "GPU+internal_seed", "CPU+internal_seed"]
class Inspire_RandomNoise:
    """NOISE provider object consumed by custom-sampling graphs.

    Wraps utils.prepare_noise so that seed variation and incremental batch
    seed modes are applied when the sampler asks for noise.
    """

    def __init__(self, seed, mode, incremental_seed_mode, variation_seed, variation_strength, variation_method="linear", internal_seed=None):
        torch_device = comfy.model_management.get_torch_device()

        # HOTFIX: https://github.com/comfyanonymous/ComfyUI/commit/916d1e14a93ef331adef7c0deff2fdcf443b05cf#commitcomment-151914788
        # seed value should be different with generated noise
        self.seed = internal_seed
        self.noise_seed = seed

        if mode == "CPU":
            self.noise_device = "cpu"
        else:
            self.noise_device = torch_device

        self.incremental_seed_mode = incremental_seed_mode
        self.variation_seed = variation_seed
        self.variation_strength = variation_strength
        self.variation_method = variation_method

    def generate_noise(self, input_latent):
        """Produce noise matching input_latent['samples']; always returned on CPU."""
        samples = input_latent["samples"]
        batch_inds = input_latent.get("batch_index")
        generated = utils.prepare_noise(samples, self.noise_seed, batch_inds, self.noise_device, self.incremental_seed_mode,
                                        variation_seed=self.variation_seed, variation_strength=self.variation_strength,
                                        variation_method=self.variation_method)
        return generated.cpu()
class RandomNoise:
    """Node producing a NOISE object (Inspire_RandomNoise) for custom sampling."""

    @classmethod
    def INPUT_TYPES(cls):
        required = {
            "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "This is the seed for the initial noise applied to the latent."}),
            "noise_mode": (["GPU(=A1111)", "CPU"],),
            "batch_seed_mode": (["incremental", "comfy", "variation str inc:0.01", "variation str inc:0.05"],),
            "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
            "variation_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
        }
        optional = {
            "variation_method": (["linear", "slerp"],),
            "internal_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "This is the seed used for generating noise in intermediate steps when using ancestral and SDE-based samplers.\nNOTE: If `noise_mode` is in GPU mode and `internal_seed` is the same as `seed`, the generated image may be distorted."}),
        }
        return {"required": required, "optional": optional}

    RETURN_TYPES = ("NOISE",)
    FUNCTION = "get_noise"

    CATEGORY = "InspirePack/a1111_compat"

    def get_noise(self, noise_seed, noise_mode, batch_seed_mode, variation_seed, variation_strength, variation_method="linear", internal_seed=None):
        """Build the noise provider; internal_seed defaults to noise_seed when omitted."""
        effective_internal_seed = noise_seed if internal_seed is None else internal_seed
        provider = Inspire_RandomNoise(noise_seed, noise_mode, batch_seed_mode, variation_seed, variation_strength,
                                       variation_method=variation_method, internal_seed=effective_internal_seed)
        return (provider,)
def inspire_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0,
                     noise_mode="CPU", disable_noise=False, start_step=None, last_step=None, force_full_denoise=False,
                     incremental_seed_mode="comfy", variation_seed=None, variation_strength=None, noise=None, callback=None, variation_method="linear",
                     scheduler_func=None, internal_seed=None):
    """Core sampling entry shared by every KSampler node in this module.

    Prepares (or accepts) the initial noise, translates `denoise` into a
    start-step offset, then delegates to Impact Pack's impact_sampling.

    Returns:
        (samples, noise): sampled latent dict and the noise tensor used.
    Raises:
        Exception: when an externally supplied `noise` tensor's channel count
        does not match the model's latent channels.
    """
    device = comfy.model_management.get_torch_device()
    # Any noise_mode containing 'cpu' (e.g. "CPU", "CPU+internal_seed") pins noise to CPU.
    noise_device = "cpu" if 'cpu' in noise_mode.lower() else device
    latent_image = latent["samples"]
    # Guarded for older ComfyUI versions that lack this helper.
    if hasattr(comfy.sample, 'fix_empty_latent_channels'):
        latent_image = comfy.sample.fix_empty_latent_channels(model, latent_image)

    # Shallow copy so we never mutate the caller's latent dict.
    latent = latent.copy()

    if noise is not None and latent_image.shape[1] != noise.shape[1]:
        logging.info("[Inspire Pack] inspire_ksampler: The type of latent input for noise generation does not match the model's latent type. When using the SD3 model, you must use the SD3 Empty Latent.")
        raise Exception("The type of latent input for noise generation does not match the model's latent type. When using the SD3 model, you must use the SD3 Empty Latent.")

    if noise is None:
        if disable_noise:
            # RNG is still seeded here even though the noise is zeros —
            # presumably to mirror upstream ComfyUI behavior; confirm before changing.
            torch.manual_seed(seed)
            noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device=noise_device)
        else:
            batch_inds = latent["batch_index"] if "batch_index" in latent else None
            noise = utils.prepare_noise(latent_image, seed, batch_inds, noise_device, incremental_seed_mode,
                                        variation_seed=variation_seed, variation_strength=variation_strength, variation_method=variation_method)

    if start_step is None:
        if denoise == 1.0:
            start_step = 0
        else:
            # Partial denoise: stretch the schedule so the final `steps` steps
            # cover the requested denoise fraction.
            advanced_steps = math.floor(steps / denoise)
            start_step = advanced_steps - steps
            steps = advanced_steps

    if internal_seed is None:
        internal_seed = seed

    # "+internal_seed" noise modes use a separate seed for the sampler's
    # internal (intermediate-step) noise; initial noise was already built above.
    if 'internal_seed' in noise_mode:
        seed = internal_seed

    try:
        samples = common.impact_sampling(
            model=model, add_noise=not disable_noise, seed=seed, steps=steps, cfg=cfg, sampler_name=sampler_name, scheduler=scheduler, positive=positive, negative=negative,
            latent_image=latent, start_at_step=start_step, end_at_step=last_step, return_with_leftover_noise=not force_full_denoise, noise=noise, callback=callback,
            scheduler_func=scheduler_func)
    except Exception as e:
        # Older Impact Pack versions don't accept scheduler_func — retry without it.
        if "unexpected keyword argument 'scheduler_func'" in str(e):
            logging.info("[Inspire Pack] Impact Pack is outdated. (Cannot use GITS scheduler.)")
            samples = common.impact_sampling(
                model=model, add_noise=not disable_noise, seed=seed, steps=steps, cfg=cfg, sampler_name=sampler_name, scheduler=scheduler, positive=positive, negative=negative,
                latent_image=latent, start_at_step=start_step, end_at_step=last_step, return_with_leftover_noise=not force_full_denoise, noise=noise, callback=callback)
        else:
            raise e

    return samples, noise
class KSampler_inspire:
    """A1111-compatible KSampler node with noise-device and seed-variation controls."""

    @classmethod
    def INPUT_TYPES(cls):
        required = {
            "model": ("MODEL",),
            "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "This is the seed for the initial noise applied to the latent."}),
            "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
            "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
            "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
            "scheduler": (common.get_schedulers(), ),
            "positive": ("CONDITIONING", ),
            "negative": ("CONDITIONING", ),
            "latent_image": ("LATENT", ),
            "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
            "noise_mode": (supported_noise_modes,),
            "batch_seed_mode": (["incremental", "comfy", "variation str inc:0.01", "variation str inc:0.05"],),
            "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
            "variation_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
        }
        optional = {
            "variation_method": (["linear", "slerp"],),
            "scheduler_func_opt": ("SCHEDULER_FUNC",),
            "internal_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "This is the seed used for generating noise in intermediate steps when using ancestral and SDE-based samplers.\nNOTE: If `noise_mode` is in GPU mode and `internal_seed` is the same as `seed`, the generated image may be distorted."}),
        }
        return {"required": required, "optional": optional}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/a1111_compat"

    @staticmethod
    def doit(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise, noise_mode,
             batch_seed_mode="comfy", variation_seed=None, variation_strength=None, variation_method="linear", scheduler_func_opt=None,
             internal_seed=None):
        """Run inspire_ksampler and return only the sampled latent."""
        samples, _noise = inspire_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                           denoise, noise_mode, incremental_seed_mode=batch_seed_mode,
                                           variation_seed=variation_seed, variation_strength=variation_strength,
                                           variation_method=variation_method, scheduler_func=scheduler_func_opt,
                                           internal_seed=internal_seed)
        return (samples, )
class KSamplerAdvanced_inspire:
    """Advanced A1111-compatible KSampler with step-range and leftover-noise control."""

    @classmethod
    def INPUT_TYPES(cls):
        required = {
            "model": ("MODEL",),
            "add_noise": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}),
            "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "This is the seed for the initial noise applied to the latent."}),
            "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
            "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}),
            "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
            "scheduler": (common.get_schedulers(), ),
            "positive": ("CONDITIONING", ),
            "negative": ("CONDITIONING", ),
            "latent_image": ("LATENT", ),
            "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
            "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
            "noise_mode": (supported_noise_modes,),
            "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}),
            "batch_seed_mode": (["incremental", "comfy", "variation str inc:0.01", "variation str inc:0.05"],),
            "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
            "variation_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
        }
        optional = {
            "variation_method": (["linear", "slerp"],),
            "noise_opt": ("NOISE_IMAGE",),
            "scheduler_func_opt": ("SCHEDULER_FUNC",),
            "internal_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "This is the seed used for generating noise in intermediate steps when using ancestral and SDE-based samplers.\nNOTE: If `noise_mode` is in GPU mode and `internal_seed` is the same as `seed`, the generated image may be distorted."}),
        }
        return {"required": required, "optional": optional}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/a1111_compat"

    @staticmethod
    def sample(model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, noise_mode, return_with_leftover_noise,
               denoise=1.0, batch_seed_mode="comfy", variation_seed=None, variation_strength=None, noise_opt=None, callback=None, variation_method="linear", scheduler_func_opt=None, internal_seed=None):
        """Delegate to inspire_ksampler; returns (samples, noise)."""
        force_full_denoise = not return_with_leftover_noise
        disable_noise = not add_noise

        return inspire_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step,
                                force_full_denoise=force_full_denoise, noise_mode=noise_mode, incremental_seed_mode=batch_seed_mode,
                                variation_seed=variation_seed, variation_strength=variation_strength, noise=noise_opt, callback=callback,
                                variation_method=variation_method, scheduler_func=scheduler_func_opt, internal_seed=internal_seed)

    def doit(self, *args, **kwargs):
        """Node entry point: like sample() but drops the noise from the result."""
        samples, _noise = self.sample(*args, **kwargs)
        return (samples,)
class KSampler_inspire_pipe:
    """BASIC_PIPE variant of KSampler_inspire; also forwards the pipe's VAE."""

    @classmethod
    def INPUT_TYPES(cls):
        required = {
            "basic_pipe": ("BASIC_PIPE",),
            "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "This is the seed for the initial noise applied to the latent."}),
            "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
            "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
            "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
            "scheduler": (common.get_schedulers(), ),
            "latent_image": ("LATENT", ),
            "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
            "noise_mode": (supported_noise_modes,),
            "batch_seed_mode": (["incremental", "comfy", "variation str inc:0.01", "variation str inc:0.05"],),
            "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
            "variation_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
        }
        optional = {
            "scheduler_func_opt": ("SCHEDULER_FUNC",),
            "internal_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "This is the seed used for generating noise in intermediate steps when using ancestral and SDE-based samplers.\nNOTE: If `noise_mode` is in GPU mode and `internal_seed` is the same as `seed`, the generated image may be distorted."}),
        }
        return {"required": required, "optional": optional}

    RETURN_TYPES = ("LATENT", "VAE")
    FUNCTION = "sample"

    CATEGORY = "InspirePack/a1111_compat"

    def sample(self, basic_pipe, seed, steps, cfg, sampler_name, scheduler, latent_image, denoise, noise_mode, batch_seed_mode="comfy",
               variation_seed=None, variation_strength=None, scheduler_func_opt=None, internal_seed=None):
        """Unpack the pipe, sample, and return (latent, vae)."""
        model, _clip, vae, positive, negative = basic_pipe
        samples, _noise = inspire_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                           denoise, noise_mode, incremental_seed_mode=batch_seed_mode,
                                           variation_seed=variation_seed, variation_strength=variation_strength,
                                           scheduler_func=scheduler_func_opt, internal_seed=internal_seed)
        return samples, vae
class KSamplerAdvanced_inspire_pipe:
    """BASIC_PIPE variant of KSamplerAdvanced_inspire; also forwards the pipe's VAE."""

    @classmethod
    def INPUT_TYPES(cls):
        required = {
            "basic_pipe": ("BASIC_PIPE",),
            "add_noise": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}),
            "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "This is the seed for the initial noise applied to the latent."}),
            "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
            "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}),
            "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
            "scheduler": (common.get_schedulers(), ),
            "latent_image": ("LATENT", ),
            "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
            "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
            "noise_mode": (supported_noise_modes,),
            "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}),
            "batch_seed_mode": (["incremental", "comfy", "variation str inc:0.01", "variation str inc:0.05"],),
            "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
            "variation_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
        }
        optional = {
            "noise_opt": ("NOISE_IMAGE",),
            "scheduler_func_opt": ("SCHEDULER_FUNC",),
            "internal_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "This is the seed used for generating noise in intermediate steps when using ancestral and SDE-based samplers.\nNOTE: If `noise_mode` is in GPU mode and `internal_seed` is the same as `seed`, the generated image may be distorted."}),
        }
        return {"required": required, "optional": optional}

    RETURN_TYPES = ("LATENT", "VAE", )
    FUNCTION = "sample"

    CATEGORY = "InspirePack/a1111_compat"

    def sample(self, basic_pipe, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, latent_image, start_at_step, end_at_step, noise_mode, return_with_leftover_noise,
               denoise=1.0, batch_seed_mode="comfy", variation_seed=None, variation_strength=None, noise_opt=None, scheduler_func_opt=None, internal_seed=None):
        """Unpack the pipe, delegate to KSamplerAdvanced_inspire, return (latent, vae)."""
        model, _clip, vae, positive, negative = basic_pipe

        latent = KSamplerAdvanced_inspire().sample(
            model=model, add_noise=add_noise, noise_seed=noise_seed, steps=steps, cfg=cfg,
            sampler_name=sampler_name, scheduler=scheduler, positive=positive, negative=negative,
            latent_image=latent_image, start_at_step=start_at_step, end_at_step=end_at_step,
            noise_mode=noise_mode, return_with_leftover_noise=return_with_leftover_noise,
            denoise=denoise, batch_seed_mode=batch_seed_mode, variation_seed=variation_seed,
            variation_strength=variation_strength, noise_opt=noise_opt,
            scheduler_func_opt=scheduler_func_opt, internal_seed=internal_seed)[0]

        return latent, vae
# Modified version of ComfyUI main code
# https://github.com/comfyanonymous/ComfyUI/blob/master/comfy_extras/nodes_hypertile.py
def get_closest_divisors(hw: int, aspect_ratio: float) -> tuple[int, int]:
    """Return a divisor pair (a, b) with a*b == hw, preferring the squarest pair.

    Candidates are divisor pairs found downward from sqrt(hw), plus the pair
    whose b/a ratio is closest to aspect_ratio; the final winner minimizes
    max(pair)/min(pair).
    """
    candidates = []
    for d in range(int(math.sqrt(hw)), 1, -1):
        if hw % d == 0:
            candidates.append((d, hw // d))

    closest_ratio_pair = min(
        ((d, hw // d) for d in range(2, hw + 1) if hw % d == 0),
        key=lambda p: abs(p[1] / p[0] - aspect_ratio),
    )
    candidates.append(closest_ratio_pair)

    return min(candidates, key=lambda p: max(p) / min(p))
def calc_optimal_hw(hw: int, aspect_ratio: float) -> tuple[int, int]:
    """Factor hw into (h, w) approximating aspect_ratio (= w/h).

    Tries rounding from the height estimate, then the width estimate, and
    falls back to get_closest_divisors when neither factorization is exact.
    """
    h = round(math.sqrt(hw * aspect_ratio))
    w = hw // h
    if h * w == hw:
        return h, w

    w = round(math.sqrt(hw / aspect_ratio))
    h = hw // w
    if h * w == hw:
        return h, w

    return get_closest_divisors(hw, aspect_ratio)
def random_divisor(value: int, min_value: int, /, max_options: int = 1, rand_obj=None) -> int:
    """Divide `value` by a divisor chosen from its divisors >= min_value.

    Args:
        value: Positive integer to divide.
        min_value: Smallest divisor considered (clamped to `value`).
        max_options: How many of the smallest qualifying divisors to choose among.
        rand_obj: Optional random.Random used for the choice; a fresh unseeded
            instance is created when omitted.

    Returns:
        value // d for the selected divisor d (always >= 1; `value` itself
        always qualifies, so the candidate list is never empty).
    """
    # Fix: the original default `rand_obj=random.Random()` was a mutable default
    # argument — one shared RNG instance created at definition time.
    if rand_obj is None:
        rand_obj = random.Random()

    min_value = min(min_value, value)

    # All divisors of value from min_value upward (inclusive).
    divisors = [i for i in range(min_value, value + 1) if value % i == 0]
    ns = [value // i for i in divisors[:max_options]]  # has at least 1 element

    if len(ns) > 1:
        idx = rand_obj.randint(0, len(ns) - 1)
    else:
        idx = 0

    return ns[idx]
# def get_divisors(value: int, min_value: int, /, max_options: int = 1) -> list[int]:
# """
# Returns divisors of value that
# x * min_value <= value
# in big -> small order, amount of divisors is limited by max_options
# """
# max_options = max(1, max_options) # at least 1 option should be returned
# min_value = min(min_value, value)
# divisors = [i for i in range(min_value, value + 1) if value % i == 0] # divisors in small -> big order
# ns = [value // i for i in divisors[:max_options]] # has at least 1 element # big -> small order
# return ns
# def random_divisor(value: int, min_value: int, /, max_options: int = 1, rand_obj=None) -> int:
# """
# Returns a random divisor of value that
# x * min_value <= value
# if max_options is 1, the behavior is deterministic
# """
# print(f"value={value}, min_value={min_value}, max_options={max_options}")
# ns = get_divisors(value, min_value, max_options=max_options) # get cached divisors
# idx = rand_obj.randint(0, len(ns) - 1)
# print(f"ns={ns}, idx={idx}")
#
# return ns[idx]
class HyperTileInspire:
    """HyperTile model patch with a seedable RNG for reproducible tiling.

    Patches self-attention (attn1) so that, at the selected depths, the token
    sequence is split into nh x nw tiles before attention and merged back after.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL",),
                             "tile_size": ("INT", {"default": 256, "min": 1, "max": 2048}),
                             "swap_size": ("INT", {"default": 2, "min": 1, "max": 128}),
                             "max_depth": ("INT", {"default": 0, "min": 0, "max": 10}),
                             "scale_depth": ("BOOLEAN", {"default": False}),
                             "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                             }}

    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "InspirePack/__for_testing"

    def patch(self, model, tile_size, swap_size, max_depth, scale_depth, seed):
        """Return a cloned model with attn1 input/output patches installed.

        `temp` carries the chosen (nh, nw, h, w) from hypertile_in to
        hypertile_out within one attention call; the seeded rand_obj makes
        tile-count choices reproducible per node execution.
        """
        # tile_size is in pixel space; latent space is 8x smaller (min 32px).
        latent_tile_size = max(32, tile_size) // 8
        temp = None

        rand_obj = random.Random()
        rand_obj.seed(seed)

        def hypertile_in(q, k, v, extra_options):
            # Split q into tiles when the current attention resolution matches
            # one of the depths derived from the original latent shape.
            nonlocal temp
            model_chans = q.shape[-2]
            orig_shape = extra_options['original_shape']
            apply_to = []
            for i in range(max_depth + 1):
                apply_to.append((orig_shape[-2] / (2 ** i)) * (orig_shape[-1] / (2 ** i)))

            if model_chans in apply_to:
                shape = extra_options["original_shape"]
                aspect_ratio = shape[-1] / shape[-2]

                hw = q.size(1)
                # h, w = calc_optimal_hw(hw, aspect_ratio)
                # Approximate (h, w) from token count; may not multiply exactly to hw.
                h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))

                # Deeper levels use larger minimum tiles when scale_depth is on.
                factor = (2 ** apply_to.index(model_chans)) if scale_depth else 1
                nh = random_divisor(h, latent_tile_size * factor, swap_size, rand_obj)
                nw = random_divisor(w, latent_tile_size * factor, swap_size, rand_obj)

                logging.debug(f"factor: {factor} <--- params.depth: {apply_to.index(model_chans)} / scale_depth: {scale_depth} / latent_tile_size={latent_tile_size}")
                # print(f"h: {h}, w:{w} --> nh: {nh}, nw: {nw}")

                if nh * nw > 1:
                    # Fold tiles into the batch dimension; remember layout for hypertile_out.
                    q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw)
                    temp = (nh, nw, h, w)
                # else:
                #     temp = None

                logging.debug(f"q={q} / k={k} / v={v}")
                return q, k, v

            return q, k, v

        def hypertile_out(out, extra_options):
            # Undo the tiling applied by hypertile_in, if any was recorded.
            nonlocal temp
            if temp is not None:
                nh, nw, h, w = temp
                temp = None
                out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw)
                out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw)
            return out

        m = model.clone()
        m.set_model_attn1_patch(hypertile_in)
        m.set_model_attn1_output_patch(hypertile_out)
        return (m, )
# ComfyUI registration: node identifier -> implementing class.
NODE_CLASS_MAPPINGS = {
    "KSampler //Inspire": KSampler_inspire,
    "KSamplerAdvanced //Inspire": KSamplerAdvanced_inspire,
    "KSamplerPipe //Inspire": KSampler_inspire_pipe,
    "KSamplerAdvancedPipe //Inspire": KSamplerAdvanced_inspire_pipe,
    "RandomNoise //Inspire": RandomNoise,
    "HyperTile //Inspire": HyperTileInspire
}

# Human-readable titles shown in the ComfyUI node menu.
NODE_DISPLAY_NAME_MAPPINGS = {
    "KSampler //Inspire": "KSampler (inspire)",
    "KSamplerAdvanced //Inspire": "KSamplerAdvanced (inspire)",
    "KSamplerPipe //Inspire": "KSampler [pipe] (inspire)",
    "KSamplerAdvancedPipe //Inspire": "KSamplerAdvanced [pipe] (inspire)",
    "RandomNoise //Inspire": "RandomNoise (inspire)",
    "HyperTile //Inspire": "HyperTile (Inspire)"
}

View File

@@ -0,0 +1,764 @@
import json
import os
from .libs import common
import folder_paths
import nodes
from server import PromptServer
from .libs.utils import TaggedCache, any_typ
import logging
# Package root (two directory levels above this module).
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
settings_file = os.path.join(root_dir, 'cache_settings.json')

# Best-effort load of per-tag cache size settings; fall back to defaults when
# the file is missing or unparsable.
try:
    with open(settings_file) as f:
        cache_settings = json.load(f)
except Exception as e:
    logging.error(e)
    cache_settings = {}

# Global backend cache plus per-key update counters; the counters feed
# cache_weak_hash() so dependent nodes re-execute when an entry is rewritten.
cache = TaggedCache(cache_settings)
cache_count = {}
def update_cache(k, tag, v):
    """Store (tag, v) under key k and bump the key's update counter.

    The counter starts at 0 on first store and increments on each overwrite,
    which is what cache_weak_hash() reports for invalidation.
    """
    cache[k] = (tag, v)
    cache_count[k] = cache_count.get(k, -1) + 1
def cache_weak_hash(k):
    """Cheap IS_CHANGED surrogate: the key paired with its update count."""
    return k, cache_count.get(k, 0)
class CacheBackendData:
    """Cache an arbitrary value in the backend cache under a string key."""

    @classmethod
    def INPUT_TYPES(cls):
        key_widget = ("STRING", {"multiline": False, "placeholder": "Input data key (e.g. 'model a', 'chunli lora', 'girl latent 3', ...)"})
        tag_widget = ("STRING", {"multiline": False, "placeholder": "Tag: short description"})
        return {
            "required": {
                "key": key_widget,
                "tag": tag_widget,
                "data": (any_typ,),
            }
        }

    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("data opt",)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    OUTPUT_NODE = True

    @staticmethod
    def doit(key, tag, data):
        """Store `data` (is_list=False) under `key`; '*' is a reserved key."""
        global cache

        if key == '*':
            logging.warning("[Inspire Pack] CacheBackendData: '*' is reserved key. Cannot use that key")
            return (None,)

        update_cache(key, tag, (False, data))
        return (data,)
class CacheBackendDataNumberKey:
    """Cache an arbitrary value in the backend cache under an integer key."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "key": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                "tag": ("STRING", {"multiline": False, "placeholder": "Tag: short description"}),
                "data": (any_typ,),
            }
        }

    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("data opt",)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    OUTPUT_NODE = True

    @staticmethod
    def doit(key, tag, data):
        """Store `data` (is_list=False) under the integer `key`.

        No reserved-key check here: the '*' wildcard only applies to string keys.
        """
        global cache
        update_cache(key, tag, (False, data))
        return (data,)
class CacheBackendDataList:
    """Cache a list of values in the backend cache under a string key."""

    @classmethod
    def INPUT_TYPES(cls):
        key_widget = ("STRING", {"multiline": False, "placeholder": "Input data key (e.g. 'model a', 'chunli lora', 'girl latent 3', ...)"})
        tag_widget = ("STRING", {"multiline": False, "placeholder": "Tag: short description"})
        return {
            "required": {
                "key": key_widget,
                "tag": tag_widget,
                "data": (any_typ,),
            }
        }

    INPUT_IS_LIST = True
    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("data opt",)
    OUTPUT_IS_LIST = (True,)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    OUTPUT_NODE = True

    @staticmethod
    def doit(key, tag, data):
        """Store the whole `data` list (is_list=True) under the first key/tag.

        NOTE(review): with INPUT_IS_LIST the widgets presumably arrive as lists
        (key[0]/tag[0] are used below), so `key == '*'` may never match a list —
        confirm whether the reserved-key guard should test key[0] instead.
        """
        global cache

        if key == '*':
            logging.warning("[Inspire Pack] CacheBackendDataList: '*' is reserved key. Cannot use that key")
            return (None,)

        update_cache(key[0], tag[0], (True, data))
        return (data,)
class CacheBackendDataNumberKeyList:
    """Cache a list of values in the backend cache under an integer key."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "key": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                "tag": ("STRING", {"multiline": False, "placeholder": "Tag: short description"}),
                "data": (any_typ,),
            }
        }

    INPUT_IS_LIST = True
    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("data opt",)
    OUTPUT_IS_LIST = (True,)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    OUTPUT_NODE = True

    # Consistency: declared @staticmethod like the other CacheBackend* nodes in
    # this module — the method uses no instance state. Calling it through an
    # instance (as ComfyUI does via FUNCTION) still works.
    @staticmethod
    def doit(key, tag, data):
        """Store the whole `data` list (is_list=True) under key[0]/tag[0].

        With INPUT_IS_LIST, every input arrives as a list; the first key and
        tag are used while `data` is stored in its entirety.
        """
        global cache
        update_cache(key[0], tag[0], (True, data))
        return (data,)
class RetrieveBackendData:
    """Fetch a value previously stored in the backend cache by string key."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "key": ("STRING", {"multiline": False, "placeholder": "Input data key (e.g. 'model a', 'chunli lora', 'girl latent 3', ...)"}),
            }
        }

    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("data",)
    OUTPUT_IS_LIST = (True,)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    @staticmethod
    def doit(key):
        """Return the cached payload as a list output; [None] for unknown keys."""
        global cache

        entry = cache.get(key)
        if entry is None:
            logging.warning(f"[RetrieveBackendData] '{key}' is unregistered key.")
            return ([None],)

        # Entries are stored as (tag, (is_list, payload)).
        is_list, payload = entry[1]
        return (payload,) if is_list else ([payload],)

    @staticmethod
    def IS_CHANGED(key):
        # Re-execute whenever the cached entry for this key is rewritten.
        return cache_weak_hash(key)
class RetrieveBackendDataNumberKey(RetrieveBackendData):
    """Variant of RetrieveBackendData that looks up by integer key."""

    @classmethod
    def INPUT_TYPES(cls):
        key_widget = ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})
        return {"required": {"key": key_widget}}
class RemoveBackendData:
    """Delete a cached entry by string key; '*' clears the entire cache."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "key": ("STRING", {"multiline": False, "placeholder": "Input data key ('*' = clear all)"}),
            },
            "optional": {
                "signal_opt": (any_typ,),
            }
        }

    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("signal",)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    OUTPUT_NODE = True

    @staticmethod
    def doit(key, signal_opt=None):
        """Remove `key` (or everything for '*'); pass `signal_opt` through."""
        global cache

        if key == '*':
            # Wildcard: rebuild the cache from settings, dropping all entries.
            cache = TaggedCache(cache_settings)
        elif key in cache:
            del cache[key]
        else:
            logging.warning(f"[Inspire Pack] RemoveBackendData: invalid data key {key}")

        return (signal_opt,)
class RemoveBackendDataNumberKey(RemoveBackendData):
    """Variant of RemoveBackendData keyed by integer (no '*' wildcard)."""

    @classmethod
    def INPUT_TYPES(cls):
        key_widget = ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})
        return {
            "required": {
                "key": key_widget,
            },
            "optional": {
                "signal_opt": (any_typ,),
            }
        }

    @staticmethod
    def doit(key, signal_opt=None):
        """Remove the entry for the integer `key`; pass `signal_opt` through."""
        global cache

        if key in cache:
            del cache[key]
        else:
            logging.warning(f"[Inspire Pack] RemoveBackendDataNumberKey: invalid data key {key}")

        return (signal_opt,)
class ShowCachedInfo:
    """Display the backend cache contents and tag settings in a text widget."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "cache_info": ("STRING", {"multiline": True, "default": ""}),
                "key": ("STRING", {"multiline": False, "default": ""}),
            },
            "hidden": {"unique_id": "UNIQUE_ID"},
        }

    RETURN_TYPES = ()
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    OUTPUT_NODE = True

    @staticmethod
    def get_data():
        """Render the cache entries (by key type) and tag settings as text."""
        global cache

        text1 = "---- [String Key Caches] ----\n"
        text2 = "---- [Number Key Caches] ----\n"
        for k, v in cache.items():
            tag = 'N/A(tag)' if v[0] == '' else v[0]
            if isinstance(k, str):
                text1 += f'{k}: {tag}\n'
            else:
                text2 += f'{k}: {tag}\n'

        text3 = "---- [TagCache Settings] ----\n"
        for k, v in cache._tag_settings.items():
            text3 += f'{k}: {v}\n'
        for k, v in cache._data.items():
            if k not in cache._tag_settings:
                text3 += f'{k}: {v.maxsize}\n'

        return f'{text1}\n{text2}\n{text3}'

    @staticmethod
    def set_cache_settings(data: str):
        """Parse the settings section of `data` and rebuild the cache if changed.

        Fixes two parsing defects in the original:
        - an empty settings section produced [''] and crashed the `k, v` unpack;
        - splitting on every ':' crashed on lines with more than one colon.
        """
        global cache

        settings = data.split("---- [TagCache Settings] ----\n")[-1].strip().split("\n")
        new_tag_settings = {}
        for s in settings:
            if not s.strip():
                continue  # tolerate blank lines / empty settings section
            k, v = s.split(":", 1)  # split once: only the last field is the size
            new_tag_settings[k] = int(v.strip())

        if new_tag_settings == cache._tag_settings:
            # tag settings is not changed
            return

        # Rebuild with the new size settings, carrying existing entries over.
        new_cache = TaggedCache(new_tag_settings)
        for k, v in cache.items():
            new_cache[k] = v

        cache = new_cache

    def doit(self, cache_info, key, unique_id):
        """Push the freshly rendered cache info into the node's text widget."""
        text = ShowCachedInfo.get_data()
        PromptServer.instance.send_sync("inspire-node-feedback", {"node_id": unique_id, "widget_name": "cache_info", "type": "text", "data": text})
        return {}

    @classmethod
    def IS_CHANGED(cls, **kwargs):
        # Always re-execute so the display stays current.
        return float("NaN")
class CheckpointLoaderSimpleShared(nodes.CheckpointLoaderSimple):
    """Checkpoint loader that shares loaded models through the backend cache."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
                    "key_opt": ("STRING", {"multiline": False, "placeholder": "If empty, use 'ckpt_name' as the key."}),
                },
                "optional": {
                    "mode": (['Auto', 'Override Cache', 'Read Only'],),
                }}

    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "STRING")
    RETURN_NAMES = ("model", "clip", "vae", "cache key")
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    @staticmethod
    def _resolve_key(ckpt_name, key_opt, mode):
        """Resolve the cache key; 'Read Only' requires an explicit key_opt.

        Extracted helper: this logic was previously duplicated verbatim in
        both doit() and IS_CHANGED().
        """
        key = key_opt.strip()
        if mode == 'Read Only':
            if key == '':
                raise Exception("[CheckpointLoaderSimpleShared] key_opt cannot be omit if mode is 'Read Only'")
            return key
        return ckpt_name if key == '' else key

    def doit(self, ckpt_name, key_opt, mode='Auto'):
        """Load the checkpoint (or reuse the cached one) and return it with its key."""
        key = self._resolve_key(ckpt_name, key_opt, mode)

        if key not in cache or mode == 'Override Cache':
            res = self.load_checkpoint(ckpt_name)
            update_cache(key, "ckpt", (False, res))
            cache_kind = 'ckpt'
            logging.info(f"[Inspire Pack] CheckpointLoaderSimpleShared: Ckpt '{ckpt_name}' is cached to '{key}'.")
        else:
            cache_kind, (_, res) = cache[key]
            logging.info(f"[Inspire Pack] CheckpointLoaderSimpleShared: Cached ckpt '{key}' is loaded. (Loading skip)")

        if cache_kind == 'ckpt':
            model, clip, vae = res
        elif cache_kind == 'unclip_ckpt':
            # unclip checkpoints carry an extra element we don't expose here.
            model, clip, vae, _ = res
        else:
            raise Exception(f"[CheckpointLoaderSimpleShared] Unexpected cache_kind '{cache_kind}'")

        return model, clip, vae, key

    @staticmethod
    def IS_CHANGED(ckpt_name, key_opt, mode='Auto'):
        """Invalidate on cache-entry updates; 'Override Cache' always reloads."""
        key = CheckpointLoaderSimpleShared._resolve_key(ckpt_name, key_opt, mode)

        if mode == 'Override Cache':
            return (ckpt_name, key)

        # 'Auto' and 'Read Only': track the cached entry's update counter.
        return (None, cache_weak_hash(key))
class LoadDiffusionModelShared(nodes.UNETLoader):
    """Diffusion-model (UNET) loader that shares models through the backend cache."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": { "model_name": (folder_paths.get_filename_list("diffusion_models"), {"tooltip": "Diffusion Model Name"}),
                              "weight_dtype": (["default", "fp8_e4m3fn", "fp8_e4m3fn_fast", "fp8_e5m2"],),
                              "key_opt": ("STRING", {"multiline": False, "placeholder": "If empty, use 'model_name' as the key."}),
                              "mode": (['Auto', 'Override Cache', 'Read Only'],),
                              }
                }

    RETURN_TYPES = ("MODEL", "STRING")
    RETURN_NAMES = ("model", "cache key")
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    @staticmethod
    def _resolve_key(model_name, weight_dtype, key_opt, mode):
        """Resolve the cache key; 'Read Only' requires an explicit key_opt.

        Extracted helper: this logic was previously duplicated verbatim in
        both doit() and IS_CHANGED(). The default key includes weight_dtype so
        differently-quantized loads don't collide.
        """
        key = key_opt.strip()
        if mode == 'Read Only':
            if key == '':
                raise Exception("[LoadDiffusionModelShared] key_opt cannot be omit if mode is 'Read Only'")
            return key
        return f"{model_name}_{weight_dtype}" if key == '' else key

    def doit(self, model_name, weight_dtype, key_opt, mode='Auto'):
        """Load the diffusion model (or reuse the cached one) and return it with its key."""
        key = self._resolve_key(model_name, weight_dtype, key_opt, mode)

        if key not in cache or mode == 'Override Cache':
            model = self.load_unet(model_name, weight_dtype)[0]
            update_cache(key, "diffusion", (False, model))
            logging.info(f"[Inspire Pack] LoadDiffusionModelShared: diffusion model '{model_name}' is cached to '{key}'.")
        else:
            _, (_, model) = cache[key]
            logging.info(f"[Inspire Pack] LoadDiffusionModelShared: Cached diffusion model '{key}' is loaded. (Loading skip)")

        return model, key

    @staticmethod
    def IS_CHANGED(model_name, weight_dtype, key_opt, mode='Auto'):
        """Invalidate on cache-entry updates; 'Override Cache' always reloads."""
        key = LoadDiffusionModelShared._resolve_key(model_name, weight_dtype, key_opt, mode)

        if mode == 'Override Cache':
            return model_name, key

        # 'Auto' and 'Read Only': track the cached entry's update counter.
        return None, cache_weak_hash(key)
class LoadTextEncoderShared:
    """Text encoder (CLIP) loader that shares loaded encoders through the backend cache.

    Supports single, dual and triple encoder recipes depending on `type`
    (see DESCRIPTION for the supported combinations).
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model_name1": (folder_paths.get_filename_list("text_encoders"), ),
                             "model_name2": (["None"] + folder_paths.get_filename_list("text_encoders"), ),
                             "model_name3": (["None"] + folder_paths.get_filename_list("text_encoders"), ),
                             "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "sdxl", "flux", "hunyuan_video"], ),
                             "key_opt": ("STRING", {"multiline": False, "placeholder": "If empty, use 'model_name' as the key."}),
                             "mode": (['Auto', 'Override Cache', 'Read Only'],),
                             },
                "optional": {"device": (["default", "cpu"], {"advanced": True}), }
                }

    RETURN_TYPES = ("CLIP", "STRING")
    RETURN_NAMES = ("clip", "cache key")
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    DESCRIPTION = \
        ("[Recipes single]\n"
         "stable_diffusion: clip-l\n"
         "stable_cascade: clip-g\n"
         "sd3: t5 / clip-g / clip-l\n"
         "stable_audio: t5\n"
         "mochi: t5\n"
         "cosmos: old t5 xxl\n\n"
         "[Recipes dual]\n"
         "sdxl: clip-l, clip-g\n"
         "sd3: clip-l, clip-g / clip-l, t5 / clip-g, t5\n"
         "flux: clip-l, t5\n\n"
         "[Recipes triple]\n"
         "sd3: clip-l, clip-g, t5")

    @staticmethod
    def _resolve_key(model_name1, model_name2, model_name3, type, key_opt, mode, device):
        # Single source of truth for cache-key resolution; previously duplicated
        # verbatim in doit() and IS_CHANGED().
        if mode == 'Read Only':
            if key_opt.strip() == '':
                raise Exception("[LoadTextEncoderShared] key_opt cannot be omit if mode is 'Read Only'")
            return key_opt.strip()

        if key_opt.strip() == '':
            key = model_name1
            # NOTE(review): the combo widgets yield the string "None", never Python
            # None, so both checks below are always true and every auto key embeds
            # all three names — kept as-is so existing cache keys stay valid.
            if model_name2 is not None:
                key += f"_{model_name2}"
            if model_name3 is not None:
                key += f"_{model_name3}"
            return key + f"_{type}_{device}"

        return key_opt.strip()

    def doit(self, model_name1, model_name2, model_name3, type, key_opt, mode='Auto', device="default"):
        """Load (or fetch from cache) a text encoder set; returns (clip, cache key)."""
        key = self._resolve_key(model_name1, model_name2, model_name3, type, key_opt, mode, device)

        if key not in cache or mode == 'Override Cache':
            if model_name2 != "None" and model_name3 != "None":  # triple text encoder
                if len({model_name1, model_name2, model_name3}) < 3:
                    logging.error("[LoadTextEncoderShared] The same model has been selected multiple times.")
                    raise ValueError("The same model has been selected multiple times.")

                if type not in ["sd3"]:
                    logging.error("[LoadTextEncoderShared] Currently, the triple text encoder is only supported in `sd3`.")
                    raise ValueError("Currently, the triple text encoder is only supported in `sd3`.")

                tcloader = nodes.NODE_CLASS_MAPPINGS["TripleCLIPLoader"]()
                if hasattr(tcloader, 'execute'):
                    # node v3
                    res = tcloader.execute(model_name1, model_name2, model_name3)[0]
                else:
                    # legacy compatibility
                    res = tcloader.load_clip(model_name1, model_name2, model_name3)[0]

            elif model_name2 != "None" or model_name3 != "None":  # dual text encoder
                second_model = model_name2 if model_name2 != "None" else model_name3

                if model_name1 == second_model:
                    logging.error("[LoadTextEncoderShared] You have selected the same model for both.")
                    raise ValueError("[LoadTextEncoderShared] You have selected the same model for both.")

                if type not in ["sdxl", "sd3", "flux", "hunyuan_video"]:
                    # FIX: these messages previously said "triple text encoder" even
                    # though this is the dual-encoder branch.
                    logging.error("[LoadTextEncoderShared] Currently, the dual text encoder is only supported in `sdxl, sd3, flux, hunyuan_video`.")
                    raise ValueError("Currently, the dual text encoder is only supported in `sdxl, sd3, flux, hunyuan_video`.")

                res = nodes.NODE_CLASS_MAPPINGS["DualCLIPLoader"]().load_clip(model_name1, second_model, type=type, device=device)[0]

            else:  # single text encoder
                if type not in ["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos"]:
                    logging.error("[LoadTextEncoderShared] Currently, the single text encoder is only supported in `stable_diffusion, stable_cascade, sd3, stable_audio, mochi, ltxv, pixart, cosmos`.")
                    raise ValueError("Currently, the single text encoder is only supported in `stable_diffusion, stable_cascade, sd3, stable_audio, mochi, ltxv, pixart, cosmos`.")

                res = nodes.NODE_CLASS_MAPPINGS["CLIPLoader"]().load_clip(model_name1, type=type, device=device)[0]

            update_cache(key, "diffusion", (False, res))
            logging.info(f"[Inspire Pack] LoadTextEncoderShared: text encoder model set is cached to '{key}'.")
        else:
            _, (_, res) = cache[key]
            logging.info(f"[Inspire Pack] LoadTextEncoderShared: Cached text encoder model set '{key}' is loaded. (Loading skip)")

        return res, key

    @staticmethod
    def IS_CHANGED(model_name1, model_name2, model_name3, type, key_opt, mode='Auto', device="default"):
        key = LoadTextEncoderShared._resolve_key(model_name1, model_name2, model_name3, type, key_opt, mode, device)

        if mode == 'Read Only':
            return None, cache_weak_hash(key)
        elif mode == 'Override Cache':
            return f"{model_name1}_{model_name2}_{model_name3}_{type}_{device}", key

        return None, cache_weak_hash(key)
class StableCascade_CheckpointLoader:
    """Load the Stable Cascade stage B and stage C checkpoints, with optional caching."""

    @classmethod
    def INPUT_TYPES(s):
        ckpts = folder_paths.get_filename_list("checkpoints")

        def pick_default(stage_tag):
            # Prefer cascade-named checkpoints carrying the stage tag, then any
            # checkpoint carrying the tag, then the full list; '' when none exist.
            cascade = [x for x in ckpts if 'cascade' in x.lower()]
            candidates = [x for x in cascade if stage_tag in x.lower()]
            if not candidates:
                candidates = [x for x in ckpts if stage_tag in x.lower()]
            if not candidates:
                candidates = ckpts
            return candidates[0] if candidates else ''

        default_stage_b = pick_default('stage_b')
        default_stage_c = pick_default('stage_c')

        return {"required": {
                    "stage_b": (ckpts, {'default': default_stage_b}),
                    "key_opt_b": ("STRING", {"multiline": False, "placeholder": "If empty, use 'stage_b' as the key."}),
                    "stage_c": (ckpts, {'default': default_stage_c}),
                    "key_opt_c": ("STRING", {"multiline": False, "placeholder": "If empty, use 'stage_c' as the key."}),
                    "cache_mode": (["none", "stage_b", "stage_c", "all"], {"default": "none"}),
                }}

    RETURN_TYPES = ("MODEL", "VAE", "MODEL", "VAE", "CLIP_VISION", "CLIP", "STRING", "STRING")
    RETURN_NAMES = ("b_model", "b_vae", "c_model", "c_vae", "c_clip_vision", "clip", "key_b", "key_c")
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    def doit(self, stage_b, key_opt_b, stage_c, key_opt_c, cache_mode):
        """Load stage B/C; `cache_mode` selects which stages go through the shared cache."""
        key_b = key_opt_b.strip() or stage_b
        key_c = key_opt_c.strip() or stage_c

        # --- stage B (regular checkpoint: model, clip, vae) ---
        if cache_mode in ['stage_b', "all"]:
            if key_b in cache:
                _, (_, loaded_b) = cache[key_b]
                logging.info(f"[Inspire Pack] StableCascade_CheckpointLoader: Cached ckpt '{key_b}' is loaded. (Loading skip)")
            else:
                loaded_b = nodes.CheckpointLoaderSimple().load_checkpoint(ckpt_name=stage_b)
                update_cache(key_b, "ckpt", (False, loaded_b))
                logging.info(f"[Inspire Pack] StableCascade_CheckpointLoader: Ckpt '{stage_b}' is cached to '{key_b}'.")
            b_model, clip, b_vae = loaded_b
        else:
            b_model, clip, b_vae = nodes.CheckpointLoaderSimple().load_checkpoint(ckpt_name=stage_b)

        # --- stage C (unCLIP checkpoint: model, _, vae, clip_vision) ---
        if cache_mode in ['stage_c', "all"]:
            if key_c in cache:
                _, (_, loaded_c) = cache[key_c]
                logging.info(f"[Inspire Pack] StableCascade_CheckpointLoader: Cached ckpt '{key_c}' is loaded. (Loading skip)")
            else:
                loaded_c = nodes.unCLIPCheckpointLoader().load_checkpoint(ckpt_name=stage_c)
                update_cache(key_c, "unclip_ckpt", (False, loaded_c))
                logging.info(f"[Inspire Pack] StableCascade_CheckpointLoader: Ckpt '{stage_c}' is cached to '{key_c}'.")
            c_model, _, c_vae, clip_vision = loaded_c
        else:
            c_model, _, c_vae, clip_vision = nodes.unCLIPCheckpointLoader().load_checkpoint(ckpt_name=stage_c)

        return b_model, b_vae, c_model, c_vae, clip_vision, clip, key_b, key_c
class IsCached:
    """Report whether a backend-cache entry exists for the given key (BOOLEAN output)."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {"key": ("STRING", {"multiline": False})},
            "hidden": {"unique_id": "UNIQUE_ID"},
        }

    RETURN_TYPES = ("BOOLEAN", )
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    @staticmethod
    def IS_CHANGED(key, unique_id):
        # Re-run this node whenever the cache-hit status for `key` flips.
        return common.is_changed(unique_id, key in cache)

    def doit(self, key, unique_id):
        hit = key in cache
        return (hit,)
# WIP: not properly working, yet
class CacheBridge:
    """Pass a value through while optionally memoizing it in the in-memory
    `common.changed_cache`, keyed by this node's unique_id.

    NOTE(review): flagged WIP upstream — in doit() the "cache mode" branch and
    the passthrough branch currently have identical effect (both end up storing
    and returning the incoming value), so `mode` only influences IS_CHANGED.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "value": (any_typ,),
                # True = passthrough (always re-evaluate), False = cached
                "mode": ("BOOLEAN", {"default": True, "label_off": "cached", "label_on": "passthrough"}),
            },
            "hidden": {
                "unique_id": "UNIQUE_ID"
            }
        }

    RETURN_TYPES = (any_typ, )
    RETURN_NAMES = ("value",)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Backend"

    @staticmethod
    def IS_CHANGED(value, mode, unique_id):
        # In cached mode, report "not changed" once a value exists for this node
        # so downstream execution can be skipped; otherwise always report changed.
        if not mode and unique_id in common.changed_cache:
            return common.not_changed_value(unique_id)
        else:
            return common.changed_value(unique_id)

    def doit(self, value, mode, unique_id):
        if not mode:
            # cache mode
            if unique_id not in common.changed_cache:
                common.changed_cache[unique_id] = value
                common.changed_count_cache[unique_id] = 0

            return (common.changed_cache[unique_id],)
        else:
            # passthrough mode: always overwrite the stored value.
            common.changed_cache[unique_id] = value
            common.changed_count_cache[unique_id] = 0
            return (common.changed_cache[unique_id],)
# Node registration: maps ComfyUI node identifiers (suffixed with "//Inspire"
# to avoid collisions with other packs) to their implementing classes.
NODE_CLASS_MAPPINGS = {
    "CacheBackendData //Inspire": CacheBackendData,
    "CacheBackendDataNumberKey //Inspire": CacheBackendDataNumberKey,
    "CacheBackendDataList //Inspire": CacheBackendDataList,
    "CacheBackendDataNumberKeyList //Inspire": CacheBackendDataNumberKeyList,
    "RetrieveBackendData //Inspire": RetrieveBackendData,
    "RetrieveBackendDataNumberKey //Inspire": RetrieveBackendDataNumberKey,
    "RemoveBackendData //Inspire": RemoveBackendData,
    "RemoveBackendDataNumberKey //Inspire": RemoveBackendDataNumberKey,
    "ShowCachedInfo //Inspire": ShowCachedInfo,
    "CheckpointLoaderSimpleShared //Inspire": CheckpointLoaderSimpleShared,
    "LoadDiffusionModelShared //Inspire": LoadDiffusionModelShared,
    "LoadTextEncoderShared //Inspire": LoadTextEncoderShared,
    "StableCascade_CheckpointLoader //Inspire": StableCascade_CheckpointLoader,
    "IsCached //Inspire": IsCached,
    # "CacheBridge //Inspire": CacheBridge,
}
# Human-readable display names shown in the ComfyUI node menu for each
# registered node identifier above.
NODE_DISPLAY_NAME_MAPPINGS = {
    "CacheBackendData //Inspire": "Cache Backend Data (Inspire)",
    "CacheBackendDataNumberKey //Inspire": "Cache Backend Data [NumberKey] (Inspire)",
    "CacheBackendDataList //Inspire": "Cache Backend Data List (Inspire)",
    "CacheBackendDataNumberKeyList //Inspire": "Cache Backend Data List [NumberKey] (Inspire)",
    "RetrieveBackendData //Inspire": "Retrieve Backend Data (Inspire)",
    "RetrieveBackendDataNumberKey //Inspire": "Retrieve Backend Data [NumberKey] (Inspire)",
    "RemoveBackendData //Inspire": "Remove Backend Data (Inspire)",
    "RemoveBackendDataNumberKey //Inspire": "Remove Backend Data [NumberKey] (Inspire)",
    "ShowCachedInfo //Inspire": "Show Cached Info (Inspire)",
    "CheckpointLoaderSimpleShared //Inspire": "Shared Checkpoint Loader (Inspire)",
    "LoadDiffusionModelShared //Inspire": "Shared Diffusion Model Loader (Inspire)",
    "LoadTextEncoderShared //Inspire": "Shared Text Encoder Loader (Inspire)",
    "StableCascade_CheckpointLoader //Inspire": "Stable Cascade Checkpoint Loader (Inspire)",
    "IsCached //Inspire": "Is Cached (Inspire)",
    # "CacheBridge //Inspire": "Cache Bridge (Inspire)"
}

View File

@@ -0,0 +1,165 @@
import torch
import nodes
import inspect
from .libs import utils
from nodes import MAX_RESOLUTION
import logging
class ConcatConditioningsWithMultiplier:
    """Concatenate extra conditionings onto conditioning1, each scaled by its own multiplier.

    Delegates strength scaling to the `ConditioningMultiplier_PoP` node from the
    comfy_PoP pack.
    """

    @classmethod
    def INPUT_TYPES(s):
        stack = inspect.stack()
        if stack[1].function == 'get_input_info':
            # bypass validation: the front end adds conditioningN/multiplierN
            # inputs dynamically, so accept any optional input name here.
            class AllContainer:
                def __contains__(self, item):
                    return True

                def __getitem__(self, key):
                    return "FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}

            return {
                "required": {"conditioning1": ("CONDITIONING",), },
                "optional": AllContainer()
            }

        return {
            "required": {"conditioning1": ("CONDITIONING",), },
            "optional": {"multiplier1": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), },
        }

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/__for_testing"

    def doit(self, **kwargs):
        if "ConditioningMultiplier_PoP" not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node('https://github.com/picturesonpictures/comfy_PoP',
                                          "To use 'ConcatConditioningsWithMultiplier' node, 'comfy_PoP' extension is required.")
            raise Exception("'comfy_PoP' node isn't installed.")

        multiplier_node = nodes.NODE_CLASS_MAPPINGS["ConditioningMultiplier_PoP"]()

        def amplified(cond, strength):
            # Delegate strength scaling to the PoP node.
            return multiplier_node.multiply_conditioning_strength(conditioning=cond, multiplier=float(strength))[0]

        acc = amplified(kwargs['conditioning1'], kwargs['multiplier1'])

        merged = None
        for name, extra in kwargs.items():
            if name == 'conditioning1' or not name.startswith('conditioning'):
                continue

            if len(extra) > 1:
                logging.warning(f"[Inspire Pack] ConcatConditioningsWithMultiplier {name} contains more than 1 cond, only the first one will actually be applied to conditioning1.")

            # 'conditioningN' -> 'multiplierN' (suffix starts at index 12).
            extra = amplified(extra, kwargs['multiplier' + name[12:]])
            extra_embedding = extra[0][0]

            merged = []
            for base in acc:
                joined = torch.cat((base[0], extra_embedding), 1)
                merged.append([joined, base[1].copy()])
            acc = merged

        if merged is None:
            # No extra conditionings supplied: return the ORIGINAL (unscaled)
            # conditioning1, matching legacy behavior.
            return (kwargs['conditioning1'],)
        return (merged,)
# CREDIT for ConditioningStretch, ConditioningUpscale: Davemane42
# Imported to support archived custom nodes.
# original code: https://github.com/Davemane42/ComfyUI_Dave_CustomNode/blob/main/MultiAreaConditioning.py
class ConditioningStretch:
    """Rescale area-conditioned entries from one canvas resolution to another."""

    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "conditioning": ("CONDITIONING",),
                "resolutionX": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                "resolutionY": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                "newWidth": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                "newHeight": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
                # "scalar": ("INT", {"default": 2, "min": 1, "max": 100, "step": 0.5}),
            },
        }

    RETURN_TYPES = ("CONDITIONING",)
    CATEGORY = "InspirePack/conditioning"

    FUNCTION = 'upscale'

    @staticmethod
    def upscale(conditioning, resolutionX, resolutionY, newWidth, newHeight, scalar=1):
        """Remap each entry's `area` (h, w, y, x in 8-px latent units) onto the new canvas.

        FIX: the original multiplied newWidth/newHeight by `scalar` INSIDE the
        loop, compounding the scale for every additional area-conditioned entry;
        the scaling is now applied once, up front (identical results for the
        default scalar=1).
        """
        newWidth *= scalar
        newHeight *= scalar

        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            if 'area' in n[1]:
                # area is stored as (h, w, y, x) in units of 8 pixels.
                x = ((n[1]['area'][3] * 8) * newWidth / resolutionX) // 8
                y = ((n[1]['area'][2] * 8) * newHeight / resolutionY) // 8
                w = ((n[1]['area'][1] * 8) * newWidth / resolutionX) // 8
                h = ((n[1]['area'][0] * 8) * newHeight / resolutionY) // 8
                # Round each value up to the nearest multiple of 8.
                n[1]['area'] = tuple(map(lambda v: (((int(v) + 7) >> 3) << 3), [h, w, y, x]))
            c.append(n)

        return (c,)
class ConditioningUpscale:
    """Multiply the `area` of area-conditioned entries by an integer scalar."""

    def __init__(self) -> None:
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "conditioning": ("CONDITIONING",),
                "scalar": ("INT", {"default": 2, "min": 1, "max": 100, "step": 0.5}),
            },
        }

    RETURN_TYPES = ("CONDITIONING",)
    CATEGORY = "InspirePack/conditioning"

    FUNCTION = 'upscale'

    @staticmethod
    def upscale(conditioning, scalar):
        """Scale each area value, rounding up to the nearest multiple of 8."""
        def scale8(v):
            return ((v * scalar + 7) >> 3) << 3

        out = []
        for cond, meta in conditioning:
            meta = meta.copy()
            if 'area' in meta:
                meta['area'] = tuple(scale8(v) for v in meta['area'])
            out.append([cond, meta])

        return (out,)
# Node registration for the conditioning utilities ("//Inspire" suffix avoids
# identifier collisions with other packs).
NODE_CLASS_MAPPINGS = {
    "ConcatConditioningsWithMultiplier //Inspire": ConcatConditioningsWithMultiplier,
    "ConditioningUpscale //Inspire": ConditioningUpscale,
    "ConditioningStretch //Inspire": ConditioningStretch,
}

# Display names shown in the ComfyUI node menu.
NODE_DISPLAY_NAME_MAPPINGS = {
    "ConcatConditioningsWithMultiplier //Inspire": "Concat Conditionings with Multiplier (Inspire)",
    "ConditioningUpscale //Inspire": "Conditioning Upscale (Inspire)",
    "ConditioningStretch //Inspire": "Conditioning Stretch (Inspire)",
}

View File

@@ -0,0 +1,511 @@
import os
import torch
from PIL import ImageOps
try:
import pillow_jxl # noqa: F401
jxl = True
except ImportError:
jxl = False
import comfy
import folder_paths
import base64
from io import BytesIO
from .libs.utils import ByPassTypeTuple, empty_pil_tensor, empty_latent
from PIL import Image
import numpy as np
import logging
import re
def extract_first_number(s):
    """Return the first run of digits in *s* as an int, or +inf when there is none.

    The +inf fallback sorts digit-less names last in ascending numerical sort.
    """
    for found in re.finditer(r'\d+', s):
        return int(found.group())
    return float('inf')
# Sort options exposed by the directory-loader nodes; consumed by sort_by().
sort_methods = [
    "None",
    "Alphabetical (ASC)",
    "Alphabetical (DESC)",
    "Numerical (ASC)",
    "Numerical (DESC)",
    "Datetime (ASC)",
    "Datetime (DESC)"
]
def sort_by(items, base_path='.', method=None):
    """Sort *items* (file names relative to *base_path*) by one of `sort_methods`.

    Unknown methods (including "None"/None) return the items unchanged.
    """
    def mtime_of(name):
        # Missing files sort first in ascending datetime order.
        try:
            return os.path.getmtime(os.path.join(base_path, name))
        except FileNotFoundError:
            return float('-inf')

    def numeric_key(name):
        return extract_first_number(os.path.splitext(name)[0])

    dispatch = {
        "Alphabetical (ASC)": (None, False),
        "Alphabetical (DESC)": (None, True),
        "Numerical (ASC)": (numeric_key, False),
        "Numerical (DESC)": (numeric_key, True),
        "Datetime (ASC)": (mtime_of, False),
        "Datetime (DESC)": (mtime_of, True),
    }

    if method not in dispatch:
        return items

    key, reverse = dispatch[method]
    return sorted(items, key=key, reverse=reverse)
class LoadImagesFromDirBatch:
    """Load every image in a directory as one batched IMAGE tensor.

    Returns (images, masks, image_count). Images that differ from the first
    image's resolution are resized to match; masks come from the alpha channel
    (inverted), or a 64x64 zero mask when no alpha is present.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "directory": ("STRING", {"default": ""}),
            },
            "optional": {
                "image_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}),
                "start_index": ("INT", {"default": 0, "min": -1, "max": 0xffffffffffffffff, "step": 1}),
                "load_always": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                "sort_method": (sort_methods,),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK", "INT")
    FUNCTION = "load_images"

    CATEGORY = "image"

    @classmethod
    def IS_CHANGED(cls, **kwargs):
        # Force re-execution when load_always is enabled.
        if 'load_always' in kwargs and kwargs['load_always']:
            return float("NaN")
        else:
            return hash(frozenset(kwargs))

    def load_images(self, directory: str, image_load_cap: int = 0, start_index: int = 0, load_always=False, sort_method=None):
        """Load up to `image_load_cap` images starting at `start_index`.

        Raises FileNotFoundError when the directory is missing or empty.
        """
        if not os.path.isdir(directory):
            raise FileNotFoundError(f"Directory '{directory} cannot be found.'")
        dir_files = os.listdir(directory)
        if len(dir_files) == 0:
            raise FileNotFoundError(f"No files in directory '{directory}'.")

        # Filter files by extension
        valid_extensions = ['.jpg', '.jpeg', '.png', '.webp']
        if jxl:
            # FIX: list.extend('.jxl') added the individual characters
            # '.', 'j', 'x', 'l' as "extensions"; append the extension string.
            valid_extensions.append('.jxl')
        dir_files = [f for f in dir_files if any(f.lower().endswith(ext) for ext in valid_extensions)]
        dir_files = sort_by(dir_files, directory, sort_method)

        dir_files = [os.path.join(directory, x) for x in dir_files]

        # start at start_index
        dir_files = dir_files[start_index:]

        images = []
        masks = []

        limit_images = False
        if image_load_cap > 0:
            limit_images = True
        image_count = 0

        has_non_empty_mask = False

        for image_path in dir_files:
            # FIX: the original condition `os.path.isdir(image_path) and os.path.ex`
            # raised AttributeError (os.path.ex does not exist) whenever a
            # subdirectory was encountered; simply skip directories.
            if os.path.isdir(image_path):
                continue
            if limit_images and image_count >= image_load_cap:
                break

            i = Image.open(image_path)
            i = ImageOps.exif_transpose(i)
            image = i.convert("RGB")
            image = np.array(image).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]

            if 'A' in i.getbands():
                # Invert alpha: opaque -> 0 (keep), transparent -> 1 (masked).
                mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
                mask = 1. - torch.from_numpy(mask)
                has_non_empty_mask = True
            else:
                mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")

            images.append(image)
            masks.append(mask)
            image_count += 1

        if len(images) == 1:
            return (images[0], masks[0], 1)

        elif len(images) > 1:
            image1 = images[0]
            mask1 = None

            # Conform every subsequent image to the first image's resolution.
            for image2 in images[1:]:
                if image1.shape[1:] != image2.shape[1:]:
                    image2 = comfy.utils.common_upscale(image2.movedim(-1, 1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1, -1)
                image1 = torch.cat((image1, image2), dim=0)

            # Stack all masks, resizing real (alpha-derived) masks to the image size.
            for mask2 in masks:
                if has_non_empty_mask:
                    if image1.shape[1:3] != mask2.shape:
                        mask2 = torch.nn.functional.interpolate(mask2.unsqueeze(0).unsqueeze(0), size=(image1.shape[1], image1.shape[2]), mode='bilinear', align_corners=False)
                        mask2 = mask2.squeeze(0)
                    else:
                        mask2 = mask2.unsqueeze(0)
                else:
                    mask2 = mask2.unsqueeze(0)

                if mask1 is None:
                    mask1 = mask2
                else:
                    mask1 = torch.cat((mask1, mask2), dim=0)

            return (image1, mask1, len(images))

        # NOTE(review): when every directory entry is filtered out (e.g. only
        # subdirectories or unsupported extensions remain after start_index),
        # this implicitly returns None — kept to preserve existing behavior.
class LoadImagesFromDirList:
    """Load every image in a directory as lists (one IMAGE/MASK/path per file)."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "directory": ("STRING", {"default": ""}),
            },
            "optional": {
                "image_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}),
                "start_index": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "step": 1}),
                "load_always": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                "sort_method": (sort_methods,),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK", "STRING")
    RETURN_NAMES = ("IMAGE", "MASK", "FILE PATH")
    OUTPUT_IS_LIST = (True, True, True)

    FUNCTION = "load_images"

    CATEGORY = "image"

    @classmethod
    def IS_CHANGED(cls, **kwargs):
        # Force re-execution when load_always is enabled.
        if 'load_always' in kwargs and kwargs['load_always']:
            return float("NaN")
        else:
            return hash(frozenset(kwargs))

    def load_images(self, directory: str, image_load_cap: int = 0, start_index: int = 0, load_always=False, sort_method=None):
        """Return ([images], [masks], [file paths]) for the directory's image files.

        Masks come from the alpha channel (inverted); files without alpha get a
        64x64 zero mask. Raises FileNotFoundError on a missing/empty directory.
        """
        if not os.path.isdir(directory):
            raise FileNotFoundError(f"Directory '{directory}' cannot be found.")
        dir_files = os.listdir(directory)
        if len(dir_files) == 0:
            raise FileNotFoundError(f"No files in directory '{directory}'.")

        # Filter files by extension
        valid_extensions = ['.jpg', '.jpeg', '.png', '.webp']
        if jxl:
            # FIX: list.extend('.jxl') added the individual characters
            # '.', 'j', 'x', 'l' as "extensions"; append the extension string.
            valid_extensions.append('.jxl')
        dir_files = [f for f in dir_files if any(f.lower().endswith(ext) for ext in valid_extensions)]
        dir_files = sort_by(dir_files, directory, sort_method)

        dir_files = [os.path.join(directory, x) for x in dir_files]

        # start at start_index
        dir_files = dir_files[start_index:]

        images = []
        masks = []
        file_paths = []

        limit_images = False
        if image_load_cap > 0:
            limit_images = True
        image_count = 0

        for image_path in dir_files:
            # FIX: the original `os.path.isdir(image_path) and os.path.ex` raised
            # AttributeError (os.path.ex does not exist) on any subdirectory;
            # simply skip directories.
            if os.path.isdir(image_path):
                continue
            if limit_images and image_count >= image_load_cap:
                break

            i = Image.open(image_path)
            i = ImageOps.exif_transpose(i)
            image = i.convert("RGB")
            image = np.array(image).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]

            if 'A' in i.getbands():
                # Invert alpha: opaque -> 0 (keep), transparent -> 1 (masked).
                mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
                mask = 1. - torch.from_numpy(mask)
            else:
                mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")

            images.append(image)
            masks.append(mask)
            file_paths.append(str(image_path))

            image_count += 1

        return (images, masks, file_paths)
class LoadImageInspire:
    """Load an image from base64-encoded data supplied by the front end."""

    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required": {
                    "image": (sorted(files) + ["#DATA"], {"image_upload": True}),
                    "image_data": ("STRING", {"multiline": False}),
                }
                }

    CATEGORY = "InspirePack/image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"

    def load_image(self, image, image_data):
        # NOTE(review): the `image` selection is never read — pixels always come
        # from `image_data`, a base64 data-URI ("<header>,<payload>"); presumably
        # the front end fills image_data for both file picks and #DATA uploads.
        # Confirm against the pack's JS side before relying on `image`.
        image_data = base64.b64decode(image_data.split(",")[1])
        i = Image.open(BytesIO(image_data))
        i = ImageOps.exif_transpose(i)
        image = i.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image = torch.from_numpy(image)[None,]
        if 'A' in i.getbands():
            # Invert alpha: opaque -> 0 (keep), transparent -> 1 (masked).
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            # No alpha channel: default 64x64 empty mask.
            mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
        return (image, mask.unsqueeze(0))
class ChangeImageBatchSize:
    """Force an IMAGE batch to a given size by truncating or repeating the last frame."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "image": ("IMAGE",),
                    "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}),
                    "mode": (["simple"],)
                }
                }

    CATEGORY = "InspirePack/Util"

    RETURN_TYPES = ("IMAGE", )
    FUNCTION = "doit"

    @staticmethod
    def resize_tensor(input_tensor, batch_size, mode):
        """Return *input_tensor* with exactly *batch_size* frames (mode "simple" only)."""
        if mode != "simple":
            # Unknown modes pass the input through untouched.
            logging.warning(f"[Inspire Pack] ChangeImage(Latent)BatchSize: Unknown mode `{mode}` - ignored")
            return input_tensor

        if len(input_tensor) >= batch_size:
            return input_tensor[:batch_size, :, :, :]

        # Pad by repeating the final frame.
        pad = input_tensor[-1].unsqueeze(0).expand(batch_size - len(input_tensor), -1, -1, -1)
        return torch.concat((input_tensor, pad), dim=0)

    @staticmethod
    def doit(image, batch_size, mode):
        return (ChangeImageBatchSize.resize_tensor(image, batch_size, mode),)
class ChangeLatentBatchSize:
    """Force a LATENT batch to a given size (delegates to ChangeImageBatchSize)."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "latent": ("LATENT",),
                    "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}),
                    "mode": (["simple"],)
                }
                }

    CATEGORY = "InspirePack/Util"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "doit"

    @staticmethod
    def doit(latent, batch_size, mode):
        # Shallow-copy the latent dict so callers' input stays untouched.
        resized = latent.copy()
        resized['samples'] = ChangeImageBatchSize.resize_tensor(resized['samples'], batch_size, mode)
        return (resized,)
class ImageBatchSplitter:
    """Split an image batch into `split_count` single-image outputs plus a remainder."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "images": ("IMAGE",),
                    "split_count": ("INT", {"default": 4, "min": 0, "max": 50, "step": 1}),
                },
                }

    RETURN_TYPES = ByPassTypeTuple(("IMAGE", ))
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Util"

    def doit(self, images, split_count):
        take = min(split_count, len(images))
        outputs = [img.unsqueeze(0) for img in images[:take]]

        if split_count >= len(images):
            # Pad the missing slots (plus the trailing "remainder" output)
            # with empty images.
            pad = empty_pil_tensor()
            outputs.extend(pad for _ in range(split_count - take + 1))
        else:
            # Everything beyond the split goes out as one batched remainder.
            outputs.append(images[-(len(images) - take):])

        return tuple(outputs)
class LatentBatchSplitter:
    """Split a latent batch into `split_count` single-sample outputs plus a remainder."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "latent": ("LATENT",),
                    "split_count": ("INT", {"default": 4, "min": 0, "max": 50, "step": 1}),
                },
                }

    RETURN_TYPES = ByPassTypeTuple(("LATENT", ))
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Util"

    def doit(self, latent, split_count):
        samples = latent['samples']

        # Template dict carrying every latent key except 'samples'.
        template = latent.copy()
        del template['samples']

        take = min(split_count, len(samples))

        outputs = []
        for sample in samples[:take]:
            entry = template.copy()
            entry['samples'] = sample.unsqueeze(0)
            outputs.append(entry)

        if split_count >= len(samples):
            # Pad the missing slots (plus the trailing "remainder" output)
            # with empty latents.
            filler = template.copy()
            filler['samples'] = empty_latent()
            outputs.extend(filler for _ in range(split_count - take + 1))
        else:
            # Everything beyond the split goes out as one batched remainder.
            remainder = template.copy()
            remainder['samples'] = samples[-(len(samples) - take):]
            outputs.append(remainder)

        return tuple(outputs)
def top_k_colors(image_tensor, k, min_pixels):
    """Return up to *k* most frequent colors of *image_tensor* (..., C) that
    cover at least *min_pixels* pixels, ordered by descending pixel count."""
    pixels = image_tensor.view(-1, image_tensor.size(-1))
    colors, counts = torch.unique(pixels, dim=0, return_counts=True)
    counts, order = torch.sort(counts, descending=True)
    frequent = colors[order][counts >= min_pixels]
    return frequent[:k]
def create_mask(image_tensor, color):
    """Binary mask over a (B, H, W, C) image: 1.0 where all channels equal *color*, else 0.0."""
    matches = torch.all(image_tensor == color, dim=-1)
    background = torch.zeros_like(image_tensor[:, :, :, 0])
    return torch.where(matches, 1, background)
class ColorMapToMasks:
    """Split a color-map image into per-color masks.

    The top `max_count` colors of the FIRST image (each covering at least
    `min_pixels` pixels) produce one mask each, stacked along dim 0.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "color_map": ("IMAGE",),
                    "min_pixels": ("INT", {"default": 500, "min": 1, "max": 0xffffffffffffffff, "step": 1}),
                    "max_count": ("INT", {"default": 5, "min": 0, "max": 1000, "step": 1}),
                },
                }

    RETURN_TYPES = ("MASK",)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Util"

    def doit(self, color_map, max_count, min_pixels):
        # FIX: the warning used to fire for ANY input (len > 0); it is only
        # meaningful when more than one image is batched. The message also
        # carried a leftover "Sure, here's the translation:" artifact.
        if len(color_map) > 1:
            logging.warning("[Inspire Pack] ColorMapToMasks - `color_map` can only be a single image. Only the first image will be processed. If you want to utilize the remaining images, convert the Image Batch to an Image List.")

        top_colors = top_k_colors(color_map[0], max_count, min_pixels)

        masks = None
        for color in top_colors:
            # FIX: masks were previously built from the whole batch even though
            # only the first image's colors are considered; restrict to image 0
            # (keeping the batch dim so each mask is (1, H, W)).
            this_mask = create_mask(color_map[:1], color)

            if masks is None:
                masks = this_mask
            else:
                masks = torch.concat((masks, this_mask), dim=0)

        if masks is None:
            masks = torch.zeros_like(color_map[0, :, :, 0])
            # FIX: the unsqueeze result was previously discarded, so the empty
            # case returned an (H, W) tensor instead of (1, H, W).
            masks = masks.unsqueeze(0)

        return (masks,)
class SelectNthMask:
    """Pick the idx-th mask from a mask batch and return it as a single-mask batch."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "masks": ("MASK",),
                    "idx": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "step": 1}),
                },
                }

    RETURN_TYPES = ("MASK",)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Util"

    def doit(self, masks, idx):
        chosen = masks[idx]
        return (chosen.unsqueeze(0),)
# Node registration for the image/latent utilities ("//Inspire" suffix avoids
# identifier collisions with other packs).
NODE_CLASS_MAPPINGS = {
    "LoadImagesFromDir //Inspire": LoadImagesFromDirBatch,
    "LoadImageListFromDir //Inspire": LoadImagesFromDirList,
    "LoadImage //Inspire": LoadImageInspire,
    "ChangeImageBatchSize //Inspire": ChangeImageBatchSize,
    "ChangeLatentBatchSize //Inspire": ChangeLatentBatchSize,
    "ImageBatchSplitter //Inspire": ImageBatchSplitter,
    "LatentBatchSplitter //Inspire": LatentBatchSplitter,
    "ColorMapToMasks //Inspire": ColorMapToMasks,
    "SelectNthMask //Inspire": SelectNthMask
}

# Display names shown in the ComfyUI node menu.
NODE_DISPLAY_NAME_MAPPINGS = {
    "LoadImagesFromDir //Inspire": "Load Image Batch From Dir (Inspire)",
    "LoadImageListFromDir //Inspire": "Load Image List From Dir (Inspire)",
    "LoadImage //Inspire": "Load Image (Inspire)",
    "ChangeImageBatchSize //Inspire": "Change Image Batch Size (Inspire)",
    "ChangeLatentBatchSize //Inspire": "Change Latent Batch Size (Inspire)",
    "ImageBatchSplitter //Inspire": "Image Batch Splitter (Inspire)",
    "LatentBatchSplitter //Inspire": "Latent Batch Splitter (Inspire)",
    "ColorMapToMasks //Inspire": "Color Map To Masks (Inspire)",
    "SelectNthMask //Inspire": "Select Nth Mask (Inspire)"
}

View File

@@ -0,0 +1,412 @@
import random
import nodes
import server
from enum import Enum
from . import prompt_support
from aiohttp import web
from . import backend_support
from .libs import common
import logging
max_seed = 2**32 - 1
@server.PromptServer.instance.routes.get("/inspire/prompt_builder")
def prompt_builder(request):
    """Return the prompt-builder presets for the requested ?category=..."""
    result = {"presets": []}

    category = request.rel_url.query.get("category")
    if category is not None and category in prompt_support.prompt_builder_preset:
        result['presets'] = prompt_support.prompt_builder_preset[category]

    return web.json_response(result)
@server.PromptServer.instance.routes.get("/inspire/cache/remove")
def cache_remove(request):
    """Remove a single backend-cache entry (?key=...). Always responds 200."""
    if "key" in request.rel_url.query:
        key = request.rel_url.query["key"]
        # FIX: `del cache[key]` raised KeyError (-> HTTP 500) when the key was
        # already gone (e.g. repeated clicks in the UI); pop() makes the
        # removal idempotent.
        backend_support.cache.pop(key, None)

    return web.Response(status=200)
@server.PromptServer.instance.routes.get("/inspire/cache/clear")
def cache_clear(request):
    """Drop every entry from the backend cache."""
    backend_support.cache.clear()
    return web.Response(status=200)
@server.PromptServer.instance.routes.get("/inspire/cache/list")
def cache_refresh(request):
    """Return a plain-text summary of the current backend-cache contents."""
    return web.Response(text=backend_support.ShowCachedInfo.get_data(), status=200)
@server.PromptServer.instance.routes.post("/inspire/cache/settings")
async def set_cache_settings(request):
    """Apply cache settings posted as raw text; 200 'OK' on success, 500 + error text otherwise."""
    data = await request.text()
    try:
        backend_support.ShowCachedInfo.set_cache_settings(data)
    except Exception as e:
        return web.Response(text=f"{e}", status=500)
    return web.Response(text='OK', status=200)
class SGmode(Enum):
    """Per-node seed-generation behavior used by SeedGenerator."""
    FIX = 1    # keep the base seed for every node
    INCR = 2   # increment the seed after each node
    DECR = 3   # decrement the seed after each node
    RAND = 4   # fresh random seed for each node
class SeedGenerator:
    """Yields successive seed values for prompt nodes according to an action policy.

    The "... for each node" actions change the seed between nodes; the plain
    'fixed'/'increment'/'decrement'/'randomize' actions are applied once up
    front by control_seed(), so here they keep the seed fixed.
    """

    def __init__(self, base_value, action):
        self.base_value = base_value

        if action == 'increment for each node':
            self.action = SGmode.INCR
        elif action == 'decrement for each node':
            self.action = SGmode.DECR
        elif action == 'randomize for each node':
            self.action = SGmode.RAND
        else:
            # FIX: an unrecognized action previously left self.action unset,
            # making next() raise AttributeError; default to FIX instead.
            self.action = SGmode.FIX

    def next(self):
        """Return the current seed, then advance it according to the action."""
        seed = self.base_value

        if self.action == SGmode.INCR:
            self.base_value += 1
            if self.base_value > max_seed:
                self.base_value = 0  # wrap around the 32-bit seed range
        elif self.action == SGmode.DECR:
            self.base_value -= 1
            if self.base_value < 0:
                self.base_value = max_seed
        elif self.action == SGmode.RAND:
            self.base_value = random.randint(0, max_seed)

        return seed
def control_seed(v):
    """Apply a one-shot seed action to a GlobalSeed node's prompt entry, in place.

    Mutates v['inputs']['value'] (wrapping within [0, max_seed]) and returns it.
    """
    inputs = v['inputs']
    action = inputs['action']
    value = inputs['value']

    if action in ('increment', 'increment for each node'):
        value = 0 if value + 1 > max_seed else value + 1
    elif action in ('decrement', 'decrement for each node'):
        value = max_seed if value - 1 < 0 else value - 1
    elif action in ('randomize', 'randomize for each node'):
        value = random.randint(0, max_seed)

    inputs['value'] = value
    return value
def prompt_seed_update(json_data):
    """Apply the GlobalSeed node (if present) to every seed widget in the prompt.

    Returns (found, mode): `found` is True when a GlobalSeed node exists and
    seeds were distributed; `mode` is its control flag (truthy = adjust the
    seed before distribution, falsy = adjust it after).
    """
    try:
        widget_idx_map = json_data['extra_data']['extra_pnginfo']['workflow']['widget_idx_map']
    except Exception:
        # Workflow metadata missing (e.g. API-only prompt) - nothing to update.
        return False, None

    value = None
    mode = None
    node = None
    action = None

    # Locate the GlobalSeed node (the last one wins if several exist).
    for k, v in json_data['prompt'].items():
        if 'class_type' not in v:
            continue

        cls = v['class_type']
        if cls == 'GlobalSeed //Inspire':
            mode = v['inputs']['mode']
            action = v['inputs']['action']
            value = v['inputs']['value']
            node = k, v

    # control before generated
    if mode is not None and mode:
        value = control_seed(node[1])

    if value is not None:
        seed_generator = SeedGenerator(value, action)

        for k, v in json_data['prompt'].items():
            # Substitute the textual placeholder in every widget of every node.
            for k2, v2 in v['inputs'].items():
                if isinstance(v2, str) and '$GlobalSeed.value$' in v2:
                    v['inputs'][k2] = v2.replace('$GlobalSeed.value$', str(value))

            # Only nodes with a genuine seed/noise_seed widget get overwritten;
            # converted inputs (links) are skipped via the widget_idx_map check.
            if k not in widget_idx_map or ('seed' not in widget_idx_map[k] and 'noise_seed' not in widget_idx_map[k]):
                continue

            if 'seed' in v['inputs']:
                if isinstance(v['inputs']['seed'], int):
                    v['inputs']['seed'] = seed_generator.next()

            if 'noise_seed' in v['inputs']:
                if isinstance(v['inputs']['noise_seed'], int):
                    v['inputs']['noise_seed'] = seed_generator.next()

            # NOTE(review): this placeholder pass repeats the one above for
            # nodes that reach this point - looks redundant; confirm.
            for k2, v2 in v['inputs'].items():
                if isinstance(v2, str) and '$GlobalSeed.value$' in v2:
                    v['inputs'][k2] = v2.replace('$GlobalSeed.value$', str(value))

    # control after generated
    if mode is not None and not mode:
        control_seed(node[1])

    return value is not None, mode
def workflow_seed_update(json_data, mode):
    """Mirror the seeds chosen by prompt_seed_update back into the workflow's
    widget values so the saved/displayed workflow matches what actually ran,
    then push the seed map to the frontend via 'inspire-global-seed'."""
    nodes = json_data['extra_data']['extra_pnginfo']['workflow']['nodes']
    widget_idx_map = json_data['extra_data']['extra_pnginfo']['workflow']['widget_idx_map']
    prompt = json_data['prompt']

    updated_seed_map = {}
    value = None
    for node in nodes:
        node_id = str(node['id'])
        if node_id in prompt:
            if node['type'] == 'GlobalSeed //Inspire':
                if mode is True:
                    # 'control before generate' path: widget slots 0/2/3 are
                    # rewritten so the UI shows the seed actually used and the
                    # action resets to 'fixed'. (Slot 3 presumably holds the
                    # previous-seed display - confirm against the JS widget.)
                    node['widgets_values'][3] = node['widgets_values'][0]
                    node['widgets_values'][0] = prompt[node_id]['inputs']['value']
                    node['widgets_values'][2] = 'fixed'

                value = prompt[node_id]['inputs']['value']

            elif node_id in widget_idx_map:
                widget_idx = None
                seed = None
                # noise_seed takes precedence when both widgets exist.
                if 'noise_seed' in prompt[node_id]['inputs']:
                    seed = prompt[node_id]['inputs']['noise_seed']
                    widget_idx = widget_idx_map[node_id].get('noise_seed')
                elif 'seed' in prompt[node_id]['inputs']:
                    seed = prompt[node_id]['inputs']['seed']
                    widget_idx = widget_idx_map[node_id].get('seed')

                if widget_idx is not None:
                    node['widgets_values'][widget_idx] = seed
                    updated_seed_map[node_id] = seed

    server.PromptServer.instance.send_sync("inspire-global-seed", {"value": value, "seed_map": updated_seed_map})
def prompt_sampler_update(json_data):
    """Propagate the sampler/scheduler chosen on a 'GlobalSampler //Inspire'
    node to every other node that exposes sampler_name/scheduler widgets,
    updating both the prompt and the stored workflow and notifying the
    frontend for each rewritten widget."""
    try:
        widget_idx_map = json_data['extra_data']['extra_pnginfo']['workflow']['widget_idx_map']
    except Exception:
        # Workflow metadata missing - nothing to update.
        return None

    nodes = json_data['extra_data']['extra_pnginfo']['workflow']['nodes']
    prompt = json_data['prompt']

    sampler_name = None
    scheduler = None
    for v in prompt.values():
        cls = v.get('class_type')
        if cls == 'GlobalSampler //Inspire':
            sampler_name = v['inputs']['sampler_name']
            scheduler = v['inputs']['scheduler']

    if sampler_name is None:
        return

    for node in nodes:
        cls = node.get('type')
        if cls == 'GlobalSampler //Inspire' or cls is None:
            continue

        node_id = str(node['id'])
        if node_id in prompt and node_id in widget_idx_map:
            sampler_widget_idx = widget_idx_map[node_id].get('sampler_name')
            scheduler_widget_idx = widget_idx_map[node_id].get('scheduler')

            prompt_inputs = prompt[node_id]['inputs']
            # Both widgets must hold literal strings; converted inputs (links)
            # arrive as lists and must not be overwritten.
            # BUGFIX: the last clause used to re-test `'scheduler' in
            # prompt_inputs` instead of checking the scheduler is a string,
            # so link-valued schedulers could be clobbered.
            if ('sampler_name' in prompt_inputs and 'scheduler' in prompt_inputs and
                    isinstance(prompt_inputs['sampler_name'], str) and isinstance(prompt_inputs['scheduler'], str)):
                if sampler_widget_idx is not None:
                    prompt_inputs['sampler_name'] = sampler_name
                    node['widgets_values'][sampler_widget_idx] = sampler_name
                    server.PromptServer.instance.send_sync("inspire-node-feedback", {"node_id": node_id, "widget_name": 'sampler_name', "type": "text", "data": sampler_name})

                if scheduler_widget_idx is not None:
                    prompt_inputs['scheduler'] = scheduler
                    node['widgets_values'][scheduler_widget_idx] = scheduler
                    server.PromptServer.instance.send_sync("inspire-node-feedback", {"node_id": node_id, "widget_name": 'scheduler', "type": "text", "data": scheduler})
def workflow_loadimage_update(json_data):
    """Replace the image widget of every 'LoadImage //Inspire' node with the
    '#DATA' placeholder; the actual image payload travels separately."""
    for node in json_data['prompt'].values():
        if node.get('class_type') == 'LoadImage //Inspire':
            node['inputs']['image'] = "#DATA"
def populate_wildcards(json_data):
    """Resolve wildcard text on 'WildcardEncode //Inspire' and
    'MakeBasicPipe //Inspire' nodes before execution.

    Uses Impact Pack's wildcard processor; the resolved text is written into
    the prompt, echoed to the frontend, and mirrored into the saved workflow.
    Nodes are switched to 'reproduce' mode for this run and the frontend is
    told to flip the widget back to 'populate' for the next run.
    """
    prompt = json_data['prompt']

    if 'ImpactWildcardProcessor' in nodes.NODE_CLASS_MAPPINGS:
        if not hasattr(nodes.NODE_CLASS_MAPPINGS['ImpactWildcardProcessor'], 'process'):
            logging.warning("[Inspire Pack] Your Impact Pack is outdated. Please update to the latest version.")
            return

        wildcard_process = nodes.NODE_CLASS_MAPPINGS['ImpactWildcardProcessor'].process
        updated_widget_values = {}
        mbp_updated_widget_values = {}
        for k, v in prompt.items():
            if 'class_type' in v and v['class_type'] == 'WildcardEncode //Inspire':
                inputs = v['inputs']

                # legacy adapter: old workflows stored mode as a bool
                if isinstance(inputs['mode'], bool):
                    if inputs['mode']:
                        new_mode = 'populate'
                    else:
                        new_mode = 'fixed'

                    inputs['mode'] = new_mode

                if inputs['mode'] == 'populate' and isinstance(inputs['populated_text'], str):
                    if isinstance(inputs['seed'], list):
                        # seed widget converted to an input link - only a few
                        # source node types can be resolved statically.
                        try:
                            input_node = prompt[inputs['seed'][0]]
                            if input_node['class_type'] == 'ImpactInt':
                                input_seed = int(input_node['inputs']['value'])
                                if not isinstance(input_seed, int):
                                    continue
                            # BUGFIX: must be `elif` - with a plain `if`, an
                            # ImpactInt seed fell through into the else-branch
                            # below and was skipped with a warning.
                            elif input_node['class_type'] == 'Seed (rgthree)':
                                input_seed = int(input_node['inputs']['seed'])
                                if not isinstance(input_seed, int):
                                    continue
                            else:
                                # BUGFIX: missing f-prefix left the placeholder unformatted.
                                logging.warning(f"[Inspire Pack] Only `ImpactInt`, `Seed (rgthree)` and `Primitive` Node are allowed as the seed for '{v['class_type']}'. It will be ignored. ")
                                continue
                        except Exception:
                            continue
                    else:
                        input_seed = int(inputs['seed'])

                    inputs['populated_text'] = wildcard_process(text=inputs['wildcard_text'], seed=input_seed)
                    inputs['mode'] = 'reproduce'

                    server.PromptServer.instance.send_sync("inspire-node-feedback", {"node_id": k, "widget_name": "populated_text", "type": "text", "data": inputs['populated_text']})
                    updated_widget_values[k] = inputs['populated_text']

                # Tell the frontend to flip the widget back to 'populate'.
                if inputs['mode'] == 'reproduce':
                    server.PromptServer.instance.send_sync("inspire-node-feedback", {"node_id": k, "widget_name": "mode", "type": "text", "value": 'populate'})

            elif 'class_type' in v and v['class_type'] == 'MakeBasicPipe //Inspire':
                inputs = v['inputs']

                if inputs['wildcard_mode'] == 'populate' and (isinstance(inputs['positive_populated_text'], str) or isinstance(inputs['negative_populated_text'], str)):
                    if isinstance(inputs['seed'], list):
                        try:
                            input_node = prompt[inputs['seed'][0]]
                            if input_node['class_type'] == 'ImpactInt':
                                input_seed = int(input_node['inputs']['value'])
                                if not isinstance(input_seed, int):
                                    continue
                            # BUGFIX: `elif` (same fall-through bug as above).
                            elif input_node['class_type'] == 'Seed (rgthree)':
                                input_seed = int(input_node['inputs']['seed'])
                                if not isinstance(input_seed, int):
                                    continue
                            else:
                                # BUGFIX: missing f-prefix left the placeholder unformatted.
                                logging.warning(f"[Inspire Pack] Only `ImpactInt`, `Seed (rgthree)` and `Primitive` Node are allowed as the seed for '{v['class_type']}'. It will be ignored. ")
                                continue
                        except Exception:
                            continue
                    else:
                        input_seed = int(inputs['seed'])

                    if isinstance(inputs['positive_populated_text'], str):
                        inputs['positive_populated_text'] = wildcard_process(text=inputs['positive_wildcard_text'], seed=input_seed)
                        server.PromptServer.instance.send_sync("inspire-node-feedback", {"node_id": k, "widget_name": "positive_populated_text", "type": "text", "data": inputs['positive_populated_text']})

                    if isinstance(inputs['negative_populated_text'], str):
                        inputs['negative_populated_text'] = wildcard_process(text=inputs['negative_wildcard_text'], seed=input_seed)
                        server.PromptServer.instance.send_sync("inspire-node-feedback", {"node_id": k, "widget_name": "negative_populated_text", "type": "text", "data": inputs['negative_populated_text']})

                    inputs['wildcard_mode'] = 'reproduce'
                    mbp_updated_widget_values[k] = inputs['positive_populated_text'], inputs['negative_populated_text']

                if inputs['wildcard_mode'] == 'reproduce':
                    server.PromptServer.instance.send_sync("inspire-node-feedback", {"node_id": k, "widget_name": "wildcard_mode", "type": "text", "value": 'populate'})

        # Mirror the populated text into the saved workflow's widget values.
        if 'extra_data' in json_data and 'extra_pnginfo' in json_data['extra_data']:
            extra_pnginfo = json_data['extra_data']['extra_pnginfo']
            if 'workflow' in extra_pnginfo and extra_pnginfo['workflow'] is not None and 'nodes' in extra_pnginfo['workflow']:
                for node in extra_pnginfo['workflow']['nodes']:
                    key = str(node['id'])
                    if key in updated_widget_values:
                        # NOTE(review): slots 3/4 assumed to be populated_text
                        # and mode for WildcardEncode - confirm against the JS.
                        node['widgets_values'][3] = updated_widget_values[key]
                        node['widgets_values'][4] = 'reproduce'
                    if key in mbp_updated_widget_values:
                        node['widgets_values'][7] = mbp_updated_widget_values[key][0]
                        node['widgets_values'][8] = mbp_updated_widget_values[key][1]
                        node['widgets_values'][5] = 'reproduce'
def force_reset_useless_params(json_data):
    """Reset the `category` widget of every 'PromptBuilder //Inspire' node to a
    placeholder (presumably UI-only state that should not vary the prompt)."""
    for node in json_data['prompt'].values():
        if node.get('class_type') == 'PromptBuilder //Inspire':
            node['inputs']['category'] = '#PLACEHOLDER'

    return json_data
def clear_unused_node_changed_cache(json_data):
    """Drop change-tracking entries for node ids that are no longer present in
    the queued prompt, keeping both caches from growing without bound."""
    prompt = json_data['prompt']

    unused = [x for x in common.changed_cache.keys() if x not in prompt]
    for x in unused:
        del common.changed_cache[x]
        # pop() tolerates ids that never made it into the count cache
        # (a plain `del` raised KeyError in that case).
        common.changed_count_cache.pop(x, None)

    return json_data
def onprompt(json_data):
    """Pre-execution hook: runs every Inspire Pack prompt rewrite, in order,
    before ComfyUI executes the queued prompt."""
    prompt_support.list_counter_map = {}  # reset per-prompt list counters

    is_changed, mode = prompt_seed_update(json_data)
    if is_changed:
        workflow_seed_update(json_data, mode)

    prompt_sampler_update(json_data)
    workflow_loadimage_update(json_data)
    populate_wildcards(json_data)
    force_reset_useless_params(json_data)
    clear_unused_node_changed_cache(json_data)

    return json_data
# Register the pre-execution hook; this module contributes no nodes itself,
# so its mappings stay empty (nodes are registered by sibling modules).
server.PromptServer.instance.add_on_prompt_handler(onprompt)
NODE_CLASS_MAPPINGS = {}
NODE_DISPLAY_NAME_MAPPINGS = {}

View File

@@ -0,0 +1,87 @@
import comfy
import nodes
from . import utils
import logging
from server import PromptServer
# Scheduler names the Inspire Pack offers on top of ComfyUI's built-in handlers.
ADDITIONAL_SCHEDULERS = ['AYS SDXL', 'AYS SD1', 'AYS SVD', 'GITS[coeff=1.2]', 'LTXV[default]', 'OSS FLUX', 'OSS Wan', 'OSS Chroma']
def get_schedulers():
    """Return every selectable scheduler name: ComfyUI core plus the Inspire additions."""
    return list(comfy.samplers.SCHEDULER_HANDLERS) + ADDITIONAL_SCHEDULERS
def impact_sampling(*args, **kwargs):
    """Delegate sampling to Impact Pack's RegionalSampler.separated_sample.

    If the Impact Pack is not installed, prompts ComfyUI-Manager to offer the
    installation and raises."""
    sampler_cls = nodes.NODE_CLASS_MAPPINGS.get('RegionalSampler')
    if sampler_cls is None:
        utils.try_install_custom_node('https://github.com/ltdrdata/ComfyUI-Impact-Pack',
                                      "'Impact Pack' extension is required.")
        raise Exception("[ERROR] You need to install 'ComfyUI-Impact-Pack'")

    return sampler_cls.separated_sample(*args, **kwargs)
# Per-node change tracking backing IS_CHANGED: `changed_cache` holds the last
# value seen per node id, `changed_count_cache` how many times it has changed.
changed_count_cache = {}
changed_cache = {}
def changed_value(uid):
    """Record that `uid`'s tracked value changed; return the new change count."""
    count = changed_count_cache.get(uid, 0) + 1
    changed_count_cache[uid] = count
    return count
def not_changed_value(uid):
    """Return `uid`'s current change count without modifying it."""
    return changed_count_cache.get(uid, 0)
def is_changed(uid, value):
    """IS_CHANGED helper: bump and return the change counter for `uid` when
    `value` differs from the last value seen, else return it unchanged."""
    if uid not in changed_cache or changed_cache[uid] != value:
        res = changed_value(uid)
    else:
        res = not_changed_value(uid)

    changed_cache[uid] = value
    # Debug-level (was info): dumping all cache keys on every IS_CHANGED
    # evaluation flooded the log; lazy %-args avoid formatting when disabled.
    logging.debug("keys: %s", changed_cache.keys())
    return res
def update_node_status(node, text, progress=None):
    """Push a status/progress update for `node` to the attached web client.

    Does nothing when no client is connected."""
    if PromptServer.instance.client_id is None:
        return

    payload = {"node": node, "progress": progress, "text": text}
    PromptServer.instance.send_sync("inspire/update_status", payload, PromptServer.instance.client_id)
class ListWrapper:
    """List-like container carrying an `aux` payload alongside its items.

    Used by the Foreach nodes to thread iteration metadata (total step count,
    status-node id) along with the remaining items. Construction always copies
    the items, so wrappers never share mutations.
    """

    def __init__(self, data, aux=None):
        if isinstance(data, ListWrapper):
            # BUGFIX: copy the wrapped items instead of storing the wrapper
            # itself - nesting made __repr__ recursive and caused copies to
            # share mutations, unlike the plain-list path below which copies.
            self._data = list(data._data)
            if aux is None:
                self.aux = data.aux
            else:
                self.aux = aux
        else:
            self._data = list(data)
            self.aux = aux

    def __getitem__(self, index):
        # Slicing preserves the wrapper type and its aux payload.
        if isinstance(index, slice):
            return ListWrapper(self._data[index], self.aux)
        else:
            return self._data[index]

    def __setitem__(self, index, value):
        self._data[index] = value

    def __len__(self):
        return len(self._data)

    def __repr__(self):
        return f"ListWrapper({self._data}, aux={self.aux})"

View File

@@ -0,0 +1,351 @@
import itertools
from typing import Optional
import numpy as np
import torch
from PIL import Image, ImageDraw
import math
import cv2
import folder_paths
import logging
def apply_variation_noise(latent_image, noise_device, variation_seed, variation_strength, mask=None, variation_method='linear'):
    """Blend deterministic variation noise into an existing noise tensor.

    A single-sample noise tensor is generated from `variation_seed`, broadcast
    across the batch, and mixed into `latent_image` by `variation_strength`.
    With a `mask`, only pixels where mask == 1 receive the mixed noise and
    pixels where mask == 0 keep the input (fractional mask values get neither).
    """
    latent_size = latent_image.size()
    latent_size_1batch = [1, latent_size[1], latent_size[2], latent_size[3]]

    if noise_device == "cpu":
        variation_generator = torch.manual_seed(variation_seed)
    else:
        # GPU path seeds the *global* CUDA RNG and passes generator=None so
        # randn draws from that global state (A1111-compatible noise).
        torch.cuda.manual_seed(variation_seed)
        variation_generator = None

    variation_latent = torch.randn(latent_size_1batch, dtype=latent_image.dtype, layout=latent_image.layout,
                                   generator=variation_generator, device=noise_device)

    variation_noise = variation_latent.expand(latent_image.size()[0], -1, -1, -1)

    if variation_strength == 0:
        return latent_image
    elif mask is None:
        # NOTE(review): unlike the masked branch this blend bypasses
        # mix_noise, so no variance rescaling is applied - confirm whether the
        # asymmetry is intentional.
        result = (1 - variation_strength) * latent_image + variation_strength * variation_noise
    else:
        # this seems precision is not enough when variation_strength is 0.0
        mixed_noise = mix_noise(latent_image, variation_noise, variation_strength, variation_method=variation_method)

        result = (mask == 1).float() * mixed_noise + (mask == 0).float() * latent_image

    return result
# CREDIT: https://github.com/BlenderNeko/ComfyUI_Noise/blob/afb14757216257b12268c91845eac248727a55e2/nodes.py#L68
# https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
def slerp(val, low, high):
    """Spherical linear interpolation between `low` and `high` at fraction `val`.

    Tensors are flattened per batch row, interpolated along the great circle
    of their directions, and reshaped back. Zero-norm rows are treated as the
    zero vector instead of producing NaNs.
    """
    shape = low.shape
    flat_low = low.reshape(shape[0], -1)
    flat_high = high.reshape(shape[0], -1)

    unit_low = flat_low / torch.norm(flat_low, dim=1, keepdim=True)
    unit_high = flat_high / torch.norm(flat_high, dim=1, keepdim=True)

    # NaNs from zero-norm rows are zeroed out (x != x is True only for NaN).
    unit_low[unit_low != unit_low] = 0.0
    unit_high[unit_high != unit_high] = 0.0

    omega = torch.acos((unit_low * unit_high).sum(1))
    sin_omega = torch.sin(omega)

    coeff_low = (torch.sin((1.0 - val) * omega) / sin_omega).unsqueeze(1)
    coeff_high = (torch.sin(val * omega) / sin_omega).unsqueeze(1)
    return (coeff_low * flat_low + coeff_high * flat_high).reshape(shape)
def mix_noise(from_noise, to_noise, strength, variation_method):
    """Blend `to_noise` into `from_noise` by `strength`, either spherically
    ('slerp') or via variance-corrected linear interpolation."""
    to_noise = to_noise.to(from_noise.device)

    if variation_method == 'slerp':
        return slerp(strength, from_noise, to_noise)

    # Linear blend; lerping two independent Gaussians shrinks the variance,
    # so rescale to restore a unit-variance result.
    blended = (1 - strength) * from_noise + strength * to_noise
    return blended / math.sqrt((1 - strength) ** 2 + strength ** 2)
def prepare_noise(latent_image, seed, noise_inds=None, noise_device="cpu", incremental_seed_mode="comfy", variation_seed=None, variation_strength=None, variation_method="linear"):
    """Create the initial sampling noise for `latent_image` from `seed`.

    Batch modes (via `incremental_seed_mode`):
      - "comfy":                one generator for the whole batch, matching
                                ComfyUI's native batched noise.
      - "incremental":          batch item i is generated from seed+i.
      - "variation str inc:X":  every item reuses `seed` but the variation
                                strength grows by X per item.

    `noise_inds` (comfy mode only) selects/duplicates specific batch noises.
    When `variation_strength` > 0, variation noise derived from
    `variation_seed` is blended in via `mix_noise`.

    NOTE(review): on the non-cpu path `torch.cuda.manual_seed` seeds the
    global CUDA RNG and generator=None is passed to randn - this mirrors
    A1111-style noise but is not isolated from other CUDA RNG consumers.
    """
    latent_size = latent_image.size()
    latent_size_1batch = [1, latent_size[1], latent_size[2], latent_size[3]]

    # Precedence: (strength given AND > 0) OR variation-increment mode -
    # `and` binds tighter than `or`.
    if variation_strength is not None and variation_strength > 0 or incremental_seed_mode.startswith("variation str inc"):
        if noise_device == "cpu":
            variation_generator = torch.manual_seed(variation_seed)
        else:
            torch.cuda.manual_seed(variation_seed)
            variation_generator = None

        variation_latent = torch.randn(latent_size_1batch, dtype=latent_image.dtype, layout=latent_image.layout,
                                       generator=variation_generator, device=noise_device)
    else:
        variation_latent = None

    def apply_variation(input_latent, strength_up=None):
        # Blend the (broadcast) variation noise into `input_latent`;
        # `strength_up` adds a per-item offset for the increment mode.
        if variation_latent is None:
            return input_latent
        else:
            strength = variation_strength

            if strength_up is not None:
                strength += strength_up

            variation_noise = variation_latent.expand(input_latent.size()[0], -1, -1, -1)
            mixed_noise = mix_noise(input_latent, variation_noise, strength, variation_method)
            return mixed_noise

    # method: incremental seed batch noise
    if noise_inds is None and incremental_seed_mode == "incremental":
        batch_cnt = latent_size[0]

        latents = None
        for i in range(batch_cnt):
            if noise_device == "cpu":
                generator = torch.manual_seed(seed+i)
            else:
                torch.cuda.manual_seed(seed+i)
                generator = None

            latent = torch.randn(latent_size_1batch, dtype=latent_image.dtype, layout=latent_image.layout,
                                 generator=generator, device=noise_device)

            latent = apply_variation(latent)

            if latents is None:
                latents = latent
            else:
                latents = torch.cat((latents, latent), dim=0)

        return latents

    # method: incremental variation batch noise
    elif noise_inds is None and incremental_seed_mode.startswith("variation str inc"):
        batch_cnt = latent_size[0]

        latents = None
        for i in range(batch_cnt):
            if noise_device == "cpu":
                generator = torch.manual_seed(seed)
            else:
                torch.cuda.manual_seed(seed)
                generator = None

            latent = torch.randn(latent_size_1batch, dtype=latent_image.dtype, layout=latent_image.layout,
                                 generator=generator, device=noise_device)

            # step size is encoded in the mode string after the prefix
            # "variation str inc:" (first 18 characters).
            step = float(incremental_seed_mode[18:])
            latent = apply_variation(latent, step*i)

            if latents is None:
                latents = latent
            else:
                latents = torch.cat((latents, latent), dim=0)

        return latents

    # method: comfy batch noise
    if noise_device == "cpu":
        generator = torch.manual_seed(seed)
    else:
        torch.cuda.manual_seed(seed)
        generator = None

    if noise_inds is None:
        latents = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout,
                              generator=generator, device=noise_device)
        latents = apply_variation(latents)
        return latents

    # Generate noises sequentially up to the highest requested index so each
    # index always maps to the same noise for a given seed, then gather.
    unique_inds, inverse = np.unique(noise_inds, return_inverse=True)
    noises = []
    for i in range(unique_inds[-1] + 1):
        noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout,
                            generator=generator, device=noise_device)
        if i in unique_inds:
            noises.append(noise)
    noises = [noises[i] for i in inverse]
    noises = torch.cat(noises, axis=0)
    return noises
def pil2tensor(image):
    """Convert a PIL image (or array-like) to a [1, H, W, C] float tensor in [0, 1]."""
    array = np.array(image).astype(np.float32) / 255.0
    return torch.from_numpy(array).unsqueeze(0)
def empty_pil_tensor(w=64, h=64):
    """Return a solid-black RGB image of size (w, h) as a [1, h, w, 3] tensor."""
    # A new RGB image is already black; the explicit rectangle fill preserves
    # the original's belt-and-suspenders clear.
    canvas = Image.new("RGB", (w, h))
    ImageDraw.Draw(canvas).rectangle((0, 0, w - 1, h - 1), fill=(0, 0, 0))
    return pil2tensor(canvas)
def try_install_custom_node(custom_node_url, msg):
    """Ask ComfyUI-Manager (when present) to offer installing a missing custom
    node; fall back to logging `msg` when the Manager hook is unavailable."""
    try:
        import cm_global
        cm_global.try_call(api='cm.try-install-custom-node',
                           sender="Inspire Pack", custom_node_url=custom_node_url, msg=msg)
    except Exception:
        logging.error(msg)
        logging.error("[Inspire Pack] ComfyUI-Manager is outdated. The custom node installation feature is not available.")
def empty_latent():
    """Return an all-zero latent tensor shaped [batch=1, channels=4, 8, 8]."""
    return torch.zeros(1, 4, 8, 8)
# wildcard trick is taken from pythongossss's
class AnyType(str):
    """String subclass whose `!=` always reports False, so ComfyUI's type
    comparison treats it as matching any socket type (wildcard trick)."""
    def __ne__(self, __value: object) -> bool:
        return False

any_typ = AnyType("*")  # shared wildcard type instance
# author: Trung0246 --->
class TautologyStr(str):
    """String whose `!=` always reports False - wraps socket-type names so
    they compare equal to anything (see ByPassTypeTuple)."""
    def __ne__(self, other):
        return False
class ByPassTypeTuple(tuple):
    """Tuple that clamps positive indices to 0 and wraps string items in
    TautologyStr, letting one wildcard entry stand in for every slot.
    Negative indices pass through unchanged."""
    def __getitem__(self, index):
        effective = 0 if index > 0 else index
        item = super().__getitem__(effective)
        if isinstance(item, str):
            item = TautologyStr(item)
        return item
class TaggedCache:
    """Key-value cache whose entries live in per-tag buckets, each with an
    optional size budget (backed by cachetools.LRUCache when installed)."""

    def __init__(self, tag_settings: Optional[dict]=None):
        self._tag_settings = tag_settings or {}  # per-tag max cache size
        self._data = {}  # tag -> bucket mapping key -> value

    def _find_bucket(self, key):
        # Return the bucket currently holding `key`, or None.
        for bucket in self._data.values():
            if key in bucket:
                return bucket
        return None

    def __getitem__(self, key):
        bucket = self._find_bucket(key)
        if bucket is None:
            raise KeyError(f'Key `{key}` does not exist')
        return bucket[key]

    def __setitem__(self, key, value: tuple):
        # value: (tag: str, (islist: bool, data: *))
        # Evict any stale entry for this key, whatever tag it was filed under.
        stale = self._find_bucket(key)
        if stale is not None:
            stale.pop(key, None)

        tag = value[0]
        if tag not in self._data:
            try:
                from cachetools import LRUCache
                if 'ckpt' in tag:
                    default_size = 5
                elif tag in ['latent', 'image']:
                    default_size = 100
                else:
                    default_size = 20
                self._data[tag] = LRUCache(maxsize=self._tag_settings.get(tag, default_size))
            except (ImportError, ModuleNotFoundError):
                # TODO: implement a simple lru dict
                self._data[tag] = {}

        self._data[tag][key] = value

    def __delitem__(self, key):
        bucket = self._find_bucket(key)
        if bucket is None:
            raise KeyError(f'Key `{key}` does not exist')
        del bucket[key]

    def __contains__(self, key):
        return self._find_bucket(key) is not None

    def items(self):
        # Iterate every (key, value) pair across all tag buckets.
        for bucket in self._data.values():
            yield from bucket.items()

    def get(self, key, default=None):
        """D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
        bucket = self._find_bucket(key)
        if bucket is None:
            return default
        return bucket[key]

    def clear(self):
        # Drop every tag bucket and all cached entries.
        self._data = {}
def make_3d_mask(mask):
    """Normalize a mask tensor toward 3-D [batch, H, W]: 4-D input has its
    leading dim squeezed (only when that dim is 1), 2-D input gains a batch
    dim, and 3-D input passes through unchanged."""
    rank = len(mask.shape)
    if rank == 4:
        return mask.squeeze(0)
    if rank == 2:
        return mask.unsqueeze(0)
    return mask
def dilate_mask(mask: torch.Tensor, dilation_factor: float) -> torch.Tensor:
    """Dilate a mask using a square kernel with a given dilation factor.

    Positive factors dilate, non-positive factors erode. Each layer of the
    (3-D-normalized) mask is processed independently on the CPU via OpenCV.
    """
    # Kernel edge is 2*factor+1; abs() guards against a negative size when
    # the factor is negative (erosion).
    kernel_size = int(dilation_factor * 2) + 1
    kernel = np.ones((abs(kernel_size), abs(kernel_size)), np.uint8)

    masks = make_3d_mask(mask).numpy()
    dilated_masks = []
    for m in masks:
        if dilation_factor > 0:
            m2 = cv2.dilate(m, kernel, iterations=1)
        else:
            m2 = cv2.erode(m, kernel, iterations=1)

        dilated_masks.append(torch.from_numpy(m2))

    return torch.stack(dilated_masks)
def flatten_non_zero_override(masks: torch.Tensor):
    """
    flatten multiple layer mask tensor to 1 layer mask tensor.
    Override the lower layer with the tensor from the upper layer, but only override non-zero values.
    :param masks: 3d mask
    :return: flatten mask
    """
    # BUGFIX: clone the base layer so the caller's input tensor is not
    # mutated in place while the layers are merged.
    final_mask = masks[0].clone()

    for i in range(1, masks.size(0)):
        non_zero_mask = masks[i] != 0
        final_mask[non_zero_mask] = masks[i][non_zero_mask]

    return final_mask
def add_folder_path_and_extensions(folder_name, full_folder_paths, extensions):
    """Register extra model folders for `folder_name` with ComfyUI and make
    sure the given file extensions are recognized for that folder type."""
    for full_folder_path in full_folder_paths:
        folder_paths.add_model_folder_path(folder_name, full_folder_path)

    if folder_name in folder_paths.folder_names_and_paths:
        # Merge the new extensions into the already-registered set.
        current_paths, current_extensions = folder_paths.folder_names_and_paths[folder_name]
        updated_extensions = current_extensions | extensions
        folder_paths.folder_names_and_paths[folder_name] = (current_paths, updated_extensions)
    else:
        # NOTE(review): add_model_folder_path above normally creates the
        # entry, so this fallback is presumably only hit on older ComfyUI
        # versions - confirm.
        folder_paths.folder_names_and_paths[folder_name] = (full_folder_paths, extensions)

View File

@@ -0,0 +1,266 @@
import logging
from comfy_execution.graph_utils import GraphBuilder, is_link
from .libs.utils import any_typ
from .libs.common import update_node_status, ListWrapper
class FloatRange:
    """List node emitting an inclusive float range as an output list."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "start": ("FLOAT", {"default": 0.0, "min": -100.0, "max": 100.0, 'step': 0.000000001}),
                    "stop": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, 'step': 0.000000001}),
                    "step": ("FLOAT", {"default": 0.01, "min": 0.0, "max": 100.0, 'step': 0.000000001}),
                    "limit": ("INT", {"default": 100, "min": 2, "max": 4096, "step": 1}),
                    "ensure_end": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}),
                }
                }

    RETURN_TYPES = ("FLOAT",)
    OUTPUT_IS_LIST = (True,)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/List"

    def doit(self, start, stop, step, limit, ensure_end):
        """Build the list of floats from start to stop.

        Degenerate ranges (start == stop or step == 0) yield [start].
        Descending ranges are computed ascending and reversed. At most
        `limit` values are produced; with `ensure_end`, `stop` is always the
        final value (evicting the last sample only when the limit is full).
        """
        if start == stop or step == 0:
            return ([start], )

        reverse = False
        if start > stop:
            reverse = True
            start, stop = stop, start

        res = []
        x = start
        last = x
        remaining = limit
        while x <= stop and remaining > 0:
            res.append(x)
            last = x
            remaining -= 1
            x += step

        if ensure_end and last != stop:
            # BUGFIX: compare against the full budget (`limit`), not the
            # leftover counter - the old check (`len(res) >= limit` on the
            # decremented value) dropped a valid sample whenever the list was
            # at least half full, even with budget to spare.
            if len(res) >= limit:
                res.pop()
            res.append(stop)

        if reverse:
            res.reverse()

        return (res, )
class WorklistToItemList:
    """Collapses a ComfyUI work-list (the values produced by repeated
    sub-workflow executions) into one ITEM_LIST output for the Foreach nodes."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "item": (any_typ, ),
                }
                }

    # With INPUT_IS_LIST, `item` arrives already gathered into a Python list.
    INPUT_IS_LIST = True
    RETURN_TYPES = ("ITEM_LIST",)
    RETURN_NAMES = ("item_list",)
    FUNCTION = "doit"

    DESCRIPTION = "The list in ComfyUI allows for repeated execution of a sub-workflow.\nThis groups these repetitions (a.k.a. list) into a single ITEM_LIST output.\nITEM_LIST can then be used in ForeachList."
    CATEGORY = "InspirePack/List"

    def doit(self, item):
        # The gathered list is passed through unchanged as a single output.
        return (item, )
# Loop nodes are implemented based on BadCafeCode's reference loop implementation
# https://github.com/BadCafeCode/execution-inversion-demo-comfyui/blob/main/flow_control.py
class ForeachListBegin:
    """Opening node of the Foreach iteration pair (see ForeachListEnd)."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "item_list": ("ITEM_LIST", {"tooltip": "ITEM_LIST containing items to be processed iteratively."}),
                },
                "optional": {
                    "initial_input": (any_typ, {"tooltip": "If initial_input is omitted, the first item in item_list is used as the initial value, and the processing starts from the second item in item_list."}),
                }
                }

    RETURN_TYPES = ("FOREACH_LIST_CONTROL", "ITEM_LIST", any_typ, any_typ)
    RETURN_NAMES = ("flow_control", "remained_list", "item", "intermediate_output")
    OUTPUT_TOOLTIPS = (
        "Pass ForeachListEnd as is to indicate the end of the iteration.",
        "Output the ITEM_LIST containing the remaining items during the iteration, passing ForeachListEnd as is to indicate the end of the iteration.",
        "Output the current item during the iteration.",
        "Output the intermediate results during the iteration.")
    FUNCTION = "doit"

    DESCRIPTION = "A starting node for performing iterative tasks by retrieving items one by one from the ITEM_LIST.\nGenerate a new intermediate_output using item and intermediate_output as inputs, then connect it to ForeachListEnd.\nNOTE:If initial_input is omitted, the first item in item_list is used as the initial value, and the processing starts from the second item in item_list."
    CATEGORY = "InspirePack/List"

    def doit(self, item_list, initial_input=None):
        # Without an explicit initial value, consume the first item as the
        # seed of the accumulation.
        if initial_input is None:
            initial_input = item_list[0]
            item_list = item_list[1:]

        if len(item_list) > 0:
            next_list = ListWrapper(item_list[1:])
            next_item = item_list[0]
        else:
            # Exhausted: ForeachListEnd returns intermediate_output directly.
            next_list = ListWrapper([])
            next_item = None

        # aux carries (total step count, status-node id); it survives
        # re-wrapping across iterations, so it is only initialized once.
        if next_list.aux is None:
            next_list.aux = len(item_list), None

        # "stub" is a placeholder FOREACH_LIST_CONTROL value; ForeachListEnd
        # uses only the raw link (node id) to locate this node.
        return "stub", next_list, next_item, initial_input
class ForeachListEnd:
    """Closing node of the Foreach iteration pair: while items remain, it
    expands a copy of the subgraph between ForeachListBegin and itself
    (recursion via GraphBuilder); once exhausted it returns the accumulated
    intermediate output."""
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "flow_control": ("FOREACH_LIST_CONTROL", {"rawLink": True, "tooltip": "Directly connect the output of ForeachListBegin, the starting node of the iteration."}),
                    "remained_list": ("ITEM_LIST", {"tooltip": "Directly connect the output of ForeachListBegin, the starting node of the iteration."}),
                    "intermediate_output": (any_typ, {"tooltip": "Connect the intermediate outputs processed within the iteration here."}),
                },
                "hidden": {
                    "dynprompt": "DYNPROMPT",
                    "unique_id": "UNIQUE_ID",
                }
                }

    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("result",)
    OUTPUT_TOOLTIPS = ("This is the final output value.",)
    FUNCTION = "doit"

    DESCRIPTION = "A end node for performing iterative tasks by retrieving items one by one from the ITEM_LIST.\nNOTE:Directly connect the outputs of ForeachListBegin to 'flow_control' and 'remained_list'."
    CATEGORY = "InspirePack/List"

    def explore_dependencies(self, node_id, dynprompt, upstream):
        # Walk input links upward, recording for each ancestor which children
        # depend on it (`upstream` maps parent -> [child ids]).
        node_info = dynprompt.get_node(node_id)
        if "inputs" not in node_info:
            return

        for k, v in node_info["inputs"].items():
            if is_link(v):
                parent_id = v[0]
                if parent_id not in upstream:
                    upstream[parent_id] = []
                    self.explore_dependencies(parent_id, dynprompt, upstream)

                upstream[parent_id].append(node_id)

    def collect_contained(self, node_id, upstream, contained):
        # Mark every node reachable downstream of `node_id` via `upstream`.
        if node_id not in upstream:
            return

        for child_id in upstream[node_id]:
            if child_id not in contained:
                contained[child_id] = True
                self.collect_contained(child_id, upstream, contained)

    def doit(self, flow_control, remained_list, intermediate_output, dynprompt, unique_id):
        if hasattr(remained_list, "aux"):
            if remained_list.aux[1] is None:
                # First pass: record this node's id as the status target.
                remained_list.aux = (remained_list.aux[0], unique_id)

            total, status_node = remained_list.aux
            # BUGFIX: guard total == 0 (a single-item list consumed as
            # initial_input) - the progress ratio previously divided by zero.
            if total > 0:
                done = total - len(remained_list)
                update_node_status(status_node, f"{done}/{total} steps", done / total)
        else:
            logging.warning("[Inspire Pack] ForeachListEnd: `remained_list` did not come from ForeachList.")

        if len(remained_list) == 0:
            return (intermediate_output,)

        # We want to loop
        upstream = {}
        # Get the list of all nodes between the open and close nodes
        self.explore_dependencies(unique_id, dynprompt, upstream)

        contained = {}
        open_node = flow_control[0]
        self.collect_contained(open_node, upstream, contained)
        contained[unique_id] = True
        contained[open_node] = True

        # We'll use the default prefix, but to avoid having node names grow exponentially in size,
        # we'll use "Recurse" for the name of the recursively-generated copy of this node.
        graph = GraphBuilder()
        for node_id in contained:
            original_node = dynprompt.get_node(node_id)
            node = graph.node(original_node["class_type"], "Recurse" if node_id == unique_id else node_id)
            node.set_override_display_id(node_id)

        # Rewire the copied nodes: links inside the loop body point at their
        # copies, everything else keeps its literal/external value.
        for node_id in contained:
            original_node = dynprompt.get_node(node_id)
            node = graph.lookup_node("Recurse" if node_id == unique_id else node_id)
            for k, v in original_node["inputs"].items():
                if is_link(v) and v[0] in contained:
                    parent = graph.lookup_node(v[0])
                    node.set_input(k, parent.out(v[1]))
                else:
                    node.set_input(k, v)

        # Feed the shrunken list and the running accumulation into the copied
        # Begin node, and surface the copied End node's output as our result.
        new_open = graph.lookup_node(open_node)
        new_open.set_input("item_list", remained_list)
        new_open.set_input("initial_input", intermediate_output)

        my_clone = graph.lookup_node("Recurse")
        result = (my_clone.out(0),)

        return {
            "result": result,
            "expand": graph.finalize(),
        }
class DropItems:
    """Discards the remaining items of an ITEM_LIST while keeping its aux
    metadata, yielding an empty list for the Foreach machinery."""
    @classmethod
    def INPUT_TYPES(s):
        return {
                "required": {"item_list": ("ITEM_LIST", {"tooltip": "Directly connect the output of ForeachListBegin, the starting node of the iteration."}), },
            }

    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("ITEM_LIST",)
    OUTPUT_TOOLTIPS = ("This is the final output value.",)
    FUNCTION = "doit"

    DESCRIPTION = ""
    CATEGORY = "InspirePack/List"

    def doit(self, item_list):
        emptied = ListWrapper([])
        if hasattr(item_list, 'aux'):
            # Preserve the iteration metadata (step count, status node id).
            emptied.aux = item_list.aux
        else:
            logging.warning("[Inspire Pack] DropItems: `item_list` did not come from ForeachList.")

        return (emptied,)
# Node registration for this module: ComfyUI node ids -> classes, plus the
# display names shown in the node picker.
NODE_CLASS_MAPPINGS = {
    "FloatRange //Inspire": FloatRange,
    "WorklistToItemList //Inspire": WorklistToItemList,
    "ForeachListBegin //Inspire": ForeachListBegin,
    "ForeachListEnd //Inspire": ForeachListEnd,
    "DropItems //Inspire": DropItems,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "FloatRange //Inspire": "Float Range (Inspire)",
    "WorklistToItemList //Inspire": "Worklist To Item List (Inspire)",
    "ForeachListBegin //Inspire": "▶Foreach List (Inspire)",
    "ForeachListEnd //Inspire": "Foreach List◀ (Inspire)",
    "DropItems //Inspire": "Drop Items (Inspire)",
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,191 @@
import nodes
import folder_paths
import os
import server
from .libs import utils
from . import backend_support
from comfy import sdxl_clip
import logging
# Preset table for IPAdapterModelHelper. Each entry maps a UI preset name to
# (ipadapter model basename, clip_vision model basename, lora basename or
# None, faceid flag). NOTE(review): the final bool appears to mark FaceID
# presets (insightface required) - confirm against the consuming doit().
model_preset = {
    # base
    "SD1.5": ("ip-adapter_sd15", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, False),
    "SD1.5 Light v11": ("ip-adapter_sd15_light_v11", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, False),
    "SD1.5 Light": ("ip-adapter_sd15_light", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, False),
    "SD1.5 Plus": ("ip-adapter-plus_sd15", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, False),
    "SD1.5 Plus Face": ("ip-adapter-plus-face_sd15", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, False),
    "SD1.5 Full Face": ("ip-adapter-full-face_sd15", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, False),
    "SD1.5 ViT-G": ("ip-adapter_sd15_vit-G", "CLIP-ViT-bigG-14-laion2B-39B-b160k", None, False),
    "SDXL": ("ip-adapter_sdxl", "CLIP-ViT-bigG-14-laion2B-39B-b160k", None, False),
    "SDXL ViT-H": ("ip-adapter_sdxl_vit-h", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, False),
    "SDXL Plus ViT-H": ("ip-adapter-plus_sdxl_vit-h", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, False),
    "SDXL Plus Face ViT-H": ("ip-adapter-plus-face_sdxl_vit-h", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, False),
    "Kolors Plus": ("Kolors-IP-Adapter-Plus", "clip-vit-large-patch14-336", None, False),

    # faceid
    "SD1.5 FaceID": ("ip-adapter-faceid_sd15", "CLIP-ViT-H-14-laion2B-s32B-b79K", "ip-adapter-faceid_sd15_lora", True),
    "SD1.5 FaceID Plus v2": ("ip-adapter-faceid-plusv2_sd15", "CLIP-ViT-H-14-laion2B-s32B-b79K", "ip-adapter-faceid-plusv2_sd15_lora", True),
    "SD1.5 FaceID Plus": ("ip-adapter-faceid-plus_sd15", "CLIP-ViT-H-14-laion2B-s32B-b79K", "ip-adapter-faceid-plus_sd15_lora", True),
    "SD1.5 FaceID Portrait v11": ("ip-adapter-faceid-portrait-v11_sd15", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, True),
    "SD1.5 FaceID Portrait": ("ip-adapter-faceid-portrait_sd15", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, True),
    "SDXL FaceID": ("ip-adapter-faceid_sdxl", "CLIP-ViT-H-14-laion2B-s32B-b79K", "ip-adapter-faceid_sdxl_lora", True),
    "SDXL FaceID Plus v2": ("ip-adapter-faceid-plusv2_sdxl", "CLIP-ViT-H-14-laion2B-s32B-b79K", "ip-adapter-faceid-plusv2_sdxl_lora", True),
    "SDXL FaceID Portrait": ("ip-adapter-faceid-portrait_sdxl", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, True),
    "SDXL FaceID Portrait unnorm": ("ip-adapter-faceid-portrait_sdxl_unnorm", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, True),
    "Kolors FaceID Plus": ("Kolors-IP-Adapter-FaceID-Plus", "clip-vit-large-patch14-336", None, True),

    # composition
    "SD1.5 Plus Composition": ("ip-adapter_sd15", "CLIP-ViT-H-14-laion2B-s32B-b79K", None, False),
    "SDXL Plus Composition": ("ip-adapter_sdxl", "CLIP-ViT-bigG-14-laion2B-39B-b160k", None, False),
}
def lookup_model(model_dir, name):
    """Resolve a bare model name (no path, no extension) to a full filename in `model_dir`.

    Returns a (filename, status) pair where status is "N/A" (nothing requested),
    "OK" (found) or "FAIL" (not found).
    """
    if name is None:
        return None, "N/A"

    # Compare against the stem (basename without extension) of every known file
    # and return the first match.
    for candidate in folder_paths.get_filename_list(model_dir):
        stem = os.path.splitext(os.path.basename(candidate))[0]
        if stem == name:
            return candidate, "OK"

    logging.error(f"[Inspire Pack] IPAdapterModelHelper: The `{name}` model file does not exist in `{model_dir}` model dir.")
    return None, "FAIL"
class IPAdapterModelHelper:
    """Resolves and loads the full model set (IPAdapter, CLIP vision, optional LoRA
    and InsightFace) for a named preset and bundles them into an IPADAPTER_PIPE.

    Per-output status labels are pushed to the frontend via PromptServer so the
    user can see which model failed to resolve.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "preset": (list(model_preset.keys()),),
                "lora_strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                "lora_strength_clip": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}),
                "insightface_provider": (["CPU", "CUDA", "ROCM"], ),
                "cache_mode": (["insightface only", "clip_vision only", "all", "none"], {"default": "insightface only"}),
            },
            "optional": {
                "clip": ("CLIP",),
                "insightface_model_name": (['buffalo_l', 'antelopev2'],),
            },
            "hidden": {"unique_id": "UNIQUE_ID"}
        }

    RETURN_TYPES = ("IPADAPTER_PIPE", "IPADAPTER", "CLIP_VISION", "INSIGHTFACE", "MODEL", "CLIP", "STRING", "STRING")
    RETURN_NAMES = ("IPADAPTER_PIPE", "IPADAPTER", "CLIP_VISION", "INSIGHTFACE", "MODEL", "CLIP", "insightface_cache_key", "clip_vision_cache_key")
    FUNCTION = "doit"

    CATEGORY = "InspirePack/models"

    def doit(self, model, preset, lora_strength_model, lora_strength_clip, insightface_provider, clip=None, cache_mode="none", unique_id=None, insightface_model_name='buffalo_l'):
        """Load every model required by `preset`.

        Raises if IPAdapter Plus is missing, if the preset family (SDXL vs SD1.5)
        does not match the checkpoint, or if any required model file is absent.
        """
        if 'IPAdapter' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node('https://github.com/cubiq/ComfyUI_IPAdapter_plus',
                                          "To use 'IPAdapterModelHelper' node, 'ComfyUI IPAdapter Plus' extension is required.")
            raise Exception("[ERROR] To use IPAdapterModelHelper, you need to install 'ComfyUI IPAdapter Plus'")

        is_sdxl_preset = 'SDXL' in preset
        # The checkpoint family is detected from the tokenizer type when a CLIP is provided.
        if clip is not None:
            is_sdxl_model = isinstance(clip.tokenizer, sdxl_clip.SDXLTokenizer)
        else:
            is_sdxl_model = False

        if is_sdxl_preset != is_sdxl_model:
            # Flag every dependent output as failed in the UI before aborting.
            server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 1, "label": "IPADAPTER (fail)"})
            server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 2, "label": "CLIP_VISION (fail)"})
            server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 3, "label": "INSIGHTFACE (fail)"})
            server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 4, "label": "MODEL (fail)"})
            server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 5, "label": "CLIP (fail)"})
            logging.error("[Inspire Pack] IPAdapterModelHelper: You cannot mix SDXL and SD1.5 in the checkpoint and IPAdapter.")
            raise Exception("[ERROR] You cannot mix SDXL and SD1.5 in the checkpoint and IPAdapter.")

        ipadapter, clipvision, lora, is_insightface = model_preset[preset]

        # Resolve bare model names to actual files; each returns (filename|None, status).
        ipadapter, ok1 = lookup_model("ipadapter", ipadapter)
        clipvision, ok2 = lookup_model("clip_vision", clipvision)
        lora, ok3 = lookup_model("loras", lora)

        # Fixed: record lookup failures *before* the status strings are rewritten
        # into UI labels below — the previous check compared the rewritten labels
        # ("IPADAPTER (FAIL)") against "FAIL" and could never match for ok1/ok2.
        load_failed = ok1 == "FAIL" or ok2 == "FAIL" or ok3 == "FAIL"

        if ok1 == "OK":
            ok1 = "IPADAPTER"
        else:
            ok1 = f"IPADAPTER ({ok1})"

        if ok2 == "OK":
            ok2 = "CLIP_VISION"
        else:
            ok2 = f"CLIP_VISION ({ok2})"

        server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 1, "label": ok1})
        server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 2, "label": ok2})

        if ok3 == "FAIL":
            server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 4, "label": "MODEL (fail)"})
            server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 5, "label": "CLIP (fail)"})
        else:
            server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 4, "label": "MODEL"})
            server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 5, "label": "CLIP"})

        if load_failed:
            raise Exception("ERROR: Failed to load several models in IPAdapterModelHelper.")

        if ipadapter is not None:
            ipadapter = nodes.NODE_CLASS_MAPPINGS["IPAdapterModelLoader"]().load_ipadapter_model(ipadapter_file=ipadapter)[0]

        ccache_key = ""
        if clipvision is not None:
            if cache_mode in ["clip_vision only", "all"]:
                # Cache entry layout: key -> (tag, (flag, model)).
                ccache_key = clipvision
                if ccache_key not in backend_support.cache:
                    backend_support.update_cache(ccache_key, "clipvision", (False, nodes.CLIPVisionLoader().load_clip(clip_name=clipvision)[0]))
                _, (_, clipvision) = backend_support.cache[ccache_key]
            else:
                clipvision = nodes.CLIPVisionLoader().load_clip(clip_name=clipvision)[0]

        if lora is not None:
            model, clip = nodes.LoraLoader().load_lora(model=model, clip=clip, lora_name=lora, strength_model=lora_strength_model, strength_clip=lora_strength_clip)

            # Closure that re-applies the same LoRA to a fresh model (used downstream by the pipe).
            def f(x):
                return nodes.LoraLoader().load_lora(model=x, clip=clip, lora_name=lora, strength_model=lora_strength_model, strength_clip=lora_strength_clip)

            lora_loader = f
        else:
            # No LoRA in this preset: the pipe's loader is the identity.
            def f(x):
                return x

            lora_loader = f

        if 'IPAdapterInsightFaceLoader' in nodes.NODE_CLASS_MAPPINGS:
            insight_face_loader = nodes.NODE_CLASS_MAPPINGS['IPAdapterInsightFaceLoader']().load_insightface
        else:
            logging.warning("'ComfyUI IPAdapter Plus' extension is either too outdated or not installed.")
            insight_face_loader = None

        icache_key = ""
        if is_insightface:
            if insight_face_loader is None:
                raise Exception("[ERROR] 'ComfyUI IPAdapter Plus' extension is either too outdated or not installed.")

            if cache_mode in ["insightface only", "all"]:
                # NOTE(review): the cache key omits insightface_model_name, so switching
                # between 'buffalo_l' and 'antelopev2' reuses the first-loaded model —
                # confirm whether the key should include the model name.
                icache_key = 'insightface-' + insightface_provider
                if icache_key not in backend_support.cache:
                    backend_support.update_cache(icache_key, "insightface", (False, insight_face_loader(provider=insightface_provider, model_name=insightface_model_name)[0]))
                _, (_, insightface) = backend_support.cache[icache_key]
            else:
                # Fixed: the non-cached path previously passed the provider positionally
                # and ignored insightface_model_name, unlike the cached path above.
                insightface = insight_face_loader(provider=insightface_provider, model_name=insightface_model_name)[0]

            server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 3, "label": "INSIGHTFACE"})
        else:
            insightface = None
            server.PromptServer.instance.send_sync("inspire-node-output-label", {"node_id": unique_id, "output_idx": 3, "label": "INSIGHTFACE (N/A)"})

        pipe = ipadapter, model, clipvision, insightface, lora_loader
        return pipe, ipadapter, clipvision, insightface, model, clip, icache_key, ccache_key
# Registration table consumed by ComfyUI: internal node ID -> implementing class.
NODE_CLASS_MAPPINGS = {
    "IPAdapterModelHelper //Inspire": IPAdapterModelHelper,
}
# Internal node ID -> label shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "IPAdapterModelHelper //Inspire": "IPAdapter Model Helper (Inspire)",
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,699 @@
import traceback
import comfy
import nodes
import torch
import re
import webcolors
from . import prompt_support
from .libs import utils, common
import logging
class RegionalPromptSimple:
    """Builds Impact Pack REGIONAL_PROMPTS from a basic pipe and a mask, optionally
    re-encoding the positive conditioning from a wildcard prompt."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "basic_pipe": ("BASIC_PIPE",),
                "mask": ("MASK",),
                "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                "scheduler": (common.get_schedulers(),),
                "wildcard_prompt": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "wildcard prompt"}),
                "controlnet_in_pipe": ("BOOLEAN", {"default": False, "label_on": "Keep", "label_off": "Override"}),
                "sigma_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
            },
            "optional": {
                "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                "variation_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "variation_method": (["linear", "slerp"],),
                "scheduler_func_opt": ("SCHEDULER_FUNC",),
            }
        }

    RETURN_TYPES = ("REGIONAL_PROMPTS", )
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Regional"

    @staticmethod
    def doit(basic_pipe, mask, cfg, sampler_name, scheduler, wildcard_prompt,
             controlnet_in_pipe=False, sigma_factor=1.0, variation_seed=0, variation_strength=0.0, variation_method='linear', scheduler_func_opt=None):
        """Create regional prompts for `mask`; requires the Impact Pack extension."""
        if 'RegionalPrompt' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node('https://github.com/ltdrdata/ComfyUI-Impact-Pack',
                                          "To use 'RegionalPromptSimple' node, 'Impact Pack' extension is required.")
            raise Exception("[ERROR] To use RegionalPromptSimple, you need to install 'ComfyUI-Impact-Pack'")

        model, clip, vae, positive, negative = basic_pipe

        iwe = nodes.NODE_CLASS_MAPPINGS['ImpactWildcardEncode']()
        kap = nodes.NODE_CLASS_MAPPINGS['KSamplerAdvancedProvider']()
        rp = nodes.NODE_CLASS_MAPPINGS['RegionalPrompt']()

        if wildcard_prompt != "":
            model, clip, new_positive, _ = iwe.doit(model=model, clip=clip, populated_text=wildcard_prompt, seed=None)

            if controlnet_in_pipe:
                # Carry over the ControlNet settings from the original conditioning
                # into the freshly encoded one.
                prev_cnet = None
                for t in positive:
                    if 'control' in t[1] and 'control_apply_to_uncond' in t[1]:
                        prev_cnet = t[1]['control'], t[1]['control_apply_to_uncond']
                        break

                if prev_cnet is not None:
                    for t in new_positive:
                        t[1]['control'] = prev_cnet[0]
                        t[1]['control_apply_to_uncond'] = prev_cnet[1]
        else:
            new_positive = positive

        basic_pipe = model, clip, vae, new_positive, negative

        sampler = kap.doit(cfg, sampler_name, scheduler, basic_pipe, sigma_factor=sigma_factor, scheduler_func_opt=scheduler_func_opt)[0]
        try:
            regional_prompts = rp.doit(mask, sampler, variation_seed=variation_seed, variation_strength=variation_strength, variation_method=variation_method)[0]
        except Exception as e:
            # Fixed: narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit) and chained so the original failure
            # is preserved in the traceback.
            raise Exception("[Inspire-Pack] ERROR: Impact Pack is outdated. Update Impact Pack to latest version to use this.") from e

        return (regional_prompts, )
def color_to_mask(color_mask, mask_color):
    """Convert an RGB image tensor into a binary mask selecting pixels of one color.

    `mask_color` may be "#RRGGBB", a hex string without '#' is not accepted unless
    alphabetic (then treated as a CSS color name via webcolors), or a decimal
    packed-RGB integer string. Returns a float mask: 1.0 where the pixel matches.
    """
    try:
        if mask_color.startswith("#") or mask_color.isalpha():
            # Hex spec or named color; names are translated to "#RRGGBB" first.
            hex_digits = mask_color[1:] if mask_color.startswith("#") else webcolors.name_to_hex(mask_color)[1:]
            target = int(hex_digits, 16)
        else:
            target = int(mask_color, 10)
    except Exception:
        raise Exception("[ERROR] Invalid mask_color value. mask_color should be a color value for RGB")

    # Quantize channels to 8 bits and pack them into one 24-bit integer per pixel.
    quantized = (torch.clamp(color_mask, 0, 1.0) * 255.0).round().to(torch.int)
    packed = (quantized[:, :, :, 0] << 16) + (quantized[:, :, :, 1] << 8) + quantized[:, :, :, 2]
    return torch.where(packed == target, 1.0, 0.0)
class RegionalPromptSimple_ColorVariant_Doc:  # noqa: E501 — placeholder removed below
    pass
class RegionalPromptColorMask:
    """Same as RegionalPromptSimple, but the region is selected by a color inside
    a color-coded mask image; also returns the derived mask."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "basic_pipe": ("BASIC_PIPE",),
                "color_mask": ("IMAGE",),
                "mask_color": ("STRING", {"multiline": False, "default": "#FFFFFF"}),
                "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                "scheduler": (common.get_schedulers(),),
                "wildcard_prompt": ("STRING", {"multiline": True, "dynamicPrompts": False, "placeholder": "wildcard prompt"}),
                "controlnet_in_pipe": ("BOOLEAN", {"default": False, "label_on": "Keep", "label_off": "Override"}),
                "sigma_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
            },
            "optional": {
                "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                "variation_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "variation_method": (["linear", "slerp"],),
                "scheduler_func_opt": ("SCHEDULER_FUNC",),
            }
        }

    RETURN_TYPES = ("REGIONAL_PROMPTS", "MASK")
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Regional"

    @staticmethod
    def doit(basic_pipe, color_mask, mask_color, cfg, sampler_name, scheduler, wildcard_prompt,
             controlnet_in_pipe=False, sigma_factor=1.0, variation_seed=0, variation_strength=0.0, variation_method="linear", scheduler_func_opt=None):
        # Derive the region mask from the color selection, then delegate.
        region_mask = color_to_mask(color_mask, mask_color)
        regional_prompts = RegionalPromptSimple().doit(
            basic_pipe, region_mask, cfg, sampler_name, scheduler, wildcard_prompt, controlnet_in_pipe,
            sigma_factor=sigma_factor, variation_seed=variation_seed, variation_strength=variation_strength,
            variation_method=variation_method, scheduler_func_opt=scheduler_func_opt)[0]
        return regional_prompts, region_mask
class RegionalConditioningSimple:
    """Encodes a prompt and restricts the resulting conditioning to a mask region."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "clip": ("CLIP", ),
                "mask": ("MASK",),
                "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                "set_cond_area": (["default", "mask bounds"],),
                "prompt": ("STRING", {"multiline": True, "placeholder": "prompt"}),
            },
        }

    RETURN_TYPES = ("CONDITIONING", )
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Regional"

    @staticmethod
    def doit(clip, mask, strength, set_cond_area, prompt):
        # Encode first, then attach the mask constraint to the conditioning.
        encoded = nodes.CLIPTextEncode().encode(clip, prompt)[0]
        masked = nodes.ConditioningSetMask().append(encoded, mask, set_cond_area, strength)[0]
        return (masked, )
class RegionalConditioningColorMask:
    """Encodes a prompt and restricts it to the region selected by color in a
    color-coded mask image; the derived mask is returned as well."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "clip": ("CLIP", ),
                "color_mask": ("IMAGE",),
                "mask_color": ("STRING", {"multiline": False, "default": "#FFFFFF"}),
                "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                "set_cond_area": (["default", "mask bounds"],),
                "prompt": ("STRING", {"multiline": True, "placeholder": "prompt"}),
            },
            "optional": {
                "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
            }
        }

    RETURN_TYPES = ("CONDITIONING", "MASK")
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Regional"

    @staticmethod
    def doit(clip, color_mask, mask_color, strength, set_cond_area, prompt, dilation=0):
        region_mask = color_to_mask(color_mask, mask_color)
        # Optionally grow (positive) or shrink (negative) the region.
        if dilation != 0:
            region_mask = utils.dilate_mask(region_mask, dilation)

        cond = nodes.CLIPTextEncode().encode(clip, prompt)[0]
        cond = nodes.ConditioningSetMask().append(cond, region_mask, set_cond_area, strength)[0]
        return cond, region_mask
class ToIPAdapterPipe:
    """Bundles the individual IPAdapter components into a single IPADAPTER_PIPE
    tuple: (ipadapter, model, clip_vision, insightface, lora_loader)."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "ipadapter": ("IPADAPTER", ),
                "model": ("MODEL",),
            },
            "optional": {
                "clip_vision": ("CLIP_VISION",),
                "insightface": ("INSIGHTFACE",),
            }
        }

    RETURN_TYPES = ("IPADAPTER_PIPE",)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Util"

    @staticmethod
    def doit(ipadapter, model, clip_vision=None, insightface=None):
        # Fixed: clip_vision is declared optional in INPUT_TYPES, so it must be
        # defaulted here too — previously leaving the input unconnected raised a
        # TypeError. The pipe's lora_loader slot is the identity function.
        pipe = ipadapter, model, clip_vision, insightface, lambda x: x
        return (pipe,)
class FromIPAdapterPipe:
    """Unpacks an IPADAPTER_PIPE into its individual components
    (the trailing lora_loader is dropped)."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "ipadapter_pipe": ("IPADAPTER_PIPE", ),
            }
        }

    RETURN_TYPES = ("IPADAPTER", "MODEL", "CLIP_VISION", "INSIGHTFACE")
    RETURN_NAMES = ("ipadapter", "model", "clip_vision", "insight_face")
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Util"

    def doit(self, ipadapter_pipe):
        ipadapter, model, clip_vision, insightface, _lora_loader = ipadapter_pipe
        return ipadapter, model, clip_vision, insightface
class IPAdapterConditioning:
    """Deferred regional IPAdapter application.

    Stores per-region settings; `doit` applies them to the model carried by an
    IPADAPTER_PIPE, choosing between the raw-image path (IPAdapterAdvanced) and
    the pre-encoded-embeddings path (IPAdapterEmbeds).
    """

    def __init__(self, mask, weight, weight_type, noise=None, image=None, neg_image=None, embeds=None, start_at=0.0, end_at=1.0, combine_embeds='concat', unfold_batch=False, weight_v2=False, neg_embeds=None):
        # Region and schedule
        self.mask = mask
        self.start_at = start_at
        self.end_at = end_at
        # Weighting
        self.weight = weight
        self.weight_type = weight_type
        self.weight_v2 = weight_v2
        # Image / embedding sources (mutually exclusive paths in doit)
        self.image = image
        self.neg_image = neg_image
        self.embeds = embeds
        self.neg_embeds = neg_embeds
        # Misc
        self.noise = noise
        self.combine_embeds = combine_embeds
        self.unfold_batch = unfold_batch

    def doit(self, ipadapter_pipe):
        ipadapter, model, clip_vision, insightface, _ = ipadapter_pipe

        if 'IPAdapterAdvanced' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node('https://github.com/cubiq/ComfyUI_IPAdapter_plus',
                                          "To use 'Regional IPAdapter' node, 'ComfyUI IPAdapter Plus' extension is required.")
            raise Exception("[ERROR] To use IPAdapterModelHelper, you need to install 'ComfyUI IPAdapter Plus'")

        if self.embeds is not None:
            # Pre-encoded embeddings path.
            applier = nodes.NODE_CLASS_MAPPINGS['IPAdapterEmbeds']()
            return applier.apply_ipadapter(model=model, ipadapter=ipadapter, pos_embed=self.embeds, weight=self.weight,
                                           weight_type=self.weight_type, start_at=self.start_at, end_at=self.end_at,
                                           neg_embed=self.neg_embeds, attn_mask=self.mask, clip_vision=clip_vision)[0]

        # Raw image path.
        applier = nodes.NODE_CLASS_MAPPINGS['IPAdapterAdvanced']()
        return applier.apply_ipadapter(model=model, ipadapter=ipadapter, weight=self.weight, weight_type=self.weight_type,
                                       start_at=self.start_at, end_at=self.end_at, combine_embeds=self.combine_embeds,
                                       clip_vision=clip_vision, image=self.image, image_negative=self.neg_image,
                                       attn_mask=self.mask, insightface=insightface, weight_faceidv2=self.weight_v2)[0]
# Lazily-resolved list of weight types advertised by the installed IPAdapterPlus extension.
IPADAPTER_WEIGHT_TYPES_CACHE = None

def IPADAPTER_WEIGHT_TYPES():
    """Return the IPAdapterPlus weight-type choices, querying the extension only once."""
    global IPADAPTER_WEIGHT_TYPES_CACHE
    if IPADAPTER_WEIGHT_TYPES_CACHE is not None:
        return IPADAPTER_WEIGHT_TYPES_CACHE
    try:
        IPADAPTER_WEIGHT_TYPES_CACHE = nodes.NODE_CLASS_MAPPINGS['IPAdapterAdvanced']().INPUT_TYPES()['required']['weight_type'][0]
    except Exception:
        # Extension missing or its interface changed: fall back to a sentinel entry.
        logging.error("[Inspire Pack] IPAdapterPlus is not installed.")
        IPADAPTER_WEIGHT_TYPES_CACHE = ["IPAdapterPlus is not installed"]
    return IPADAPTER_WEIGHT_TYPES_CACHE
class RegionalIPAdapterMask:
    """Creates a REGIONAL_IPADAPTER conditioning from a reference image, limited
    to the given mask region."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "mask": ("MASK",),
                "image": ("IMAGE",),
                "weight": ("FLOAT", {"default": 0.7, "min": -1, "max": 3, "step": 0.05}),
                "noise": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                "weight_type": (IPADAPTER_WEIGHT_TYPES(), ),
                "start_at": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "end_at": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "unfold_batch": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "faceid_v2": ("BOOLEAN", {"default": False}),
                "weight_v2": ("FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}),
                "combine_embeds": (["concat", "add", "subtract", "average", "norm average"],),
                "neg_image": ("IMAGE",),
            }
        }

    RETURN_TYPES = ("REGIONAL_IPADAPTER", )
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Regional"

    @staticmethod
    def doit(mask, image, weight, noise, weight_type, start_at=0.0, end_at=1.0, unfold_batch=False, faceid_v2=False, weight_v2=False, combine_embeds="concat", neg_image=None):
        # faceid_v2 is accepted for UI parity but not consumed directly here.
        conditioning = IPAdapterConditioning(mask, weight, weight_type, noise=noise, image=image, neg_image=neg_image,
                                             start_at=start_at, end_at=end_at, unfold_batch=unfold_batch,
                                             weight_v2=weight_v2, combine_embeds=combine_embeds)
        return (conditioning, )
class RegionalIPAdapterColorMask:
    """Creates a REGIONAL_IPADAPTER conditioning from a reference image, limited
    to the region selected by color in a color-coded mask image."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "color_mask": ("IMAGE",),
                "mask_color": ("STRING", {"multiline": False, "default": "#FFFFFF"}),
                "image": ("IMAGE",),
                "weight": ("FLOAT", {"default": 0.7, "min": -1, "max": 3, "step": 0.05}),
                "noise": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                "weight_type": (IPADAPTER_WEIGHT_TYPES(), ),
                "start_at": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "end_at": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "unfold_batch": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "faceid_v2": ("BOOLEAN", {"default": False}),
                "weight_v2": ("FLOAT", {"default": 1.0, "min": -1, "max": 3, "step": 0.05}),
                "combine_embeds": (["concat", "add", "subtract", "average", "norm average"],),
                "neg_image": ("IMAGE",),
            }
        }

    RETURN_TYPES = ("REGIONAL_IPADAPTER", "MASK")
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Regional"

    @staticmethod
    def doit(color_mask, mask_color, image, weight, noise, weight_type, start_at=0.0, end_at=1.0, unfold_batch=False, faceid_v2=False, weight_v2=False, combine_embeds="concat", neg_image=None):
        # faceid_v2 is accepted for UI parity but not consumed directly here.
        region_mask = color_to_mask(color_mask, mask_color)
        conditioning = IPAdapterConditioning(region_mask, weight, weight_type, noise=noise, image=image, neg_image=neg_image,
                                             start_at=start_at, end_at=end_at, unfold_batch=unfold_batch,
                                             weight_v2=weight_v2, combine_embeds=combine_embeds)
        return (conditioning, region_mask)
class RegionalIPAdapterEncodedMask:
    """Creates a REGIONAL_IPADAPTER conditioning from pre-encoded embeddings,
    limited to the given mask region."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "mask": ("MASK",),
                "embeds": ("EMBEDS",),
                "weight": ("FLOAT", {"default": 0.7, "min": -1, "max": 3, "step": 0.05}),
                "weight_type": (IPADAPTER_WEIGHT_TYPES(), ),
                "start_at": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "end_at": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "unfold_batch": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "neg_embeds": ("EMBEDS",),
            }
        }

    RETURN_TYPES = ("REGIONAL_IPADAPTER", )
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Regional"

    @staticmethod
    def doit(mask, embeds, weight, weight_type, start_at=0.0, end_at=1.0, unfold_batch=False, neg_embeds=None):
        conditioning = IPAdapterConditioning(mask, weight, weight_type, embeds=embeds, neg_embeds=neg_embeds,
                                             start_at=start_at, end_at=end_at, unfold_batch=unfold_batch)
        return (conditioning, )
class RegionalIPAdapterEncodedColorMask:
    """Creates a REGIONAL_IPADAPTER conditioning from pre-encoded embeddings,
    limited to the region selected by color in a color-coded mask image."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "color_mask": ("IMAGE",),
                "mask_color": ("STRING", {"multiline": False, "default": "#FFFFFF"}),
                "embeds": ("EMBEDS",),
                "weight": ("FLOAT", {"default": 0.7, "min": -1, "max": 3, "step": 0.05}),
                "weight_type": (IPADAPTER_WEIGHT_TYPES(), ),
                "start_at": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "end_at": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "unfold_batch": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "neg_embeds": ("EMBEDS",),
            }
        }

    RETURN_TYPES = ("REGIONAL_IPADAPTER", "MASK")
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Regional"

    @staticmethod
    def doit(color_mask, mask_color, embeds, weight, weight_type, start_at=0.0, end_at=1.0, unfold_batch=False, neg_embeds=None):
        region_mask = color_to_mask(color_mask, mask_color)
        conditioning = IPAdapterConditioning(region_mask, weight, weight_type, embeds=embeds, neg_embeds=neg_embeds,
                                             start_at=start_at, end_at=end_at, unfold_batch=unfold_batch)
        return (conditioning, region_mask)
class ApplyRegionalIPAdapters:
    """Applies every connected REGIONAL_IPADAPTER to the model carried by the
    pipe, in input order, and returns the final model."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "ipadapter_pipe": ("IPADAPTER_PIPE",),
                    "regional_ipadapter1": ("REGIONAL_IPADAPTER", ),
                    },
                }

    RETURN_TYPES = ("MODEL", )
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Regional"

    @staticmethod
    def doit(**kwargs):
        ipadapter, model, clip_vision, insightface, lora_loader = kwargs.pop('ipadapter_pipe')

        for regional in kwargs.values():
            # Rebuild the pipe each round so the next regional adapter sees the
            # model produced by the previous one.
            model = regional.doit((ipadapter, model, clip_vision, insightface, lora_loader))

        return (model, )
class RegionalSeedExplorerMask:
    """Applies seed-variation noise only inside the region given by `mask`.

    `seed_prompt` is split on commas and handed to SeedExplorer.apply_variation
    together with an optional extra (additional_seed, additional_strength) pair.
    On failure the original noise is returned unchanged (best-effort).
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "mask": ("MASK",),
                "noise": ("NOISE_IMAGE",),
                "seed_prompt": ("STRING", {"multiline": True, "dynamicPrompts": False, "pysssss.autocomplete": False}),
                "enable_additional": ("BOOLEAN", {"default": True, "label_on": "true", "label_off": "false"}),
                "additional_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                "additional_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "noise_mode": (["GPU(=A1111)", "CPU"],),
            },
            "optional":
                {"variation_method": (["linear", "slerp"],), }
        }

    RETURN_TYPES = ("NOISE_IMAGE",)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Regional"

    @staticmethod
    def doit(mask, noise, seed_prompt, enable_additional, additional_seed, additional_strength, noise_mode, variation_method='linear'):
        device = comfy.model_management.get_torch_device()
        noise_device = "cpu" if noise_mode == "CPU" else device

        noise = noise.to(device)
        mask = mask.to(device)

        if len(mask.shape) == 2:
            mask = mask.unsqueeze(0)

        # Match the mask to the noise's spatial resolution.
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(noise.shape[2], noise.shape[3]), mode="bilinear").squeeze(0)

        try:
            seed_prompt = seed_prompt.replace("\n", "")
            items = seed_prompt.strip().split(",")

            if items == ['']:
                items = []

            if enable_additional:
                items.append((additional_seed, additional_strength))

            noise = prompt_support.SeedExplorer.apply_variation(noise, items, noise_device, mask, variation_method=variation_method)
        except Exception:
            # Fixed: this log line previously reported the wrong node name
            # (RegionalSeedExplorerColorMask).
            logging.error("[Inspire Pack] IGNORED: RegionalSeedExplorerMask is failed.")
            traceback.print_exc()

        noise = noise.cpu()
        mask.cpu()  # NOTE(review): no-op — Tensor.cpu() returns a copy that is discarded here
        return (noise,)
class RegionalSeedExplorerColorMask:
    """Applies seed-variation noise only inside the region selected by color.

    Returns the modified noise together with the original-resolution selection mask.
    """
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "color_mask": ("IMAGE",),
                "mask_color": ("STRING", {"multiline": False, "default": "#FFFFFF"}),
                "noise": ("NOISE_IMAGE",),
                "seed_prompt": ("STRING", {"multiline": True, "dynamicPrompts": False, "pysssss.autocomplete": False}),
                "enable_additional": ("BOOLEAN", {"default": True, "label_on": "true", "label_off": "false"}),
                "additional_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                "additional_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "noise_mode": (["GPU(=A1111)", "CPU"],),
            },
            "optional":
                {"variation_method": (["linear", "slerp"],), }
        }
    RETURN_TYPES = ("NOISE_IMAGE", "MASK")
    FUNCTION = "doit"
    CATEGORY = "InspirePack/Regional"
    @staticmethod
    def doit(color_mask, mask_color, noise, seed_prompt, enable_additional, additional_seed, additional_strength, noise_mode, variation_method='linear'):
        # `noise_device` is passed through to SeedExplorer.apply_variation; the
        # masking math itself runs on `device`.
        device = comfy.model_management.get_torch_device()
        noise_device = "cpu" if noise_mode == "CPU" else device
        color_mask = color_mask.to(device)
        noise = noise.to(device)
        # Keep the unresized mask so it can be returned to the caller.
        mask = color_to_mask(color_mask, mask_color)
        original_mask = mask
        # Match the mask to the noise's spatial resolution.
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(noise.shape[2], noise.shape[3]), mode="bilinear").squeeze(0)
        mask = mask.to(device)
        try:
            seed_prompt = seed_prompt.replace("\n", "")
            items = seed_prompt.strip().split(",")
            if items == ['']:
                items = []
            if enable_additional:
                items.append((additional_seed, additional_strength))
            # Best-effort: on failure the original noise is returned unchanged.
            noise = prompt_support.SeedExplorer.apply_variation(noise, items, noise_device, mask, variation_method=variation_method)
        except Exception:
            logging.error("[Inspire Pack] IGNORED: RegionalSeedExplorerColorMask is failed.")
            traceback.print_exc()
        # NOTE(review): color_mask.cpu() below is a no-op — Tensor.cpu() returns a
        # copy that is discarded.
        color_mask.cpu()
        noise = noise.cpu()
        original_mask = original_mask.cpu()
        return (noise, original_mask)
class ColorMaskToDepthMask:
    """Builds a scalar (depth-like) mask from a color-coded mask image.

    Each line of `spec` has the form "#RRGGBB:value"; pixels of that color get
    that value. All layers plus a `base_value` background layer are flattened
    with the chosen method and clamped to [0, 1].
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "color_mask": ("IMAGE",),
                "spec": ("STRING", {"multiline": True, "default": "#FF0000:1.0\n#000000:1.0"}),
                "base_value": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0}),
                "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
                "flatten_method": (["override", "sum", "max"],),
            },
        }

    RETURN_TYPES = ("MASK", )
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Regional"

    def doit(self, color_mask, spec, base_value, dilation, flatten_method):
        specs = spec.split('\n')
        # Fixed: the decimal point was an unescaped `.` (it matched any character,
        # so inputs like "#FF0000:1x5" crashed float() below), and lowercase hex
        # digits were silently rejected.
        pat = re.compile(r"(?P<color_code>#[A-Fa-f0-9]+):(?P<cfg>[0-9]+(\.[0-9]*)?)")

        # Start with a background layer filled with base_value.
        masks = [torch.ones((1, color_mask.shape[1], color_mask.shape[2])) * base_value]
        for x in specs:
            match = pat.match(x)
            if match:
                mask = color_to_mask(color_mask=color_mask, mask_color=match['color_code']) * float(match['cfg'])
                mask = utils.dilate_mask(mask, dilation)
                masks.append(mask)

        # `masks` always contains at least the base layer, so this branch is taken;
        # the guard is kept for safety.
        if masks:
            masks = torch.cat(masks, dim=0)

            if flatten_method == 'override':
                # Later layers overwrite earlier ones wherever they are non-zero.
                masks = utils.flatten_non_zero_override(masks)
            elif flatten_method == 'max':
                masks = torch.max(masks, dim=0)[0]
            else:  # flatten_method == 'sum':
                masks = torch.sum(masks, dim=0)

            masks = torch.clamp(masks, min=0.0, max=1.0)
            masks = masks.unsqueeze(0)
        else:
            masks = torch.tensor([])

        return (masks, )
class RegionalCFG:
    """Applies classifier-free guidance only inside `mask`; outside the mask the
    guidance contribution is zeroed."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL",),
                             "mask": ("MASK",),
                             }}

    RETURN_TYPES = ("MODEL",)
    FUNCTION = "doit"

    CATEGORY = "InspirePack/Regional"

    @staticmethod
    def doit(model, mask):
        # Normalize the mask to NCHW so it can be interpolated and broadcast.
        if len(mask.shape) == 2:
            mask = mask.unsqueeze(0).unsqueeze(0)
        elif len(mask.shape) == 3:
            mask = mask.unsqueeze(0)

        base_mask = mask   # original-resolution mask; always resize from this one
        resized_mask = None
        size = None

        def regional_cfg(args):
            nonlocal base_mask
            nonlocal resized_mask
            nonlocal size

            x = args['input']

            if base_mask.device != x.device:
                base_mask = base_mask.to(x.device)

            if size != (x.shape[2], x.shape[3]):
                size = (x.shape[2], x.shape[3])
                # Fixed: resize from the original mask every time the latent size
                # changes — previously the already-resized mask was interpolated
                # again, compounding interpolation blur across size changes.
                resized_mask = torch.nn.functional.interpolate(base_mask, size=size, mode='bilinear', align_corners=False)

            if resized_mask.device != x.device:
                resized_mask = resized_mask.to(x.device)

            cond_pred = args["cond_denoised"]
            uncond_pred = args["uncond_denoised"]
            cond_scale = args["cond_scale"]

            # Standard CFG, but the guidance term is gated by the mask.
            cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale * resized_mask
            return x - cfg_result

        m = model.clone()
        m.set_model_sampler_cfg_function(regional_cfg)
        return (m,)
# Registration table consumed by ComfyUI: internal node ID -> implementing class.
NODE_CLASS_MAPPINGS = {
    "RegionalPromptSimple //Inspire": RegionalPromptSimple,
    "RegionalPromptColorMask //Inspire": RegionalPromptColorMask,
    "RegionalConditioningSimple //Inspire": RegionalConditioningSimple,
    "RegionalConditioningColorMask //Inspire": RegionalConditioningColorMask,
    "RegionalIPAdapterMask //Inspire": RegionalIPAdapterMask,
    "RegionalIPAdapterColorMask //Inspire": RegionalIPAdapterColorMask,
    "RegionalIPAdapterEncodedMask //Inspire": RegionalIPAdapterEncodedMask,
    "RegionalIPAdapterEncodedColorMask //Inspire": RegionalIPAdapterEncodedColorMask,
    "RegionalSeedExplorerMask //Inspire": RegionalSeedExplorerMask,
    "RegionalSeedExplorerColorMask //Inspire": RegionalSeedExplorerColorMask,
    "ToIPAdapterPipe //Inspire": ToIPAdapterPipe,
    "FromIPAdapterPipe //Inspire": FromIPAdapterPipe,
    "ApplyRegionalIPAdapters //Inspire": ApplyRegionalIPAdapters,
    "RegionalCFG //Inspire": RegionalCFG,
    "ColorMaskToDepthMask //Inspire": ColorMaskToDepthMask,
}
# Internal node ID -> label shown in the ComfyUI node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "RegionalPromptSimple //Inspire": "Regional Prompt Simple (Inspire)",
    "RegionalPromptColorMask //Inspire": "Regional Prompt By Color Mask (Inspire)",
    "RegionalConditioningSimple //Inspire": "Regional Conditioning Simple (Inspire)",
    "RegionalConditioningColorMask //Inspire": "Regional Conditioning By Color Mask (Inspire)",
    "RegionalIPAdapterMask //Inspire": "Regional IPAdapter Mask (Inspire)",
    "RegionalIPAdapterColorMask //Inspire": "Regional IPAdapter By Color Mask (Inspire)",
    "RegionalIPAdapterEncodedMask //Inspire": "Regional IPAdapter Encoded Mask (Inspire)",
    "RegionalIPAdapterEncodedColorMask //Inspire": "Regional IPAdapter Encoded By Color Mask (Inspire)",
    "RegionalSeedExplorerMask //Inspire": "Regional Seed Explorer By Mask (Inspire)",
    "RegionalSeedExplorerColorMask //Inspire": "Regional Seed Explorer By Color Mask (Inspire)",
    "ToIPAdapterPipe //Inspire": "ToIPAdapterPipe (Inspire)",
    "FromIPAdapterPipe //Inspire": "FromIPAdapterPipe (Inspire)",
    "ApplyRegionalIPAdapters //Inspire": "Apply Regional IPAdapters (Inspire)",
    "RegionalCFG //Inspire": "Regional CFG (Inspire)",
    "ColorMaskToDepthMask //Inspire": "Color Mask To Depth Mask (Inspire)",
}

View File

@@ -0,0 +1,353 @@
import torch
from . import a1111_compat
import comfy
from .libs import common
from comfy.samplers import CFGGuider
from comfy_extras.nodes_perpneg import Guider_PerpNeg
import math
class KSampler_progress(a1111_compat.KSampler_inspire):
    """KSampler variant that also captures intermediate latents during sampling.

    Runs through KSamplerAdvanced_inspire with an extended step count
    (steps / denoise) and a start offset, and records the latent every
    `interval` steps (plus the final step) into `progress_latent`.
    """
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (common.get_schedulers(), ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "noise_mode": (a1111_compat.supported_noise_modes,),
                    "interval": ("INT", {"default": 1, "min": 1, "max": 10000}),
                    "omit_start_latent": ("BOOLEAN", {"default": True, "label_on": "True", "label_off": "False"}),
                    "omit_final_latent": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}),
                    },
                "optional": {
                    "scheduler_func_opt": ("SCHEDULER_FUNC",),
                    }
                }
    CATEGORY = "InspirePack/analysis"
    RETURN_TYPES = ("LATENT", "LATENT")
    RETURN_NAMES = ("latent", "progress_latent")
    @staticmethod
    def doit(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise, noise_mode,
             interval, omit_start_latent, omit_final_latent, scheduler_func_opt=None):
        # NOTE(review): denoise can be 0.0 per INPUT_TYPES, which would raise
        # ZeroDivisionError here — confirm upstream guards against it.
        adv_steps = int(steps / denoise)
        if omit_start_latent:
            result = []
        else:
            # Seed the progress list with the (channel-fixed) input latent.
            result = [comfy.sample.fix_empty_latent_channels(model, latent_image['samples']).cpu()]
        def progress_callback(step, x0, x, total_steps):
            # Record every `interval`-th step, and always record the last one.
            if (total_steps-1) != step and step % interval != 0:
                return
            x = model.model.process_latent_out(x)
            x = x.cpu()
            result.append(x)
        # Advanced sampling with start offset (adv_steps - steps) emulates `denoise`.
        latent_image, noise = a1111_compat.KSamplerAdvanced_inspire.sample(model, True, seed, adv_steps, cfg, sampler_name, scheduler, positive, negative, latent_image, (adv_steps-steps),
                                                                           adv_steps, noise_mode, False, callback=progress_callback, scheduler_func_opt=scheduler_func_opt)
        if not omit_final_latent:
            result.append(latent_image['samples'].cpu())
        if len(result) > 0:
            result = torch.cat(result)
            result = {'samples': result}
        else:
            # Nothing was captured: fall back to returning the final latent twice.
            result = latent_image
        return latent_image, result
class KSamplerAdvanced_progress(a1111_compat.KSamplerAdvanced_inspire):
    """Advanced KSampler variant that also returns intermediate latents.

    Behaves like KSamplerAdvanced_inspire, but captures the latent every
    `interval` steps (plus, optionally, the input and final latents) and
    returns them batched as `progress_latent`.  A previous progress batch can
    be chained via `prev_progress_latent_opt`.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "model": ("MODEL",),
                    "add_noise": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"}),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (common.get_schedulers(), ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "latent_image": ("LATENT", ),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                    "noise_mode": (a1111_compat.supported_noise_modes,),
                    "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}),
                    "interval": ("INT", {"default": 1, "min": 1, "max": 10000}),
                    "omit_start_latent": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}),
                    "omit_final_latent": ("BOOLEAN", {"default": False, "label_on": "True", "label_off": "False"}),
                    },
                "optional": {
                    "prev_progress_latent_opt": ("LATENT",),
                    "scheduler_func_opt": ("SCHEDULER_FUNC",),
                    }
                }

    FUNCTION = "doit"

    CATEGORY = "InspirePack/analysis"

    # First output: final latent. Second: batch of captured per-step latents.
    RETURN_TYPES = ("LATENT", "LATENT")
    RETURN_NAMES = ("latent", "progress_latent")

    def doit(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
             start_at_step, end_at_step, noise_mode, return_with_leftover_noise, interval, omit_start_latent, omit_final_latent,
             prev_progress_latent_opt=None, scheduler_func_opt=None):
        """Sample while capturing intermediate latents through a progress callback."""
        if omit_start_latent:
            result = []
        else:
            result = [latent_image['samples']]

        def progress_callback(step, x0, x, total_steps):
            # Record every `interval`-th step, and always the final step.
            if (total_steps-1) != step and step % interval != 0:
                return

            x = model.model.process_latent_out(x)
            x = x.cpu()
            result.append(x)

        latent_image, noise = a1111_compat.KSamplerAdvanced_inspire.sample(model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step,
                                                                           noise_mode, return_with_leftover_noise, callback=progress_callback, scheduler_func_opt=scheduler_func_opt)
        if not omit_final_latent:
            result.append(latent_image['samples'].cpu())

        if len(result) > 0:
            result = torch.cat(result)
            result = {'samples': result}
        else:
            # Nothing was captured at all; fall back to the final latent.
            result = latent_image

        if prev_progress_latent_opt is not None:
            # Chain onto a previous progress batch along the batch dimension.
            result['samples'] = torch.cat((prev_progress_latent_opt['samples'], result['samples']), dim=0)

        return latent_image, result
def exponential_interpolation(from_cfg, to_cfg, i, steps):
    """Interpolate from `from_cfg` to `to_cfg` along a log-space (exponential) curve.

    `i` is the step index over a schedule of `steps` intervals; index steps-1
    is pinned to `to_cfg`.  A zero endpoint cannot be log-interpolated, so
    those cases use a scaled (1 - e^-5) exponential ramp instead.
    """
    if i == steps-1:
        return to_cfg
    if from_cfg == to_cfg:
        return from_cfg

    if from_cfg == 0:
        # Ramp up from zero on a saturating exponential.
        return to_cfg * (1 - math.exp(-5 * i / steps)) / (1 - math.exp(-5))
    if to_cfg == 0:
        # Decay toward zero on the mirrored curve.
        return from_cfg * (math.exp(-5 * i / steps) - math.exp(-5)) / (1 - math.exp(-5))

    log_from = math.log(from_cfg)
    log_to = math.log(to_cfg)
    return math.exp(log_from + (log_to - log_from) * i / steps)
def logarithmic_interpolation(from_cfg, to_cfg, i, steps):
    """Interpolate from `from_cfg` to `to_cfg` with a logarithmic time warp.

    Progress t = log(i+1)/log(steps+1) moves quickly at first and slows down;
    both endpoints (i == 0 and i == steps-1) are pinned exactly.
    """
    if i == 0:
        return from_cfg
    if i == steps-1:
        return to_cfg

    t = math.log(i + 1) / math.log(steps + 1)
    return from_cfg + (to_cfg - from_cfg) * t
def cosine_interpolation(from_cfg, to_cfg, i, steps):
    """Cosine-shaped schedule between `from_cfg` and `to_cfg`.

    Uses a full cosine cycle (2*pi*i/steps): the weight dips to `from_cfg`
    at the midpoint and rises back toward `to_cfg` near both ends.
    NOTE(review): unlike the other interpolation curves, i == steps-1 is
    pinned to `from_cfg`, not `to_cfg` — presumably intentional (cyclic
    schedule); confirm upstream before changing.
    """
    if i == 0 or i == steps-1:
        return from_cfg

    t = (1.0 + math.cos(math.pi*2*(i/steps))) / 2
    return from_cfg + (to_cfg - from_cfg) * t
class Guider_scheduled(CFGGuider):
    """CFGGuider that varies the CFG scale over the course of sampling.

    A cfg value is precomputed for every sigma in the supplied schedule using
    one of the interpolation curves ('exp', 'log', 'cos'; anything else is
    linear).  predict_noise() looks up the value matching the current
    timestep and installs it as self.cfg before delegating to the base class.
    """

    def __init__(self, model_patcher, sigmas, from_cfg, to_cfg, schedule):
        super().__init__(model_patcher)
        self.default_cfg = self.cfg  # cfg assigned by the base class; stored but not read back here
        self.sigmas = sigmas
        self.cfg_sigmas = None    # float(sigma) -> (cfg, step index)
        self.cfg_sigmas_i = None  # step index -> (cfg, step index); used as fallback
        self.from_cfg = from_cfg
        self.to_cfg = to_cfg
        self.schedule = schedule
        self.last_i = 0  # step index most recently matched in predict_noise
        self.renew_cfg_sigmas()

    def set_cfg(self, cfg):
        # Overrides the base setter: the schedule table is rebuilt rather than
        # applying a single cfg value directly.
        self.default_cfg = cfg
        self.renew_cfg_sigmas()

    def renew_cfg_sigmas(self):
        """Precompute the cfg value (and step index) for every sigma in the schedule."""
        self.cfg_sigmas = {}
        self.cfg_sigmas_i = {}

        i = 0
        steps = len(self.sigmas) - 1
        for x in self.sigmas:
            k = float(x)
            delta = self.to_cfg - self.from_cfg  # only consumed by the linear fallback branch
            if self.schedule == 'exp':
                self.cfg_sigmas[k] = exponential_interpolation(self.from_cfg, self.to_cfg, i, steps), i
            elif self.schedule == 'log':
                self.cfg_sigmas[k] = logarithmic_interpolation(self.from_cfg, self.to_cfg, i, steps), i
            elif self.schedule == 'cos':
                self.cfg_sigmas[k] = cosine_interpolation(self.from_cfg, self.to_cfg, i, steps), i
            else:
                self.cfg_sigmas[k] = self.from_cfg + delta * i / steps, i

            self.cfg_sigmas_i[i] = self.cfg_sigmas[k]
            i += 1

    def predict_noise(self, x, timestep, model_options={}, seed=None):
        # NOTE: the mutable default for model_options mirrors the upstream
        # CFGGuider signature; it is not mutated here.
        k = float(timestep[0])
        v = self.cfg_sigmas.get(k)
        if v is None:
            # fallback
            # The sampler produced a sigma absent from the precomputed table
            # (e.g. a perturbed sigma); assume it follows the last matched step
            # and cache it.  NOTE(review): assumes last_i+1 exists in
            # cfg_sigmas_i — an unmatched final step would raise KeyError;
            # confirm upstream.
            v = self.cfg_sigmas_i[self.last_i+1]
            self.cfg_sigmas[k] = v

        self.last_i = v[1]
        self.cfg = v[0]
        return super().predict_noise(x, timestep, model_options, seed)
class Guider_PerpNeg_scheduled(Guider_PerpNeg):
    """Perp-Neg guider that varies the CFG scale over the course of sampling.

    Same per-sigma cfg scheduling as Guider_scheduled, applied on top of the
    perpendicular-negative guidance base class; `neg_scale` is forwarded to
    that base class via the attribute it reads.
    """

    def __init__(self, model_patcher, sigmas, from_cfg, to_cfg, schedule, neg_scale):
        super().__init__(model_patcher)
        self.default_cfg = self.cfg  # cfg assigned by the base class; stored but not read back here
        self.sigmas = sigmas
        self.cfg_sigmas = None    # float(sigma) -> (cfg, step index)
        self.cfg_sigmas_i = None  # step index -> (cfg, step index); used as fallback
        self.from_cfg = from_cfg
        self.to_cfg = to_cfg
        self.schedule = schedule
        self.neg_scale = neg_scale
        self.last_i = 0  # step index most recently matched in predict_noise
        self.renew_cfg_sigmas()

    def set_cfg(self, cfg):
        # Overrides the base setter: the schedule table is rebuilt rather than
        # applying a single cfg value directly.
        self.default_cfg = cfg
        self.renew_cfg_sigmas()

    def renew_cfg_sigmas(self):
        """Precompute the cfg value (and step index) for every sigma in the schedule."""
        self.cfg_sigmas = {}
        self.cfg_sigmas_i = {}

        i = 0
        steps = len(self.sigmas) - 1
        for x in self.sigmas:
            k = float(x)
            delta = self.to_cfg - self.from_cfg  # only consumed by the linear fallback branch
            if self.schedule == 'exp':
                self.cfg_sigmas[k] = exponential_interpolation(self.from_cfg, self.to_cfg, i, steps), i
            elif self.schedule == 'log':
                self.cfg_sigmas[k] = logarithmic_interpolation(self.from_cfg, self.to_cfg, i, steps), i
            elif self.schedule == 'cos':
                self.cfg_sigmas[k] = cosine_interpolation(self.from_cfg, self.to_cfg, i, steps), i
            else:
                self.cfg_sigmas[k] = self.from_cfg + delta * i / steps, i

            self.cfg_sigmas_i[i] = self.cfg_sigmas[k]
            i += 1

    def predict_noise(self, x, timestep, model_options={}, seed=None):
        # NOTE: the mutable default for model_options mirrors the upstream
        # guider signature; it is not mutated here.
        k = float(timestep[0])
        v = self.cfg_sigmas.get(k)
        if v is None:
            # fallback
            # Sigma not in the precomputed table; assume it follows the last
            # matched step and cache it.  NOTE(review): assumes last_i+1 exists
            # in cfg_sigmas_i — confirm upstream.
            v = self.cfg_sigmas_i[self.last_i+1]
            self.cfg_sigmas[k] = v

        self.last_i = v[1]
        self.cfg = v[0]
        return super().predict_noise(x, timestep, model_options, seed)
class ScheduledCFGGuider:
    """Node that builds a Guider_scheduled: cfg follows a schedule across the sigmas."""

    RETURN_TYPES = ("GUIDER", "SIGMAS")
    FUNCTION = "get_guider"
    CATEGORY = "sampling/custom_sampling/guiders"

    @classmethod
    def INPUT_TYPES(cls):
        required = {
            "model": ("MODEL", ),
            "positive": ("CONDITIONING", ),
            "negative": ("CONDITIONING", ),
            "sigmas": ("SIGMAS", ),
            "from_cfg": ("FLOAT", {"default": 6.5, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
            "to_cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
            "schedule": (["linear", "log", "exp", "cos"], {'default': 'log'}),
        }
        return {"required": required}

    def get_guider(self, model, positive, negative, sigmas, from_cfg, to_cfg, schedule):
        """Create the scheduled guider, attach conds, and pass the sigmas through."""
        scheduled = Guider_scheduled(model, sigmas, from_cfg, to_cfg, schedule)
        scheduled.set_conds(positive, negative)
        return scheduled, sigmas
class ScheduledPerpNegCFGGuider:
    """Node that builds a Guider_PerpNeg_scheduled: scheduled cfg with perp-neg guidance."""

    RETURN_TYPES = ("GUIDER", "SIGMAS")
    FUNCTION = "get_guider"
    CATEGORY = "sampling/custom_sampling/guiders"

    @classmethod
    def INPUT_TYPES(cls):
        required = {
            "model": ("MODEL", ),
            "positive": ("CONDITIONING", ),
            "negative": ("CONDITIONING", ),
            "empty_conditioning": ("CONDITIONING", ),
            "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
            "sigmas": ("SIGMAS", ),
            "from_cfg": ("FLOAT", {"default": 6.5, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
            "to_cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
            "schedule": (["linear", "log", "exp", "cos"], {'default': 'log'}),
        }
        return {"required": required}

    def get_guider(self, model, positive, negative, empty_conditioning, neg_scale, sigmas, from_cfg, to_cfg, schedule):
        """Create the scheduled perp-neg guider, attach conds, and pass the sigmas through."""
        scheduled = Guider_PerpNeg_scheduled(model, sigmas, from_cfg, to_cfg, schedule, neg_scale)
        scheduled.set_conds(positive, negative, empty_conditioning)
        return scheduled, sigmas
# Registration tables consumed by ComfyUI: node id -> implementing class,
# and node id -> human-readable display label.
NODE_CLASS_MAPPINGS = {
    "KSamplerProgress //Inspire": KSampler_progress,
    "KSamplerAdvancedProgress //Inspire": KSamplerAdvanced_progress,
    "ScheduledCFGGuider //Inspire": ScheduledCFGGuider,
    "ScheduledPerpNegCFGGuider //Inspire": ScheduledPerpNegCFGGuider
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "KSamplerProgress //Inspire": "KSampler Progress (Inspire)",
    "KSamplerAdvancedProgress //Inspire": "KSampler Advanced Progress (Inspire)",
    "ScheduledCFGGuider //Inspire": "Scheduled CFGGuider (Inspire)",
    "ScheduledPerpNegCFGGuider //Inspire": "Scheduled PerpNeg CFGGuider (Inspire)"
}
# ==== web-view artifact: "View File" — boundary between two concatenated source files ====
# ==== original diff header for the second (newly added) file: @@ -0,0 +1,648 @@ ====
import nodes
import numpy as np
import torch
from .libs import utils
import logging
def normalize_size_base_64(w, h):
    """Return the shorter of (w, h) rounded up to the nearest multiple of 64.

    Used to pick a working resolution for the auxiliary preprocessor nodes.
    """
    short_side = min(w, h)
    # Ceil-division keeps exact multiples unchanged and rounds everything else up.
    return ((short_side + 63) // 64) * 64
class MediaPipeFaceMeshDetector:
    """BBOX/SEGM detector backed by MediaPipe FaceMesh nodes.

    Combines 'MediaPipe-FaceMeshPreprocessor' (from controlnet_aux) with
    'MediaPipeFaceMeshToSEGS' (from Impact Pack) to detect selected facial
    parts and convert them into SEGS.  `is_segm` selects segmentation-style
    output versus bbox-style output in the ToSEGS node.
    """

    def __init__(self, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil, max_faces, is_segm):
        # Per-part boolean toggles selecting which facial features to emit.
        self.face = face
        self.mouth = mouth
        self.left_eyebrow = left_eyebrow
        self.left_eye = left_eye
        self.left_pupil = left_pupil
        self.right_eyebrow = right_eyebrow
        self.right_eye = right_eye
        self.right_pupil = right_pupil
        self.is_segm = is_segm
        self.max_faces = max_faces
        # Flag read by Impact Pack's detector protocol.
        self.override_bbox_by_segm = True

    def detect(self, image, threshold, dilation, crop_factor, drop_size=1, crop_min_size=None, detailer_hook=None):
        """Detect facial-part SEGS on `image`; raises if required extensions are missing."""
        if 'MediaPipe-FaceMeshPreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node('https://github.com/Fannovel16/comfyui_controlnet_aux',
                                          "To use 'MediaPipeFaceMeshDetector' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use MediaPipeFaceMeshDetector, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        if 'MediaPipeFaceMeshToSEGS' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node('https://github.com/ltdrdata/ComfyUI-Impact-Pack',
                                          "To use 'MediaPipeFaceMeshDetector' node, 'Impact Pack' extension is required.")
            raise Exception("[ERROR] To use MediaPipeFaceMeshDetector, you need to install 'ComfyUI-Impact-Pack'")

        pre_obj = nodes.NODE_CLASS_MAPPINGS['MediaPipe-FaceMeshPreprocessor']
        seg_obj = nodes.NODE_CLASS_MAPPINGS['MediaPipeFaceMeshToSEGS']

        # Detect at a 64-aligned resolution, then scale the mesh image back to
        # the original size so SEGS coordinates match the input image.
        # NOTE(review): image is indexed as (batch, height, width, channels) here — confirm.
        resolution = normalize_size_base_64(image.shape[2], image.shape[1])
        facemesh_image = pre_obj().detect(image, self.max_faces, threshold, resolution=resolution)[0]
        facemesh_image = nodes.ImageScale().upscale(facemesh_image, "bilinear", image.shape[2], image.shape[1], "disabled")[0]

        # Argument order must match MediaPipeFaceMeshToSEGS.doit exactly.
        segs = seg_obj().doit(facemesh_image, crop_factor, not self.is_segm, crop_min_size, drop_size, dilation,
                              self.face, self.mouth, self.left_eyebrow, self.left_eye, self.left_pupil,
                              self.right_eyebrow, self.right_eye, self.right_pupil)[0]

        return segs

    def setAux(self, x):
        # Part of the Impact Pack detector interface; unused here.
        pass
class MediaPipe_FaceMesh_Preprocessor_wrapper:
    """SEGS preprocessor adapter for the 'MediaPipe-FaceMeshPreprocessor' node."""

    def __init__(self, max_faces, min_confidence, upscale_factor=1.0):
        self.max_faces = max_faces
        self.min_confidence = min_confidence
        self.upscale_factor = upscale_factor

    def apply(self, image, mask=None):
        """Detect face meshes on `image` (optionally upscaled first); `mask` is unused."""
        if 'MediaPipe-FaceMeshPreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                "To use 'MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        if self.upscale_factor != 1.0:
            image = nodes.ImageScaleBy().upscale(image, 'bilinear', self.upscale_factor)[0]

        node = nodes.NODE_CLASS_MAPPINGS['MediaPipe-FaceMeshPreprocessor']()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        return node.detect(image, self.max_faces, self.min_confidence, resolution=res)[0]
class AnimeLineArt_Preprocessor_wrapper:
    """SEGS preprocessor adapter for the 'AnimeLineArtPreprocessor' node."""

    def apply(self, image, mask=None):
        """Extract anime-style line art from `image`; `mask` is unused."""
        if 'AnimeLineArtPreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                "To use 'AnimeLineArt_Preprocessor_Provider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use AnimeLineArt_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        node = nodes.NODE_CLASS_MAPPINGS['AnimeLineArtPreprocessor']()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        return node.execute(image, resolution=res)[0]
class Manga2Anime_LineArt_Preprocessor_wrapper:
    """SEGS preprocessor adapter for the 'Manga2Anime_LineArt_Preprocessor' node."""

    def apply(self, image, mask=None):
        """Extract manga-to-anime line art from `image`; `mask` is unused."""
        if 'Manga2Anime_LineArt_Preprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                "To use 'Manga2Anime_LineArt_Preprocessor_Provider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use Manga2Anime_LineArt_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        node = nodes.NODE_CLASS_MAPPINGS['Manga2Anime_LineArt_Preprocessor']()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        return node.execute(image, resolution=res)[0]
class Color_Preprocessor_wrapper:
    """SEGS preprocessor adapter for the 'ColorPreprocessor' node."""

    def apply(self, image, mask=None):
        """Produce a color-palette map of `image`; `mask` is unused."""
        if 'ColorPreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                "To use 'Color_Preprocessor_Provider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use Color_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        node = nodes.NODE_CLASS_MAPPINGS['ColorPreprocessor']()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        return node.execute(image, resolution=res)[0]
class InpaintPreprocessor_wrapper:
    """SEGS preprocessor adapter for the 'InpaintPreprocessor' node.

    Falls back to the legacy preprocess() signature when the installed
    controlnet_aux build doesn't support `black_pixel_for_xinsir_cn`.
    """

    def __init__(self, black_pixel_for_xinsir_cn):
        # Whether masked pixels are painted black (expected by xinsir controlnets).
        self.black_pixel_for_xinsir_cn = black_pixel_for_xinsir_cn

    def apply(self, image, mask=None):
        """Prepare `image`+`mask` for an inpaint controlnet; a missing mask covers everything."""
        if 'InpaintPreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node('https://github.com/Fannovel16/comfyui_controlnet_aux',
                                          "To use 'InpaintPreprocessor_Provider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use InpaintPreprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        obj = nodes.NODE_CLASS_MAPPINGS['InpaintPreprocessor']()

        if mask is None:
            # Default: treat the whole image as masked.
            # NOTE(review): assumes image is (batch, height, width, channels) — confirm.
            mask = torch.ones((image.shape[1], image.shape[2]), dtype=torch.float32, device="cpu").unsqueeze(0)

        try:
            res = obj.preprocess(image, mask, black_pixel_for_xinsir_cn=self.black_pixel_for_xinsir_cn)[0]
        except Exception as e:
            # Older controlnet_aux builds don't accept black_pixel_for_xinsir_cn.
            # Only fall back to the legacy signature when the flag is off; if the
            # caller explicitly requested xinsir behavior, surface the error.
            if self.black_pixel_for_xinsir_cn:
                raise e
            else:
                res = obj.preprocess(image, mask)[0]
                logging.warning("[Inspire Pack] Installed 'ComfyUI's ControlNet Auxiliary Preprocessors.' is outdated.")

        return res
class TilePreprocessor_wrapper:
    """SEGS preprocessor adapter for the 'TilePreprocessor' node."""

    def __init__(self, pyrUp_iters):
        self.pyrUp_iters = pyrUp_iters

    def apply(self, image, mask=None):
        """Run the tile preprocessor with the configured pyrUp iterations; `mask` is unused."""
        if 'TilePreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                "To use 'TilePreprocessor_Provider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use TilePreprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        node = nodes.NODE_CLASS_MAPPINGS['TilePreprocessor']()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        return node.execute(image, self.pyrUp_iters, resolution=res)[0]
class MeshGraphormerDepthMapPreprocessorProvider_wrapper:
    """SEGS preprocessor adapter for the 'MeshGraphormer-DepthMapPreprocessor' node."""

    def apply(self, image, mask=None):
        """Produce a hand depth map of `image`; `mask` is unused."""
        if 'MeshGraphormer-DepthMapPreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                "To use 'MeshGraphormerDepthMapPreprocessorProvider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use MeshGraphormerDepthMapPreprocessorProvider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        node = nodes.NODE_CLASS_MAPPINGS['MeshGraphormer-DepthMapPreprocessor']()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        return node.execute(image, resolution=res)[0]
class LineArt_Preprocessor_wrapper:
    """SEGS preprocessor adapter for the 'LineArtPreprocessor' node."""

    def __init__(self, coarse):
        self.coarse = coarse

    def apply(self, image, mask=None):
        """Extract line art from `image` (optionally coarse); `mask` is unused."""
        if 'LineArtPreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                "To use 'LineArt_Preprocessor_Provider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use LineArt_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        node = nodes.NODE_CLASS_MAPPINGS['LineArtPreprocessor']()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        coarse_flag = 'enable' if self.coarse else 'disable'
        return node.execute(image, resolution=res, coarse=coarse_flag)[0]
class OpenPose_Preprocessor_wrapper:
    """SEGS preprocessor adapter for the 'OpenposePreprocessor' node."""

    def __init__(self, detect_hand, detect_body, detect_face, upscale_factor=1.0):
        self.detect_hand = detect_hand
        self.detect_body = detect_body
        self.detect_face = detect_face
        self.upscale_factor = upscale_factor

    def apply(self, image, mask=None):
        """Estimate an OpenPose skeleton image (optionally upscaled first); `mask` is unused."""
        if 'OpenposePreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                "To use 'OpenPose_Preprocessor_Provider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use OpenPose_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        # The node expects 'enable'/'disable' strings rather than booleans.
        hand = 'enable' if self.detect_hand else 'disable'
        body = 'enable' if self.detect_body else 'disable'
        face = 'enable' if self.detect_face else 'disable'

        if self.upscale_factor != 1.0:
            image = nodes.ImageScaleBy().upscale(image, 'bilinear', self.upscale_factor)[0]

        node = nodes.NODE_CLASS_MAPPINGS['OpenposePreprocessor']()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        return node.estimate_pose(image, hand, body, face, resolution=res)['result'][0]
class DWPreprocessor_wrapper:
    """SEGS preprocessor adapter for the 'DWPreprocessor' (DWPose) node."""

    def __init__(self, detect_hand, detect_body, detect_face, upscale_factor=1.0, bbox_detector="yolox_l.onnx", pose_estimator="dw-ll_ucoco_384.onnx"):
        self.detect_hand = detect_hand
        self.detect_body = detect_body
        self.detect_face = detect_face
        self.upscale_factor = upscale_factor
        self.bbox_detector = bbox_detector
        self.pose_estimator = pose_estimator

    def apply(self, image, mask=None):
        """Estimate a DWPose skeleton image (optionally upscaled first); `mask` is unused."""
        if 'DWPreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                "To use 'DWPreprocessor_Provider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use DWPreprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        # The node expects 'enable'/'disable' strings rather than booleans.
        hand = 'enable' if self.detect_hand else 'disable'
        body = 'enable' if self.detect_body else 'disable'
        face = 'enable' if self.detect_face else 'disable'

        if self.upscale_factor != 1.0:
            image = nodes.ImageScaleBy().upscale(image, 'bilinear', self.upscale_factor)[0]

        node = nodes.NODE_CLASS_MAPPINGS['DWPreprocessor']()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        return node.estimate_pose(image, hand, body, face, resolution=res,
                                  bbox_detector=self.bbox_detector, pose_estimator=self.pose_estimator)['result'][0]
class LeReS_DepthMap_Preprocessor_wrapper:
    """SEGS preprocessor adapter for the 'LeReS-DepthMapPreprocessor' node."""

    def __init__(self, rm_nearest, rm_background, boost):
        self.rm_nearest = rm_nearest
        self.rm_background = rm_background
        self.boost = boost

    def apply(self, image, mask=None):
        """Produce a LeReS depth map of `image`; `mask` is unused."""
        if 'LeReS-DepthMapPreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                "To use 'LeReS_DepthMap_Preprocessor_Provider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use LeReS_DepthMap_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        boost_flag = 'enable' if self.boost else 'disable'
        node = nodes.NODE_CLASS_MAPPINGS['LeReS-DepthMapPreprocessor']()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        return node.execute(image, self.rm_nearest, self.rm_background, boost=boost_flag, resolution=res)[0]
class MiDaS_DepthMap_Preprocessor_wrapper:
    """SEGS preprocessor adapter for the 'MiDaS-DepthMapPreprocessor' node."""

    def __init__(self, a, bg_threshold):
        self.a = a
        self.bg_threshold = bg_threshold

    def apply(self, image, mask=None):
        """Produce a MiDaS depth map of `image`; `mask` is unused."""
        if 'MiDaS-DepthMapPreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                "To use 'MiDaS_DepthMap_Preprocessor_Provider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use MiDaS_DepthMap_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        node = nodes.NODE_CLASS_MAPPINGS['MiDaS-DepthMapPreprocessor']()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        return node.execute(image, self.a, self.bg_threshold, resolution=res)[0]
class Zoe_DepthMap_Preprocessor_wrapper:
    """SEGS preprocessor adapter for the 'Zoe-DepthMapPreprocessor' node."""

    def apply(self, image, mask=None):
        """Produce a ZoeDepth depth map of `image`; `mask` is unused."""
        if 'Zoe-DepthMapPreprocessor' not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                "To use 'Zoe_DepthMap_Preprocessor_Provider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception("[ERROR] To use Zoe_DepthMap_Preprocessor_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        node = nodes.NODE_CLASS_MAPPINGS['Zoe-DepthMapPreprocessor']()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        return node.execute(image, resolution=res)[0]
class HED_Preprocessor_wrapper:
    """SEGS preprocessor adapter for HED-style edge nodes (node name is configurable)."""

    def __init__(self, safe, nodename):
        self.safe = safe
        self.nodename = nodename  # e.g. 'HEDPreprocessor' or 'FakeScribblePreprocessor'

    def apply(self, image, mask=None):
        """Run the configured edge-detection node on `image`; `mask` is unused."""
        if self.nodename not in nodes.NODE_CLASS_MAPPINGS:
            utils.try_install_custom_node(
                'https://github.com/Fannovel16/comfyui_controlnet_aux',
                f"To use '{self.nodename}_Preprocessor_Provider' node, 'ComfyUI's ControlNet Auxiliary Preprocessors.' extension is required.")
            raise Exception(f"[ERROR] To use {self.nodename}_Provider, you need to install 'ComfyUI's ControlNet Auxiliary Preprocessors.'")

        node = nodes.NODE_CLASS_MAPPINGS[self.nodename]()
        res = normalize_size_base_64(image.shape[2], image.shape[1])
        return node.execute(image, resolution=res, safe="enable" if self.safe else "disable")[0]
class Canny_Preprocessor_wrapper:
    """SEGS preprocessor adapter for ComfyUI's built-in 'Canny' node."""

    def __init__(self, low_threshold, high_threshold):
        self.low_threshold = low_threshold
        self.high_threshold = high_threshold

    def apply(self, image, mask=None):
        """Run Canny edge detection on `image`; `mask` is unused."""
        canny = nodes.NODE_CLASS_MAPPINGS['Canny']()
        if hasattr(canny, 'execute'):
            # node v3
            return canny.execute(image, self.low_threshold, self.high_threshold)[0]
        # legacy compatibility
        return canny.detect_edge(image, self.low_threshold, self.high_threshold)[0]
class OpenPose_Preprocessor_Provider_for_SEGS:
    """Node that provides an OpenPose SEGS preprocessor object."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        toggle = ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"})
        return {
            "required": {
                "detect_hand": toggle,
                "detect_body": toggle,
                "detect_face": toggle,
                "resolution_upscale_by": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 100, "step": 0.1}),
            }
        }

    def doit(self, detect_hand, detect_body, detect_face, resolution_upscale_by):
        """Wrap the detection settings in an OpenPose preprocessor object."""
        return (OpenPose_Preprocessor_wrapper(detect_hand, detect_body, detect_face, upscale_factor=resolution_upscale_by), )
class DWPreprocessor_Provider_for_SEGS:
    """Node that provides a DWPose SEGS preprocessor object."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        toggle = ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"})
        return {
            "required": {
                "detect_hand": toggle,
                "detect_body": toggle,
                "detect_face": toggle,
                "resolution_upscale_by": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 100, "step": 0.1}),
                "bbox_detector": (
                    ["yolox_l.torchscript.pt", "yolox_l.onnx", "yolo_nas_l_fp16.onnx", "yolo_nas_m_fp16.onnx", "yolo_nas_s_fp16.onnx"],
                    {"default": "yolox_l.onnx"}
                ),
                "pose_estimator": (["dw-ll_ucoco_384_bs5.torchscript.pt", "dw-ll_ucoco_384.onnx", "dw-ll_ucoco.onnx"], {"default": "dw-ll_ucoco_384_bs5.torchscript.pt"})
            }
        }

    def doit(self, detect_hand, detect_body, detect_face, resolution_upscale_by, bbox_detector, pose_estimator):
        """Wrap the detection/model settings in a DWPose preprocessor object."""
        return (DWPreprocessor_wrapper(detect_hand, detect_body, detect_face, upscale_factor=resolution_upscale_by,
                                       bbox_detector=bbox_detector, pose_estimator=pose_estimator), )
class LeReS_DepthMap_Preprocessor_Provider_for_SEGS:
    """Node that provides a LeReS depth-map SEGS preprocessor object."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        pct = ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100, "step": 0.1})
        return {
            "required": {
                "rm_nearest": pct,
                "rm_background": pct,
            },
            "optional": {
                "boost": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}),
            }
        }

    def doit(self, rm_nearest, rm_background, boost=False):
        """Wrap the depth-removal settings in a LeReS preprocessor object."""
        return (LeReS_DepthMap_Preprocessor_wrapper(rm_nearest, rm_background, boost), )
class MiDaS_DepthMap_Preprocessor_Provider_for_SEGS:
    """Node that provides a MiDaS depth-map SEGS preprocessor object."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "a": ("FLOAT", {"default": np.pi * 2.0, "min": 0.0, "max": np.pi * 5.0, "step": 0.05}),
                "bg_threshold": ("FLOAT", {"default": 0.1, "min": 0, "max": 1, "step": 0.05}),
            }
        }

    def doit(self, a, bg_threshold):
        """Wrap the MiDaS settings in a depth-map preprocessor object."""
        return (MiDaS_DepthMap_Preprocessor_wrapper(a, bg_threshold), )
class Zoe_DepthMap_Preprocessor_Provider_for_SEGS:
    """Node that provides a ZoeDepth SEGS preprocessor object (no settings)."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {}}

    def doit(self):
        """Return a parameterless ZoeDepth preprocessor object."""
        return (Zoe_DepthMap_Preprocessor_wrapper(), )
class Canny_Preprocessor_Provider_for_SEGS:
    """Node that provides a Canny edge-detection SEGS preprocessor object."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "low_threshold": ("FLOAT", {"default": 0.4, "min": 0.01, "max": 0.99, "step": 0.01}),
                "high_threshold": ("FLOAT", {"default": 0.8, "min": 0.01, "max": 0.99, "step": 0.01}),
            }
        }

    def doit(self, low_threshold, high_threshold):
        """Wrap the Canny thresholds in a preprocessor object."""
        return (Canny_Preprocessor_wrapper(low_threshold, high_threshold), )
class HEDPreprocessor_Provider_for_SEGS:
    """Node that provides a HED edge-detection SEGS preprocessor object."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "safe": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"})
            }
        }

    def doit(self, safe):
        """Wrap the safe-mode flag in a HED preprocessor object."""
        return (HED_Preprocessor_wrapper(safe, "HEDPreprocessor"), )
class FakeScribblePreprocessor_Provider_for_SEGS(HEDPreprocessor_Provider_for_SEGS):
    """Same inputs as the HED provider, but backed by 'FakeScribblePreprocessor'."""

    def doit(self, safe):
        """Wrap the safe-mode flag in a fake-scribble preprocessor object."""
        return (HED_Preprocessor_wrapper(safe, "FakeScribblePreprocessor"), )
class MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS:
    """Node that provides a MediaPipe FaceMesh SEGS preprocessor object."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "max_faces": ("INT", {"default": 10, "min": 1, "max": 50, "step": 1}),
                "min_confidence": ("FLOAT", {"default": 0.5, "min": 0.01, "max": 1.0, "step": 0.01}),
                "resolution_upscale_by": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 100, "step": 0.1}),
            }
        }

    def doit(self, max_faces, min_confidence, resolution_upscale_by):
        """Wrap the FaceMesh settings in a preprocessor object."""
        return (MediaPipe_FaceMesh_Preprocessor_wrapper(max_faces, min_confidence, upscale_factor=resolution_upscale_by), )
class MediaPipeFaceMeshDetectorProvider:
    """Node providing BBOX and SEGM detectors backed by MediaPipe FaceMesh parts."""

    RETURN_TYPES = ("BBOX_DETECTOR", "SEGM_DETECTOR")
    FUNCTION = "doit"
    CATEGORY = "InspirePack/Detector"

    @classmethod
    def INPUT_TYPES(cls):
        on = ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable"})
        off = ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"})
        required = {
            "max_faces": ("INT", {"default": 10, "min": 1, "max": 50, "step": 1}),
            "face": on,
        }
        # All the fine-grained parts default to disabled.
        for part in ("mouth", "left_eyebrow", "left_eye", "left_pupil", "right_eyebrow", "right_eye", "right_pupil"):
            required[part] = off
        return {"required": required}

    def doit(self, max_faces, face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil):
        """Build a bbox-style and a segmentation-style FaceMesh detector pair."""
        parts = (face, mouth, left_eyebrow, left_eye, left_pupil, right_eyebrow, right_eye, right_pupil, max_faces)
        return (MediaPipeFaceMeshDetector(*parts, is_segm=False),
                MediaPipeFaceMeshDetector(*parts, is_segm=True))
class AnimeLineArt_Preprocessor_Provider_for_SEGS:
    """Node that provides an anime line-art SEGS preprocessor object (no settings)."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {}}

    def doit(self):
        """Return a parameterless anime line-art preprocessor object."""
        return (AnimeLineArt_Preprocessor_wrapper(), )
class Manga2Anime_LineArt_Preprocessor_Provider_for_SEGS:
    """Node that provides a manga-to-anime line-art SEGS preprocessor object (no settings)."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {}}

    def doit(self):
        """Return a parameterless manga-to-anime line-art preprocessor object."""
        return (Manga2Anime_LineArt_Preprocessor_wrapper(), )
class LineArt_Preprocessor_Provider_for_SEGS:
    """Node that provides a line-art SEGS preprocessor object."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
            "coarse": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}),
        }}

    def doit(self, coarse):
        """Wrap the coarse flag in a line-art preprocessor object."""
        return (LineArt_Preprocessor_wrapper(coarse), )
class Color_Preprocessor_Provider_for_SEGS:
    """Node that provides a color-palette SEGS preprocessor object (no settings)."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {}}

    def doit(self):
        """Return a parameterless color preprocessor object."""
        return (Color_Preprocessor_wrapper(), )
class InpaintPreprocessor_Provider_for_SEGS:
    """Node that provides an inpaint SEGS preprocessor object."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {},
            "optional": {
                "black_pixel_for_xinsir_cn": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}),
            }
        }

    def doit(self, black_pixel_for_xinsir_cn=False):
        """Wrap the xinsir black-pixel flag in an inpaint preprocessor object."""
        return (InpaintPreprocessor_wrapper(black_pixel_for_xinsir_cn), )
class TilePreprocessor_Provider_for_SEGS:
    """Node that provides a tile SEGS preprocessor object."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {'pyrUp_iters': ("INT", {"default": 3, "min": 1, "max": 10, "step": 1})}}

    def doit(self, pyrUp_iters):
        """Wrap the pyrUp iteration count in a tile preprocessor object."""
        return (TilePreprocessor_wrapper(pyrUp_iters), )
class MeshGraphormerDepthMapPreprocessorProvider_for_SEGS:
    """Node that provides a MeshGraphormer hand-depth SEGS preprocessor object (no settings)."""

    RETURN_TYPES = ("SEGS_PREPROCESSOR",)
    FUNCTION = "doit"
    CATEGORY = "InspirePack/SEGS/ControlNet"

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {}}

    def doit(self):
        """Return a parameterless MeshGraphormer depth preprocessor object."""
        return (MeshGraphormerDepthMapPreprocessorProvider_wrapper(), )
# Registry read by ComfyUI at import time: maps each node identifier (as it is
# stored in workflow JSON) to the class implementing that node. Keys must stay
# stable across releases or saved workflows will fail to load.
NODE_CLASS_MAPPINGS = {
    "OpenPose_Preprocessor_Provider_for_SEGS //Inspire": OpenPose_Preprocessor_Provider_for_SEGS,
    "DWPreprocessor_Provider_for_SEGS //Inspire": DWPreprocessor_Provider_for_SEGS,
    "MiDaS_DepthMap_Preprocessor_Provider_for_SEGS //Inspire": MiDaS_DepthMap_Preprocessor_Provider_for_SEGS,
    # NOTE: key spells "LeRes" while the class spells "LeReS" — kept as-is for
    # backward compatibility with existing workflows.
    "LeRes_DepthMap_Preprocessor_Provider_for_SEGS //Inspire": LeReS_DepthMap_Preprocessor_Provider_for_SEGS,
    # "Zoe_DepthMap_Preprocessor_Provider_for_SEGS //Inspire": Zoe_DepthMap_Preprocessor_Provider_for_SEGS,
    "Canny_Preprocessor_Provider_for_SEGS //Inspire": Canny_Preprocessor_Provider_for_SEGS,
    "MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS //Inspire": MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS,
    "HEDPreprocessor_Provider_for_SEGS //Inspire": HEDPreprocessor_Provider_for_SEGS,
    "FakeScribblePreprocessor_Provider_for_SEGS //Inspire": FakeScribblePreprocessor_Provider_for_SEGS,
    "AnimeLineArt_Preprocessor_Provider_for_SEGS //Inspire": AnimeLineArt_Preprocessor_Provider_for_SEGS,
    "Manga2Anime_LineArt_Preprocessor_Provider_for_SEGS //Inspire": Manga2Anime_LineArt_Preprocessor_Provider_for_SEGS,
    "LineArt_Preprocessor_Provider_for_SEGS //Inspire": LineArt_Preprocessor_Provider_for_SEGS,
    "Color_Preprocessor_Provider_for_SEGS //Inspire": Color_Preprocessor_Provider_for_SEGS,
    "InpaintPreprocessor_Provider_for_SEGS //Inspire": InpaintPreprocessor_Provider_for_SEGS,
    "TilePreprocessor_Provider_for_SEGS //Inspire": TilePreprocessor_Provider_for_SEGS,
    "MeshGraphormerDepthMapPreprocessorProvider_for_SEGS //Inspire": MeshGraphormerDepthMapPreprocessorProvider_for_SEGS,
    "MediaPipeFaceMeshDetectorProvider //Inspire": MediaPipeFaceMeshDetectorProvider,
}
# Human-readable display names shown in the ComfyUI node menu. Keys mirror
# NODE_CLASS_MAPPINGS exactly; only the values (labels) are safe to reword.
NODE_DISPLAY_NAME_MAPPINGS = {
    "OpenPose_Preprocessor_Provider_for_SEGS //Inspire": "OpenPose Preprocessor Provider (SEGS)",
    "DWPreprocessor_Provider_for_SEGS //Inspire": "DWPreprocessor Provider (SEGS)",
    "MiDaS_DepthMap_Preprocessor_Provider_for_SEGS //Inspire": "MiDaS Depth Map Preprocessor Provider (SEGS)",
    "LeRes_DepthMap_Preprocessor_Provider_for_SEGS //Inspire": "LeReS Depth Map Preprocessor Provider (SEGS)",
    # "Zoe_DepthMap_Preprocessor_Provider_for_SEGS //Inspire": "Zoe Depth Map Preprocessor Provider (SEGS)",
    "Canny_Preprocessor_Provider_for_SEGS //Inspire": "Canny Preprocessor Provider (SEGS)",
    "MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS //Inspire": "MediaPipe FaceMesh Preprocessor Provider (SEGS)",
    "HEDPreprocessor_Provider_for_SEGS //Inspire": "HED Preprocessor Provider (SEGS)",
    "FakeScribblePreprocessor_Provider_for_SEGS //Inspire": "Fake Scribble Preprocessor Provider (SEGS)",
    "AnimeLineArt_Preprocessor_Provider_for_SEGS //Inspire": "AnimeLineArt Preprocessor Provider (SEGS)",
    "Manga2Anime_LineArt_Preprocessor_Provider_for_SEGS //Inspire": "Manga2Anime LineArt Preprocessor Provider (SEGS)",
    "LineArt_Preprocessor_Provider_for_SEGS //Inspire": "LineArt Preprocessor Provider (SEGS)",
    "Color_Preprocessor_Provider_for_SEGS //Inspire": "Color Preprocessor Provider (SEGS)",
    "InpaintPreprocessor_Provider_for_SEGS //Inspire": "Inpaint Preprocessor Provider (SEGS)",
    "TilePreprocessor_Provider_for_SEGS //Inspire": "Tile Preprocessor Provider (SEGS)",
    "MeshGraphormerDepthMapPreprocessorProvider_for_SEGS //Inspire": "MeshGraphormer Depth Map Preprocessor Provider (SEGS)",
    "MediaPipeFaceMeshDetectorProvider //Inspire": "MediaPipeFaceMesh Detector Provider",
}

View File

@@ -0,0 +1,41 @@
import colorsys
def hex_to_hsv(hex_color):
    """Convert an RGB hex color string to an HSV triple.

    Accepts 6-digit (``"#rrggbb"``) and CSS 3-digit shorthand (``"#rgb"``)
    colors, with or without the leading ``#``.

    Args:
        hex_color: Hex color string, e.g. ``"#1a2b3c"`` or ``"#fff"``.

    Returns:
        Tuple ``(hue, saturation, value)`` where ``hue`` is in degrees
        [0, 360) and ``saturation``/``value`` are in [0, 1].

    Raises:
        ValueError: If the string is not a valid 3- or 6-digit hex color.
    """
    hex_color = hex_color.lstrip('#')
    if len(hex_color) == 3:
        # Expand shorthand "rgb" to "rrggbb" per CSS convention.
        hex_color = ''.join(c * 2 for c in hex_color)
    if len(hex_color) != 6:
        raise ValueError(f"invalid hex color: {hex_color!r}")
    r, g, b = (int(hex_color[i:i + 2], 16) / 255.0 for i in (0, 2, 4))
    h, s, v = colorsys.rgb_to_hsv(r, g, b)
    # colorsys reports hue in [0, 1); scale to degrees for the node output.
    return h * 360, s, v
class RGB_HexToHSV:
    """Inspire utility node: split an RGB hex string into hue/saturation/value floats."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "rgb_hex": ("STRING", {"defaultInput": True}),
            },
        }

    RETURN_TYPES = ("FLOAT", "FLOAT", "FLOAT")
    RETURN_NAMES = ("hue", "saturation", "value")
    FUNCTION = "doit"
    CATEGORY = "InspirePack/Util"

    def doit(self, rgb_hex):
        # Delegate conversion to the module-level helper.
        hue, saturation, value = hex_to_hsv(rgb_hex)
        return hue, saturation, value
# Registry read by ComfyUI at import time: maps the workflow-JSON node
# identifier to its implementing class. The key must stay stable.
NODE_CLASS_MAPPINGS = {
    "RGB_HexToHSV //Inspire": RGB_HexToHSV,
}
# Human-readable menu label for the node; key mirrors NODE_CLASS_MAPPINGS.
NODE_DISPLAY_NAME_MAPPINGS = {
    "RGB_HexToHSV //Inspire": "RGB Hex To HSV (Inspire)",
}