Add custom nodes, Civitai loras (LFS), and vast.ai setup script
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled
Includes 30 custom nodes committed directly, 7 Civitai-exclusive loras stored via Git LFS, and a setup script that installs all dependencies and downloads HuggingFace-hosted models on vast.ai. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,12 @@
|
||||
import sys
|
||||
import subprocess
|
||||
|
||||
|
||||
def ensure_onnx_package():
    """Install `onnxruntime` via pip if it is not already importable.

    Best-effort bootstrap: if the import succeeds nothing happens; otherwise
    pip is invoked through the current interpreter so the package lands in
    the environment that is actually running.

    Raises:
        subprocess.CalledProcessError: if the pip install command fails.
    """
    try:
        import onnxruntime  # noqa: F401
    except Exception:
        if "python_embeded" in sys.executable or "python_embedded" in sys.executable:
            # Portable/embedded Python builds: '-s' keeps the user
            # site-packages directory out of sys.path so the install targets
            # the embedded environment only.
            subprocess.check_call([sys.executable, '-s', '-m', 'pip', 'install', 'onnxruntime'])
        else:
            # Regular installs: no '-s', so user site-packages stay usable.
            # (Previously both branches passed '-s', making the branch dead.)
            subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'onnxruntime'])
|
||||
@@ -0,0 +1,200 @@
|
||||
from nodes import MAX_RESOLUTION
|
||||
import impact.core as core
|
||||
from impact.core import SEG
|
||||
from impact.segs_nodes import SEGSPaste
|
||||
import comfy
|
||||
from impact import utils
|
||||
import torch
|
||||
import nodes
|
||||
import logging
|
||||
|
||||
# Hard requirement check: differential diffusion support must exist in the
# host ComfyUI. Its absence means ComfyUI is too old for this pack, so we
# log a loud banner and abort module import entirely.
try:
    from comfy_extras import nodes_differential_diffusion
except Exception:
    logging.warning("\n#############################################\n[Impact Pack] ComfyUI is an outdated version.\n#############################################\n")
    raise Exception("[Impact Pack] ComfyUI is an outdated version.")
|
||||
|
||||
|
||||
class SEGSDetailerForAnimateDiff:
    """ComfyUI node: re-detail each SEGS region across a batch of video frames.

    Unlike the single-image detailers, the masks inside SEGS here act as
    batch masks spanning multiple frames (e.g. AnimateDiff output).
    """

    @classmethod
    def INPUT_TYPES(cls):
        # Standard ComfyUI node-input declaration.
        return {"required": {
                    "image_frames": ("IMAGE", ),
                    "segs": ("SEGS", ),
                    "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                    "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}),
                    "max_size": ("FLOAT", {"default": 768, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                    "scheduler": (core.get_schedulers(),),
                    "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}),
                    "basic_pipe": ("BASIC_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the basic_pipe, the inference stage is skipped."}),
                    "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}),
                    },
                "optional": {
                    "refiner_basic_pipe_opt": ("BASIC_PIPE",),
                    "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}),
                    "scheduler_func_opt": ("SCHEDULER_FUNC",),
                    }
                }

    RETURN_TYPES = ("SEGS", "IMAGE")
    RETURN_NAMES = ("segs", "cnet_images")
    # Second output (cnet_images) is emitted as a list of images.
    OUTPUT_IS_LIST = (False, True)

    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    DESCRIPTION = "This node enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.\nThis node is applied specifically to SEGS rather than the entire image. To apply it to the entire image, use the 'SEGS Paste' node.\nAs a specialized detailer node for improving video details, such as in AnimateDiff, this node can handle cases where the masks contained in SEGS serve as batch masks spanning multiple frames."

    @staticmethod
    def do_detail(image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler,
                  denoise, basic_pipe, refiner_ratio=None, refiner_basic_pipe_opt=None, noise_mask_feather=0, scheduler_func_opt=None):
        """Enhance every seg over the whole frame batch.

        Returns a tuple ((segs_header, new_segs), cnet_image_list) where each
        new SEG carries the enhanced, multi-frame cropped image as numpy.
        """

        model, clip, vae, positive, negative = basic_pipe
        if refiner_basic_pipe_opt is None:
            refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None
        else:
            # refiner pipe layout: (model, clip, vae, positive, negative); vae is unused here.
            refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt

        # Rescale SEGS coordinates to match the actual frame tensor size.
        segs = core.segs_scale_match(segs, image_frames.shape)

        new_segs = []
        cnet_image_list = []

        # "DUMMY" model (from ImpactDummyInput) skips inference entirely; a real
        # model with feathering enabled gets differential diffusion patched in
        # unless a denoise_mask_function is already installed.
        if not (isinstance(model, str) and model == "DUMMY") and noise_mask_feather > 0 and 'denoise_mask_function' not in model.model_options:
            model = nodes_differential_diffusion.DifferentialDiffusion().execute(model)[0]

        for seg in segs[1]:
            cropped_image_frames = None

            # Build a (frames, H, W, C) stack of this seg's crop from every frame.
            # NOTE(review): when seg.cropped_image is already set, the SAME
            # cached crop is appended once per frame instead of cropping each
            # frame individually — confirm this is intended.
            for image in image_frames:
                image = image.unsqueeze(0)
                cropped_image = seg.cropped_image if seg.cropped_image is not None else utils.crop_tensor4(image, seg.crop_region)
                cropped_image = utils.to_tensor(cropped_image)
                if cropped_image_frames is None:
                    cropped_image_frames = cropped_image
                else:
                    cropped_image_frames = torch.concat((cropped_image_frames, cropped_image), dim=0)

            cropped_image_frames = cropped_image_frames.cpu().numpy()

            # It is assumed that AnimateDiff does not support conditioning masks based on test results, but it will be added for future consideration.
            cropped_positive = [
                [condition, {
                    k: core.crop_condition_mask(v, cropped_image_frames, seg.crop_region) if k == "mask" else v
                    for k, v in details.items()
                }]
                for condition, details in positive
            ]

            cropped_negative = [
                [condition, {
                    k: core.crop_condition_mask(v, cropped_image_frames, seg.crop_region) if k == "mask" else v
                    for k, v in details.items()
                }]
                for condition, details in negative
            ]

            if not (isinstance(model, str) and model == "DUMMY"):
                enhanced_image_tensor, cnet_images = core.enhance_detail_for_animatediff(cropped_image_frames, model, clip, vae, guide_size, guide_size_for, max_size,
                                                                                         seg.bbox, seed, steps, cfg, sampler_name, scheduler,
                                                                                         cropped_positive, cropped_negative, denoise, seg.cropped_mask,
                                                                                         refiner_ratio=refiner_ratio, refiner_model=refiner_model,
                                                                                         refiner_clip=refiner_clip, refiner_positive=refiner_positive,
                                                                                         refiner_negative=refiner_negative, control_net_wrapper=seg.control_net_wrapper,
                                                                                         noise_mask_feather=noise_mask_feather, scheduler_func=scheduler_func_opt)
            else:
                # Dummy model: pass the crop through untouched, no controlnet previews.
                enhanced_image_tensor = cropped_image_frames
                cnet_images = None

            if cnet_images is not None:
                cnet_image_list.extend(cnet_images)

            # Fall back to the original crop when enhancement produced nothing.
            if enhanced_image_tensor is None:
                new_cropped_image = cropped_image_frames
            else:
                new_cropped_image = enhanced_image_tensor.cpu().numpy()

            # Rebuild the SEG with the enhanced crop; control_net_wrapper is dropped (None).
            new_seg = SEG(new_cropped_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, None)
            new_segs.append(new_seg)

        return (segs[0], new_segs), cnet_image_list

    def doit(self, image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler,
             denoise, basic_pipe, refiner_ratio=None, refiner_basic_pipe_opt=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None):
        """Node entry point; delegates to do_detail and guarantees a non-empty
        cnet_images list (ComfyUI list-outputs must not be empty).

        NOTE(review): `inpaint_model` is accepted but never forwarded — confirm
        whether it should reach do_detail.
        """

        segs, cnet_images = SEGSDetailerForAnimateDiff.do_detail(image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name,
                                                                 scheduler, denoise, basic_pipe, refiner_ratio, refiner_basic_pipe_opt,
                                                                 noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt)

        # Placeholder image so the list output is never empty.
        if len(cnet_images) == 0:
            cnet_images = [utils.empty_pil_tensor()]

        return (segs, cnet_images)
|
||||
|
||||
|
||||
class DetailerForEachPipeForAnimateDiff:
    """ComfyUI node: detail each SEGS region on a video frame batch and paste
    the enhanced crops back into the frames one seg at a time."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "image_frames": ("IMAGE", ),
                    "segs": ("SEGS", ),
                    "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}),
                    "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                    "scheduler": (core.get_schedulers(),),
                    "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}),
                    "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
                    "basic_pipe": ("BASIC_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the basic_pipe, the inference stage is skipped."}),
                    "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}),
                    },
                "optional": {
                    "detailer_hook": ("DETAILER_HOOK",),
                    "refiner_basic_pipe_opt": ("BASIC_PIPE",),
                    "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}),
                    "scheduler_func_opt": ("SCHEDULER_FUNC",),
                    }
                }

    RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE")
    RETURN_NAMES = ("image", "segs", "basic_pipe", "cnet_images")
    # Only the cnet_images output is a list.
    OUTPUT_IS_LIST = (False, False, False, True)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    DESCRIPTION = "This node enhances details by inpainting each region within the detected area bundle (SEGS) after enlarging them based on the guide size.\nThis node is a specialized detailer node for enhancing video details, such as in AnimateDiff. It can handle cases where the masks contained in SEGS serve as batch masks spanning multiple frames."

    @staticmethod
    def doit(image_frames, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler,
             denoise, feather, basic_pipe, refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None,
             noise_mask_feather=0, scheduler_func_opt=None):
        """Detail segs one at a time and paste each result back into the frames.

        Processing per-seg (rather than all at once) means each paste feeds
        into the frames used for the next seg's crop.
        """

        enhanced_segs = []
        cnet_image_list = []

        for sub_seg in segs[1]:
            # Wrap a single seg in the (header, seg_list) SEGS shape.
            single_seg = segs[0], [sub_seg]
            enhanced_seg, cnet_images = SEGSDetailerForAnimateDiff().do_detail(image_frames, single_seg, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler,
                                                                               denoise, basic_pipe, refiner_ratio, refiner_basic_pipe_opt, noise_mask_feather, scheduler_func_opt=scheduler_func_opt)

            # Blend the enhanced region back into the full frames with feathering.
            image_frames = SEGSPaste.doit(image_frames, enhanced_seg, feather, alpha=255)[0]

            if cnet_images is not None:
                cnet_image_list.extend(cnet_images)

            # Allow an external hook to post-process after each paste.
            if detailer_hook is not None:
                image_frames = detailer_hook.post_paste(image_frames)

            enhanced_segs += enhanced_seg[1]

        new_segs = segs[0], enhanced_segs
        return image_frames, new_segs, basic_pipe, cnet_image_list
|
||||
490
custom_nodes/ComfyUI-Impact-Pack/modules/impact/bridge_nodes.py
Normal file
490
custom_nodes/ComfyUI-Impact-Pack/modules/impact/bridge_nodes.py
Normal file
@@ -0,0 +1,490 @@
|
||||
import os
|
||||
from PIL import ImageOps
|
||||
import logging
|
||||
import folder_paths
|
||||
import torch
|
||||
import nodes
|
||||
from PIL import Image
|
||||
import numpy as np
|
||||
from impact import utils
|
||||
|
||||
# NOTE: this should not be `from . import core`.
|
||||
# I don't know why but... 'from .' and 'from impact' refer to different core modules.
|
||||
# This separates global variables of the core module and breaks the preview bridge.
|
||||
from impact import core
|
||||
# <--
|
||||
import random
|
||||
|
||||
|
||||
class PreviewBridge:
    """ComfyUI node: shows the input images and lets the user paint a mask on
    them (via the mask editor), passing both downstream.

    State is kept in module-level maps on `core` (preview_bridge_cache,
    preview_bridge_image_id_map, preview_bridge_last_mask_cache, ...) keyed by
    the node's unique_id.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "images": ("IMAGE",),
                    # "image" is the editor-side file id (string), not pixels.
                    "image": ("STRING", {"default": ""}),
                    },
                "optional": {
                    "block": ("BOOLEAN", {"default": False, "label_on": "if_empty_mask", "label_off": "never", "tooltip": "is_empty_mask: If the mask is empty, the execution is stopped.\nnever: The execution is never stopped."}),
                    "restore_mask": (["never", "always", "if_same_size"], {"tooltip": "if_same_size: If the changed input image is the same size as the previous image, restore using the last saved mask\nalways: Whenever the input image changes, always restore using the last saved mask\nnever: Do not restore the mask.\n`restore_mask` has higher priority than `block`"}),
                },
                # NOTE(review): unlike PreviewBridgeLatent, "prompt" is not
                # declared hidden here, so doit's prompt arg stays None — verify.
                "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ("IMAGE", "MASK", )

    FUNCTION = "doit"

    OUTPUT_NODE = True

    CATEGORY = "ImpactPack/Util"

    DESCRIPTION = "This is a feature that allows you to edit and send a Mask over a image.\nIf the block is set to 'is_empty_mask', the execution is stopped when the mask is empty."

    def __init__(self):
        super().__init__()
        # Previews are written into ComfyUI's temp directory.
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"
        self.prev_hash = None

    @staticmethod
    def load_image(pb_id):
        """Load the previously registered preview image (and its alpha-derived
        mask) for `pb_id`; fall back to an empty image/zero mask on any miss.

        Returns (image_tensor, mask_tensor[1,H,W], ui_item_dict).
        """
        is_fail = False
        if pb_id not in core.preview_bridge_image_id_map:
            is_fail = True

        if not is_fail:
            image_path, ui_item = core.preview_bridge_image_id_map[pb_id]
            if not os.path.isfile(image_path):
                is_fail = True

        if not is_fail:
            i = Image.open(image_path)
            i = ImageOps.exif_transpose(i)
            image = i.convert("RGB")
            image = np.array(image).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]

            if 'A' in i.getbands():
                # Alpha channel encodes the painted mask; mask editor stores
                # painted areas as transparent, hence the inversion.
                mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
                mask = 1. - torch.from_numpy(mask)
            else:
                mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
        else:
            image = utils.empty_pil_tensor()
            mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
            ui_item = {
                "filename": 'empty.png',
                "subfolder": '',
                "type": 'temp'
            }

        return image, mask.unsqueeze(0), ui_item

    @staticmethod
    def register_clipspace_image(clipspace_path, node_id):
        """Register a clipspace image file in the preview bridge system.

        This handles the case where ComfyUI's mask editor creates clipspace files
        that need to be integrated with the preview bridge system.

        Returns True if a matching file was found and registered, else False.
        """
        # Remove [input] suffix if present
        clean_path = clipspace_path.replace(" [input]", "").replace("[input]", "")

        # Try to find the actual clipspace file
        input_dir = folder_paths.get_input_directory()
        potential_paths = [
            clean_path,
            os.path.join(input_dir, clean_path),
            os.path.join(input_dir, "clipspace", os.path.basename(clean_path)),
            os.path.abspath(clean_path),
        ]

        actual_file = None
        for path in potential_paths:
            if os.path.isfile(path):
                actual_file = path
                break

        if not actual_file:
            return False

        # Create ui_item for the clipspace file
        ui_item = {
            'filename': os.path.basename(actual_file),
            'subfolder': 'clipspace',
            'type': 'input'
        }

        # Register it using the preview bridge system
        core.set_previewbridge_image(node_id, actual_file, ui_item)
        # Also register under the original clipspace path for compatibility
        core.preview_bridge_image_id_map[clipspace_path] = (actual_file, ui_item)

        return True

    def doit(self, images, image, unique_id, block=False, restore_mask="never", prompt=None, extra_pnginfo=None):
        """Main node logic: decide whether to reuse the cached preview (and its
        painted mask) or regenerate the preview from the incoming images.

        Returns a ComfyUI result dict: {"ui": {"images": ...}, "result": (pixels, mask)}.
        """
        need_refresh = False
        images_changed = False

        # Check if images have changed (this determines if we start fresh).
        # Identity (`is not`) comparison is used, not equality.
        if unique_id not in core.preview_bridge_cache:
            need_refresh = True
            images_changed = True
        elif core.preview_bridge_cache[unique_id][0] is not images:
            need_refresh = True
            images_changed = True

        # If images changed, clear the mask cache to ensure fresh start behavior.
        # This restores the original behavior where new images start with empty masks
        # unless restore_mask is set to "always" or "if_same_size".
        if images_changed and restore_mask not in ["always", "if_same_size"] and unique_id in core.preview_bridge_last_mask_cache:
            del core.preview_bridge_last_mask_cache[unique_id]

        # Handle clipspace files that aren't registered in the preview bridge system.
        # This only applies when images haven't changed (same image, new mask scenario).
        if not need_refresh and image not in core.preview_bridge_image_id_map:
            # Check if this is a clipspace file that needs to be registered
            is_clipspace = image and ("clipspace" in image.lower() or "[input]" in image)
            if is_clipspace:
                if not PreviewBridge.register_clipspace_image(image, unique_id):
                    need_refresh = True
            else:
                need_refresh = True

        if not need_refresh:
            # Cache hit: reuse the saved preview file and its alpha mask.
            pixels, mask, path_item = PreviewBridge.load_image(image)
            image = [path_item]
        else:
            # For new images (images_changed=True), we want to start fresh regardless of restore_mask
            # For same image with refresh needed, respect the restore_mask setting
            # Exception: when restore_mask is "always", restore even with new images
            # Exception: when restore_mask is "if_same_size", allow restoration to check size compatibility
            if restore_mask != "never" and (not images_changed or restore_mask in ["always", "if_same_size"]):
                mask = core.preview_bridge_last_mask_cache.get(unique_id)
                if mask is None:
                    mask = None
                elif restore_mask == "if_same_size" and mask.shape[1:] != images.shape[1:3]:
                    # For if_same_size, clear mask if dimensions don't match
                    mask = None
                # For "always", keep the mask regardless of size
            else:
                mask = None

            if mask is None:
                # No restorable mask: save a plain preview and a zero mask.
                mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
                res = nodes.PreviewImage().save_images(images, filename_prefix="PreviewBridge/PB-", prompt=prompt, extra_pnginfo=extra_pnginfo)
            else:
                # Restored mask: bake it into the preview's alpha channel so
                # the editor shows the painted region.
                masked_images = utils.tensor_convert_rgba(images)
                resized_mask = utils.resize_mask(mask, (images.shape[1], images.shape[2])).unsqueeze(3)
                resized_mask = 1 - resized_mask
                utils.tensor_putalpha(masked_images, resized_mask)
                res = nodes.PreviewImage().save_images(masked_images, filename_prefix="PreviewBridge/PB-", prompt=prompt, extra_pnginfo=extra_pnginfo)

            image2 = res['ui']['images']
            pixels = images

            # Register the freshly saved preview in the bridge maps and cache.
            path = os.path.join(folder_paths.get_temp_directory(), 'PreviewBridge', image2[0]['filename'])
            core.set_previewbridge_image(unique_id, path, image2[0])
            core.preview_bridge_image_id_map[image] = (path, image2[0])
            core.preview_bridge_image_name_map[unique_id, path] = (image, image2[0])
            core.preview_bridge_cache[unique_id] = (images, image2)

            image = image2

        is_empty_mask = torch.all(mask == 0)

        # Optionally block downstream execution when the mask is empty
        # (requires the newer execution model for ExecutionBlocker support).
        if block and is_empty_mask and core.is_execution_model_version_supported():
            from comfy_execution.graph import ExecutionBlocker
            result = ExecutionBlocker(None), ExecutionBlocker(None)
        elif block and is_empty_mask:
            logging.warning("[Impact Pack] PreviewBridge: ComfyUI is outdated - blocking feature is disabled.")
            result = pixels, mask
        else:
            result = pixels, mask

        # Remember the last non-empty mask for the restore_mask feature.
        if not is_empty_mask:
            core.preview_bridge_last_mask_cache[unique_id] = mask

        return {
            "ui": {"images": image},
            "result": result,
        }
|
||||
|
||||
|
||||
def decode_latent(latent, preview_method, vae_opt=None):
    """Decode a latent into an image tensor for preview purposes.

    When `vae_opt` is supplied it is used directly. Otherwise the decode path
    is chosen from `preview_method`: a TAE decoder model, or a fast
    Latent2RGB projection whose format matches the method name (unknown
    methods log a warning and fall back to SD15 Latent2RGB).
    """
    if vae_opt is not None:
        return nodes.VAEDecode().decode(vae_opt, latent)[0]

    from comfy.cli_args import LatentPreviewMethod
    import comfy.latent_formats as latent_formats

    if preview_method.startswith("TAE"):
        # Tiny-AutoEncoder decoders, loaded as lightweight VAEs.
        tae_decoders = {
            "TAESD15": "taesd",
            "TAESDXL": "taesdxl",
            "TAESD3": "taesd3",
            "TAEF1": "taef1",
        }
        decoder_name = tae_decoders.get(preview_method)
        if decoder_name:
            vae = nodes.VAELoader().load_vae(decoder_name)[0]
            return nodes.VAEDecode().decode(vae, latent)[0]

    # Latent2RGB: map the method name to its latent format class.
    rgb_formats = {
        "Latent2RGB-SD15": latent_formats.SD15,
        "Latent2RGB-SDXL": latent_formats.SDXL,
        "Latent2RGB-SD3": latent_formats.SD3,
        "Latent2RGB-SD-X4": latent_formats.SD_X4,
        "Latent2RGB-Playground-2.5": latent_formats.SDXL_Playground_2_5,
        "Latent2RGB-SC-Prior": latent_formats.SC_Prior,
        "Latent2RGB-SC-B": latent_formats.SC_B,
        "Latent2RGB-FLUX.1": latent_formats.Flux,
        "Latent2RGB-LTXV": latent_formats.LTXV,
    }

    format_cls = rgb_formats.get(preview_method)
    if format_cls is None:
        logging.warning(f"[Impact Pack] PreviewBridgeLatent: '{preview_method}' is unsupported preview method.")
        format_cls = latent_formats.SD15
    latent_format = format_cls()
    method = LatentPreviewMethod.Latent2RGB

    previewer = core.get_previewer("cpu", latent_format=latent_format, force=True, method=method)
    samples = latent_format.process_in(latent['samples'])

    preview_pil = previewer.decode_latent_to_preview(samples)
    # Latents are 1/8th resolution; upscale the preview back to pixel space.
    upscaled_size = (preview_pil.size[0] * 8, preview_pil.size[1] * 8)
    upscaled_pil = preview_pil.resize(upscaled_size, resample=utils.LANCZOS)

    return utils.to_tensor(upscaled_pil).unsqueeze(0)
|
||||
|
||||
|
||||
class PreviewBridgeLatent:
    """ComfyUI node: previews a latent (via VAE/TAE/Latent2RGB decode) and lets
    the user paint a mask that is attached to the latent as `noise_mask`.

    Shares the module-level bridge caches on `core` with PreviewBridge.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "latent": ("LATENT",),
                    # Editor-side file id (string), not pixels.
                    "image": ("STRING", {"default": ""}),
                    "preview_method": (["Latent2RGB-FLUX.1",
                                        "Latent2RGB-SDXL", "Latent2RGB-SD15", "Latent2RGB-SD3",
                                        "Latent2RGB-SD-X4", "Latent2RGB-Playground-2.5",
                                        "Latent2RGB-SC-Prior", "Latent2RGB-SC-B",
                                        "Latent2RGB-LTXV",
                                        "TAEF1", "TAESDXL", "TAESD15", "TAESD3"],),
                    },
                "optional": {
                    "vae_opt": ("VAE", ),
                    "block": ("BOOLEAN", {"default": False, "label_on": "if_empty_mask", "label_off": "never", "tooltip": "is_empty_mask: If the mask is empty, the execution is stopped.\nnever: The execution is never stopped. Instead, it returns a white mask."}),
                    "restore_mask": (["never", "always", "if_same_size"], {"tooltip": "if_same_size: If the changed input latent is the same size as the previous latent, restore using the last saved mask\nalways: Whenever the input latent changes, always restore using the last saved mask\nnever: Do not restore the mask.\n`restore_mask` has higher priority than `block`\nIf the input latent already has a mask, do not restore mask."}),
                },
                "hidden": {"unique_id": "UNIQUE_ID", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ("LATENT", "MASK", )

    FUNCTION = "doit"

    OUTPUT_NODE = True

    CATEGORY = "ImpactPack/Util"

    DESCRIPTION = "This is a feature that allows you to edit and send a Mask over a latent image.\nIf the block is set to 'is_empty_mask', the execution is stopped when the mask is empty."

    def __init__(self):
        super().__init__()
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"
        self.prev_hash = None
        # Random suffix so concurrent nodes don't collide on preview filenames.
        # NOTE(review): the alphabet string has a typo ("...stupvxyz" — extra
        # 'p', missing 'w'); harmless for uniqueness but worth confirming.
        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5))

    @staticmethod
    def load_image(pb_id):
        """Load the registered preview for `pb_id`.

        Unlike PreviewBridge.load_image, a missing alpha channel yields
        mask=None (meaning "no painted mask") rather than a zero mask.
        Returns (image_tensor, mask_or_None, ui_item).
        """
        is_fail = False
        if pb_id not in core.preview_bridge_image_id_map:
            is_fail = True

        if not is_fail:
            image_path, ui_item = core.preview_bridge_image_id_map[pb_id]
            if not os.path.isfile(image_path):
                is_fail = True

        if not is_fail:
            i = Image.open(image_path)
            i = ImageOps.exif_transpose(i)
            image = i.convert("RGB")
            image = np.array(image).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]

            if 'A' in i.getbands():
                # Painted areas are transparent in the editor output, so invert.
                mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
                mask = 1. - torch.from_numpy(mask)
            else:
                mask = None
        else:
            image = utils.empty_pil_tensor()
            mask = None
            ui_item = {
                "filename": 'empty.png',
                "subfolder": '',
                "type": 'temp'
            }

        return image, mask, ui_item

    def doit(self, latent, image, preview_method, vae_opt=None, block=False, unique_id=None, restore_mask='never', prompt=None, extra_pnginfo=None):
        """Main node logic: validate latent/preview compatibility, then either
        reuse the cached preview + painted mask or decode a fresh preview.

        Returns {"ui": {"images": ...}, "result": (latent_with_optional_noise_mask, mask)}.
        Raises when the latent channel count does not match the chosen
        Latent2RGB/TAE method and no real VAE is provided.
        """
        latent_channels = latent['samples'].shape[1]

        # Channel counts implied by the preview method family.
        if 'SD3' in preview_method or 'SC-Prior' in preview_method or 'FLUX.1' in preview_method or 'TAEF1' == preview_method:
            preview_method_channels = 16
        elif 'LTXV' in preview_method:
            preview_method_channels = 128
        else:
            preview_method_channels = 4

        if vae_opt is None and latent_channels != preview_method_channels:
            logging.warning("[PreviewBridgeLatent] The version of latent is not compatible with preview_method.\nSD3, SD1/SD2, SDXL, SC-Prior, SC-B and FLUX.1 are not compatible with each other.")
            raise Exception("The version of latent is not compatible with preview_method.<BR>SD3, SD1/SD2, SDXL, SC-Prior, SC-B and FLUX.1 are not compatible with each other.")

        need_refresh = False
        latent_changed = False

        # Check if latent has changed. Cache entry layout:
        # (latent, preview_method, vae_opt, res_image); identity comparisons.
        if unique_id not in core.preview_bridge_cache:
            need_refresh = True
            latent_changed = True
        elif (core.preview_bridge_cache[unique_id][0] is not latent
              or (vae_opt is None and core.preview_bridge_cache[unique_id][2] is not None)
              or (vae_opt is None and core.preview_bridge_cache[unique_id][1] != preview_method)
              or (vae_opt is not None and core.preview_bridge_cache[unique_id][2] is not vae_opt)):
            need_refresh = True
            latent_changed = True

        # If latent changed, clear the mask cache to ensure fresh start behavior
        # unless restore_mask is set to "always" or "if_same_size"
        if latent_changed and restore_mask not in ["always", "if_same_size"] and unique_id in core.preview_bridge_last_mask_cache:
            del core.preview_bridge_last_mask_cache[unique_id]

        # Handle clipspace files that aren't registered in the preview bridge system
        # This only applies when latent hasn't changed (same latent, new mask scenario)
        if not need_refresh and image not in core.preview_bridge_image_id_map:
            is_clipspace = image and ("clipspace" in image.lower() or "[input]" in image)
            if is_clipspace:
                if not PreviewBridge.register_clipspace_image(image, unique_id):
                    need_refresh = True
            else:
                need_refresh = True

        if not need_refresh:
            # Cache hit: reuse saved preview; attach/detach noise_mask per the
            # painted mask found in the preview's alpha channel.
            pixels, mask, path_item = PreviewBridge.load_image(image)

            if mask is None:
                # No painted mask: full-ones mask and strip any stale noise_mask.
                mask = torch.ones(latent['samples'].shape[2:], dtype=torch.float32, device="cpu").unsqueeze(0)
                if 'noise_mask' in latent:
                    res_latent = latent.copy()
                    del res_latent['noise_mask']
                else:
                    res_latent = latent

                is_empty_mask = True
            else:
                res_latent = latent.copy()
                res_latent['noise_mask'] = mask

                # All-ones painted mask counts as "empty" for blocking purposes.
                is_empty_mask = torch.all(mask == 1)

            res_image = [path_item]
        else:
            decoded_image = decode_latent(latent, preview_method, vae_opt)

            if 'noise_mask' in latent:
                # Latent arrived with its own mask: render it into the preview's
                # alpha and save manually (not via PreviewImage).
                mask = latent['noise_mask'].squeeze(0)  # 4D mask -> 3D mask

                decoded_pil = utils.to_pil(decoded_image)

                inverted_mask = 1 - mask  # invert
                resized_mask = utils.resize_mask(inverted_mask, (decoded_image.shape[1], decoded_image.shape[2]))
                result_pil = utils.apply_mask_alpha_to_pil(decoded_pil, resized_mask)

                full_output_folder, filename, counter, _, _ = folder_paths.get_save_image_path("PreviewBridge/PBL-"+self.prefix_append, folder_paths.get_temp_directory(), result_pil.size[0], result_pil.size[1])
                # NOTE(review): literal "(unknown)" prefix looks suspicious —
                # possibly meant to use the `filename` returned above; verify.
                file = f"(unknown)_{counter}.png"
                result_pil.save(os.path.join(full_output_folder, file), compress_level=4)
                res_image = [{
                    'filename': file,
                    'subfolder': 'PreviewBridge',
                    'type': 'temp',
                }]

                is_empty_mask = False
            else:
                # For new latents (latent_changed=True), start fresh regardless of restore_mask
                # For same latent with refresh needed, respect the restore_mask setting
                # Exception: when restore_mask is "always", restore even with new latents
                # Exception: when restore_mask is "if_same_size", allow restoration to check size compatibility
                if restore_mask != "never" and (not latent_changed or restore_mask in ["always", "if_same_size"]):
                    mask = core.preview_bridge_last_mask_cache.get(unique_id)
                    if mask is None:
                        mask = None
                    elif restore_mask == "if_same_size" and mask.shape[1:] != decoded_image.shape[1:3]:
                        # For if_same_size, clear mask if dimensions don't match
                        mask = None
                    # For "always", keep the mask regardless of size
                else:
                    mask = None

                if mask is None:
                    mask = torch.ones(latent['samples'].shape[2:], dtype=torch.float32, device="cpu").unsqueeze(0)
                    res = nodes.PreviewImage().save_images(decoded_image, filename_prefix="PreviewBridge/PBL-", prompt=prompt, extra_pnginfo=extra_pnginfo)
                else:
                    # Bake the restored mask into the preview alpha channel.
                    masked_images = utils.tensor_convert_rgba(decoded_image)
                    resized_mask = utils.resize_mask(mask, (decoded_image.shape[1], decoded_image.shape[2])).unsqueeze(3)
                    resized_mask = 1 - resized_mask
                    utils.tensor_putalpha(masked_images, resized_mask)
                    res = nodes.PreviewImage().save_images(masked_images, filename_prefix="PreviewBridge/PBL-", prompt=prompt, extra_pnginfo=extra_pnginfo)

                res_image = res['ui']['images']

                is_empty_mask = torch.all(mask == 1)

            # Register the fresh preview in the bridge maps and cache.
            path = os.path.join(folder_paths.get_temp_directory(), 'PreviewBridge', res_image[0]['filename'])
            core.set_previewbridge_image(unique_id, path, res_image[0])
            core.preview_bridge_image_id_map[image] = (path, res_image[0])
            core.preview_bridge_image_name_map[unique_id, path] = (image, res_image[0])
            core.preview_bridge_cache[unique_id] = (latent, preview_method, vae_opt, res_image)

            res_latent = latent

        # Optionally block downstream execution on an empty mask.
        if block and is_empty_mask and core.is_execution_model_version_supported():
            from comfy_execution.graph import ExecutionBlocker
            result = ExecutionBlocker(None), ExecutionBlocker(None)
        elif block and is_empty_mask:
            logging.warning("[Impact Pack] PreviewBridgeLatent: ComfyUI is outdated - blocking feature is disabled.")
            result = res_latent, mask
        else:
            result = res_latent, mask

        # Remember the last non-empty mask for the restore_mask feature.
        if not is_empty_mask:
            core.preview_bridge_last_mask_cache[unique_id] = mask

        return {
            "ui": {"images": res_image},
            "result": result,
        }
|
||||
78
custom_nodes/ComfyUI-Impact-Pack/modules/impact/config.py
Normal file
78
custom_nodes/ComfyUI-Impact-Pack/modules/impact/config.py
Normal file
@@ -0,0 +1,78 @@
|
||||
import configparser
|
||||
import logging
|
||||
import os
|
||||
|
||||
# Impact Pack release version; rendered as e.g. "V8.28.2".
version_code = [8, 28, 2]
version = f"V{version_code[0]}.{version_code[1]}" + (f'.{version_code[2]}' if len(version_code) > 2 else '')

# Paths are resolved relative to this module file (modules/impact/).
my_path = os.path.dirname(__file__)
# NOTE(review): old_config_path appears to be the legacy ini location next to
# this module — it is not read in this file; confirm before removing.
old_config_path = os.path.join(my_path, "impact-pack.ini")
# Current ini location: two directories up, at the pack root.
config_path = os.path.join(my_path, "..", "..", "impact-pack.ini")
latent_letter_path = os.path.join(my_path, "..", "..", "latent.png")
|
||||
|
||||
|
||||
def write_config():
    """Persist the current settings to the ini file at ``config_path``.

    All values are written into the ``[default]`` section; non-string
    settings are stringified because configparser only stores strings.
    """
    current = get_config()
    parser = configparser.ConfigParser()
    parser['default'] = {
        'sam_editor_cpu': str(current['sam_editor_cpu']),
        'sam_editor_model': current['sam_editor_model'],
        'custom_wildcards': current['custom_wildcards'],
        'disable_gpu_opencv': current['disable_gpu_opencv'],
        'wildcard_cache_limit_mb': str(current['wildcard_cache_limit_mb']),
    }
    with open(config_path, 'w') as f:
        parser.write(f)
|
||||
|
||||
|
||||
def read_config():
    """Read impact-pack.ini and return the settings as a dict.

    Per-key fallbacks apply when an entry is missing; any unexpected
    failure (missing file, missing [default] section) falls back to the
    built-in defaults wholesale.
    """
    try:
        config = configparser.ConfigParser()
        config.read(config_path)
        default_conf = config['default']

        # Strip quotes from custom_wildcards path if present
        custom_wildcards_path = default_conf.get('custom_wildcards', '').strip('\'"')

        if not os.path.exists(custom_wildcards_path):
            logging.warning(f"[Impact Pack] custom_wildcards path not found: {custom_wildcards_path}. Using default path.")
            custom_wildcards_path = os.path.join(my_path, "..", "..", "custom_wildcards")

        default_conf['custom_wildcards'] = custom_wildcards_path

        # Parse wildcard_cache_limit_mb with default value of 50MB
        cache_limit_mb = 50
        if 'wildcard_cache_limit_mb' in default_conf:
            try:
                cache_limit_mb = float(default_conf['wildcard_cache_limit_mb'])
            except ValueError:
                logging.warning(f"[Impact Pack] Invalid wildcard_cache_limit_mb value: {default_conf['wildcard_cache_limit_mb']}. Using default: 50")
                cache_limit_mb = 50

        return {
            'sam_editor_cpu': default_conf['sam_editor_cpu'].lower() == 'true' if 'sam_editor_cpu' in default_conf else False,
            # BUGFIX: the original wrote `if 'sam_editor_model'` — a truthy
            # string literal — so a missing key raised KeyError and the broad
            # except below threw away every other parsed setting. Use a real
            # membership test so the per-key default actually applies.
            'sam_editor_model': default_conf['sam_editor_model'].lower() if 'sam_editor_model' in default_conf else 'sam_vit_b_01ec64.pth',
            'custom_wildcards': default_conf['custom_wildcards'] if 'custom_wildcards' in default_conf else os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "custom_wildcards")),
            'disable_gpu_opencv': default_conf['disable_gpu_opencv'].lower() == 'true' if 'disable_gpu_opencv' in default_conf else True,
            'wildcard_cache_limit_mb': cache_limit_mb
        }

    except Exception:
        # Deliberate best-effort: fall back to the built-in defaults on any
        # failure rather than preventing the pack from loading.
        return {
            'sam_editor_cpu': False,
            'sam_editor_model': 'sam_vit_b_01ec64.pth',
            'custom_wildcards': os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "custom_wildcards")),
            'disable_gpu_opencv': True,
            'wildcard_cache_limit_mb': 50
        }
|
||||
|
||||
|
||||
# Module-level cache so the ini file is parsed at most once per process.
cached_config = None


def get_config():
    """Return the settings dict, loading it from disk on first access."""
    global cached_config
    if cached_config is None:
        cached_config = read_config()
    return cached_config
|
||||
2417
custom_nodes/ComfyUI-Impact-Pack/modules/impact/core.py
Normal file
2417
custom_nodes/ComfyUI-Impact-Pack/modules/impact/core.py
Normal file
File diff suppressed because it is too large
Load Diff
17
custom_nodes/ComfyUI-Impact-Pack/modules/impact/defs.py
Normal file
17
custom_nodes/ComfyUI-Impact-Pack/modules/impact/defs.py
Normal file
@@ -0,0 +1,17 @@
|
||||
# Label names accepted by the detector nodes' `labels` filter.
# Groups (by the label names themselves): face/hand parts, DeepFashion2-style
# clothing categories, and the 80 COCO object classes.
detection_labels = [
    'hand', 'face', 'mouth', 'eyes', 'eyebrows', 'pupils',
    'left_eyebrow', 'left_eye', 'left_pupil', 'right_eyebrow', 'right_eye', 'right_pupil',
    'short_sleeved_shirt', 'long_sleeved_shirt', 'short_sleeved_outwear', 'long_sleeved_outwear',
    'vest', 'sling', 'shorts', 'trousers', 'skirt', 'short_sleeved_dress', 'long_sleeved_dress', 'vest_dress', 'sling_dress',
    "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat",
    "traffic light", "fire hydrant", "stop sign", "parking meter", "bench",
    "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe",
    "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard",
    "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
    "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl",
    "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza",
    "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet",
    "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven",
    "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
    "hair drier", "toothbrush"
]
|
||||
560
custom_nodes/ComfyUI-Impact-Pack/modules/impact/detectors.py
Normal file
560
custom_nodes/ComfyUI-Impact-Pack/modules/impact/detectors.py
Normal file
@@ -0,0 +1,560 @@
|
||||
import logging
|
||||
|
||||
import impact.core as core
|
||||
from nodes import MAX_RESOLUTION
|
||||
import impact.segs_nodes as segs_nodes
|
||||
import impact.utils as utils
|
||||
import torch
|
||||
from impact.core import SEG
|
||||
|
||||
# Shared tooltip text reused across the detector nodes' INPUT_TYPES below.
# The dict-valued constants are full option dicts; the str-valued ones are
# spliced into per-input option dicts as the "tooltip" entry.
SAM_MODEL_TOOLTIP = {"tooltip": "Segment Anything Model for Silhouette Detection.\nBe sure to use the SAM_MODEL loaded through the SAMLoader (Impact) node as input."}
SAM_MODEL_TOOLTIP_OPTIONAL = {"tooltip": "[OPTIONAL]\nSegment Anything Model for Silhouette Detection.\nBe sure to use the SAM_MODEL loaded through the SAMLoader (Impact) node as input.\nGiven this input, it refines the rectangular areas detected by BBOX_DETECTOR into silhouette shapes through SAM.\nsam_model_opt takes priority over segm_detector_opt."}

MASK_HINT_THRESHOLD_TOOLTIP = "When detection_hint is mask-area, the mask of SEGS is used as a point hint for SAM (Segment Anything).\nIn this case, only the areas of the mask with brightness values equal to or greater than mask_hint_threshold are used as hints."
MASK_HINT_USE_NEGATIVE_TOOLTIP = "When detecting with SAM (Segment Anything), negative hints are applied as follows:\nSmall: When the SEGS is smaller than 10 pixels in size\nOuter: Sampling the image area outside the SEGS region at regular intervals"

DILATION_TOOLTIP = "Set the value to dilate the result mask. If the value is negative, it erodes the mask."
DETECTION_HINT_TOOLTIP = {"tooltip": "It is recommended to use only center-1.\nWhen refining the mask of SEGS with the SAM (Segment Anything) model, center-1 uses only the rectangular area of SEGS and a single point at the exact center as hints.\nOther options were added during the experimental stage and do not work well."}

BBOX_EXPANSION_TOOLTIP = "When performing SAM (Segment Anything) detection within the SEGS area, the rectangular area of SEGS is expanded and used as a hint."
|
||||
|
||||
class SAMDetectorCombined:
    """Node: refine each SEG region with SAM and return one combined MASK.

    Thin wrapper around core.make_sam_mask; all detection work happens there.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "sam_model": ("SAM_MODEL", SAM_MODEL_TOOLTIP),
                    "segs": ("SEGS", {"tooltip": "This is the segment information detected by the detector.\nIt refines the Mask through the SAM (Segment Anything) detector for all areas pointed to by SEGS, and combines all Masks to return as a single Mask."}),
                    "image": ("IMAGE", {"tooltip": "It is assumed that segs contains only the information about the detected areas, and does not include the image. SAM (Segment Anything) operates by referencing this image."}),
                    "detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area",
                                        "mask-points", "mask-point-bbox", "none"], DETECTION_HINT_TOOLTIP),
                    "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1, "tooltip": DILATION_TOOLTIP}),
                    "threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Set the sensitivity threshold for the mask detected by SAM (Segment Anything). A higher value generates a more specific mask with a narrower range. For example, when pointing to a person's area, it might detect clothes, which is a narrower range, instead of the entire person."}),
                    "bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1, "tooltip": BBOX_EXPANSION_TOOLTIP}),
                    "mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": MASK_HINT_THRESHOLD_TOOLTIP}),
                    "mask_hint_use_negative": (["False", "Small", "Outter"], {"tooltip": MASK_HINT_USE_NEGATIVE_TOOLTIP})
                    }
                }

    RETURN_TYPES = ("MASK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detector"

    def doit(self, sam_model, segs, image, detection_hint, dilation,
             threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative):
        # Delegates entirely to core.make_sam_mask; returns a 1-tuple per
        # ComfyUI node convention.
        return (core.make_sam_mask(sam_model, segs, image, detection_hint, dilation,
                                   threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative), )
|
||||
|
||||
|
||||
class SAMDetectorSegmented:
    """Node: refine SEGS with SAM, returning both a combined mask and a
    batch of per-segment masks (via core.make_sam_mask_segmented)."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "sam_model": ("SAM_MODEL", SAM_MODEL_TOOLTIP),
                    "segs": ("SEGS", {"tooltip": "This is the segment information detected by the detector.\nFor the SEGS region, the masks detected by SAM (Segment Anything) are created as a unified mask and a batch of individual masks."}),
                    "image": ("IMAGE", {"tooltip": "It is assumed that segs contains only the information about the detected areas, and does not include the image. SAM (Segment Anything) operates by referencing this image."}),
                    "detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area",
                                        "mask-points", "mask-point-bbox", "none"], DETECTION_HINT_TOOLTIP),
                    "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1, "tooltip": DILATION_TOOLTIP}),
                    "threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1, "tooltip": BBOX_EXPANSION_TOOLTIP}),
                    "mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": MASK_HINT_THRESHOLD_TOOLTIP}),
                    "mask_hint_use_negative": (["False", "Small", "Outter"], {"tooltip": MASK_HINT_USE_NEGATIVE_TOOLTIP})
                    }
                }

    RETURN_TYPES = ("MASK", "MASK")
    RETURN_NAMES = ("combined_mask", "batch_masks")
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detector"

    def doit(self, sam_model, segs, image, detection_hint, dilation,
             threshold, bbox_expansion, mask_hint_threshold, mask_hint_use_negative):
        combined_mask, batch_masks = core.make_sam_mask_segmented(sam_model, segs, image, detection_hint, dilation,
                                                                  threshold, bbox_expansion, mask_hint_threshold,
                                                                  mask_hint_use_negative)
        return (combined_mask, batch_masks, )
|
||||
|
||||
|
||||
class BboxDetectorForEach:
    """Node: run a bbox detector on a single image and return SEGS,
    optionally filtered to a comma-separated list of label names."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "bbox_detector": ("BBOX_DETECTOR", ),
                    "image": ("IMAGE", ),
                    "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
                    "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}),
                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
                    "labels": ("STRING", {"multiline": True, "default": "all", "placeholder": "List the types of segments to be allowed, separated by commas"}),
                    },
                "optional": {"detailer_hook": ("DETAILER_HOOK",), }
                }

    RETURN_TYPES = ("SEGS", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detector"

    def doit(self, bbox_detector, image, threshold, dilation, crop_factor, drop_size, labels=None, detailer_hook=None):
        # Batched images are rejected: detection is defined per single image.
        if len(image) > 1:
            raise Exception('[Impact Pack] ERROR: BboxDetectorForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.')

        segs = bbox_detector.detect(image, threshold, dilation, crop_factor, drop_size, detailer_hook)

        # A non-empty labels string restricts the result to the named classes.
        if labels:
            label_list = labels.split(',')
            if label_list:
                segs, _ = segs_nodes.SEGSLabelFilter.filter(segs, label_list)

        return (segs, )
|
||||
|
||||
|
||||
class SegmDetectorForEach:
    """Node: run a segmentation detector on a single image and return SEGS,
    optionally filtered to a comma-separated list of label names."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "segm_detector": ("SEGM_DETECTOR", ),
                    "image": ("IMAGE", ),
                    "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
                    "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}),
                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
                    "labels": ("STRING", {"multiline": True, "default": "all", "placeholder": "List the types of segments to be allowed, separated by commas"}),
                    },
                "optional": {"detailer_hook": ("DETAILER_HOOK",), }
                }

    RETURN_TYPES = ("SEGS", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detector"

    def doit(self, segm_detector, image, threshold, dilation, crop_factor, drop_size, labels=None, detailer_hook=None):
        # Batched images are rejected: detection is defined per single image.
        if len(image) > 1:
            raise Exception('[Impact Pack] ERROR: SegmDetectorForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.')

        segs = segm_detector.detect(image, threshold, dilation, crop_factor, drop_size, detailer_hook)

        # A non-empty labels string restricts the result to the named classes.
        if labels:
            label_list = labels.split(',')
            if label_list:
                segs, _ = segs_nodes.SEGSLabelFilter.filter(segs, label_list)

        return (segs, )
|
||||
|
||||
|
||||
class SegmDetectorCombined:
    """Node: run a segmentation detector and return one combined MASK.

    When the detector finds nothing (returns None), an all-zero mask of the
    image's spatial size is returned instead.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "segm_detector": ("SEGM_DETECTOR", ),
                    "image": ("IMAGE", ),
                    "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
                    }
                }

    RETURN_TYPES = ("MASK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detector"

    def doit(self, segm_detector, image, threshold, dilation):
        combined = segm_detector.detect_combined(image, threshold, dilation)

        if combined is None:
            # No detections: fall back to an empty mask matching the image.
            combined = torch.zeros((image.shape[1], image.shape[2]), dtype=torch.float32, device="cpu")

        return (combined.unsqueeze(0),)
|
||||
|
||||
|
||||
class BboxDetectorCombined(SegmDetectorCombined):
    """Variant of SegmDetectorCombined driven by a bbox detector
    (same output contract; default dilation is 4)."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "bbox_detector": ("BBOX_DETECTOR", ),
                    "image": ("IMAGE", ),
                    "threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "dilation": ("INT", {"default": 4, "min": -512, "max": 512, "step": 1}),
                    }
                }

    def doit(self, bbox_detector, image, threshold, dilation):
        combined = bbox_detector.detect_combined(image, threshold, dilation)

        if combined is None:
            # No detections: fall back to an empty mask matching the image.
            combined = torch.zeros((image.shape[1], image.shape[2]), dtype=torch.float32, device="cpu")

        return (combined.unsqueeze(0),)
|
||||
|
||||
|
||||
class SimpleDetectorForEach:
    """Node: one-stop detection pipeline — bbox detect, then optionally
    refine with SAM or a segm detector, then post-dilate; returns SEGS."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "bbox_detector": ("BBOX_DETECTOR", ),
                    "image": ("IMAGE", ),

                    "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "bbox_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),

                    "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}),
                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),

                    "sub_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "sub_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
                    "sub_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}),

                    "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}),
                    },
                "optional": {
                    "post_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
                    "sam_model_opt": ("SAM_MODEL", SAM_MODEL_TOOLTIP_OPTIONAL),
                    "segm_detector_opt": ("SEGM_DETECTOR", ),
                    }
                }

    RETURN_TYPES = ("SEGS",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detector"

    @staticmethod
    def detect(bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size,
               sub_threshold, sub_dilation, sub_bbox_expansion,
               sam_mask_hint_threshold, post_dilation=0, sam_model_opt=None, segm_detector_opt=None,
               detailer_hook=None):
        """Detect SEGS on a single image, then refine.

        Refinement priority: sam_model_opt (SAM mask intersect) over
        segm_detector_opt (segm mask intersect). Returns a 1-tuple (segs,).
        """
        # Batched images are rejected: detection is defined per single image.
        if len(image) > 1:
            raise Exception('[Impact Pack] ERROR: SimpleDetectorForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.')

        if segm_detector_opt is not None and hasattr(segm_detector_opt, 'bbox_detector') and segm_detector_opt.bbox_detector == bbox_detector:
            # Better segm support for YOLO-World detector
            # (detector pair shares a model: run the segm path directly).
            segs = segm_detector_opt.detect(image, sub_threshold, sub_dilation, crop_factor, drop_size, detailer_hook=detailer_hook)
        else:
            segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, crop_factor, drop_size, detailer_hook=detailer_hook)

        if sam_model_opt is not None:
            # Refine the rectangular SEGS into silhouettes via SAM, then
            # intersect the SEGS masks with the SAM mask.
            mask = core.make_sam_mask(sam_model_opt, segs, image, "center-1", sub_dilation,
                                      sub_threshold, sub_bbox_expansion, sam_mask_hint_threshold, False)
            segs = core.segs_bitwise_and_mask(segs, mask)
        elif segm_detector_opt is not None:
            # Alternative refinement: intersect with a segm detector's mask.
            segm_segs = segm_detector_opt.detect(image, sub_threshold, sub_dilation, crop_factor, drop_size, detailer_hook=detailer_hook)
            mask = core.segs_to_combined_mask(segm_segs)
            segs = core.segs_bitwise_and_mask(segs, mask)

        # Final grow/shrink of the result masks (negative erodes).
        segs = core.dilate_segs(segs, post_dilation)

        return (segs,)

    def doit(self, bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size,
             sub_threshold, sub_dilation, sub_bbox_expansion,
             sam_mask_hint_threshold, post_dilation=0, sam_model_opt=None, segm_detector_opt=None):
        # Node entry point: delegates to the static detect() pipeline.
        return SimpleDetectorForEach.detect(bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size,
                                            sub_threshold, sub_dilation, sub_bbox_expansion,
                                            sam_mask_hint_threshold, post_dilation=post_dilation,
                                            sam_model_opt=sam_model_opt, segm_detector_opt=segm_detector_opt)
|
||||
|
||||
|
||||
class SimpleDetectorForEachPipe:
    """Node: SimpleDetectorForEach variant that pulls its detectors, SAM
    model, and detailer hook out of a DETAILER_PIPE instead of separate
    inputs."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "detailer_pipe": ("DETAILER_PIPE", ),
                    "image": ("IMAGE", ),

                    "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "bbox_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),

                    "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}),
                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),

                    "sub_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "sub_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
                    "sub_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}),

                    "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}),
                    },
                "optional": {
                    "post_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
                    }
                }

    RETURN_TYPES = ("SEGS",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detector"

    def doit(self, detailer_pipe, image, bbox_threshold, bbox_dilation, crop_factor, drop_size,
             sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold, post_dilation=0):

        # Batched images are rejected: detection is defined per single image.
        if len(image) > 1:
            raise Exception('[Impact Pack] ERROR: SimpleDetectorForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.')

        # DETAILER_PIPE is a fixed 14-tuple; only the detector/SAM/hook slots
        # are used here — the model/clip/conditioning slots are ignored.
        model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe

        return SimpleDetectorForEach.detect(bbox_detector, image, bbox_threshold, bbox_dilation, crop_factor, drop_size,
                                            sub_threshold, sub_dilation, sub_bbox_expansion,
                                            sam_mask_hint_threshold, post_dilation=post_dilation, sam_model_opt=sam_model_opt, segm_detector_opt=segm_detector_opt,
                                            detailer_hook=detailer_hook)
|
||||
|
||||
class SAM2VideoDetectorSEGS:
    """Node: track detected objects across video frames with SAM2.

    Bboxes are detected on the first frame; SAM2 then propagates per-object
    masks through all frames. If the first frame yields no detections, the
    frame order is reversed and detection is retried from the (original)
    last frame. Each returned SEG carries a mask *batch* (one mask per
    frame) cropped to the object's region.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "image_frames": ("IMAGE", ),

                    "bbox_detector": ("BBOX_DETECTOR", ),
                    "sam2_model": ("SAM_MODEL", ),

                    "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "sam2_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),

                    "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}),
                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
                    }
                }

    RETURN_TYPES = ("SEGS", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detector"

    @staticmethod
    def doit(bbox_detector, sam2_model, image_frames, bbox_threshold, sam2_threshold, crop_factor, drop_size):
        # ---- Check SAM2 model ----
        # Only core.SAM2Wrapper exposes predict_video_segs; a plain SAM model
        # would fail later with a confusing error, so reject it up front.
        if not isinstance(sam2_model, core.SAM2Wrapper):
            logging.error("[Impact Pack] To use the SAM2VideoDetectorSEGS node, a valid SAM2 model must be provided as input to `sam2_model`.")
            raise Exception("To use the SAM2VideoDetectorSEGS node, a SAM2 model must be provided as input to `sam2_model`.")

        # ---- Detect bboxes ----
        # Detection runs only on the first frame; dilation and crop_factor
        # are fixed at 0 here.
        segs = bbox_detector.detect(image_frames[0].unsqueeze(0), bbox_threshold, 0, 0, drop_size)

        # ---- If no detections, try reversed frames before giving up ----
        if len(segs[1]) == 0:
            reversed_frames = torch.flip(image_frames, dims=[0])
            segs_rev = bbox_detector.detect(reversed_frames[0].unsqueeze(0), bbox_threshold, 0, 0, drop_size)

            if len(segs_rev[1]) == 0:
                # No Bboxes when reversed -> Give up
                h, w = image_frames.shape[1:3]
                return (((h, w), []), )

            # ---- Predict masks in reversed mode ----
            segs_masks = sam2_model.predict_video_segs(reversed_frames, segs_rev)

            # Flip the predicted masks back so they match the original frame order.
            for k in segs_masks.keys():
                segs_masks[k] = torch.flip(segs_masks[k], dims=[0])
        else:
            # ---- Predict masks if BBOXES were found in forward pass----
            segs_masks = sam2_model.predict_video_segs(image_frames, segs)

        def get_whole_merged_mask(all_masks):
            # OR all per-frame masks of one object into a single binary mask
            # (via uint8 bitwise-or), used to derive the crop region.
            merged_mask = (all_masks[0] * 255).to(torch.uint8)
            for mask in all_masks[1:]:
                merged_mask |= (mask * 255).to(torch.uint8)

            merged_mask = (merged_mask / 255.0).to(torch.float32)
            merged_mask = utils.to_binary_mask(merged_mask, 0.1)[0]
            return merged_mask

        new_segs = []
        # segs_masks: dict keyed by tracked object; values are per-frame mask
        # stacks — assumed shape (frames, H, W, 1) given the squeeze(3) below.
        for k, v in segs_masks.items():
            v = v.squeeze(3)
            m = get_whole_merged_mask(v)
            seg = segs_nodes.MaskToSEGS.doit(m, False, crop_factor, False, drop_size, contour_fill=True)[0][1]

            if len(seg) == 0:
                continue

            # Only the first SEG of the merged mask is kept per object.
            seg = seg[0]

            x1, y1, x2, y2 = seg.crop_region
            masks = []
            for mask in v:
                masks.append(mask[y1:y2, x1:x2])
            cropped_mask = torch.stack(masks)
            # NOTE(review): thresholding uses `sam2_threshold * 100 - 50`,
            # which implies the raw mask values are SAM2 logits on roughly a
            # [-50, 50] scale — confirm against predict_video_segs.
            cropped_mask = (cropped_mask >= (sam2_threshold * 100 - 50)).to(torch.uint8).cpu()

            new_seg = SEG(
                seg.cropped_image,
                cropped_mask,
                seg.confidence,
                seg.crop_region,
                seg.bbox,
                seg.label,
                seg.control_net_wrapper
            )
            new_segs.append(new_seg)

        return ((segs[0], new_segs), )
|
||||
|
||||
|
||||
|
||||
class SimpleDetectorForAnimateDiff:
|
||||
@classmethod
|
||||
def INPUT_TYPES(s):
|
||||
return {"required": {
|
||||
"bbox_detector": ("BBOX_DETECTOR", ),
|
||||
"image_frames": ("IMAGE", ),
|
||||
|
||||
"bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
|
||||
"bbox_dilation": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}),
|
||||
|
||||
"crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 100, "step": 0.1}),
|
||||
"drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
|
||||
|
||||
"sub_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
|
||||
"sub_dilation": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}),
|
||||
"sub_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}),
|
||||
|
||||
"sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}),
|
||||
},
|
||||
"optional": {
|
||||
"masking_mode": (["Pivot SEGS", "Combine neighboring frames", "Don't combine"],),
|
||||
"segs_pivot": (["Combined mask", "1st frame mask"],),
|
||||
"sam_model_opt": ("SAM_MODEL", SAM_MODEL_TOOLTIP_OPTIONAL),
|
||||
"segm_detector_opt": ("SEGM_DETECTOR", ),
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("SEGS",)
|
||||
FUNCTION = "doit"
|
||||
|
||||
CATEGORY = "ImpactPack/Detector"
|
||||
|
||||
@staticmethod
|
||||
def detect(bbox_detector, image_frames, bbox_threshold, bbox_dilation, crop_factor, drop_size,
|
||||
sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold,
|
||||
masking_mode="Pivot SEGS", segs_pivot="Combined mask", sam_model_opt=None, segm_detector_opt=None):
|
||||
|
||||
h = image_frames.shape[1]
|
||||
w = image_frames.shape[2]
|
||||
|
||||
# gather segs for all frames
|
||||
segs_by_frames = []
|
||||
for image in image_frames:
|
||||
image = image.unsqueeze(0)
|
||||
segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, crop_factor, drop_size)
|
||||
|
||||
if sam_model_opt is not None:
|
||||
mask = core.make_sam_mask(sam_model_opt, segs, image, "center-1", sub_dilation,
|
||||
sub_threshold, sub_bbox_expansion, sam_mask_hint_threshold, False)
|
||||
segs = core.segs_bitwise_and_mask(segs, mask)
|
||||
elif segm_detector_opt is not None:
|
||||
segm_segs = segm_detector_opt.detect(image, sub_threshold, sub_dilation, crop_factor, drop_size)
|
||||
mask = core.segs_to_combined_mask(segm_segs)
|
||||
segs = core.segs_bitwise_and_mask(segs, mask)
|
||||
|
||||
segs_by_frames.append(segs)
|
||||
|
||||
def get_masked_frames():
|
||||
masks_by_frame = []
|
||||
for i, segs in enumerate(segs_by_frames):
|
||||
masks_in_frame = segs_nodes.SEGSToMaskList().doit(segs)[0]
|
||||
current_frame_mask = (masks_in_frame[0] * 255).to(torch.uint8)
|
||||
|
||||
for mask in masks_in_frame[1:]:
|
||||
current_frame_mask |= (mask * 255).to(torch.uint8)
|
||||
|
||||
current_frame_mask = (current_frame_mask/255.0).to(torch.float32)
|
||||
current_frame_mask = utils.to_binary_mask(current_frame_mask, 0.1)[0]
|
||||
|
||||
masks_by_frame.append(current_frame_mask)
|
||||
|
||||
return masks_by_frame
|
||||
|
||||
def get_empty_mask():
|
||||
return torch.zeros((h, w), dtype=torch.float32, device="cpu")
|
||||
|
||||
def get_neighboring_mask_at(i, masks_by_frame):
|
||||
prv = masks_by_frame[i-1] if i > 1 else get_empty_mask()
|
||||
cur = masks_by_frame[i]
|
||||
nxt = masks_by_frame[i-1] if i > 1 else get_empty_mask()
|
||||
|
||||
prv = prv if prv is not None else get_empty_mask()
|
||||
cur = cur.clone() if cur is not None else get_empty_mask()
|
||||
nxt = nxt if nxt is not None else get_empty_mask()
|
||||
|
||||
return prv, cur, nxt
|
||||
|
||||
def get_merged_neighboring_mask(masks_by_frame):
|
||||
if len(masks_by_frame) <= 1:
|
||||
return masks_by_frame
|
||||
|
||||
result = []
|
||||
for i in range(0, len(masks_by_frame)):
|
||||
prv, cur, nxt = get_neighboring_mask_at(i, masks_by_frame)
|
||||
cur = (cur * 255).to(torch.uint8)
|
||||
cur |= (prv * 255).to(torch.uint8)
|
||||
cur |= (nxt * 255).to(torch.uint8)
|
||||
cur = (cur / 255.0).to(torch.float32)
|
||||
cur = utils.to_binary_mask(cur, 0.1)[0]
|
||||
result.append(cur)
|
||||
|
||||
return result
|
||||
|
||||
def get_whole_merged_mask():
|
||||
all_masks = []
|
||||
for segs in segs_by_frames:
|
||||
all_masks += segs_nodes.SEGSToMaskList().doit(segs)[0]
|
||||
|
||||
merged_mask = (all_masks[0] * 255).to(torch.uint8)
|
||||
for mask in all_masks[1:]:
|
||||
merged_mask |= (mask * 255).to(torch.uint8)
|
||||
|
||||
merged_mask = (merged_mask / 255.0).to(torch.float32)
|
||||
merged_mask = utils.to_binary_mask(merged_mask, 0.1)[0]
|
||||
return merged_mask
|
||||
|
||||
def get_pivot_segs():
|
||||
if segs_pivot == "1st frame mask":
|
||||
return segs_by_frames[0][1]
|
||||
else:
|
||||
merged_mask = get_whole_merged_mask()
|
||||
return segs_nodes.MaskToSEGS.doit(merged_mask, False, crop_factor, False, drop_size, contour_fill=True)[0]
|
||||
|
||||
def get_segs(merged_neighboring=False):
|
||||
pivot_segs = get_pivot_segs()
|
||||
|
||||
masks_by_frame = get_masked_frames()
|
||||
if merged_neighboring:
|
||||
masks_by_frame = get_merged_neighboring_mask(masks_by_frame)
|
||||
|
||||
new_segs = []
|
||||
for seg in pivot_segs[1]:
|
||||
cropped_mask = torch.zeros(seg.cropped_mask.shape, dtype=torch.float32, device="cpu").unsqueeze(0)
|
||||
pivot_mask = torch.from_numpy(seg.cropped_mask)
|
||||
x1, y1, x2, y2 = seg.crop_region
|
||||
for mask in masks_by_frame:
|
||||
cropped_mask_at_frame = (mask[y1:y2, x1:x2] * pivot_mask).unsqueeze(0)
|
||||
cropped_mask = torch.cat((cropped_mask, cropped_mask_at_frame), dim=0)
|
||||
|
||||
if len(cropped_mask) > 1:
|
||||
cropped_mask = cropped_mask[1:]
|
||||
|
||||
new_seg = SEG(seg.cropped_image, cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
|
||||
new_segs.append(new_seg)
|
||||
|
||||
return pivot_segs[0], new_segs
|
||||
|
||||
# create result mask
|
||||
if masking_mode == "Pivot SEGS":
|
||||
return (get_pivot_segs(), )
|
||||
|
||||
elif masking_mode == "Combine neighboring frames":
|
||||
return (get_segs(merged_neighboring=True), )
|
||||
|
||||
else: # elif masking_mode == "Don't combine":
|
||||
return (get_segs(merged_neighboring=False), )
|
||||
|
||||
def doit(self, bbox_detector, image_frames, bbox_threshold, bbox_dilation, crop_factor, drop_size,
         sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold,
         masking_mode="Pivot SEGS", segs_pivot="Combined mask", sam_model_opt=None, segm_detector_opt=None):
    """Node entry point: delegate directly to the shared detection routine."""
    return SimpleDetectorForAnimateDiff.detect(
        bbox_detector, image_frames, bbox_threshold, bbox_dilation, crop_factor, drop_size,
        sub_threshold, sub_dilation, sub_bbox_expansion, sam_mask_hint_threshold,
        masking_mode, segs_pivot, sam_model_opt, segm_detector_opt)
|
||||
189
custom_nodes/ComfyUI-Impact-Pack/modules/impact/hf_nodes.py
Normal file
189
custom_nodes/ComfyUI-Impact-Pack/modules/impact/hf_nodes.py
Normal file
@@ -0,0 +1,189 @@
|
||||
import comfy
|
||||
import re
|
||||
from impact import utils
|
||||
|
||||
|
||||
# Preset HuggingFace repo ids offered in the classifier-provider dropdown.
hf_transformer_model_urls = [
    "rizvandwiki/gender-classification-2",
    "NTQAI/pedestrian_gender_recognition",
    "Leilab/gender_class",
    "ProjectPersonal/GenderClassifier",
    "crangana/trained-gender",
    "cledoux42/GenderNew_v002",
    "ivensamdh/genderage2",
]
|
||||
|
||||
|
||||
class HF_TransformersClassifierProvider:
    """Node that builds a HuggingFace `transformers` image-classification pipeline."""

    @classmethod
    def INPUT_TYPES(s):
        global hf_transformer_model_urls
        return {
            "required": {
                "preset_repo_id": (hf_transformer_model_urls + ['Manual repo id'],),
                "manual_repo_id": ("STRING", {"multiline": False}),
                "device_mode": (["AUTO", "Prefer GPU", "CPU"],),
            },
        }

    RETURN_TYPES = ("TRANSFORMERS_CLASSIFIER",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/HuggingFace"

    def doit(self, preset_repo_id, manual_repo_id, device_mode):
        # Imported lazily so the pack loads even without `transformers` installed.
        from transformers import pipeline

        repo_id = manual_repo_id if preset_repo_id == 'Manual repo id' else preset_repo_id

        # "AUTO" and "Prefer GPU" both take the torch device; only "CPU" forces cpu.
        device = "cpu" if device_mode == 'CPU' else comfy.model_management.get_torch_device()

        return (pipeline('image-classification', model=repo_id, device=device),)
|
||||
|
||||
|
||||
# Preset classification expressions offered in the SEGS_Classify dropdown.
preset_classify_expr = [
    '#Female > #Male',
    '#Female < #Male',
    'female > 0.5',
    'male > 0.5',
    'Age16to25 > 0.1',
    'Age50to69 > 0.1',
]
|
||||
|
||||
# Maps '#'-prefixed symbolic labels to the set of concrete model labels they cover.
symbolic_label_map = {
    '#Female': {'female', 'Female', 'Human Female', 'woman', 'women', 'girl'},
    '#Male': {'male', 'Male', 'Human Male', 'man', 'men', 'boy'},
}
|
||||
|
||||
def is_numeric_string(input_str):
    """Return True if *input_str* is a plain (optionally negative) decimal number."""
    return bool(re.fullmatch(r'-?\d+(\.\d+)?', input_str))
|
||||
|
||||
|
||||
# Parses "<lhs> <op> <rhs>" where op is one of >, <, >=, <=, =.
classify_expr_pattern = r'([^><= ]+)\s*(>|<|>=|<=|=)\s*([^><= ]+)'
|
||||
|
||||
|
||||
class SEGS_Classify:
    """Splits SEGS into (matched, unmatched) using a classifier and a comparison expression."""

    @classmethod
    def INPUT_TYPES(s):
        global preset_classify_expr
        return {
            "required": {
                "classifier": ("TRANSFORMERS_CLASSIFIER",),
                "segs": ("SEGS",),
                "preset_expr": (preset_classify_expr + ['Manual expr'],),
                "manual_expr": ("STRING", {"multiline": False}),
            },
            "optional": {
                "ref_image_opt": ("IMAGE", ),
            }
        }

    RETURN_TYPES = ("SEGS", "SEGS", "STRING")
    RETURN_NAMES = ("filtered_SEGS", "remained_SEGS", "detected_labels")
    OUTPUT_IS_LIST = (False, False, True)

    FUNCTION = "doit"

    CATEGORY = "ImpactPack/HuggingFace"

    @staticmethod
    def lookup_classified_label_score(score_infos, label):
        """Return the score of *label* in the classifier output, or None if absent.

        '#'-prefixed labels are expanded through `symbolic_label_map`.
        """
        global symbolic_label_map

        if label.startswith('#'):
            candidates = symbolic_label_map.get(label)
            if candidates is None:
                return None
        else:
            candidates = {label}

        for info in score_infos:
            if info['label'] in candidates:
                return info['score']

        return None

    def doit(self, classifier, segs, preset_expr, manual_expr, ref_image_opt=None):
        expr_str = manual_expr if preset_expr == 'Manual expr' else preset_expr

        match = re.match(classify_expr_pattern, expr_str)

        # Unparsable expression: nothing matches, everything remains.
        if match is None:
            return (segs[0], []), segs, []

        lhs, op, rhs = match.group(1), match.group(2), match.group(3)

        lhs_is_label = not is_numeric_string(lhs)
        rhs_is_label = not is_numeric_string(rhs)

        classified = []
        remained_SEGS = []
        provided_labels = set()

        for seg in segs[1]:
            # Prefer the seg's own crop; fall back to cropping the reference image.
            if seg.cropped_image is not None:
                cropped_image = seg.cropped_image
            elif ref_image_opt is not None:
                cropped_image = utils.crop_image(ref_image_opt, seg.crop_region)
            else:
                cropped_image = None

            if cropped_image is None:
                # No pixels to classify -> cannot match the expression.
                remained_SEGS.append(seg)
                continue

            res = classifier(utils.to_pil(cropped_image))
            classified.append((seg, res))
            provided_labels.update(info['label'] for info in res)

        filtered_SEGS = []
        for seg, res in classified:
            lhs_value = SEGS_Classify.lookup_classified_label_score(res, lhs) if lhs_is_label else lhs
            rhs_value = SEGS_Classify.lookup_classified_label_score(res, rhs) if rhs_is_label else rhs

            if lhs_value is None or rhs_value is None:
                remained_SEGS.append(seg)
                continue

            lhs_value = float(lhs_value)
            rhs_value = float(rhs_value)

            if op == '>':
                cond = lhs_value > rhs_value
            elif op == '<':
                cond = lhs_value < rhs_value
            elif op == '>=':
                cond = lhs_value >= rhs_value
            elif op == '<=':
                cond = lhs_value <= rhs_value
            else:  # '='
                cond = lhs_value == rhs_value

            (filtered_SEGS if cond else remained_SEGS).append(seg)

        return (segs[0], filtered_SEGS), (segs[0], remained_SEGS), list(provided_labels)
|
||||
128
custom_nodes/ComfyUI-Impact-Pack/modules/impact/hook_nodes.py
Normal file
128
custom_nodes/ComfyUI-Impact-Pack/modules/impact/hook_nodes.py
Normal file
@@ -0,0 +1,128 @@
|
||||
import sys
|
||||
from . import hooks
|
||||
from . import defs
|
||||
|
||||
|
||||
class SEGSOrderedFilterDetailerHookProvider:
|
||||
@classmethod
|
||||
def INPUT_TYPES(s):
|
||||
return {"required": {
|
||||
"target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2"],),
|
||||
"order": ("BOOLEAN", {"default": True, "label_on": "descending", "label_off": "ascending"}),
|
||||
"take_start": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}),
|
||||
"take_count": ("INT", {"default": 1, "min": 0, "max": sys.maxsize, "step": 1}),
|
||||
},
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("DETAILER_HOOK", )
|
||||
FUNCTION = "doit"
|
||||
|
||||
CATEGORY = "ImpactPack/Util"
|
||||
|
||||
def doit(self, target, order, take_start, take_count):
|
||||
hook = hooks.SEGSOrderedFilterDetailerHook(target, order, take_start, take_count)
|
||||
return (hook, )
|
||||
|
||||
|
||||
class SEGSRangeFilterDetailerHookProvider:
|
||||
@classmethod
|
||||
def INPUT_TYPES(s):
|
||||
return {"required": {
|
||||
"target": (["area(=w*h)", "width", "height", "x1", "y1", "x2", "y2", "length_percent"],),
|
||||
"mode": ("BOOLEAN", {"default": True, "label_on": "inside", "label_off": "outside"}),
|
||||
"min_value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}),
|
||||
"max_value": ("INT", {"default": 67108864, "min": 0, "max": sys.maxsize, "step": 1}),
|
||||
},
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("DETAILER_HOOK", )
|
||||
FUNCTION = "doit"
|
||||
|
||||
CATEGORY = "ImpactPack/Util"
|
||||
|
||||
def doit(self, target, mode, min_value, max_value):
|
||||
hook = hooks.SEGSRangeFilterDetailerHook(target, mode, min_value, max_value)
|
||||
return (hook, )
|
||||
|
||||
|
||||
class SEGSLabelFilterDetailerHookProvider:
    """Node producing a DETAILER_HOOK that keeps only SEGS whose label is allowed."""

    @classmethod
    def INPUT_TYPES(s):
        # BUG FIX: a "segs" input was declared here although `doit` does not
        # accept it, which made execution fail with an unexpected keyword
        # argument. The hook filters SEGS later, at detection time, so no
        # SEGS input belongs on this provider node.
        return {
            "required": {
                "preset": (['all'] + defs.detection_labels,),
                "labels": ("STRING", {"multiline": True, "placeholder": "List the types of segments to be allowed, separated by commas"}),
            },
        }

    RETURN_TYPES = ("DETAILER_HOOK", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, preset, labels):
        # NOTE(review): `preset` is currently unused by the hook; only the
        # free-form `labels` string is honored — confirm against upstream intent.
        hook = hooks.SEGSLabelFilterDetailerHook(labels)
        return (hook, )
|
||||
|
||||
|
||||
class PreviewDetailerHookProvider:
|
||||
@classmethod
|
||||
def INPUT_TYPES(s):
|
||||
return {
|
||||
"required": {"quality": ("INT", {"default": 95, "min": 20, "max": 100})},
|
||||
"hidden": {"unique_id": "UNIQUE_ID"},
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("DETAILER_HOOK", "UPSCALER_HOOK")
|
||||
FUNCTION = "doit"
|
||||
|
||||
CATEGORY = "ImpactPack/Util"
|
||||
|
||||
NOT_IDEMPOTENT = True
|
||||
|
||||
def doit(self, quality, unique_id):
|
||||
hook = hooks.PreviewDetailerHook(unique_id, quality)
|
||||
return hook, hook
|
||||
|
||||
|
||||
class LamaRemoverDetailerHookProvider:
|
||||
@classmethod
|
||||
def INPUT_TYPES(s):
|
||||
return {
|
||||
"required": {
|
||||
"mask_threshold":("INT", {"default": 250, "min": 0, "max": 255, "step": 1, "display": "slider"}),
|
||||
"gaussblur_radius": ("INT", {"default": 8, "min": 0, "max": 20, "step": 1, "display": "slider"}),
|
||||
"skip_sampling": ("BOOLEAN", {"default": True}),
|
||||
}
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("DETAILER_HOOK", )
|
||||
FUNCTION = "doit"
|
||||
|
||||
CATEGORY = "ImpactPack/Util"
|
||||
|
||||
def doit(self, mask_threshold, gaussblur_radius, skip_sampling):
|
||||
hook = hooks.LamaRemoverDetailerHook(mask_threshold, gaussblur_radius, skip_sampling)
|
||||
return (hook, )
|
||||
|
||||
|
||||
class BlackPatchRetryHookProvider:
|
||||
@classmethod
|
||||
def INPUT_TYPES(s):
|
||||
return {
|
||||
"required": {
|
||||
"mean_thresh": ("INT", {"default": 10, "min": 0, "max": 255}),
|
||||
"var_thresh": ("INT", {"default": 5, "min": 0, "max": 255})
|
||||
},
|
||||
}
|
||||
|
||||
RETURN_TYPES = ("DETAILER_HOOK", )
|
||||
FUNCTION = "doit"
|
||||
|
||||
CATEGORY = "ImpactPack/Util"
|
||||
|
||||
NOT_IDEMPOTENT = True
|
||||
|
||||
def doit(self, mean_thresh, var_thresh):
|
||||
hook = hooks.BlackPatchRetryHook(mean_thresh, var_thresh)
|
||||
return hook,
|
||||
595
custom_nodes/ComfyUI-Impact-Pack/modules/impact/hooks.py
Normal file
595
custom_nodes/ComfyUI-Impact-Pack/modules/impact/hooks.py
Normal file
@@ -0,0 +1,595 @@
|
||||
import copy
|
||||
import torch
|
||||
import nodes
|
||||
from impact import utils
|
||||
from . import segs_nodes
|
||||
from thirdparty import noise_nodes
|
||||
from server import PromptServer
|
||||
import asyncio
|
||||
import folder_paths
|
||||
import os
|
||||
from comfy_extras import nodes_custom_sampler
|
||||
import math
|
||||
import logging
|
||||
|
||||
|
||||
class PixelKSampleHook:
    """Base hook for pixel-space KSample pipelines.

    Every callback is a no-op passthrough; subclasses override the stages
    they care about. `set_steps` records the current/total cycle position.
    """

    cur_step = 0
    total_step = 0

    def __init__(self):
        pass

    def set_steps(self, info):
        # info is a (current_step, total_step) pair.
        self.cur_step, self.total_step = info

    def post_decode(self, pixels):
        return pixels

    def post_upscale(self, pixels, mask=None):
        return pixels

    def post_encode(self, samples):
        return samples

    def pre_decode(self, samples):
        return samples

    def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise):
        return model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise

    def post_crop_region(self, w, h, item_bbox, crop_region):
        return crop_region

    def touch_scaled_size(self, w, h):
        return w, h
|
||||
|
||||
|
||||
class PixelKSampleHookCombine(PixelKSampleHook):
    """Composes two hooks: hook1 runs first, hook2 second, at every stage."""

    hook1 = None
    hook2 = None

    def __init__(self, hook1, hook2):
        super().__init__()
        self.hook1 = hook1
        self.hook2 = hook2

    def set_steps(self, info):
        self.hook1.set_steps(info)
        self.hook2.set_steps(info)

    def pre_decode(self, samples):
        return self.hook2.pre_decode(self.hook1.pre_decode(samples))

    def post_decode(self, pixels):
        return self.hook2.post_decode(self.hook1.post_decode(pixels))

    def post_upscale(self, pixels, mask=None):
        pixels = self.hook1.post_upscale(pixels, mask)
        return self.hook2.post_upscale(pixels, mask)

    def post_encode(self, samples):
        return self.hook2.post_encode(self.hook1.post_encode(samples))

    def post_crop_region(self, w, h, item_bbox, crop_region):
        crop_region = self.hook1.post_crop_region(w, h, item_bbox, crop_region)
        return self.hook2.post_crop_region(w, h, item_bbox, crop_region)

    def touch_scaled_size(self, w, h):
        w, h = self.hook1.touch_scaled_size(w, h)
        return self.hook2.touch_scaled_size(w, h)

    def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise):
        args = self.hook1.pre_ksample(model, seed, steps, cfg, sampler_name, scheduler,
                                      positive, negative, upscaled_latent, denoise)
        return self.hook2.pre_ksample(*args)
|
||||
|
||||
|
||||
class DetailerHookCombine(PixelKSampleHookCombine):
    """Composes two DetailerHooks, chaining every detailer-specific callback."""

    def cycle_latent(self, latent):
        latent = self.hook1.cycle_latent(latent)
        latent = self.hook2.cycle_latent(latent)
        return latent

    def post_detection(self, segs):
        segs = self.hook1.post_detection(segs)
        segs = self.hook2.post_detection(segs)
        return segs

    def post_paste(self, image):
        image = self.hook1.post_paste(image)
        image = self.hook2.post_paste(image)
        return image

    def get_custom_noise(self, seed, noise, is_touched):
        # BUG FIX: the previous implementation computed `noise_1st` and
        # `noise_2nd` but returned the original `noise`, silently discarding
        # both hooks' custom noise. Chain the hooks so hook2 refines hook1's
        # output and the final noise is actually returned.
        noise, is_touched = self.hook1.get_custom_noise(seed, noise, is_touched)
        noise, is_touched = self.hook2.get_custom_noise(seed, noise, is_touched)
        return noise, is_touched

    def get_custom_sampler(self):
        # First hook that supplies a sampler wins.
        sampler = self.hook1.get_custom_sampler()
        if sampler is not None:
            return sampler
        return self.hook2.get_custom_sampler()

    def get_skip_sampling(self):
        # Sampling is skipped only if both hooks agree.
        return self.hook1.get_skip_sampling() and self.hook2.get_skip_sampling()

    def should_retry_patch(self, patch):
        # A retry request from either hook triggers a retry.
        return self.hook1.should_retry_patch(patch) or self.hook2.should_retry_patch(patch)
|
||||
|
||||
|
||||
class SimpleCfgScheduleHook(PixelKSampleHook):
    """Linearly interpolates cfg toward `target_cfg` across the cycle steps."""

    target_cfg = 0

    def __init__(self, target_cfg):
        super().__init__()
        self.target_cfg = target_cfg

    def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise):
        if self.total_step > 1:
            t = self.cur_step / (self.total_step - 1)
            # NOTE: int() truncates the interpolated cfg, matching the original behavior.
            current_cfg = int(cfg + (self.target_cfg - cfg) * t)
        else:
            current_cfg = self.target_cfg

        return model, seed, steps, current_cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise
|
||||
|
||||
|
||||
class SimpleDenoiseScheduleHook(PixelKSampleHook):
    """Linearly interpolates denoise toward `target_denoise` across the cycle steps."""

    def __init__(self, target_denoise):
        super().__init__()
        self.target_denoise = target_denoise

    def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise):
        if self.total_step > 1:
            t = self.cur_step / (self.total_step - 1)
            current_denoise = denoise + (self.target_denoise - denoise) * t
        else:
            current_denoise = self.target_denoise

        return model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, current_denoise
|
||||
|
||||
|
||||
class SimpleStepsScheduleHook(PixelKSampleHook):
    """Linearly interpolates step count toward `target_steps` across the cycle steps."""

    def __init__(self, target_steps):
        super().__init__()
        self.target_steps = target_steps

    def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise):
        if self.total_step > 1:
            t = self.cur_step / (self.total_step - 1)
            current_steps = int(steps + (self.target_steps - steps) * t)
        else:
            current_steps = self.target_steps

        return model, seed, current_steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent, denoise
|
||||
|
||||
|
||||
class DetailerHook(PixelKSampleHook):
    """Base hook for the detailer pipeline; adds detailer-specific passthroughs."""

    def cycle_latent(self, latent):
        return latent

    def post_detection(self, segs):
        return segs

    def post_paste(self, image):
        return image

    def get_custom_noise(self, seed, noise, is_touched):
        return noise, is_touched

    def get_custom_sampler(self):
        # None means: use the pipeline's default sampler.
        return None

    def get_skip_sampling(self):
        return False

    def should_retry_patch(self, patch):
        return False
|
||||
|
||||
|
||||
class CustomSamplerDetailerHookProvider(DetailerHook):
    """DetailerHook that forces the detailer to use a caller-supplied sampler."""

    def __init__(self, sampler):
        super().__init__()
        self.sampler = sampler

    def get_custom_sampler(self):
        return self.sampler
|
||||
|
||||
|
||||
# class CustomNoiseDetailerHookProvider(DetailerHook):
|
||||
# def __init__(self, noise):
|
||||
# super().__init__()
|
||||
# self.noise = noise
|
||||
#
|
||||
# def get_custom_noise(self, seed, noise, is_start):
|
||||
# return self.noise
|
||||
|
||||
|
||||
class VariationNoiseDetailerHookProvider(DetailerHook):
    """DetailerHook that blends a variation-seed noise into the base noise."""

    def __init__(self, variation_seed, variation_strength):
        super().__init__()
        self.variation_seed = variation_seed
        self.variation_strength = variation_strength

    def get_custom_noise(self, seed, noise, is_touched):
        empty_noise = {'samples': torch.zeros(noise.size())}

        # Only generate base noise from `seed` if no earlier hook touched it.
        if not is_touched:
            noise = nodes_custom_sampler.Noise_RandomNoise(seed).generate_noise(empty_noise)
        noise_2nd = nodes_custom_sampler.Noise_RandomNoise(self.variation_seed).generate_noise(empty_noise)

        s = self.variation_strength
        mixed_noise = (1 - s) * noise + s * noise_2nd

        # NOTE: Since the variance of the Gaussian noise in mixed_noise has changed, it must be corrected through scaling.
        scale_factor = math.sqrt((1 - s) ** 2 + s ** 2)
        corrected_noise = mixed_noise / scale_factor  # Scale the noise to maintain variance of 1

        return corrected_noise, True
|
||||
|
||||
|
||||
class SimpleDetailerDenoiseSchedulerHook(DetailerHook):
    """Detailer hook that linearly schedules denoise toward `target_denoise`."""

    def __init__(self, target_denoise):
        super().__init__()
        self.target_denoise = target_denoise

    def pre_ksample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise):
        if self.total_step > 1:
            t = self.cur_step / (self.total_step - 1)
            current_denoise = denoise + (self.target_denoise - denoise) * t
        else:
            # ignore hook if total cycle <= 1
            current_denoise = denoise

        return model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, current_denoise
|
||||
|
||||
|
||||
class CoreMLHook(DetailerHook):
    """Detailer hook that forces crops and latents into a fixed CoreML resolution.

    `mode` is a "WxH" string, e.g. "512x512".
    """

    def __init__(self, mode):
        super().__init__()
        resolution = mode.split('x')

        self.w = int(resolution[0])
        self.h = int(resolution[1])

        self.override_bbox_by_segm = False

    def pre_decode(self, samples):
        # CoreML batches are fixed-size; decode only the first latent.
        new_samples = copy.deepcopy(samples)
        new_samples['samples'] = samples['samples'][0].unsqueeze(0)
        return new_samples

    def post_encode(self, samples):
        # Re-duplicate the latent to restore the expected batch size of 2.
        new_samples = copy.deepcopy(samples)
        new_samples['samples'] = samples['samples'].repeat(2, 1, 1, 1)
        return new_samples

    def post_crop_region(self, w, h, item_bbox, crop_region):
        """Shrink the crop region to match the fixed target aspect ratio.

        The side with excess is reduced, distributing the reduction between
        the two margins around the detected bbox proportionally to their size.
        """
        x1, y1, x2, y2 = crop_region
        bx1, by1, bx2, by2 = item_bbox
        crop_w = x2 - x1
        crop_h = y2 - y1

        crop_ratio = crop_w / crop_h
        target_ratio = self.w / self.h
        if crop_ratio < target_ratio:
            # shrink height
            top_gap = by1 - y1
            bottom_gap = y2 - by2

            # BUG FIX: bottom_gap is 0 when the bbox touches the crop's bottom
            # edge, which previously raised ZeroDivisionError. Take all of the
            # reduction from the top in that case, preserving the bbox.
            gap_ratio = top_gap / bottom_gap if bottom_gap != 0 else 1.0

            target_height = 1 / target_ratio * crop_w
            delta_height = crop_h - target_height

            new_y1 = int(y1 + delta_height * gap_ratio)
            new_y2 = int(new_y1 + target_height)
            crop_region = x1, new_y1, x2, new_y2

        elif crop_ratio > target_ratio:
            # shrink width
            left_gap = bx1 - x1
            right_gap = x2 - bx2

            # BUG FIX: same zero-margin guard as above, for the horizontal case.
            gap_ratio = left_gap / right_gap if right_gap != 0 else 1.0

            target_width = target_ratio * crop_h
            delta_width = crop_w - target_width

            new_x1 = int(x1 + delta_width * gap_ratio)
            new_x2 = int(new_x1 + target_width)
            crop_region = new_x1, y1, new_x2, y2

        return crop_region

    def touch_scaled_size(self, w, h):
        # Always force the fixed CoreML resolution.
        return self.w, self.h
|
||||
|
||||
|
||||
# REQUIREMENTS: BlenderNeko/ComfyUI Noise
|
||||
class InjectNoiseHook(PixelKSampleHook):
|
||||
def __init__(self, source, seed, start_strength, end_strength):
|
||||
super().__init__()
|
||||
self.source = source
|
||||
self.seed = seed
|
||||
self.start_strength = start_strength
|
||||
self.end_strength = end_strength
|
||||
|
||||
def post_encode(self, samples):
|
||||
cur_step = self.cur_step
|
||||
|
||||
size = samples['samples'].shape
|
||||
seed = cur_step + self.seed + cur_step
|
||||
|
||||
if "BNK_NoisyLatentImage" in nodes.NODE_CLASS_MAPPINGS and "BNK_InjectNoise" in nodes.NODE_CLASS_MAPPINGS:
|
||||
NoisyLatentImage = nodes.NODE_CLASS_MAPPINGS["BNK_NoisyLatentImage"]
|
||||
InjectNoise = nodes.NODE_CLASS_MAPPINGS["BNK_InjectNoise"]
|
||||
else:
|
||||
utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_Noise',
|
||||
"To use 'NoiseInjectionHookProvider', 'ComfyUI Noise' extension is required.")
|
||||
raise Exception("'BNK_NoisyLatentImage', 'BNK_InjectNoise' nodes are not installed.")
|
||||
|
||||
noise = NoisyLatentImage().create_noisy_latents(self.source, seed, size[3] * 8, size[2] * 8, size[0])[0]
|
||||
|
||||
# inj noise
|
||||
mask = None
|
||||
if 'noise_mask' in samples:
|
||||
mask = samples['noise_mask']
|
||||
|
||||
strength = self.start_strength + (self.end_strength - self.start_strength) * cur_step / self.total_step
|
||||
samples = InjectNoise().inject_noise(samples, strength, noise, mask)[0]
|
||||
logging.info(f"[Impact Pack] InjectNoiseHook: strength = {strength}")
|
||||
|
||||
if mask is not None:
|
||||
samples['noise_mask'] = mask
|
||||
|
||||
return samples
|
||||
|
||||
|
||||
class UnsamplerHook(PixelKSampleHook):
    """Runs BlenderNeko's Unsampler after each encode, with `end_at_step`
    scheduled between `start_end_at_step` and `end_end_at_step`."""

    def __init__(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name,
                 scheduler, normalize, positive, negative):
        super().__init__()
        self.model = model
        self.cfg = cfg
        self.sampler_name = sampler_name
        self.steps = steps
        self.start_end_at_step = start_end_at_step
        self.end_end_at_step = end_end_at_step
        self.scheduler = scheduler
        self.normalize = normalize
        self.positive = positive
        self.negative = negative

    def post_encode(self, samples):
        cur_step = self.cur_step

        Unsampler = noise_nodes.Unsampler

        span = self.end_end_at_step - self.start_end_at_step
        end_at_step = int(self.start_end_at_step + span * cur_step / self.total_step)

        logging.info(f"[Impact Pack] UnsamplerHook: end_at_step = {end_at_step}")

        # Preserve the noise mask across the unsampler call.
        mask = samples.get('noise_mask')

        samples = Unsampler().unsampler(self.model, self.cfg, self.sampler_name, self.steps, end_at_step,
                                        self.scheduler, self.normalize, self.positive, self.negative, samples)[0]

        if mask is not None:
            samples['noise_mask'] = mask

        return samples
|
||||
|
||||
|
||||
class InjectNoiseHookForDetailer(DetailerHook):
    """Detailer variant of noise injection, applied on `cycle_latent`.

    With `from_start=False` the first cycle is skipped and the schedule is
    computed over the remaining cycles.
    """

    def __init__(self, source, seed, start_strength, end_strength, from_start=False):
        super().__init__()
        self.source = source
        self.seed = seed
        self.start_strength = start_strength
        self.end_strength = end_strength
        self.from_start = from_start

    def inject_noise(self, samples):
        offset = 0 if self.from_start else 1
        cur_step = self.cur_step - offset
        total_step = self.total_step - offset

        size = samples['samples'].shape
        seed = cur_step + self.seed + cur_step

        try:
            NoisyLatentImage = nodes.NODE_CLASS_MAPPINGS["BNK_NoisyLatentImage"]
            InjectNoise = nodes.NODE_CLASS_MAPPINGS["BNK_InjectNoise"]
        except KeyError:
            utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_Noise',
                                          "To use 'NoiseInjectionDetailerHookProvider', 'ComfyUI Noise' extension is required.")
            raise Exception("'BNK_NoisyLatentImage', 'BNK_InjectNoise' nodes are not installed.")

        noise = NoisyLatentImage().create_noisy_latents(self.source, seed, size[3] * 8, size[2] * 8, size[0])[0]

        # Preserve the noise mask across the injection call.
        mask = samples.get('noise_mask')

        strength = self.start_strength + (self.end_strength - self.start_strength) * cur_step / total_step
        samples = InjectNoise().inject_noise(samples, strength, noise, mask)[0]

        if mask is not None:
            samples['noise_mask'] = mask

        return samples

    def cycle_latent(self, latent):
        # Skip the very first cycle unless scheduling from the start.
        if self.cur_step == 0 and not self.from_start:
            return latent
        return self.inject_noise(latent)
|
||||
|
||||
|
||||
class UnsamplerDetailerHook(DetailerHook):
    """Detailer variant of the Unsampler hook, applied on `cycle_latent`.

    With `from_start=False` the first cycle is skipped and the schedule is
    computed over the remaining cycles.
    """

    def __init__(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name,
                 scheduler, normalize, positive, negative, from_start=False):
        super().__init__()
        self.model = model
        self.cfg = cfg
        self.sampler_name = sampler_name
        self.steps = steps
        self.start_end_at_step = start_end_at_step
        self.end_end_at_step = end_end_at_step
        self.scheduler = scheduler
        self.normalize = normalize
        self.positive = positive
        self.negative = negative
        self.from_start = from_start

    def unsample(self, samples):
        offset = 0 if self.from_start else 1
        cur_step = self.cur_step - offset
        total_step = self.total_step - offset

        Unsampler = noise_nodes.Unsampler

        span = self.end_end_at_step - self.start_end_at_step
        end_at_step = int(self.start_end_at_step + span * cur_step / total_step)

        # Preserve the noise mask across the unsampler call.
        mask = samples.get('noise_mask')

        samples = Unsampler().unsampler(self.model, self.cfg, self.sampler_name, self.steps, end_at_step,
                                        self.scheduler, self.normalize, self.positive, self.negative, samples)[0]

        if mask is not None:
            samples['noise_mask'] = mask

        return samples

    def cycle_latent(self, latent):
        # Skip the very first cycle unless scheduling from the start.
        if self.cur_step == 0 and not self.from_start:
            return latent
        return self.unsample(latent)
|
||||
|
||||
|
||||
class SEGSOrderedFilterDetailerHook(DetailerHook):
    """Applies SEGSOrderedFilter to detections inside the detailer pipeline."""

    def __init__(self, target, order, take_start, take_count):
        super().__init__()
        self.target = target
        self.order = order
        self.take_start = take_start
        self.take_count = take_count

    def post_detection(self, segs):
        return segs_nodes.SEGSOrderedFilter().doit(segs, self.target, self.order, self.take_start, self.take_count)[0]
|
||||
|
||||
|
||||
class SEGSRangeFilterDetailerHook(DetailerHook):
    """Applies SEGSRangeFilter to detections inside the detailer pipeline."""

    def __init__(self, target, mode, min_value, max_value):
        super().__init__()
        self.target = target
        self.mode = mode
        self.min_value = min_value
        self.max_value = max_value

    def post_detection(self, segs):
        return segs_nodes.SEGSRangeFilter().doit(segs, self.target, self.mode, self.min_value, self.max_value)[0]
|
||||
|
||||
|
||||
class SEGSLabelFilterDetailerHook(DetailerHook):
    """Applies SEGSLabelFilter to detections inside the detailer pipeline."""

    def __init__(self, labels):
        super().__init__()
        self.labels = labels

    def post_detection(self, segs):
        # Empty preset string: only the comma-separated `labels` are used.
        return segs_nodes.SEGSLabelFilter().doit(segs, "", self.labels)[0]
|
||||
|
||||
|
||||
class LamaRemoverDetailerHook(DetailerHook):
    """Runs the external LamaRemover node on the upscaled patch before sampling."""

    def __init__(self, mask_threshold, gaussblur_radius, skip_sampling):
        super().__init__()
        self.mask_threshold = mask_threshold
        self.gaussblur_radius = gaussblur_radius
        self.skip_sampling = skip_sampling

    def post_upscale(self, img, mask=None):
        try:
            lama_remover_obj = nodes.NODE_CLASS_MAPPINGS['LamaRemover']()
        except KeyError:
            utils.try_install_custom_node('https://github.com/Layer-norm/comfyui-lama-remover',
                                          "To use 'LAMARemoverDetailerHookProvider', 'comfyui-lama-remover' nodepack is required.")
            raise Exception("'LamaRemover' node is not installed.")

        return lama_remover_obj.lama_remover(img, masks=mask, mask_threshold=self.mask_threshold,
                                             gaussblur_radius=self.gaussblur_radius, invert_mask=False)[0]

    def get_skip_sampling(self):
        return self.skip_sampling
|
||||
|
||||
|
||||
class PreviewDetailerHook(DetailerHook):
    """Pushes each pasted intermediate image to the frontend as a webp preview."""

    def __init__(self, node_id, quality):
        super().__init__()
        self.node_id = node_id
        self.quality = quality

    async def send(self, image):
        if len(image) == 0:
            return

        img = utils.tensor2pil(image[0].unsqueeze(0))

        temp_path = os.path.join(folder_paths.get_temp_directory(), 'pvhook')
        os.makedirs(temp_path, exist_ok=True)

        filename = f"{self.node_id}.webp"
        img.save(os.path.join(temp_path, filename), quality=self.quality)

        item = {
            "filename": filename,
            "subfolder": 'pvhook',
            "type": 'temp',
        }

        PromptServer.instance.send_sync("impact-preview", {'node_id': self.node_id, 'item': item})

    def post_paste(self, image):
        # Fire-and-forget: schedule the preview on the running event loop.
        # NOTE(review): raises RuntimeError if no loop is running — presumably
        # always called from the server's loop; confirm.
        asyncio.get_running_loop().create_task(self.send(image))
        return image
|
||||
|
||||
|
||||
class BlackPatchRetryHook(DetailerHook):
    """Requests a resample when a generated patch is nearly uniform black.

    Thresholds are given on the 0-255 scale and compared against the patch's
    grayscale mean/variance on the 0-1 scale.
    """

    def __init__(self, mean_thresh, var_thresh):
        super().__init__()
        assert 0 <= mean_thresh <= 255 and 0 <= var_thresh <= 255
        self.mean_thresh = mean_thresh
        self.var_thresh = var_thresh

    def should_retry_patch(self, cropped_region):
        # remove the first dimension (batch_size)
        if cropped_region.ndim == 4:
            assert cropped_region.shape[0] == 1
            cropped_region = cropped_region.squeeze(0)

        # turn image to grayscale via a simple channel average
        if cropped_region.ndim == 3:
            assert cropped_region.shape[-1] in [1, 3]
            cropped_region = cropped_region.mean(axis=-1)

        is_dark = cropped_region.mean() <= self.mean_thresh / 255
        is_flat = cropped_region.var() <= self.var_thresh / 255
        return is_dark and is_flat
|
||||
@@ -0,0 +1,39 @@
|
||||
# Module setup: make sure the optional onnxruntime dependency is installed
# before the import attempt below.
import impact.additional_dependencies
import numpy as np
from impact import utils
import logging

impact.additional_dependencies.ensure_onnx_package()
|
||||
|
||||
try:
    import onnxruntime

    def onnx_inference(image, onnx_model):
        """Run an ONNX detection model on a single image tensor.

        Args:
            image: image tensor convertible by utils.tensor2pil.
            onnx_model: path to (or bytes of) the ONNX model to load.

        Returns:
            (labels, scores, boxes): detection arrays truncated at the first
            padding entry (label == -1); boxes are cast to uint32.
        """
        # prepare image: PIL -> contiguous BGR float32 with 'caffe' mean subtraction
        pil = utils.tensor2pil(image)
        image = np.ascontiguousarray(pil)
        image = image[:, :, ::-1]  # to BGR image
        image = image.astype(np.float32)
        image -= [103.939, 116.779, 123.68]  # 'caffe' mode image preprocessing

        # do detection
        # NOTE(review): a new InferenceSession is built on every call; consider
        # caching per model path if this becomes a hot path.
        onnx_model = onnxruntime.InferenceSession(onnx_model, providers=["CPUExecutionProvider"])
        outputs = onnx_model.run(
            [s_i.name for s_i in onnx_model.get_outputs()],
            {onnx_model.get_inputs()[0].name: np.expand_dims(image, axis=0)},
        )

        # identify the three output arrays by their element types
        labels = [op for op in outputs if op.dtype == "int32"][0]
        scores = [op for op in outputs if isinstance(op[0][0], np.float32)][0]
        boxes = [op for op in outputs if isinstance(op[0][0], np.ndarray)][0]

        # filter-out useless items: entries after the first -1 label are padding
        idx = np.where(labels[0] == -1)[0][0]

        labels = labels[0][:idx]
        scores = scores[0][:idx]
        boxes = boxes[0][:idx].astype(np.uint32)

        return labels, scores, boxes
except Exception as e:
    # bug fix: the exception was never bound and the message was not an f-string,
    # so the literal text '{e}' was logged instead of the actual error
    logging.error(f"[Impact Pack] ComfyUI-Impact-Pack: 'onnxruntime' package doesn't support 'python 3.11', yet.\t{e}")
|
||||
2767
custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_pack.py
Normal file
2767
custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_pack.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,323 @@
|
||||
import logging
|
||||
|
||||
import nodes
|
||||
from comfy.k_diffusion import sampling as k_diffusion_sampling
|
||||
from comfy import samplers
|
||||
from comfy_extras import nodes_custom_sampler
|
||||
import latent_preview
|
||||
import comfy
|
||||
import torch
|
||||
import math
|
||||
import comfy.model_management as mm
|
||||
|
||||
|
||||
try:
    from comfy_extras.nodes_custom_sampler import Noise_EmptyNoise, Noise_RandomNoise
    import node_helpers
except Exception as e:
    # These names only exist in recent ComfyUI versions; fail loudly and chain
    # the original import error so the real cause stays visible in the traceback.
    logging.warning("\n#############################################\n[Impact Pack] ComfyUI is an outdated version.\n#############################################\n")
    raise Exception("[Impact Pack] ComfyUI is an outdated version.") from e
|
||||
|
||||
|
||||
def calculate_sigmas(model, sampler, scheduler, steps):
    """Build the sigma schedule for `steps` steps of `scheduler` on `model`.

    Samplers that consume sigmas pairwise (dpm_2*, uni_pc*) get one extra step
    whose penultimate sigma is dropped afterwards, matching ComfyUI behavior.
    """
    needs_extra_step = sampler in ['dpm_2', 'dpm_2_ancestral', 'uni_pc', 'uni_pc_bh2']
    if needs_extra_step:
        steps += 1

    # Special scheduler names are dispatched to their dedicated scheduler nodes.
    if scheduler.startswith('AYS'):
        sigmas = nodes.NODE_CLASS_MAPPINGS['AlignYourStepsScheduler']().get_sigmas(scheduler[4:], steps, denoise=1.0)[0]
    elif scheduler.startswith('GITS[coeff='):
        coeff = float(scheduler[11:-1])
        sigmas = nodes.NODE_CLASS_MAPPINGS['GITSScheduler']().execute(coeff, steps, denoise=1.0)[0]
    elif scheduler == 'LTXV[default]':
        sigmas = nodes.NODE_CLASS_MAPPINGS['LTXVScheduler']().execute(20, 2.05, 0.95, True, 0.1)[0]
    elif scheduler.startswith('OSS'):
        sigmas = nodes.NODE_CLASS_MAPPINGS['OptimalStepsScheduler']().execute(scheduler[4:], steps, denoise=1.0)[0]
    else:
        sigmas = samplers.calculate_sigmas(model.get_model_object("model_sampling"), scheduler, steps)

    if needs_extra_step:
        # drop the penultimate sigma added by the extra step
        sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
    return sigmas
|
||||
|
||||
|
||||
def get_noise_sampler(x, cpu, total_sigmas, **kwargs):
    """Return a BrownianTree noise sampler when a seed is available, else None."""
    if 'extra_args' not in kwargs or 'seed' not in kwargs['extra_args']:
        return None

    # sigma range spans only the strictly positive part of the schedule
    positive_sigmas = total_sigmas[total_sigmas > 0]
    sigma_min = positive_sigmas.min()
    sigma_max = total_sigmas.max()
    seed = kwargs['extra_args'].get("seed", None)
    return k_diffusion_sampling.BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=cpu)
|
||||
|
||||
|
||||
def ksampler(sampler_name, total_sigmas, extra_options=None, inpaint_options=None):
    """Return a sampler object for `sampler_name`.

    The *_sde family needs a seed-stable BrownianTree noise sampler spanning the
    FULL sigma schedule (`total_sigmas`); without it, sampling the schedule one
    segment at a time would reseed the noise per segment. All other samplers
    are returned unchanged via comfy.samplers.sampler_object.

    Args:
        sampler_name: ComfyUI sampler identifier.
        total_sigmas: complete sigma schedule used to derive sigma_min/sigma_max.
        extra_options: optional KSAMPLER extra options (defaults to an empty dict).
        inpaint_options: optional KSAMPLER inpaint options (defaults to an empty dict).
    """
    # bug fix: avoid mutable default arguments shared across calls
    extra_options = {} if extra_options is None else extra_options
    inpaint_options = {} if inpaint_options is None else inpaint_options

    # dispatch table replaces the original elif chain
    sde_sampler_functions = {
        "dpmpp_sde": k_diffusion_sampling.sample_dpmpp_sde,
        "dpmpp_sde_gpu": k_diffusion_sampling.sample_dpmpp_sde_gpu,
        "dpmpp_2m_sde": k_diffusion_sampling.sample_dpmpp_2m_sde,
        "dpmpp_2m_sde_gpu": k_diffusion_sampling.sample_dpmpp_2m_sde_gpu,
        "dpmpp_3m_sde": k_diffusion_sampling.sample_dpmpp_3m_sde,
        "dpmpp_3m_sde_gpu": k_diffusion_sampling.sample_dpmpp_3m_sde_gpu,
    }

    orig_sampler_function = sde_sampler_functions.get(sampler_name)
    if orig_sampler_function is None:
        return comfy.samplers.sampler_object(sampler_name)

    def sampler_function_wrapper(model, x, sigmas, **kwargs):
        # inject a noise sampler built from the full schedule unless one was provided
        if 'noise_sampler' not in kwargs:
            kwargs['noise_sampler'] = get_noise_sampler(x, 'gpu' not in sampler_name, total_sigmas, **kwargs)

        return orig_sampler_function(model, x, sigmas, **kwargs)

    return samplers.KSAMPLER(sampler_function_wrapper, extra_options, inpaint_options)
|
||||
|
||||
|
||||
# modified version of SamplerCustom.sample
def sample_with_custom_noise(model, add_noise, noise_seed, cfg, positive, negative, sampler, sigmas, latent_image, noise=None, callback=None):
    """Run a custom sampler over `latent_image`, optionally with caller-supplied noise.

    Returns:
        (out, out_denoised): `out` holds the final samples; `out_denoised` holds
        the last x0 prediction when the preview callback captured one, otherwise
        it aliases `out`.
    """
    latent = latent_image
    latent_image = latent["samples"]

    # Newer ComfyUI can fix up empty latents whose channel count mismatches the model.
    if hasattr(comfy.sample, 'fix_empty_latent_channels'):
        latent_image = comfy.sample.fix_empty_latent_channels(model, latent_image)

    out = latent.copy()
    out['samples'] = latent_image

    # Build noise unless the caller provided it; add_noise=False means zero noise.
    if noise is None:
        if not add_noise:
            noise = Noise_EmptyNoise().generate_noise(out)
        else:
            noise = Noise_RandomNoise(noise_seed).generate_noise(out)

    noise_mask = None
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

    # x0_output is filled by the preview callback with the latest denoised estimate.
    x0_output = {}
    preview_callback = latent_preview.prepare_callback(model, sigmas.shape[-1] - 1, x0_output)

    if callback is not None:
        # chain the caller's callback in front of the preview callback
        def touched_callback(step, x0, x, total_steps):
            callback(step, x0, x, total_steps)
            preview_callback(step, x0, x, total_steps)
    else:
        touched_callback = preview_callback

    disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED

    device = mm.get_torch_device()

    # move everything to the sampling device up front
    noise = noise.to(device)
    latent_image = latent_image.to(device)
    if noise_mask is not None:
        noise_mask = noise_mask.to(device)

    if negative != 'NegativePlaceholder':
        # This way is incompatible with Advanced ControlNet, yet.
        # guider = comfy.samplers.CFGGuider(model)
        # guider.set_conds(positive, negative)
        # guider.set_cfg(cfg)
        samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image,
                                             noise_mask=noise_mask, callback=touched_callback,
                                             disable_pbar=disable_pbar, seed=noise_seed)
    else:
        # guidance-only path (no real negative prompt): cfg is applied as a
        # 'guidance' conditioning value instead of classifier-free guidance
        guider = nodes_custom_sampler.Guider_Basic(model)
        positive = node_helpers.conditioning_set_values(positive, {"guidance": cfg})
        guider.set_conds(positive)
        samples = guider.sample(noise, latent_image, sampler, sigmas, denoise_mask=noise_mask, callback=touched_callback, disable_pbar=disable_pbar, seed=noise_seed)

    samples = samples.to(comfy.model_management.intermediate_device())

    out["samples"] = samples
    if "x0" in x0_output:
        out_denoised = latent.copy()
        out_denoised["samples"] = model.model.process_latent_out(x0_output["x0"].cpu())
    else:
        out_denoised = out
    return out, out_denoised
|
||||
|
||||
|
||||
# When sampling one step at a time, it mitigates the problem. (especially for _sde series samplers)
def separated_sample(model, add_noise, seed, steps, cfg, sampler_name, scheduler, positive, negative,
                     latent_image, start_at_step, end_at_step, return_with_leftover_noise, sigma_ratio=1.0, sampler_opt=None, noise=None, callback=None, scheduler_func=None):
    """Sample only the [start_at_step, end_at_step] window of the sigma schedule.

    Returns the noisy latent when return_with_leftover_noise is set, otherwise
    the denoised latent; returns the input latent unchanged when the window is
    empty.
    """

    # resolve the full sigma schedule
    if scheduler_func is not None:
        total_sigmas = scheduler_func(model, sampler_name, steps)
    else:
        if sampler_opt is None:
            total_sigmas = calculate_sigmas(model, sampler_name, scheduler, steps)
        else:
            # a custom sampler object is in charge; the sampler name is irrelevant here
            total_sigmas = calculate_sigmas(model, "", scheduler, steps)

    sigmas = total_sigmas

    if end_at_step is not None and end_at_step < (len(total_sigmas) - 1):
        sigmas = total_sigmas[:end_at_step + 1]
        if not return_with_leftover_noise:
            # NOTE(review): this writes through a tensor view and therefore also
            # mutates total_sigmas[end_at_step] — confirm this is intended.
            sigmas[-1] = 0

    if start_at_step is not None:
        if start_at_step < (len(sigmas) - 1):
            sigmas = sigmas[start_at_step:] * sigma_ratio
        else:
            # nothing left to sample in the requested window
            if latent_image is not None:
                return latent_image
            else:
                return {'samples': torch.zeros_like(noise)}

    if sampler_opt is None:
        impact_sampler = ksampler(sampler_name, total_sigmas)
    else:
        impact_sampler = sampler_opt

    # empty / all-zero schedule: no sampling work to do
    if len(sigmas) == 0 or (len(sigmas) == 1 and sigmas[0] == 0):
        return latent_image

    res = sample_with_custom_noise(model, add_noise, seed, cfg, positive, negative, impact_sampler, sigmas, latent_image, noise=noise, callback=callback)

    if return_with_leftover_noise:
        return res[0]
    else:
        return res[1]
|
||||
|
||||
|
||||
def impact_sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, sigma_ratio=1.0, sampler_opt=None, noise=None, scheduler_func=None):
    """Denoise-aware sampling entry point.

    Maps `denoise` onto an advanced-step window so that only the last `steps`
    steps of an `steps / denoise`-step schedule are sampled, then delegates to
    separated_sample. `denoise` must be > 0.
    """
    advanced_steps = math.floor(steps / denoise)
    start_at_step = advanced_steps - steps
    end_at_step = start_at_step + steps
    # bug fix: sigma_ratio, sampler_opt and noise were accepted but silently dropped
    return separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                            start_at_step, end_at_step, False,
                            sigma_ratio=sigma_ratio, sampler_opt=sampler_opt, noise=noise, scheduler_func=scheduler_func)
|
||||
|
||||
|
||||
def ksampler_wrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise,
                     refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, sigma_factor=1.0, noise=None, scheduler_func=None, sampler_opt=None):
    """KSampler front-end with optional base->refiner two-stage sampling.

    Without complete refiner arguments this behaves like a plain denoise-aware
    KSampler call. With them, the base model samples the first
    (1 - refiner_ratio) share of the window with leftover noise and the refiner
    model finishes the schedule.
    """

    if refiner_ratio is None or refiner_model is None or refiner_clip is None or refiner_positive is None or refiner_negative is None:
        # Use separated_sample instead of KSampler for `AYS scheduler`
        # refined_latent = nodes.KSampler().sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise * sigma_factor)[0]

        # translate `denoise` into an advanced-step window over the full schedule
        advanced_steps = math.floor(steps / denoise)
        start_at_step = advanced_steps - steps
        end_at_step = start_at_step + steps

        refined_latent = separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler,
                                          positive, negative, latent_image, start_at_step, end_at_step, False,
                                          sigma_ratio=sigma_factor, sampler_opt=sampler_opt, noise=noise, scheduler_func=scheduler_func)
    else:
        advanced_steps = math.floor(steps / denoise)
        start_at_step = advanced_steps - steps
        # base model covers the first (1 - refiner_ratio) share of the window
        end_at_step = start_at_step + math.floor(steps * (1.0 - refiner_ratio))

        # print(f"pre: {start_at_step} .. {end_at_step} / {advanced_steps}")
        # base pass keeps leftover noise for the refiner to continue from
        temp_latent = separated_sample(model, True, seed, advanced_steps, cfg, sampler_name, scheduler,
                                       positive, negative, latent_image, start_at_step, end_at_step, True,
                                       sigma_ratio=sigma_factor, sampler_opt=sampler_opt, noise=noise, scheduler_func=scheduler_func)

        if 'noise_mask' in latent_image:
            # noise_latent = \
            #     impact_sampling.separated_sample(refiner_model, "enable", seed, advanced_steps, cfg, sampler_name,
            #                                      scheduler, refiner_positive, refiner_negative, latent_image, end_at_step,
            #                                      end_at_step, "enable")

            # restrict the intermediate result to the masked region
            latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']()
            temp_latent = latent_compositor.composite(latent_image, temp_latent, 0, 0, False, latent_image['noise_mask'])[0]

        # print(f"post: {end_at_step} .. {advanced_steps + 1} / {advanced_steps}")
        # refiner continues from the leftover noise without adding new noise
        refined_latent = separated_sample(refiner_model, False, seed, advanced_steps, cfg, sampler_name, scheduler,
                                          refiner_positive, refiner_negative, temp_latent, end_at_step, advanced_steps + 1, False,
                                          sigma_ratio=sigma_factor, sampler_opt=sampler_opt, scheduler_func=scheduler_func)

    return refined_latent
|
||||
|
||||
|
||||
class KSamplerAdvancedWrapper:
    """Bundles model/conditioning/sampler settings for advanced (step-windowed) sampling.

    Attributes:
        params: (model, cfg, sampler_name, scheduler, positive, negative, sigma_factor).
        sampler_opt: optional custom sampler object overriding sampler_name.
        scheduler_func: optional callable producing the sigma schedule.
    """
    params = None

    def __init__(self, model, cfg, sampler_name, scheduler, positive, negative, sampler_opt=None, sigma_factor=1.0, scheduler_func=None):
        self.params = model, cfg, sampler_name, scheduler, positive, negative, sigma_factor
        self.sampler_opt = sampler_opt
        self.scheduler_func = scheduler_func

    def clone_with_conditionings(self, positive, negative):
        """Return a copy of this wrapper with the conditionings replaced."""
        model, cfg, sampler_name, scheduler, _, _, sigma_factor = self.params
        # bug fix: sigma_factor and scheduler_func were dropped by the clone,
        # silently resetting them to their defaults
        return KSamplerAdvancedWrapper(model, cfg, sampler_name, scheduler, positive, negative,
                                       self.sampler_opt, sigma_factor=sigma_factor, scheduler_func=self.scheduler_func)

    def sample_advanced(self, add_noise, seed, steps, latent_image, start_at_step, end_at_step, return_with_leftover_noise, hook=None,
                        recovery_mode="ratio additional", recovery_sampler="AUTO", recovery_sigma_ratio=1.0, noise=None):
        """Sample a step window, optionally followed by a masked recovery pass.

        The *_sde / uni_pc sampler family can produce degenerate (black) patches;
        when recovery is enabled, a second pass re-samples the masked region with
        a more robust sampler.
        """
        model, cfg, sampler_name, scheduler, positive, negative, sigma_factor = self.params
        # steps, start_at_step, end_at_step = self.compensate_denoise(steps, start_at_step, end_at_step)

        if hook is not None:
            # NOTE(review): the upscaled_latent returned by the hook is not used
            # below — confirm this is intended.
            model, seed, steps, cfg, sampler_name, scheduler, positive, negative, upscaled_latent = hook.pre_ksample_advanced(model, add_noise, seed, steps, cfg, sampler_name, scheduler,
                                                                                                                              positive, negative, latent_image, start_at_step, end_at_step,
                                                                                                                              return_with_leftover_noise)

        if recovery_mode != 'DISABLE' and sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu', 'dpmpp_2m_sde', 'dpmpp_2m_sde_gpu', 'dpmpp_3m_sde', 'dpmpp_3m_sde_gpu']:
            # keep the pre-sampling latent so the recovery pass can composite onto it
            base_image = latent_image.copy()
            if recovery_mode == "ratio between":
                sigma_ratio = 1.0 - recovery_sigma_ratio
            else:
                sigma_ratio = 1.0
        else:
            base_image = None
            sigma_ratio = 1.0

        try:
            if sigma_ratio > 0:
                latent_image = separated_sample(model, add_noise, seed, steps, cfg, sampler_name, scheduler,
                                                positive, negative, latent_image, start_at_step, end_at_step,
                                                return_with_leftover_noise, sigma_ratio=sigma_ratio * sigma_factor,
                                                sampler_opt=self.sampler_opt, noise=noise, scheduler_func=self.scheduler_func)
        except ValueError as e:
            if str(e) == 'sigma_min and sigma_max must not be 0':
                logging.warning("\nWARN: sampling skipped - sigma_min and sigma_max are 0")
                return latent_image
            # bug fix: any other ValueError was silently swallowed
            raise

        if (recovery_sigma_ratio > 0 and recovery_mode != 'DISABLE' and
                sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu', 'dpmpp_2m_sde', 'dpmpp_2m_sde_gpu', 'dpmpp_3m_sde', 'dpmpp_3m_sde_gpu']):
            # NOTE(review): the outer condition already restricts sampler_name to this
            # list, so compensate is always 0 here — confirm the intended step shift.
            compensate = 0 if sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu', 'dpmpp_2m_sde', 'dpmpp_2m_sde_gpu', 'dpmpp_3m_sde', 'dpmpp_3m_sde_gpu'] else 2
            if recovery_sampler == "AUTO":
                recovery_sampler = 'dpm_fast' if sampler_name in ['uni_pc', 'uni_pc_bh2', 'dpmpp_sde', 'dpmpp_sde_gpu'] else 'dpmpp_2m'

            latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']()

            # NOTE(review): assumes the latent carries a 'noise_mask' — KeyError otherwise.
            noise_mask = latent_image['noise_mask']

            if len(noise_mask.shape) == 4:
                noise_mask = noise_mask.squeeze(0).squeeze(0)

            # restrict the degenerate result to the masked region of the base latent
            latent_image = latent_compositor.composite(base_image, latent_image, 0, 0, False, noise_mask)[0]

            try:
                latent_image = separated_sample(model, add_noise, seed, steps, cfg, recovery_sampler, scheduler,
                                                positive, negative, latent_image, start_at_step - compensate, end_at_step, return_with_leftover_noise,
                                                sigma_ratio=recovery_sigma_ratio * sigma_factor, sampler_opt=self.sampler_opt, scheduler_func=self.scheduler_func)
            except ValueError as e:
                if str(e) == 'sigma_min and sigma_max must not be 0':
                    logging.warning("\nWARN: sampling skipped - sigma_min and sigma_max are 0")
                else:
                    # bug fix: any other ValueError was silently swallowed
                    raise

        return latent_image
|
||||
|
||||
|
||||
class KSamplerWrapper:
    """Captures a complete KSampler configuration and applies it to latents on demand."""

    params = None

    def __init__(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, scheduler_func=None):
        self.params = model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise
        self.scheduler_func = scheduler_func

    def sample(self, latent_image, hook=None):
        """Run impact_sample with the stored settings; `hook` may rewrite them first."""
        settings = self.params

        if hook is not None:
            # the hook may replace any setting; its returned upscaled latent is ignored here
            hooked = hook.pre_ksample(*settings[:8], latent_image, settings[8])
            model, seed, steps, cfg, sampler_name, scheduler, positive, negative, _upscaled, denoise = hooked
        else:
            model, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise = settings

        return impact_sample(model, seed, steps, cfg, sampler_name, scheduler, positive, negative,
                             latent_image, denoise, scheduler_func=self.scheduler_func)
|
||||
619
custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_server.py
Normal file
619
custom_nodes/ComfyUI-Impact-Pack/modules/impact/impact_server.py
Normal file
@@ -0,0 +1,619 @@
|
||||
import io
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import threading
|
||||
import traceback
|
||||
from io import BytesIO
|
||||
|
||||
import comfy
|
||||
import folder_paths
|
||||
import impact
|
||||
import impact.core as core
|
||||
import impact.impact_pack as impact_pack
|
||||
import impact.utils as utils
|
||||
import nodes
|
||||
import numpy as np
|
||||
import torchvision
|
||||
from aiohttp import web
|
||||
from impact.utils import to_tensor
|
||||
from PIL import Image
|
||||
from segment_anything import SamPredictor, sam_model_registry
|
||||
from server import PromptServer
|
||||
|
||||
# Lazily-created SAM predictor shared by the /sam/* endpoints.
sam_predictor = None
# Fallback SAM checkpoint path under the Impact Pack model directory.
default_sam_model_name = os.path.join(impact_pack.model_path, "sams", "sam_vit_b_01ec64.pth")

# Serializes all loading/inference on the shared predictor.
sam_lock = threading.Condition()

# Request payload of the last /sam/prepare call, used to skip redundant reloads.
last_prepare_data = None
|
||||
|
||||
|
||||
def async_prepare_sam(image_dir, model_name, filename):
    """Load a SAM checkpoint and precompute the embedding for one image.

    Runs on a worker thread (started by /sam/prepare); holds `sam_lock` for the
    whole load so concurrent prepare/detect calls don't race on `sam_predictor`.
    """
    with sam_lock:
        global sam_predictor

        # infer the architecture from the checkpoint filename
        if 'vit_h' in model_name:
            model_kind = 'vit_h'
        elif 'vit_l' in model_name:
            model_kind = 'vit_l'
        else:
            model_kind = 'vit_b'

        sam_model = sam_model_registry[model_kind](checkpoint=model_name)
        sam_predictor = SamPredictor(sam_model)

        image_path = os.path.join(image_dir, filename)
        image = nodes.LoadImage().load_image(image_path)[0]
        # tensor (0..1) -> uint8 HWC image for the predictor
        image = np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)

        if impact.config.get_config()['sam_editor_cpu']:
            device = 'cpu'
        else:
            device = comfy.model_management.get_torch_device()

        # compute the image embedding on `device`, then park the model back on CPU
        sam_predictor.model.to(device=device)
        sam_predictor.set_image(image, "RGB")
        sam_predictor.model.cpu()
|
||||
|
||||
|
||||
@PromptServer.instance.routes.post("/sam/prepare")
async def sam_prepare(request):
    """Kick off asynchronous SAM model loading + image embedding for the editor.

    Responds 200 immediately (loading continues on a worker thread) or 400 when
    the model or image directory cannot be resolved.
    """
    global sam_predictor
    global last_prepare_data
    data = await request.json()

    with sam_lock:
        if last_prepare_data is not None and last_prepare_data == data:
            # already loaded: skip -- prevent redundant loading
            return web.Response(status=200)

        last_prepare_data = data

        # NOTE(review): any non-'auto' sam_model_name is ignored and the default
        # checkpoint is used instead — confirm this is intended.
        model_name = 'sam_vit_b_01ec64.pth'
        if data['sam_model_name'] == 'auto':
            model_name = impact.config.get_config()['sam_editor_model']

        model_path = folder_paths.get_full_path("sams", model_name)

        if model_path is None:
            logging.error(f"[Impact Pack] The '{model_name}' model file cannot be found in any sams model path.")
            return web.Response(status=400)

        logging.info(f"[Impact Pack] Loading SAM model '{model_path}'")

        filename, image_dir = folder_paths.annotated_filepath(data["filename"])

        if image_dir is None:
            typ = data['type'] if data['type'] != '' else 'output'
            image_dir = folder_paths.get_directory_by_type(typ)
            if data['subfolder'] is not None and data['subfolder'] != '':
                image_dir += f"/{data['subfolder']}"

        if image_dir is None:
            return web.Response(status=400)

        # loading happens off the event loop; async_prepare_sam re-acquires sam_lock
        thread = threading.Thread(target=async_prepare_sam, args=(image_dir, model_path, filename,))
        thread.start()

        # NOTE(review): logged before the worker thread finishes — the model may
        # still be loading at this point.
        logging.info("[Impact Pack] SAM model loaded. ")
        return web.Response(status=200)
|
||||
|
||||
|
||||
@PromptServer.instance.routes.post("/sam/release")
async def release_sam(request):
    """Drop the shared SAM predictor so its memory can be reclaimed."""
    global sam_predictor

    with sam_lock:
        # clearing the global releases the last strong reference
        predictor, sam_predictor = sam_predictor, None
        del predictor

    logging.info("[Impact Pack]: unloading SAM model")
|
||||
|
||||
|
||||
@PromptServer.instance.routes.post("/sam/detect")
async def sam_detect(request):
    """Run point-prompted SAM segmentation and return the combined mask as a PNG.

    Responds 400 when no predictor is prepared or no mask could be produced.
    """
    global sam_predictor
    with sam_lock:
        if sam_predictor is not None:
            if impact.config.get_config()['sam_editor_cpu']:
                device = 'cpu'
            else:
                device = comfy.model_management.get_torch_device()

            sam_predictor.model.to(device=device)
            try:
                data = await request.json()

                positive_points = data['positive_points']
                negative_points = data['negative_points']
                threshold = data['threshold']

                # merge positive/negative clicks into one point list with labels 1/0
                points = []
                plabs = []

                for p in positive_points:
                    points.append(p)
                    plabs.append(1)

                for p in negative_points:
                    points.append(p)
                    plabs.append(0)

                detected_masks = core.sam_predict(sam_predictor, points, plabs, None, threshold)
                mask = utils.combine_masks2(detected_masks)

                if mask is None:
                    return web.Response(status=400)

                # mask tensor -> 3-channel image tensor -> uint8 PIL image
                image = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3)
                i = 255. * image.cpu().numpy()

                img = Image.fromarray(np.clip(i[0], 0, 255).astype(np.uint8))

                img_buffer = io.BytesIO()
                img.save(img_buffer, format='png')

                headers = {'Content-Type': 'image/png'}
            finally:
                # always park the model back on CPU to free VRAM
                sam_predictor.model.to(device="cpu")

            return web.Response(body=img_buffer.getvalue(), headers=headers)

        else:
            return web.Response(status=400)
|
||||
|
||||
|
||||
@PromptServer.instance.routes.get("/impact/wildcards/refresh")
async def wildcards_refresh(request):
    """Reload all wildcard definitions from disk."""
    impact.wildcards.wildcard_load()
    return web.Response(status=200)
|
||||
|
||||
|
||||
@PromptServer.instance.routes.get("/impact/wildcards/list")
async def wildcards_list(request):
    """Return the full wildcard name list as JSON."""
    return web.json_response({'data': impact.wildcards.get_wildcard_list()})
|
||||
|
||||
|
||||
@PromptServer.instance.routes.get("/impact/wildcards/list/loaded")
async def wildcards_list_loaded(request):
    """
    Get list of actually loaded wildcards (progressive loading in on-demand mode).

    Returns:
        - In on-demand mode: only wildcards that have been loaded into memory
        - In full cache mode: same as /wildcards/list (all wildcards)
    """
    on_demand = impact.wildcards.is_on_demand_mode()
    if on_demand:
        total_available = len(impact.wildcards.available_wildcards)
    else:
        total_available = len(impact.wildcards.wildcard_dict)

    return web.json_response({
        'data': impact.wildcards.get_loaded_wildcard_list(),
        'on_demand_mode': on_demand,
        'total_available': total_available,
    })
|
||||
|
||||
|
||||
@PromptServer.instance.routes.post("/impact/wildcards")
async def populate_wildcards(request):
    """Resolve wildcard syntax in the posted `text` (optionally seeded) and return the result."""
    payload = await request.json()
    seed = payload.get('seed', None)
    populated = impact.wildcards.process(payload['text'], seed)
    return web.json_response({"text": populated})
|
||||
|
||||
|
||||
# Maps picker node id -> list of candidate SEG images served by the /impact/segs/picker endpoints.
segs_picker_map = {}
|
||||
|
||||
@PromptServer.instance.routes.get("/impact/segs/picker/count")
async def segs_picker_count(request):
    """Return how many candidate images are registered for a picker node."""
    node_id = request.rel_url.query.get('id', '')

    if node_id not in segs_picker_map:
        return web.Response(status=400)

    count = len(segs_picker_map[node_id])
    return web.Response(status=200, text=str(count))
|
||||
|
||||
|
||||
@PromptServer.instance.routes.get("/impact/segs/picker/view")
async def segs_picker(request):
    """Serve the idx-th candidate image of a picker node as a PNG."""
    node_id = request.rel_url.query.get('id', '')
    idx = int(request.rel_url.query.get('idx', ''))

    if node_id not in segs_picker_map or idx >= len(segs_picker_map[node_id]):
        return web.Response(status=400)

    # HWC image -> CHW tensor -> PIL image
    img = to_tensor(segs_picker_map[node_id][idx]).permute(0, 3, 1, 2).squeeze(0)
    pil = torchvision.transforms.ToPILImage('RGB')(img)

    image_bytes = BytesIO()
    pil.save(image_bytes, format="PNG")
    image_bytes.seek(0)
    return web.Response(status=200, body=image_bytes, content_type='image/png',
                        headers={"Content-Disposition": f"filename={node_id}{idx}.png"})
|
||||
|
||||
|
||||
@PromptServer.instance.routes.get("/view/validate")
async def view_validate(request):
    """Check whether an annotated filename/subfolder pair resolves to an existing file.

    Responds 200 when the file exists, 400 otherwise — including on any
    path-traversal attempt via `filename` or `subfolder`.
    """
    if "filename" in request.rel_url.query:
        filename = request.rel_url.query["filename"]
        # robustness: a missing 'subfolder' no longer raises KeyError (-> 500)
        subfolder = request.rel_url.query.get("subfolder", "")
        filename, base_dir = folder_paths.annotated_filepath(filename)

        if filename == '' or filename[0] == '/' or '..' in filename:
            return web.Response(status=400)

        if base_dir is None:
            base_dir = folder_paths.get_input_directory()

        file = os.path.join(base_dir, subfolder, filename)

        # security fix: `subfolder` was joined into the path unchecked, allowing
        # traversal out of base_dir; reject anything that escapes it
        try:
            base_real = os.path.realpath(base_dir)
            if os.path.commonpath([base_real, os.path.realpath(file)]) != base_real:
                return web.Response(status=400)
        except ValueError:
            # paths on different drives (Windows) cannot share a common path
            return web.Response(status=400)

        if os.path.isfile(file):
            return web.Response(status=200)

    return web.Response(status=400)
|
||||
|
||||
|
||||
@PromptServer.instance.routes.get("/impact/validate/pb_id_image")
async def view_pb_id_image(request):
    """Return 200 when the preview-bridge id maps to an existing image file."""
    if "id" in request.rel_url.query:
        pb_id = request.rel_url.query["id"]

        if pb_id not in core.preview_bridge_image_id_map:
            return web.Response(status=400)

        # bug fix: the map stores (file, item) pairs (see get_previewbridge_image),
        # so the path must be unpacked before the isfile check
        file, _ = core.preview_bridge_image_id_map[pb_id]
        if os.path.isfile(file):
            return web.Response(status=200)

    return web.Response(status=400)
|
||||
|
||||
|
||||
@PromptServer.instance.routes.get("/impact/set/pb_id_image")
async def set_previewbridge_image(request):
    """Register an image file for a preview-bridge node and return its pb_id.

    Responds 400 on any invalid input; unexpected errors are logged and also
    mapped to 400 (best-effort endpoint).
    """
    try:
        if "filename" in request.rel_url.query:
            node_id = request.rel_url.query["node_id"]
            filename = request.rel_url.query["filename"]
            path_type = request.rel_url.query["type"]
            subfolder = request.rel_url.query["subfolder"]
            filename, output_dir = folder_paths.annotated_filepath(filename)

            if filename == '' or filename[0] == '/' or '..' in filename:
                return web.Response(status=400)

            if output_dir is None:
                if path_type == 'input':
                    output_dir = folder_paths.get_input_directory()
                elif path_type == 'output':
                    output_dir = folder_paths.get_output_directory()
                else:
                    output_dir = folder_paths.get_temp_directory()

            file = os.path.join(output_dir, subfolder, filename)

            # security fix: `subfolder` was joined into the path unchecked,
            # allowing traversal out of output_dir; reject anything that escapes it
            base_real = os.path.realpath(output_dir)
            if os.path.commonpath([base_real, os.path.realpath(file)]) != base_real:
                return web.Response(status=400)

            item = {
                'filename': filename,
                'type': path_type,
                'subfolder': subfolder,
            }
            pb_id = core.set_previewbridge_image(node_id, file, item)

            return web.Response(status=200, text=pb_id)
    except Exception:
        # best-effort endpoint: log and fall through to 400
        traceback.print_exc()

    return web.Response(status=400)
|
||||
|
||||
|
||||
@PromptServer.instance.routes.get("/impact/get/pb_id_image")
async def get_previewbridge_image(request):
    """Return the filename/type/subfolder record registered for a preview-bridge id."""
    pb_id = request.rel_url.query.get("id")

    if pb_id is not None and pb_id in core.preview_bridge_image_id_map:
        # entries are (file, item) pairs; only the item record is returned
        _, path_item = core.preview_bridge_image_id_map[pb_id]
        return web.json_response(path_item)

    return web.Response(status=400)
|
||||
|
||||
|
||||
@PromptServer.instance.routes.get("/impact/view/pb_id_image")
async def view_previewbridge_image(request):
    """Serve the image file registered for a preview-bridge id."""
    if "id" in request.rel_url.query:
        pb_id = request.rel_url.query["id"]

        if pb_id in core.preview_bridge_image_id_map:
            # bug fix: the map stores (file, item) pairs (see get_previewbridge_image)
            file, _ = core.preview_bridge_image_id_map[pb_id]

            # opening verifies the file is a readable image before serving it
            with Image.open(file):
                filename = os.path.basename(file)
                # bug fix: the Content-Disposition header used a hard-coded
                # placeholder instead of the computed basename (which was unused)
                return web.FileResponse(file, headers={"Content-Disposition": f"filename=\"{filename}\""})

    return web.Response(status=400)
|
||||
|
||||
|
||||
def onprompt_for_switch(json_data):
    """Resolve 'select_on_prompt' switches at queue time and prune unselected branches.

    Scans json_data['prompt'] for Impact switch nodes whose 'sel_mode' is set,
    resolves their effective selection (possibly by following an
    ImpactInt/ImpactSwitch/primitive link), then disconnects inputs that lead
    to unselected branches so they are never executed. Mutates json_data in place.
    """
    inversed_switch_info = {}       # ImpactInversedSwitch id -> selected output index (1-based)
    onprompt_switch_info = {}       # forward switch id -> selected input index (1-based)
    onprompt_cond_branch_info = {}  # ImpactConditionalBranchSelMode id -> resolved bool cond
    disabled_switch = set()         # switches whose selected input slot is absent

    for k, v in json_data['prompt'].items():
        if 'class_type' not in v:
            continue

        cls = v['class_type']
        if cls == 'ImpactInversedSwitch':
            # if 'sel_mode' is 'select_on_prompt'
            if 'sel_mode' in v['inputs'] and v['inputs']['sel_mode'] and 'select' in v['inputs']:
                select_input = v['inputs']['select']
                # if 'select' is converted input
                if isinstance(select_input, list) and len(select_input) == 2:
                    input_node = json_data['prompt'][select_input[0]]
                    if input_node['class_type'] == 'ImpactInt' and 'inputs' in input_node and 'value' in input_node['inputs']:
                        inversed_switch_info[k] = input_node['inputs']['value']
                    else:
                        logging.warning(f"\n##### ##### #####\n[Impact Pack] {cls}: For the 'select' operation, only 'select_index' of the 'ImpactInversedSwitch', which is not an input, or 'ImpactInt' and 'Primitive' are allowed as inputs if 'select_on_prompt' is selected.\n##### ##### #####\n")
                else:
                    inversed_switch_info[k] = select_input

        elif cls in ['ImpactSwitch', 'LatentSwitch', 'SEGSSwitch', 'ImpactMakeImageList']:
            # if 'sel_mode' is 'select_on_prompt'
            if 'sel_mode' in v['inputs'] and v['inputs']['sel_mode'] and 'select' in v['inputs']:
                select_input = v['inputs']['select']
                # if 'select' is converted input
                if isinstance(select_input, list) and len(select_input) == 2:
                    input_node = json_data['prompt'][select_input[0]]
                    if input_node['class_type'] == 'ImpactInt' and 'inputs' in input_node and 'value' in input_node['inputs']:
                        onprompt_switch_info[k] = input_node['inputs']['value']
                    if input_node['class_type'] == 'ImpactSwitch' and 'inputs' in input_node and 'select' in input_node['inputs']:
                        # A chained switch can feed the selection, but only as a plain int.
                        if isinstance(input_node['inputs']['select'], int):
                            onprompt_switch_info[k] = input_node['inputs']['select']
                        else:
                            logging.warning(f"\n##### ##### #####\n[Impact Pack] {cls}: For the 'select' operation, only 'select_index' of the 'ImpactSwitch', which is not an input, or 'ImpactInt' and 'Primitive' are allowed as inputs if 'select_on_prompt' is selected.\n##### ##### #####\n")
                else:
                    onprompt_switch_info[k] = select_input

            if k in onprompt_switch_info and f'input{onprompt_switch_info[k]}' not in v['inputs']:
                # disconnect output
                disabled_switch.add(k)

        elif cls == 'ImpactConditionalBranchSelMode':
            if 'sel_mode' in v['inputs'] and v['inputs']['sel_mode'] and 'cond' in v['inputs']:
                cond_input = v['inputs']['cond']
                if isinstance(cond_input, list) and len(cond_input) == 2:
                    input_node = json_data['prompt'][cond_input[0]]
                    if (input_node['class_type'] == 'ImpactValueReceiver' and 'inputs' in input_node
                            and 'value' in input_node['inputs'] and 'typ' in input_node['inputs']):
                        if 'BOOLEAN' == input_node['inputs']['typ']:
                            try:
                                onprompt_cond_branch_info[k] = input_node['inputs']['value'].lower() == "true"
                            except Exception:
                                pass
                else:
                    onprompt_cond_branch_info[k] = cond_input

    for k, v in json_data['prompt'].items():
        disable_targets = set()

        for kk, vv in v['inputs'].items():
            if isinstance(vv, list) and len(vv) == 2:
                if vv[0] in inversed_switch_info:
                    if vv[1] + 1 != inversed_switch_info[vv[0]]:
                        disable_targets.add(kk)
                    else:
                        # BUGFIX: was `del inversed_switch_info[k]` — k is the
                        # *consumer* node id, which is normally not a key of this
                        # dict and raised KeyError. The selected output is in use,
                        # so the switch itself (vv[0]) must be excluded from the
                        # final disconnect pass; pop() tolerates repeated hits.
                        inversed_switch_info.pop(vv[0], None)

                if vv[0] in disabled_switch:
                    disable_targets.add(kk)

        if k in onprompt_switch_info:
            selected_slot_name = f"input{onprompt_switch_info[k]}"
            for kk, vv in v['inputs'].items():
                if kk != selected_slot_name and kk.startswith('input'):
                    disable_targets.add(kk)

        if k in onprompt_cond_branch_info:
            selected_slot_name = "tt_value" if onprompt_cond_branch_info[k] else "ff_value"
            for kk, vv in v['inputs'].items():
                if kk in ['tt_value', 'ff_value'] and kk != selected_slot_name:
                    disable_targets.add(kk)

        for kk in disable_targets:
            del v['inputs'][kk]

    # inversed_switch - select out of range: nothing consumes the selected
    # output, so fully disconnect the switch's input.
    for target in inversed_switch_info.keys():
        del json_data['prompt'][target]['inputs']['input']
|
||||
|
||||
|
||||
def onprompt_for_pickers(json_data):
    """Garbage-collect segs_picker_map entries whose picker nodes left the prompt."""
    pickers_in_prompt = {
        node_id
        for node_id, node in json_data['prompt'].items()
        if node.get('class_type') == 'ImpactSEGSPicker'
    }

    # garbage collection
    for stale_key in [key for key in segs_picker_map if key not in pickers_in_prompt]:
        del segs_picker_map[stale_key]
|
||||
|
||||
|
||||
def gc_preview_bridge_cache(json_data):
    """Drop PreviewBridge cache entries for node ids absent from the current prompt."""
    live_ids = json_data['prompt'].keys()

    stale = [key for key in core.preview_bridge_cache if key not in live_ids]
    for key in stale:
        del core.preview_bridge_cache[key]

    stale = [key for key in core.preview_bridge_last_mask_cache if key not in live_ids]
    for key in stale:
        del core.preview_bridge_last_mask_cache[key]
|
||||
|
||||
|
||||
def workflow_imagereceiver_update(json_data):
    """Blank the embedded image payload of ImageReceiver nodes that save to the workflow."""
    for node in json_data['prompt'].values():
        if node.get('class_type') != 'ImageReceiver':
            continue
        if node['inputs']['save_to_workflow']:
            # The real payload lives in the workflow metadata; keep a placeholder here.
            node['inputs']['image'] = "#DATA"
|
||||
|
||||
|
||||
def regional_sampler_seed_update(json_data):
    """Advance the seed_2nd widget of each RegionalSampler node per its seed_2nd_mode.

    The new seed is pushed back to the frontend widget; 'fixed' (or any other
    mode) leaves the widget untouched.
    """
    seed_limit = 1125899906842624

    for node_id, node in json_data['prompt'].items():
        if node.get('class_type') != 'RegionalSampler':
            continue

        mode = node['inputs']['seed_2nd_mode']
        updated = None

        if mode == 'increment':
            updated = node['inputs']['seed_2nd'] + 1
            if updated > seed_limit:
                updated = 0  # wrap around
        elif mode == 'decrement':
            updated = node['inputs']['seed_2nd'] - 1
            if updated < 0:
                updated = seed_limit  # wrap around
        elif mode == 'randomize':
            updated = random.randint(0, seed_limit)

        if updated is not None:
            PromptServer.instance.send_sync("impact-node-feedback", {"node_id": node_id, "widget_name": "seed_2nd", "type": "INT", "value": updated})
|
||||
|
||||
|
||||
def find_input_value(input_node, prompt, input_type=int, input_keys=('value',)):
    """Search input_node for a widget value of *input_type* under any of *input_keys*.

    A list-valued input is a [node_id, slot] link and is followed upstream
    recursively through *prompt*. Returns None when nothing matches; any
    lookup error is logged and swallowed.
    """
    found = None

    try:
        for key in input_keys:
            found = input_node['inputs'].get(key, None)
            if isinstance(found, input_type):
                break
            if isinstance(found, list) and len(found):
                # Converted input: resolve it on the upstream node.
                found = find_input_value(prompt[found[0]], prompt=prompt, input_type=input_type, input_keys=input_keys)
                if found is not None:
                    break

    except Exception as e:
        logging.warning(f"[Impact Pack] Error encountered on find {input_type} value - {e}")

    return found
|
||||
|
||||
|
||||
def onprompt_populate_wildcards(json_data):
    """Expand wildcard text for ImpactWildcardEncode/Processor nodes at queue time.

    Nodes in 'populate' mode get their wildcard_text processed into
    populated_text using the node's seed, are flipped to 'reproduce' for this
    run, and the resolved text is pushed back to the frontend widget and
    mirrored into the workflow embedded in the png metadata.
    """
    prompt = json_data['prompt']

    updated_widget_values = {}
    for k, v in prompt.items():
        if 'class_type' in v and (v['class_type'] == 'ImpactWildcardEncode' or v['class_type'] == 'ImpactWildcardProcessor'):
            inputs = v['inputs']

            # legacy adapter: old workflows stored mode as a bool toggle
            if isinstance(inputs['mode'], bool):
                if inputs['mode']:
                    new_mode = 'populate'
                else:
                    new_mode = 'fixed'

                inputs['mode'] = new_mode

            if inputs['mode'] == 'populate' and isinstance(inputs['populated_text'], str):
                if isinstance(inputs['seed'], list):
                    # seed is a converted input ([node_id, slot]); resolve it upstream
                    try:
                        input_node = prompt[inputs['seed'][0]]
                        if input_node['class_type'] == 'ImpactInt':
                            input_seed = int(input_node['inputs']['value'])
                            if not isinstance(input_seed, int):
                                continue
                        elif input_node['class_type'] == 'Seed (rgthree)':
                            input_seed = int(input_node['inputs']['seed'])
                            if not isinstance(input_seed, int):
                                continue
                        else:
                            # fall back to a generic search for an int widget upstream
                            input_seed = find_input_value(input_node, prompt=prompt, input_type=int, input_keys=('int', 'seed', 'value'))
                            if input_seed is None:
                                logging.info(f"[Impact Pack] Only `ImpactInt`, `Seed (rgthree)` and `Primitive` Node are allowed as the seed for '{v['class_type']}'. It will be ignored. ")
                                continue
                    except Exception:
                        continue
                else:
                    input_seed = int(inputs['seed'])

                inputs['populated_text'] = impact.wildcards.process(inputs['wildcard_text'], input_seed)
                inputs['mode'] = 'reproduce'

                # push the resolved text back into the frontend widget
                PromptServer.instance.send_sync("impact-node-feedback", {"node_id": k, "widget_name": "populated_text", "type": "STRING", "value": inputs['populated_text']})
                updated_widget_values[k] = inputs['populated_text']

            # flip the widget back so the next queue re-expands the wildcard
            if inputs['mode'] == 'reproduce':
                PromptServer.instance.send_sync("impact-node-feedback", {"node_id": k, "widget_name": "mode", "type": "STRING", "value": 'populate'})

    # Mirror the resolved text into the workflow stored in the png metadata so
    # saved images reproduce the exact populated prompt.
    match json_data:
        case {"extra_data": {"extra_pnginfo": {"workflow": {"nodes": nodes}}}}:
            for node in nodes:
                match node:
                    case {"id": id, "widgets_values": widgets_values}:
                        key = str(id)
                        if key in updated_widget_values:
                            # assumes widget order [wildcard_text, populated_text, mode] — TODO confirm
                            widgets_values[1] = updated_widget_values[key]
                            widgets_values[2] = "reproduce"
|
||||
|
||||
|
||||
def onprompt_for_remote(json_data):
    """Apply ImpactRemoteBoolean/ImpactRemoteInt widget overrides to their target nodes.

    For each remote node, the target node's widget is overwritten with the
    remote value (type-checked against the current widget value) and the
    frontend is notified.
    """
    prompt = json_data['prompt']

    for node in prompt.values():
        if 'class_type' not in node:
            continue

        cls = node['class_type']
        if cls not in ('ImpactRemoteBoolean', 'ImpactRemoteInt'):
            continue

        inputs = node['inputs']
        node_id = str(inputs['node_id'])

        if node_id not in prompt:
            continue

        target_inputs = prompt[node_id]['inputs']
        widget_name = inputs['widget_name']
        if widget_name not in target_inputs:
            continue

        current = target_inputs[widget_name]
        if cls == 'ImpactRemoteBoolean' and isinstance(current, bool):
            widget_type = 'BOOLEAN'
        elif cls == 'ImpactRemoteInt' and isinstance(current, (int, float)):
            widget_type = 'INT'
        else:
            # NOTE(review): the original aborts the whole scan on a type
            # mismatch ('break' rather than 'continue'); preserved as-is —
            # confirm this is intentional.
            break

        target_inputs[widget_name] = inputs['value']
        PromptServer.instance.send_sync("impact-node-feedback", {"node_id": node_id, "widget_name": widget_name, "type": widget_type, "value": inputs['value']})
|
||||
|
||||
|
||||
def onprompt(json_data):
    """Prompt-queue hook: rewrites json_data in place before execution.

    Every sub-handler below mutates the prompt graph; the call order matters
    (remote overrides must be applied before switch resolution). Failures are
    logged but never block queuing the prompt.
    """
    try:
        onprompt_for_remote(json_data) # NOTE: top priority
        onprompt_for_switch(json_data)
        onprompt_for_pickers(json_data)
        onprompt_populate_wildcards(json_data)
        gc_preview_bridge_cache(json_data)
        workflow_imagereceiver_update(json_data)
        regional_sampler_seed_update(json_data)
        # Keep the latest prompt around (used e.g. by ImpactControlBridge.IS_CHANGED).
        core.current_prompt = json_data
    except Exception:
        logging.exception("[Impact Pack] ComfyUI-Impact-Pack: Error on prompt - several features will not work.")

    return json_data
|
||||
|
||||
|
||||
# Install the hook: every queued prompt passes through onprompt() first.
PromptServer.instance.add_on_prompt_handler(onprompt)
|
||||
779
custom_nodes/ComfyUI-Impact-Pack/modules/impact/logics.py
Normal file
779
custom_nodes/ComfyUI-Impact-Pack/modules/impact/logics.py
Normal file
@@ -0,0 +1,779 @@
|
||||
import sys
|
||||
import time
|
||||
|
||||
import execution
|
||||
import impact.impact_server
|
||||
from server import PromptServer
|
||||
from impact.utils import any_typ
|
||||
import impact.core as core
|
||||
import re
|
||||
import nodes
|
||||
import logging
|
||||
|
||||
|
||||
class ImpactCompare:
|
||||
@classmethod
|
||||
def INPUT_TYPES(cls):
|
||||
return {
|
||||
"required": {
|
||||
"cmp": (['a = b', 'a <> b', 'a > b', 'a < b', 'a >= b', 'a <= b', 'tt', 'ff'],),
|
||||
"a": (any_typ, ),
|
||||
"b": (any_typ, ),
|
||||
},
|
||||
}
|
||||
|
||||
FUNCTION = "doit"
|
||||
CATEGORY = "ImpactPack/Logic"
|
||||
|
||||
RETURN_TYPES = ("BOOLEAN", )
|
||||
|
||||
def doit(self, cmp, a, b):
|
||||
if cmp == "a = b":
|
||||
return (a == b, )
|
||||
elif cmp == "a <> b":
|
||||
return (a != b, )
|
||||
elif cmp == "a > b":
|
||||
return (a > b, )
|
||||
elif cmp == "a < b":
|
||||
return (a < b, )
|
||||
elif cmp == "a >= b":
|
||||
return (a >= b, )
|
||||
elif cmp == "a <= b":
|
||||
return (a <= b, )
|
||||
elif cmp == 'tt':
|
||||
return (True, )
|
||||
else:
|
||||
return (False, )
|
||||
|
||||
|
||||
class ImpactNotEmptySEGS:
    """Outputs True when the SEGS input contains at least one segment."""

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = ("BOOLEAN", )

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"segs": ("SEGS",)}}

    def doit(self, segs):
        # SEGS is (shape_info, [SEG, ...]); non-empty list means segments exist.
        has_segments = segs[1] != []
        return (has_segments, )
|
||||
|
||||
|
||||
class ImpactConditionalBranch:
    """Lazily evaluates and forwards either tt_value or ff_value based on cond."""

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = (any_typ, )

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "cond": ("BOOLEAN",),
                "tt_value": (any_typ, {"lazy": True}),
                "ff_value": (any_typ, {"lazy": True}),
            },
        }

    def check_lazy_status(self, cond, tt_value=None, ff_value=None):
        # Request evaluation only for the branch that will actually be returned.
        needed_name = "tt_value" if cond else "ff_value"
        needed_value = tt_value if cond else ff_value
        if needed_value is None:
            return [needed_name]
        # Falls through to None when the required input is already available.

    def doit(self, cond, tt_value=None, ff_value=None):
        return (tt_value if cond else ff_value,)
|
||||
|
||||
|
||||
class ImpactConditionalBranchSelMode:
    """Conditional branch whose selection can be resolved at prompt-queue time.

    On older ComfyUI versions (no lazy-execution model) a 'sel_mode' toggle is
    exposed; the server-side onprompt hook uses it to prune the unselected
    branch before execution.
    """

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = (any_typ, )

    @classmethod
    def INPUT_TYPES(cls):
        required_inputs = {"cond": ("BOOLEAN",)}
        if not core.is_execution_model_version_supported():
            # Legacy frontends need the explicit prompt-time/execution-time toggle.
            required_inputs["sel_mode"] = ("BOOLEAN", {"default": True, "label_on": "select_on_prompt", "label_off": "select_on_execution"})

        return {
            "required": required_inputs,
            "optional": {
                "tt_value": (any_typ,),
                "ff_value": (any_typ,),
            },
        }

    def doit(self, cond, tt_value=None, ff_value=None, **kwargs):
        return (tt_value if cond else ff_value,)
|
||||
|
||||
|
||||
class ImpactConvertDataType:
    """Converts an arbitrary value into STRING/FLOAT/INT/BOOLEAN representations."""

    RETURN_TYPES = ("STRING", "FLOAT", "INT", "BOOLEAN")
    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"value": (any_typ,)}}

    @staticmethod
    def is_number(string):
        """True when *string* looks like a plain signed int/float literal."""
        return bool(re.match(r'^[-+]?[0-9]*\.?[0-9]+$', string))

    def doit(self, value):
        text = str(value)
        if self.is_number(text):
            num = value
        else:
            # Non-numeric values: anything except "false" (case-insensitive) is truthy.
            num = 0 if text.lower() == "false" else 1
        return (text, float(num), int(float(num)), bool(float(num)), )
|
||||
|
||||
|
||||
class ImpactIfNone:
    """Reports whether any_input is connected (non-None), passing signal through."""

    RETURN_TYPES = (any_typ, "BOOLEAN")
    RETURN_NAMES = ("signal_opt", "bool")
    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {},
            "optional": {"signal": (any_typ,), "any_input": (any_typ,), }
        }

    def doit(self, signal=None, any_input=None):
        # Second output is the presence flag; signal is forwarded untouched.
        return (signal, any_input is not None, )
|
||||
|
||||
|
||||
class ImpactLogicalOperators:
    """Applies a binary logical operator (and/or/xor) to two booleans."""

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = ("BOOLEAN", )

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "operator": (['and', 'or', 'xor'],),
                "bool_a": ("BOOLEAN", {"forceInput": True}),
                "bool_b": ("BOOLEAN", {"forceInput": True}),
            },
        }

    def doit(self, operator, bool_a, bool_b):
        ops = {
            "and": lambda p, q: p and q,
            "or": lambda p, q: p or q,
        }
        # Any unrecognized operator falls through to xor (inequality).
        apply_op = ops.get(operator, lambda p, q: p != q)
        return (apply_op(bool_a, bool_b), )
|
||||
|
||||
|
||||
class ImpactConditionalStopIteration:
    """Output node that asks the frontend to stop the auto-queue loop when cond is True."""

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = ()
    OUTPUT_NODE = True

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": { "cond": ("BOOLEAN", {"forceInput": True}), },
        }

    def doit(self, cond):
        if not cond:
            return {}
        # Notify the frontend so it stops re-queuing prompts.
        PromptServer.instance.send_sync("stop-iteration", {})
        return {}
|
||||
|
||||
|
||||
class ImpactNeg:
    """Logical NOT: outputs the boolean negation of its input."""

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = ("BOOLEAN", )

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": { "value": ("BOOLEAN", {"forceInput": True}), },
        }

    def doit(self, value):
        negated = not value
        return (negated, )
|
||||
|
||||
|
||||
class ImpactInt:
    """Integer constant node: passes its widget value straight through."""

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = ("INT", )

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}),
            },
        }

    def doit(self, value):
        return (value, )
|
||||
|
||||
|
||||
class ImpactFloat:
    """Float constant node: passes its widget value straight through."""

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = ("FLOAT", )

    @classmethod
    def INPUT_TYPES(cls):
        # Bounds are the float32 extremes used elsewhere in ComfyUI widgets.
        return {
            "required": {
                "value": ("FLOAT", {"default": 1.0, "min": -3.402823466e+38, "max": 3.402823466e+38}),
            },
        }

    def doit(self, value):
        return (value, )
|
||||
|
||||
|
||||
class ImpactBoolean:
    """Boolean constant node: passes its widget value straight through."""

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = ("BOOLEAN", )

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": ("BOOLEAN", {"default": False}),
            },
        }

    def doit(self, value):
        return (value, )
|
||||
|
||||
|
||||
class ImpactValueSender:
    """Broadcasts an arbitrary value to frontend listeners of link_id via websocket."""

    OUTPUT_NODE = True
    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = (any_typ, )
    RETURN_NAMES = ("signal", )

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "value": (any_typ, ),
                    "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}),
                    },
                "optional": {
                    "signal_opt": (any_typ,),
                    }
                }

    def doit(self, value, link_id=0, signal_opt=None):
        # Fire-and-forget notification; the optional signal is just forwarded.
        payload = {"link_id": link_id, "value": value}
        PromptServer.instance.send_sync("value-send", payload)
        return (signal_opt, )
|
||||
|
||||
|
||||
class ImpactIntConstSender:
    """Broadcasts a constant integer to frontend listeners of link_id via websocket."""

    OUTPUT_NODE = True
    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = ()

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "signal": (any_typ, ),
                    "value": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}),
                    "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}),
                    },
                }

    def doit(self, signal, value, link_id=0):
        # The signal input only forces execution ordering; it is not forwarded.
        payload = {"link_id": link_id, "value": value}
        PromptServer.instance.send_sync("value-send", payload)
        return {}
|
||||
|
||||
|
||||
class ImpactValueReceiver:
    """Converts the received string widget value into the selected type."""

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = (any_typ, )

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "typ": (["STRING", "INT", "FLOAT", "BOOLEAN"], ),
                    "value": ("STRING", {"default": ""}),
                    "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}),
                    },
                }

    def doit(self, typ, value, link_id=0):
        converters = {
            "INT": int,
            "FLOAT": float,
            "BOOLEAN": lambda s: s.lower() == "true",
        }
        # Unknown/STRING types fall through to the raw string.
        convert = converters.get(typ, lambda s: s)
        return (convert(value), )
|
||||
|
||||
|
||||
class ImpactImageInfo:
    """Exposes the shape of an IMAGE tensor as (batch, height, width, channel)."""

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic/_for_test"
    RETURN_TYPES = ("INT", "INT", "INT", "INT")
    RETURN_NAMES = ("batch", "height", "width", "channel")

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "value": ("IMAGE", ),
                    },
                }

    def doit(self, value):
        # IMAGE tensors are laid out as [B, H, W, C].
        shape = value.shape
        return (shape[0], shape[1], shape[2], shape[3])
|
||||
|
||||
|
||||
class ImpactLatentInfo:
    """Exposes a LATENT's pixel-space size as (batch, height, width, channel)."""

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic/_for_test"
    RETURN_TYPES = ("INT", "INT", "INT", "INT")
    RETURN_NAMES = ("batch", "height", "width", "channel")

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "value": ("LATENT", ),
                    },
                }

    def doit(self, value):
        # LATENT samples are [B, C, H/8, W/8]; spatial dims scale by the 8x VAE factor.
        samples_shape = value['samples'].shape
        batch, channel = samples_shape[0], samples_shape[1]
        return (batch, samples_shape[2] * 8, samples_shape[3] * 8, channel)
|
||||
|
||||
|
||||
class ImpactMinMax:
    """Returns max(a, b) when mode is on, min(a, b) otherwise."""

    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic/_for_test"
    RETURN_TYPES = ("INT", )

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "mode": ("BOOLEAN", {"default": True, "label_on": "max", "label_off": "min"}),
                    "a": (any_typ,),
                    "b": (any_typ,),
                    },
                }

    def doit(self, mode, a, b):
        chooser = max if mode else min
        return (chooser(a, b), )
|
||||
|
||||
|
||||
class ImpactQueueTrigger:
    """Asks the frontend to enqueue another prompt when the trigger toggle is on."""

    OUTPUT_NODE = True
    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic/_for_test"
    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("signal_opt",)

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "signal": (any_typ,),
                    "mode": ("BOOLEAN", {"default": True, "label_on": "Trigger", "label_off": "Don't trigger"}),
                    }
                }

    def doit(self, signal, mode):
        # Only fire when the toggle is on; signal passes through regardless.
        if mode:
            PromptServer.instance.send_sync("impact-add-queue", {})
        return (signal,)
|
||||
|
||||
|
||||
class ImpactQueueTriggerCountdown:
    """Re-queues prompts until `count` reaches `total`, then resets the counter.

    The running count lives in this node's own widget and is advanced via
    frontend feedback messages on each execution.
    """

    OUTPUT_NODE = True
    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic/_for_test"
    RETURN_TYPES = (any_typ, "INT", "INT")
    RETURN_NAMES = ("signal_opt", "count", "total")

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "count": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "total": ("INT", {"default": 10, "min": 1, "max": 0xffffffffffffffff}),
                    "mode": ("BOOLEAN", {"default": True, "label_on": "Trigger", "label_off": "Don't trigger"}),
                    },
                "optional": {"signal": (any_typ,),},
                "hidden": {"unique_id": "UNIQUE_ID"}
                }

    def doit(self, count, total, mode, unique_id, signal=None):
        if mode:
            if count < total - 1:
                # Bump the widget and enqueue one more run.
                PromptServer.instance.send_sync("impact-node-feedback",
                                                {"node_id": unique_id, "widget_name": "count", "type": "int", "value": count+1})
                PromptServer.instance.send_sync("impact-add-queue", {})
            if count >= total - 1:
                # Countdown finished: reset the widget for the next batch.
                PromptServer.instance.send_sync("impact-node-feedback",
                                                {"node_id": unique_id, "widget_name": "count", "type": "int", "value": 0})

        return (signal, count, total)
|
||||
|
||||
|
||||
|
||||
class ImpactSetWidgetValue:
    """Sets a widget on another node (by id) to the first connected typed value."""

    OUTPUT_NODE = True
    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic/_for_test"
    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("signal_opt",)

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "signal": (any_typ,),
                    "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "widget_name": ("STRING", {"multiline": False}),
                    },
                "optional": {
                    "boolean_value": ("BOOLEAN", {"forceInput": True}),
                    "int_value": ("INT", {"forceInput": True}),
                    "float_value": ("FLOAT", {"forceInput": True}),
                    "string_value": ("STRING", {"forceInput": True}),
                    }
                }

    def doit(self, signal, node_id, widget_name, boolean_value=None, int_value=None, float_value=None, string_value=None, ):
        # Priority order matches the optional input order: BOOLEAN > INT > FLOAT > STRING.
        candidates = (
            ("BOOLEAN", boolean_value),
            ("INT", int_value),
            ("FLOAT", float_value),
            ("STRING", string_value),
        )
        kind, value = None, None
        for type_name, candidate in candidates:
            if candidate is not None:
                kind, value = type_name, candidate
                break

        if value is not None:
            PromptServer.instance.send_sync("impact-node-feedback",
                                            {"node_id": node_id, "widget_name": widget_name, "type": kind, "value": value})

        return (signal,)
|
||||
|
||||
|
||||
class ImpactNodeSetMuteState:
    """Toggles another node's active/mute state via a frontend message."""

    OUTPUT_NODE = True
    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic/_for_test"
    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("signal_opt",)

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "signal": (any_typ,),
                    "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "set_state": ("BOOLEAN", {"default": True, "label_on": "active", "label_off": "mute"}),
                    }
                }

    def doit(self, signal, node_id, set_state):
        message = {"node_id": node_id, "is_active": set_state}
        PromptServer.instance.send_sync("impact-node-mute-state", message)
        return (signal,)
|
||||
|
||||
|
||||
class ImpactSleep:
    """Blocks execution for the given number of seconds, then passes signal through."""

    OUTPUT_NODE = True
    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic/_for_test"
    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("signal_opt",)

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "signal": (any_typ,),
                    "seconds": ("FLOAT", {"default": 0.5, "min": 0, "max": 3600}),
                    }
                }

    def doit(self, signal, seconds):
        # Blocks the executor thread on purpose (throttling/testing helper).
        time.sleep(seconds)
        return (signal,)
|
||||
|
||||
|
||||
def workflow_to_map(workflow):
    """Index a frontend workflow: nodes keyed by stringified id, links by link id.

    Each links entry is the link record minus its leading id:
    [src_node, src_slot, dst_node, dst_slot, type].
    """
    links = {entry[0]: entry[1:] for entry in workflow['links']}
    nodes = {str(node['id']): node for node in workflow['nodes']}

    return nodes, links
|
||||
|
||||
|
||||
class ImpactRemoteBoolean:
    """Marker node: its boolean value is applied to the target widget by the
    server-side onprompt hook; execution itself is a no-op."""

    OUTPUT_NODE = True
    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic/_for_test"
    RETURN_TYPES = ()

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "widget_name": ("STRING", {"multiline": False}),
                    "value": ("BOOLEAN", {"default": True, "label_on": "True", "label_off": "False"}),
                    }}

    def doit(self, **kwargs):
        return {}
|
||||
|
||||
|
||||
class ImpactRemoteInt:
    """Marker node: its integer value is applied to the target widget by the
    server-side onprompt hook; execution itself is a no-op."""

    OUTPUT_NODE = True
    FUNCTION = "doit"
    CATEGORY = "ImpactPack/Logic/_for_test"
    RETURN_TYPES = ()

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "node_id": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "widget_name": ("STRING", {"multiline": False}),
                    "value": ("INT", {"default": 0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff}),
                    }}

    def doit(self, **kwargs):
        return {}
|
||||
|
||||
class ImpactControlBridge:
    # Passes `value` through while remotely toggling the execution state
    # (active / mute / bypass) of the nodes wired to its output.
    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                        "value": (any_typ,),
                        "mode": ("BOOLEAN", {"default": True, "label_on": "Active", "label_off": "Stop/Mute/Bypass"}),
                        "behavior": (["Stop", "Mute", "Bypass"], ),
                    },
                "hidden": {"unique_id": "UNIQUE_ID", "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}
                }

    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Logic"
    RETURN_TYPES = (any_typ,)
    RETURN_NAMES = ("value",)
    OUTPUT_NODE = True

    DESCRIPTION = ("When behavior is Stop and mode is active, the input value is passed directly to the output.\n"
                   "When behavior is Mute/Bypass and mode is active, the node connected to the output is changed to active state.\n"
                   "When behavior is Stop and mode is Stop/Mute/Bypass, the workflow execution of the current node is halted.\n"
                   "When behavior is Mute/Bypass and mode is Stop/Mute/Bypass, the node connected to the output is changed to Mute/Bypass state.")

    @classmethod
    def IS_CHANGED(self, value, mode, behavior="Stop", unique_id=None, prompt=None, extra_pnginfo=None):
        # NOTE(review): declared @classmethod but the first parameter is named
        # `self`; ComfyUI invokes this through the class, so it still works.
        if behavior == "Stop":
            return value, mode, behavior
        else:
            # NOTE: extra_pnginfo is not populated for IS_CHANGED.
            # so extra_pnginfo is useless in here
            try:
                workflow = core.current_prompt['extra_data']['extra_pnginfo']['workflow']
            except Exception:
                logging.info("[Impact Pack] core.current_prompt['extra_data']['extra_pnginfo']['workflow']")
                return 0

            nodes, links = workflow_to_map(workflow)
            next_nodes = []

            # Hash on the set of non-reroute nodes downstream of output 0, so a
            # topology change re-triggers execution.
            for link in nodes[unique_id]['outputs'][0]['links']:
                node_id = str(links[link][2])
                impact.utils.collect_non_reroute_nodes(nodes, links, next_nodes, node_id)

            return next_nodes

    def doit(self, value, mode, behavior="Stop", unique_id=None, prompt=None, extra_pnginfo=None):
        # NOTE(review): `error_skip_flag` is declared global but never used in
        # this body — presumably a leftover; confirm before removing.
        global error_skip_flag

        if core.is_execution_model_version_supported():
            from comfy_execution.graph import ExecutionBlocker
        else:
            logging.info("[Impact Pack] ImpactControlBridge: ComfyUI is outdated. The 'Stop' behavior cannot function properly.")

        if behavior == "Stop":
            if mode:
                return (value, )
            else:
                # Block downstream execution instead of returning a value.
                return (ExecutionBlocker(None), )
        elif extra_pnginfo is None:
            logging.warning(f"[Impact Pack] limitation: '{behavior}' behavior cannot be used in API execution.")
            return (value,)
        else:
            workflow_nodes, links = workflow_to_map(extra_pnginfo['workflow'])

            # Classify downstream nodes by their current editor mode:
            # 0 = active, 2 = mute, 4 = bypass.
            active_nodes = []
            mute_nodes = []
            bypass_nodes = []

            for link in workflow_nodes[unique_id]['outputs'][0]['links']:
                node_id = str(links[link][2])

                next_nodes = []
                impact.utils.collect_non_reroute_nodes(workflow_nodes, links, next_nodes, node_id)

                for next_node_id in next_nodes:
                    node_mode = workflow_nodes[next_node_id]['mode']

                    if node_mode == 0:
                        active_nodes.append(next_node_id)
                    elif node_mode == 2:
                        mute_nodes.append(next_node_id)
                    elif node_mode == 4:
                        bypass_nodes.append(next_node_id)

            # Each branch asks the frontend to flip states, then interrupts the
            # current run so it can be re-queued with the new node states.
            if mode:
                # active
                should_be_active_nodes = mute_nodes + bypass_nodes
                if len(should_be_active_nodes) > 0:
                    PromptServer.instance.send_sync("impact-bridge-continue", {"node_id": unique_id, 'actives': list(should_be_active_nodes)})
                    nodes.interrupt_processing()

            elif behavior == "Mute" or behavior == True: # noqa: E712
                # mute
                should_be_mute_nodes = active_nodes + bypass_nodes
                if len(should_be_mute_nodes) > 0:
                    PromptServer.instance.send_sync("impact-bridge-continue", {"node_id": unique_id, 'mutes': list(should_be_mute_nodes)})
                    nodes.interrupt_processing()

            else:
                # bypass
                should_be_bypass_nodes = active_nodes + mute_nodes
                if len(should_be_bypass_nodes) > 0:
                    PromptServer.instance.send_sync("impact-bridge-continue", {"node_id": unique_id, 'bypasses': list(should_be_bypass_nodes)})
                    nodes.interrupt_processing()

            return (value, )
|
||||
|
||||
|
||||
class ImpactExecutionOrderController:
    """Identity pass-through used to impose an execution order.

    Both inputs are forwarded unchanged; wiring `signal` from an upstream node
    forces that node to execute before anything consuming this node's outputs.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "signal": (any_typ,),
                    "value": (any_typ,),
                    }}

    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"
    RETURN_TYPES = (any_typ, any_typ)
    RETURN_NAMES = ("signal", "value")

    def doit(self, signal, value):
        # No transformation — the node exists purely for graph ordering.
        return signal, value
|
||||
|
||||
|
||||
class ImpactListBridge:
    """List-valued pass-through that acts as a synchronization barrier."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "list_input": (any_typ,),
                    }}

    FUNCTION = "doit"

    DESCRIPTION = "When passing the list output through this node, it collects and organizes the data before forwarding it, which ensures that the previous stage's sub-workflow has been completed."

    CATEGORY = "ImpactPack/Util"
    RETURN_TYPES = (any_typ, )
    RETURN_NAMES = ("list_output", )

    # Consume and emit the whole list at once so upstream per-item
    # sub-workflows complete before downstream nodes start.
    INPUT_IS_LIST = True
    OUTPUT_IS_LIST = (True, )

    @staticmethod
    def doit(list_input):
        # Identity — the barrier effect comes from INPUT_IS_LIST above.
        return (list_input,)
|
||||
|
||||
|
||||
# Keep a reference to ComfyUI's stock error handler so it can be restored or
# delegated to after this module monkey-patches PromptExecutor.
original_handle_execution = execution.PromptExecutor.handle_execution_error


def handle_execution_error(**kwargs):
    # NOTE(review): this forwards to the *current* class attribute rather than
    # the saved `original_handle_execution`. If this function is itself
    # installed as the replacement handler, that lookup would recurse — the
    # patch site is not visible here; confirm before changing.
    execution.PromptExecutor.handle_execution_error(**kwargs)
|
||||
|
||||
440
custom_nodes/ComfyUI-Impact-Pack/modules/impact/pipe.py
Normal file
440
custom_nodes/ComfyUI-Impact-Pack/modules/impact/pipe.py
Normal file
@@ -0,0 +1,440 @@
|
||||
import folder_paths
|
||||
from impact.utils import any_typ
|
||||
|
||||
|
||||
class ToDetailerPipe:
    """Packs model, conditioning, detectors and hooks into one DETAILER_PIPE tuple.

    The resulting 14-slot tuple ends with four refiner slots which are None for
    the non-SDXL variant.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "model": ("MODEL",),
                    "clip": ("CLIP",),
                    "vae": ("VAE",),
                    "positive": ("CONDITIONING",),
                    "negative": ("CONDITIONING",),
                    "bbox_detector": ("BBOX_DETECTOR", ),
                    "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                    "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),),
                    "Select to add Wildcard": (["Select the Wildcard to add to the text"], ),
                    },
                "optional": {
                    "sam_model_opt": ("SAM_MODEL",),
                    "segm_detector_opt": ("SEGM_DETECTOR",),
                    "detailer_hook": ("DETAILER_HOOK",),
                    }}

    RETURN_TYPES = ("DETAILER_PIPE", )
    RETURN_NAMES = ("detailer_pipe", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, *args, **kwargs):
        # Optional and refiner slots are filled from kwargs when present,
        # otherwise None (the SDXL subclass supplies the refiner_* entries).
        tail = tuple(kwargs.get(k) for k in (
            'segm_detector_opt', 'sam_model_opt', 'detailer_hook',
            'refiner_model', 'refiner_clip', 'refiner_positive', 'refiner_negative'))
        pipe = (kwargs['model'], kwargs['clip'], kwargs['vae'],
                kwargs['positive'], kwargs['negative'],
                kwargs['wildcard'], kwargs['bbox_detector']) + tail
        return (pipe, )
|
||||
|
||||
|
||||
class ToDetailerPipeSDXL(ToDetailerPipe):
    """SDXL variant of ToDetailerPipe: additionally requires refiner model/clip/conditioning.

    Inherits `doit` from ToDetailerPipe, which picks the refiner_* kwargs up
    into the pipe's trailing refiner slots.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "model": ("MODEL",),
                    "clip": ("CLIP",),
                    "vae": ("VAE",),
                    "positive": ("CONDITIONING",),
                    "negative": ("CONDITIONING",),
                    "refiner_model": ("MODEL",),
                    "refiner_clip": ("CLIP",),
                    "refiner_positive": ("CONDITIONING",),
                    "refiner_negative": ("CONDITIONING",),
                    "bbox_detector": ("BBOX_DETECTOR", ),
                    "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                    "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),),
                    "Select to add Wildcard": (["Select the Wildcard to add to the text"],),
                    },
                "optional": {
                    "sam_model_opt": ("SAM_MODEL",),
                    "segm_detector_opt": ("SEGM_DETECTOR",),
                    "detailer_hook": ("DETAILER_HOOK",),
                    }}
|
||||
|
||||
|
||||
class FromDetailerPipe:
    """Unpacks a DETAILER_PIPE into its base components; refiner slots are dropped."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }, }

    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK")
    RETURN_NAMES = ("model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook")
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, detailer_pipe):
        # Pipe layout: (model, clip, vae, pos, neg, wildcard, bbox,
        #               segm, sam, hook, 4x refiner slots).
        # Note the output order swaps sam/segm relative to the pipe layout.
        (model, clip, vae, positive, negative, _wildcard,
         bbox_detector, segm_detector, sam_model, detailer_hook) = detailer_pipe[:10]
        return model, clip, vae, positive, negative, bbox_detector, sam_model, segm_detector, detailer_hook
|
||||
|
||||
|
||||
class FromDetailerPipe_v2:
    """Like FromDetailerPipe, but also forwards the original pipe as the first output."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }, }

    RETURN_TYPES = ("DETAILER_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK")
    RETURN_NAMES = ("detailer_pipe", "model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook")
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, detailer_pipe):
        # Output order swaps sam/segm relative to the pipe layout (see RETURN_NAMES).
        (model, clip, vae, positive, negative, _wildcard,
         bbox_detector, segm_detector, sam_model, detailer_hook) = detailer_pipe[:10]
        return detailer_pipe, model, clip, vae, positive, negative, bbox_detector, sam_model, segm_detector, detailer_hook
|
||||
|
||||
|
||||
class FromDetailerPipe_SDXL:
    """Unpacks a full SDXL DETAILER_PIPE, including the four refiner slots."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }, }

    RETURN_TYPES = ("DETAILER_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING", "BBOX_DETECTOR", "SAM_MODEL", "SEGM_DETECTOR", "DETAILER_HOOK", "MODEL", "CLIP", "CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("detailer_pipe", "model", "clip", "vae", "positive", "negative", "bbox_detector", "sam_model_opt", "segm_detector_opt", "detailer_hook", "refiner_model", "refiner_clip", "refiner_positive", "refiner_negative")
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, detailer_pipe):
        # Output order swaps sam/segm relative to the pipe layout (see RETURN_NAMES).
        (model, clip, vae, positive, negative, _wildcard,
         bbox_detector, segm_detector, sam_model, detailer_hook,
         refiner_model, refiner_clip, refiner_positive, refiner_negative) = detailer_pipe
        return (detailer_pipe, model, clip, vae, positive, negative,
                bbox_detector, sam_model, segm_detector, detailer_hook,
                refiner_model, refiner_clip, refiner_positive, refiner_negative)
|
||||
|
||||
|
||||
class AnyPipeToBasic:
    """Truncates any richer pipe to a BASIC_PIPE (its first five slots)."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {"any_pipe": (any_typ,)},
        }

    RETURN_TYPES = ("BASIC_PIPE", )
    RETURN_NAMES = ("basic_pipe", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, any_pipe):
        # Every pipe format shares the same first five slots:
        # (model, clip, vae, positive, negative).
        basic_pipe = any_pipe[:5]
        return (basic_pipe, )
|
||||
|
||||
|
||||
class ToBasicPipe:
    """Packs model/clip/vae/positive/negative into a single BASIC_PIPE tuple."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "model": ("MODEL",),
                    "clip": ("CLIP",),
                    "vae": ("VAE",),
                    "positive": ("CONDITIONING",),
                    "negative": ("CONDITIONING",),
                    },
                }

    RETURN_TYPES = ("BASIC_PIPE", )
    RETURN_NAMES = ("basic_pipe", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, model, clip, vae, positive, negative):
        # Canonical BASIC_PIPE ordering.
        return ((model, clip, vae, positive, negative), )
|
||||
|
||||
|
||||
class FromBasicPipe:
    """Unpacks a BASIC_PIPE into its five components."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"basic_pipe": ("BASIC_PIPE",), }, }

    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("model", "clip", "vae", "positive", "negative")
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, basic_pipe):
        # Strict 5-way unpack validates the pipe shape as a side effect.
        m, c, v, pos, neg = basic_pipe
        return m, c, v, pos, neg
|
||||
|
||||
|
||||
class FromBasicPipe_v2:
    """Like FromBasicPipe, but also forwards the original pipe as the first output."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"basic_pipe": ("BASIC_PIPE",), }, }

    RETURN_TYPES = ("BASIC_PIPE", "MODEL", "CLIP", "VAE", "CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("basic_pipe", "model", "clip", "vae", "positive", "negative")
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, basic_pipe):
        # Strict 5-way unpack validates the pipe shape as a side effect.
        m, c, v, pos, neg = basic_pipe
        return basic_pipe, m, c, v, pos, neg
|
||||
|
||||
|
||||
class BasicPipeToDetailerPipe:
    """Extends a BASIC_PIPE with detector/wildcard/hook slots into a DETAILER_PIPE."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"basic_pipe": ("BASIC_PIPE",),
                             "bbox_detector": ("BBOX_DETECTOR", ),
                             "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                             "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),),
                             "Select to add Wildcard": (["Select the Wildcard to add to the text"],),
                             },
                "optional": {
                             "sam_model_opt": ("SAM_MODEL", ),
                             "segm_detector_opt": ("SEGM_DETECTOR",),
                             "detailer_hook": ("DETAILER_HOOK",),
                             },
                }

    RETURN_TYPES = ("DETAILER_PIPE", )
    RETURN_NAMES = ("detailer_pipe", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, *args, **kwargs):
        model, clip, vae, positive, negative = kwargs['basic_pipe']
        # Optional slots default to None; the four trailing refiner slots are
        # always empty for the non-SDXL pipe.
        pipe = (model, clip, vae, positive, negative,
                kwargs['wildcard'], kwargs['bbox_detector'],
                kwargs.get('segm_detector_opt'), kwargs.get('sam_model_opt'),
                kwargs.get('detailer_hook'),
                None, None, None, None)
        return (pipe, )
|
||||
|
||||
|
||||
class BasicPipeToDetailerPipeSDXL:
    """Merges base and refiner BASIC_PIPEs plus detector/hook slots into an SDXL DETAILER_PIPE."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"base_basic_pipe": ("BASIC_PIPE",),
                             "refiner_basic_pipe": ("BASIC_PIPE",),
                             "bbox_detector": ("BBOX_DETECTOR", ),
                             "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                             "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),),
                             "Select to add Wildcard": (["Select the Wildcard to add to the text"],),
                             },
                "optional": {
                             "sam_model_opt": ("SAM_MODEL", ),
                             "segm_detector_opt": ("SEGM_DETECTOR",),
                             "detailer_hook": ("DETAILER_HOOK",),
                             },
                }

    RETURN_TYPES = ("DETAILER_PIPE", )
    RETURN_NAMES = ("detailer_pipe", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, *args, **kwargs):
        model, clip, vae, positive, negative = kwargs['base_basic_pipe']
        # The refiner pipe's VAE is intentionally discarded: a DETAILER_PIPE
        # carries only the base VAE.
        refiner_model, refiner_clip, _refiner_vae, refiner_positive, refiner_negative = kwargs['refiner_basic_pipe']
        pipe = (model, clip, vae, positive, negative,
                kwargs['wildcard'], kwargs['bbox_detector'],
                kwargs.get('segm_detector_opt'), kwargs.get('sam_model_opt'),
                kwargs.get('detailer_hook'),
                refiner_model, refiner_clip, refiner_positive, refiner_negative)
        return (pipe, )
|
||||
|
||||
|
||||
class DetailerPipeToBasicPipe:
    """Splits a DETAILER_PIPE into base and refiner BASIC_PIPEs."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"detailer_pipe": ("DETAILER_PIPE",), }}

    RETURN_TYPES = ("BASIC_PIPE", "BASIC_PIPE")
    RETURN_NAMES = ("base_basic_pipe", "refiner_basic_pipe")
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, detailer_pipe):
        model, clip, vae, positive, negative = detailer_pipe[:5]
        refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe[10:14]
        base_pipe = (model, clip, vae, positive, negative)
        # The refiner pipe reuses the base VAE — DETAILER_PIPE has no refiner-VAE slot.
        refiner_pipe = (refiner_model, refiner_clip, vae, refiner_positive, refiner_negative)
        return (base_pipe, refiner_pipe)
|
||||
|
||||
|
||||
class EditBasicPipe:
    """Returns a copy of a BASIC_PIPE with any provided components replaced."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {"basic_pipe": ("BASIC_PIPE",), },
            "optional": {
                "model": ("MODEL",),
                "clip": ("CLIP",),
                "vae": ("VAE",),
                "positive": ("CONDITIONING",),
                "negative": ("CONDITIONING",),
            },
        }

    RETURN_TYPES = ("BASIC_PIPE", )
    RETURN_NAMES = ("basic_pipe", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, basic_pipe, model=None, clip=None, vae=None, positive=None, negative=None):
        # For each of the five slots, keep the current value unless an
        # override (non-None) was supplied.
        overrides = (model, clip, vae, positive, negative)
        edited = tuple(cur if new is None else new
                       for new, cur in zip(overrides, basic_pipe))
        return (edited, )
|
||||
|
||||
|
||||
class EditDetailerPipe:
    """Returns a copy of a DETAILER_PIPE with any provided components replaced.

    All slots are overridden only when the corresponding kwarg is non-None,
    except `wildcard`, which replaces the pipe's wildcard only when non-empty.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "detailer_pipe": ("DETAILER_PIPE",),
                "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),),
                "Select to add Wildcard": (["Select the Wildcard to add to the text"],),
            },
            "optional": {
                "model": ("MODEL",),
                "clip": ("CLIP",),
                "vae": ("VAE",),
                "positive": ("CONDITIONING",),
                "negative": ("CONDITIONING",),
                "bbox_detector": ("BBOX_DETECTOR",),
                "sam_model": ("SAM_MODEL",),
                "segm_detector": ("SEGM_DETECTOR",),
                "detailer_hook": ("DETAILER_HOOK",),
            },
        }

    RETURN_TYPES = ("DETAILER_PIPE",)
    RETURN_NAMES = ("detailer_pipe",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Pipe"

    def doit(self, *args, **kwargs):
        detailer_pipe = kwargs['detailer_pipe']
        wildcard = kwargs['wildcard']

        # kwarg name supplying an override for each pipe slot, in pipe order.
        # Slot 5 (wildcard) is handled separately below: it uses an
        # empty-string sentinel instead of None.
        slot_keys = ('model', 'clip', 'vae', 'positive', 'negative', None,
                     'bbox_detector', 'segm_detector', 'sam_model', 'detailer_hook',
                     'refiner_model', 'refiner_clip', 'refiner_positive', 'refiner_negative')

        edited = []
        for current, key in zip(detailer_pipe, slot_keys):
            override = kwargs.get(key) if key is not None else None
            edited.append(current if override is None else override)

        if wildcard != "":
            edited[5] = wildcard

        return (tuple(edited), )
|
||||
|
||||
|
||||
class EditDetailerPipeSDXL(EditDetailerPipe):
    """SDXL variant of EditDetailerPipe: additionally allows refiner-slot overrides.

    Inherits `doit` from EditDetailerPipe, which already reads the refiner_*
    kwargs when present.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "detailer_pipe": ("DETAILER_PIPE",),
                "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"),),
                "Select to add Wildcard": (["Select the Wildcard to add to the text"],),
            },
            "optional": {
                "model": ("MODEL",),
                "clip": ("CLIP",),
                "vae": ("VAE",),
                "positive": ("CONDITIONING",),
                "negative": ("CONDITIONING",),
                "refiner_model": ("MODEL",),
                "refiner_clip": ("CLIP",),
                "refiner_positive": ("CONDITIONING",),
                "refiner_negative": ("CONDITIONING",),
                "bbox_detector": ("BBOX_DETECTOR",),
                "sam_model": ("SAM_MODEL",),
                "segm_detector": ("SEGM_DETECTOR",),
                "detailer_hook": ("DETAILER_HOOK",),
            },
        }
|
||||
2029
custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_nodes.py
Normal file
2029
custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_nodes.py
Normal file
File diff suppressed because it is too large
Load Diff
140
custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_upscaler.py
Normal file
140
custom_nodes/ComfyUI-Impact-Pack/modules/impact/segs_upscaler.py
Normal file
@@ -0,0 +1,140 @@
|
||||
from impact import impact_sampling
|
||||
from comfy import model_management
|
||||
from impact import utils
|
||||
from PIL import Image
|
||||
import nodes
|
||||
import torch
|
||||
import inspect
|
||||
import logging
|
||||
import comfy
|
||||
|
||||
try:
|
||||
from comfy_extras import nodes_differential_diffusion
|
||||
except Exception:
|
||||
logging.info("[Impact Pack] ComfyUI is an outdated version. The DifferentialDiffusion feature will be disabled.")
|
||||
|
||||
|
||||
# Implementation based on `https://github.com/lingondricka2/Upscaler-Detailer`
|
||||
|
||||
# code from comfyroll --->
|
||||
# https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes/blob/main/nodes/functions_upscale.py
|
||||
|
||||
def upscale_with_model(upscale_model, image):
    """Run a tiled upscale-model pass over `image` (BHWC tensor), retrying with
    smaller tiles on out-of-memory.

    The tile size is halved on each OOM; below a 128px floor the OOM is re-raised.
    Returns the upscaled image clamped to [0, 1], back in BHWC layout.
    """
    device = model_management.get_torch_device()
    upscale_model.to(device)
    # tiled_scale expects channels-first input.
    in_img = image.movedim(-1, -3).to(device)

    tile = 512
    overlap = 32

    while True:
        try:
            steps = in_img.shape[0] * comfy.utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap)
            pbar = comfy.utils.ProgressBar(steps)
            s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar)
            break
        except model_management.OOM_EXCEPTION:
            # Halve the tile and retry; give up once tiles get too small to help.
            tile //= 2
            if tile < 128:
                raise

    return torch.clamp(s.movedim(-3, -1), min=0, max=1.0)
|
||||
|
||||
|
||||
def apply_resize_image(image: Image.Image, original_width, original_height, rounding_modulus, mode='scale', supersample='true', factor: int = 2, width: int = 1024, height: int = 1024,
                       resample='bicubic'):
    """Resize a PIL image either by a factor ('rescale') or to a target width ('scale').

    In 'scale' mode the height is derived from the original aspect ratio and both
    dimensions are rounded up to the next multiple of `rounding_modulus`.
    When `supersample` is the string 'true', the image is first blown up 8x so the
    final downscale produces smoother results.
    """
    if mode == 'rescale':
        new_width = int(original_width * factor)
        new_height = int(original_height * factor)
    else:
        m = rounding_modulus
        # Derive height from the requested width while preserving aspect ratio.
        height = int(width * (original_height / original_width))
        # Round each dimension up to the next multiple of m.
        new_width = width if width % m == 0 else width + (m - width % m)
        new_height = height if height % m == 0 else height + (m - height % m)

    # PIL resampling codes: NEAREST=0, LANCZOS=1, BILINEAR=2, BICUBIC=3.
    resample_filters = {'nearest': 0, 'bilinear': 2, 'bicubic': 3, 'lanczos': 1}
    filt = Image.Resampling(resample_filters[resample])

    if supersample == 'true':
        image = image.resize((new_width * 8, new_height * 8), resample=filt)

    return image.resize((new_width, new_height), resample=filt)
|
||||
|
||||
|
||||
def upscaler(image, upscale_model, rescale_factor, resampling_method, supersample, rounding_modulus):
    """Optionally apply an upscale model to `image`, then rescale the result to
    `rescale_factor` times the ORIGINAL image size.

    Args:
        image: input image tensor (BHWC).
        upscale_model: optional model for upscale_with_model; None skips it.
        rescale_factor: multiplier applied to the original dimensions.
        resampling_method: one of 'nearest'/'bilinear'/'bicubic'/'lanczos'.
        supersample: 'true' to 8x-supersample before the final resize.
        rounding_modulus: forwarded to apply_resize_image (unused in rescale mode).

    Returns:
        The rescaled image as a tensor.
    """
    if upscale_model is not None:
        up_image = upscale_with_model(upscale_model, image)
    else:
        up_image = image

    pil_img = utils.tensor2pil(image)
    original_width, original_height = pil_img.size
    # BUGFIX: `resampling_method` was previously passed positionally, landing in
    # apply_resize_image's unused `height` parameter — so the requested filter was
    # silently ignored and bicubic was always used. Pass it by keyword.
    scaled_image = utils.pil2tensor(apply_resize_image(utils.tensor2pil(up_image), original_width, original_height,
                                                       rounding_modulus, 'rescale', supersample, rescale_factor,
                                                       resample=resampling_method))
    return scaled_image
|
||||
|
||||
# <---
|
||||
|
||||
|
||||
def img2img_segs(image, model, clip, vae, seed, steps, cfg, sampler_name, scheduler,
                 positive, negative, denoise, noise_mask, control_net_wrapper=None,
                 inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None):
    """Run an img2img sampling pass over `image`, optionally masked/inpainted,
    and return the refined image resized back to the input's dimensions.

    The image is temporarily upscaled when its height/width is not a multiple
    of 8 (a VAE requirement), sampled via impact_sampling.ksampler_wrapper,
    decoded, moved to CPU, and resized back to the original size.

    NOTE(review): `nodes_differential_diffusion` is imported at module level in
    a try/except; if that import failed, the feathering branch below would
    raise NameError — confirm intended behavior on outdated ComfyUI versions.
    """
    # image.shape is assumed BHWC here, so [1:3] is (height, width) — TODO confirm.
    original_image_size = image.shape[1:3]

    # Match to original image size
    # Upscale slightly so both dimensions become safe for VAE encoding (÷8).
    if original_image_size[0] % 8 > 0 or original_image_size[1] % 8 > 0:
        scale = 8/min(original_image_size[0], original_image_size[1]) + 1
        w = int(original_image_size[1] * scale)
        h = int(original_image_size[0] * scale)
        image = utils.tensor_resize(image, w, h)

    if noise_mask is not None:
        # Feather the mask edges, then drop the trailing channel dim.
        noise_mask = utils.tensor_gaussian_blur_mask(noise_mask, noise_mask_feather)
        noise_mask = noise_mask.squeeze(3)

        # Feathered masks need DifferentialDiffusion unless the model already
        # provides a denoise_mask_function.
        if noise_mask_feather > 0 and 'denoise_mask_function' not in model.model_options:
            model = nodes_differential_diffusion.DifferentialDiffusion().execute(model)[0]

    if control_net_wrapper is not None:
        positive, negative, _ = control_net_wrapper.apply(positive, negative, image, noise_mask)

    # prepare mask
    if noise_mask is not None and inpaint_model:
        imc_encode = nodes.InpaintModelConditioning().encode
        # Newer ComfyUI exposes a `noise_mask` kwarg on encode; fall back to the
        # older positional signature otherwise.
        if 'noise_mask' in inspect.signature(imc_encode).parameters:
            positive, negative, latent_image = imc_encode(positive, negative, image, vae, mask=noise_mask, noise_mask=True)
        else:
            logging.info("[Impact Pack] ComfyUI is an outdated version.")
            positive, negative, latent_image = imc_encode(positive, negative, image, vae, noise_mask)
    else:
        latent_image = utils.to_latent_image(image, vae)
        if noise_mask is not None:
            latent_image['noise_mask'] = noise_mask

    refined_latent = latent_image

    # ksampler
    refined_latent = impact_sampling.ksampler_wrapper(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, refined_latent, denoise, scheduler_func=scheduler_func_opt)

    # non-latent downscale - latent downscale cause bad quality
    refined_image = vae.decode(refined_latent['samples'])

    # prevent mixing of device
    refined_image = refined_image.cpu()

    # Match to original image size
    if refined_image.shape[1:3] != original_image_size:
        refined_image = utils.tensor_resize(refined_image, original_image_size[1], original_image_size[0])

    # don't convert to latent - latent break image
    # preserving pil is much better
    return refined_image
|
||||
@@ -0,0 +1,686 @@
|
||||
import math
|
||||
import impact.core as core
|
||||
from comfy_extras.nodes_custom_sampler import Noise_RandomNoise
|
||||
from nodes import MAX_RESOLUTION
|
||||
import nodes
|
||||
from impact.impact_sampling import KSamplerWrapper, KSamplerAdvancedWrapper, separated_sample, impact_sample
|
||||
import comfy
|
||||
import torch
|
||||
import numpy as np
|
||||
import logging
|
||||
|
||||
|
||||
class TiledKSamplerProvider:
    """Wraps a BASIC_PIPE and tiling settings into a reusable KSAMPLER wrapper."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, {"tooltip": "sampler"}),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, {"tooltip": "noise schedule"}),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}),
                    "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64, "tooltip": "Sets the width of the tile to be used in TiledKSampler."}),
                    "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64, "tooltip": "Sets the height of the tile to be used in TiledKSampler."}),
                    "tiling_strategy": (["random", "padded", 'simple'], {"tooltip": "Sets the tiling strategy for TiledKSampler."} ),
                    "basic_pipe": ("BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"})
                    }}

    OUTPUT_TOOLTIPS = ("sampler wrapper. (Can be used when generating a regional_prompt.)", )

    RETURN_TYPES = ("KSAMPLER",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Sampler"

    @staticmethod
    def doit(seed, steps, cfg, sampler_name, scheduler, denoise,
             tile_width, tile_height, tiling_strategy, basic_pipe):
        # Only model and conditioning are needed; clip/vae slots are ignored.
        model, _, _, positive, negative = basic_pipe
        wrapper = core.TiledKSamplerWrapper(model, seed, steps, cfg, sampler_name, scheduler,
                                            positive, negative, denoise,
                                            tile_width, tile_height, tiling_strategy)
        return (wrapper, )
|
||||
|
||||
|
||||
class KSamplerProvider:
    """Wraps a BASIC_PIPE and sampling settings into a reusable KSAMPLER wrapper."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, {"tooltip": "sampler"}),
                    "scheduler": (core.get_schedulers(), {"tooltip": "noise schedule"}),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}),
                    "basic_pipe": ("BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"})
                    },
                "optional": {
                    "scheduler_func_opt": ("SCHEDULER_FUNC", {"tooltip": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored."}),
                    }
                }

    OUTPUT_TOOLTIPS = ("sampler wrapper. (Can be used when generating a regional_prompt.)",)

    RETURN_TYPES = ("KSAMPLER",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Sampler"

    @staticmethod
    def doit(seed, steps, cfg, sampler_name, scheduler, denoise, basic_pipe, scheduler_func_opt=None):
        # Only model and conditioning are needed; clip/vae slots are ignored.
        model, _, _, positive, negative = basic_pipe
        wrapper = KSamplerWrapper(model, seed, steps, cfg, sampler_name, scheduler,
                                  positive, negative, denoise, scheduler_func=scheduler_func_opt)
        return (wrapper, )
|
||||
|
||||
|
||||
class KSamplerAdvancedProvider:
    """Wraps a BASIC_PIPE and sampling settings into a KSAMPLER_ADVANCED wrapper.

    BUGFIX: the widget metadata previously used the misspelled key "toolip",
    so none of these tooltips were ever shown in the UI; corrected to "tooltip".
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, {"tooltip": "sampler"}),
                    "scheduler": (core.get_schedulers(), {"tooltip": "noise schedule"}),
                    "sigma_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "tooltip": "Multiplier of noise schedule"}),
                    "basic_pipe": ("BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"})
                    },
                "optional": {
                    "sampler_opt": ("SAMPLER", {"tooltip": "[OPTIONAL] Uses the passed sampler instead of internal impact_sampler."}),
                    "scheduler_func_opt": ("SCHEDULER_FUNC", {"tooltip": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored."}),
                    }
                }

    OUTPUT_TOOLTIPS = ("sampler wrapper. (Can be used when generating a regional_prompt.)", )

    RETURN_TYPES = ("KSAMPLER_ADVANCED",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Sampler"

    @staticmethod
    def doit(cfg, sampler_name, scheduler, basic_pipe, sigma_factor=1.0, sampler_opt=None, scheduler_func_opt=None):
        # Only model and conditioning are needed; clip/vae slots are ignored.
        model, _, _, positive, negative = basic_pipe
        sampler = KSamplerAdvancedWrapper(model, cfg, sampler_name, scheduler, positive, negative,
                                          sampler_opt=sampler_opt, sigma_factor=sigma_factor,
                                          scheduler_func=scheduler_func_opt)
        return (sampler, )
|
||||
|
||||
|
||||
class TwoSamplersForMask:
    """Samples one latent with two samplers: `base_sampler` outside the mask,
    `mask_sampler` inside it.

    BUGFIX: the input latent dict was previously mutated in place by inserting a
    temporary 'noise_mask' entry, which could corrupt ComfyUI's cached inputs;
    the dict is now shallow-copied before modification.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                     "latent_image": ("LATENT", {"tooltip": "input latent image"}),
                     "base_sampler": ("KSAMPLER", {"tooltip": "Sampler to apply to the region outside the mask."}),
                     "mask_sampler": ("KSAMPLER", {"tooltip": "Sampler to apply to the masked region."}),
                     "mask": ("MASK", {"tooltip": "region mask"})
                     },
                }

    OUTPUT_TOOLTIPS = ("result latent", )

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Sampler"

    @staticmethod
    def doit(latent_image, base_sampler, mask_sampler, mask):
        # Shallow-copy so the caller's (possibly cached) latent dict is not
        # mutated by the temporary 'noise_mask' entry below.
        latent_image = dict(latent_image)

        # Inverted mask: 1.0 outside the region, 0.0 inside.
        inv_mask = torch.where(mask != 1.0, torch.tensor(1.0), torch.tensor(0.0))

        # Pass 1: sample everything outside the mask.
        latent_image['noise_mask'] = inv_mask
        new_latent_image = base_sampler.sample(latent_image)

        # Pass 2: sample the masked region on top of the first result.
        new_latent_image['noise_mask'] = mask
        new_latent_image = mask_sampler.sample(new_latent_image)

        # Strip the temporary mask before returning the latent.
        del new_latent_image['noise_mask']

        return (new_latent_image, )
|
||||
|
||||
|
||||
class TwoAdvancedSamplersForMask:
    """Two-sampler masked sampling expressed as a single-region regional-sampling job."""

    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}),
                    "samples": ("LATENT", {"tooltip": "input latent image"}),
                    "base_sampler": ("KSAMPLER_ADVANCED", {"tooltip": "Sampler to apply to the region outside the mask."}),
                    "mask_sampler": ("KSAMPLER_ADVANCED", {"tooltip": "Sampler to apply to the masked region."}),
                    "mask": ("MASK", {"tooltip": "region mask"}),
                    "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000, "tooltip": "To smooth the seams of the region boundaries, expand the mask by the overlap_factor amount to overlap with other regions."})
                    },
                }

    OUTPUT_TOOLTIPS = ("result latent", )

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Sampler"

    @staticmethod
    def doit(seed, steps, denoise, samples, base_sampler, mask_sampler, mask, overlap_factor):
        # Build a single regional prompt from the mask sampler, then delegate
        # to RegionalSampler with fixed defaults for the remaining knobs.
        prompts = RegionalPrompt().doit(mask=mask, advanced_sampler=mask_sampler)[0]
        return RegionalSampler().doit(
            seed=seed, seed_2nd=0, seed_2nd_mode="ignore", steps=steps, base_only_steps=1,
            denoise=denoise, samples=samples, base_sampler=base_sampler,
            regional_prompts=prompts, overlap_factor=overlap_factor,
            restore_latent=True, additional_mode="ratio between",
            additional_sampler="AUTO", additional_sigma_ratio=0.3)
|
||||
|
||||
|
||||
class RegionalPrompt:
    """Packs a mask and a region sampler into a REGIONAL_PROMPTS list."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "mask": ("MASK", {"tooltip": "region mask"}),
                "advanced_sampler": ("KSAMPLER_ADVANCED", {"tooltip": "sampler for specified region"}),
            },
            "optional": {
                "variation_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Sets the extra seed to be used for noise variation."}),
                "variation_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Sets the strength of the noise variation."}),
                "variation_method": (["linear", "slerp"], {"tooltip": "Sets how the original noise and extra noise are blended together."}),
            }
        }

    OUTPUT_TOOLTIPS = ("regional prompts. (Can be used in the RegionalSampler.)", )

    RETURN_TYPES = ("REGIONAL_PROMPTS", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Regional"

    @staticmethod
    def doit(mask, advanced_sampler, variation_seed=0, variation_strength=0.0, variation_method="linear"):
        """Create a single-element REGIONAL_PROMPTS list from the mask/sampler pair."""
        prompt = core.REGIONAL_PROMPT(mask, advanced_sampler,
                                      variation_seed=variation_seed,
                                      variation_strength=variation_strength,
                                      variation_method=variation_method)
        return ([prompt], )
|
||||
|
||||
|
||||
class CombineRegionalPrompts:
    """Flattens any number of REGIONAL_PROMPTS inputs into a single list."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "regional_prompts1": ("REGIONAL_PROMPTS", {"tooltip": "input regional_prompts. (Connecting to the input slot increases the number of additional slots.)"}),
            },
        }

    OUTPUT_TOOLTIPS = ("Combined REGIONAL_PROMPTS", )

    RETURN_TYPES = ("REGIONAL_PROMPTS", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Regional"

    @staticmethod
    def doit(**kwargs):
        """Concatenate every connected REGIONAL_PROMPTS list, in slot order."""
        combined = [prompt for prompts in kwargs.values() for prompt in prompts]
        return (combined, )
|
||||
|
||||
|
||||
class CombineConditionings:
    """Merges any number of CONDITIONING inputs into a single conditioning list."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "conditioning1": ("CONDITIONING", { "tooltip": "input conditionings. (Connecting to the input slot increases the number of additional slots.)" }),
            },
        }

    OUTPUT_TOOLTIPS = ("Combined conditioning", )

    RETURN_TYPES = ("CONDITIONING", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    @staticmethod
    def doit(**kwargs):
        """Concatenate every connected conditioning list, in slot order."""
        merged = [entry for conditioning in kwargs.values() for entry in conditioning]
        return (merged, )
|
||||
|
||||
|
||||
class ConcatConditionings:
    """Concatenates additional conditionings onto conditioning1 along the token axis.

    Each extra input's first cond tensor is torch.cat-ed (dim=1) onto every entry
    of the accumulated conditioning; extra entries beyond the first are ignored
    with a warning.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "conditioning1": ("CONDITIONING", { "tooltip": "input conditionings. (Connecting to the input slot increases the number of additional slots.)" }),
                     },
                }

    OUTPUT_TOOLTIPS = ("Concatenated conditioning", )

    RETURN_TYPES = ("CONDITIONING", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    @staticmethod
    def doit(**kwargs):
        """Concatenate all inputs onto the first one and return the result.

        Fixes: (1) with a single input the original raised NameError because `out`
        was only assigned inside the loop — now the first conditioning is returned
        unchanged; (2) the warning message was missing its f-string prefix, so the
        slot name `{k}` was printed literally.
        """
        conditioning_to = list(kwargs.values())[0]
        # With only one input there is nothing to concatenate; return it as-is.
        out = conditioning_to

        for k, conditioning_from in list(kwargs.items())[1:]:
            out = []
            if len(conditioning_from) > 1:
                logging.warning(f"Warning: ConcatConditionings {k} contains more than 1 cond, only the first one will actually be applied to conditioning1.")

            cond_from = conditioning_from[0][0]

            for i in range(len(conditioning_to)):
                t1 = conditioning_to[i][0]
                # Concatenate along the token dimension (dim=1).
                tw = torch.cat((t1, cond_from), 1)
                # Keep the metadata dict of the accumulated side (shallow copy).
                n = [tw, conditioning_to[i][1].copy()]
                out.append(n)

            conditioning_to = out

        return (out, )
|
||||
|
||||
|
||||
class RegionalSampler:
    """Step-by-step regional sampling.

    At every denoising step the base sampler denoises the area outside all region
    masks, then each REGIONAL_PROMPT's own sampler denoises its masked region.
    When restore_latent is enabled, each region's result is composited back onto
    the base latent after every step (inpainting-style).
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}),
                    "seed_2nd": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Additional noise seed. The behavior is determined by seed_2nd_mode."}),
                    "seed_2nd_mode": (["ignore", "fixed", "seed+seed_2nd", "seed-seed_2nd", "increment", "decrement", "randomize"], {"tooltip": "application method of seed_2nd. 1) ignore: Do not use seed_2nd. In the base only sampling stage, the seed is applied as a noise seed, and in the regional sampling stage, denoising is performed as it is without additional noise. 2) Others: In the base only sampling stage, the seed is applied as a noise seed, and once it is closed so that there is no leftover noise, new noise is added with seed_2nd and the regional samping stage is performed. a) fixed: Use seed_2nd as it is as an additional noise seed. b) seed+seed_2nd: Apply the value of seed+seed_2nd as an additional noise seed. c) seed-seed_2nd: Apply the value of seed-seed_2nd as an additional noise seed. d) increment: Not implemented yet. Same with fixed. e) decrement: Not implemented yet. Same with fixed. f) randomize: Not implemented yet. Same with fixed."}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}),
                    # NOTE(review): tooltip says "total sampling steps" but this widget is the
                    # number of initial base-only steps — looks like a copy-paste; display-only.
                    "base_only_steps": ("INT", {"default": 2, "min": 0, "max": 10000, "tooltip": "total sampling steps"}),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}),
                    "samples": ("LATENT", {"tooltip": "input latent image"}),
                    "base_sampler": ("KSAMPLER_ADVANCED", {"tooltip": "The sampler applied outside the area set by the regional_prompt."}),
                    "regional_prompts": ("REGIONAL_PROMPTS", {"tooltip": "The prompt applied to each region"}),
                    "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000, "tooltip": "To smooth the seams of the region boundaries, expand the mask set in regional_prompts by the overlap_factor amount to overlap with other regions."}),
                    "restore_latent": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled", "tooltip": "At each step, restore the noise outside the mask area to its original state, as per the principle of inpainting. This option is provided for backward compatibility, and it is recommended to always set it to true."}),
                    "additional_mode": (["DISABLE", "ratio additional", "ratio between"], {"default": "ratio between", "tooltip": "..._sde or uni_pc and other special samplers are used, the region is not properly denoised, and it causes a phenomenon that destroys the overall harmony. To compensate for this, a recovery operation is performed using another sampler. This requires a longer time for sampling because a second sampling is performed at each step in each region using a special sampler. 1) DISABLE: Disable this feature. 2) ratio additional: After performing the denoise amount to be performed in the step with the sampler set in the region, the recovery sampler is additionally applied by the additional_sigma_ratio. If you use this option, the total denoise amount increases by additional_sigma_ratio. 3) ratio between: The denoise amount to be performed in the step with the sampler set in the region and the denoise amount to be applied to the recovery sampler are divided by additional_sigma_ratio, and denoise is performed for each denoise amount. If you use this option, the total denoise amount does not change."}),
                    "additional_sampler": (["AUTO", "euler", "heun", "heunpp2", "dpm_2", "dpm_fast", "dpmpp_2m", "ddpm"], {"tooltip": "1) AUTO: Automatically set the recovery sampler. If the sampler is uni_pc, uni_pc_bh2, dpmpp_sde, dpmpp_sde_gpu, the dpm_fast sampler is selected If the sampler is dpmpp_2m_sde, dpmpp_2m_sde_gpu, dpmpp_3m_sde, dpmpp_3m_sde_gpu, the dpmpp_2m sampler is selected. 2) Others: Manually set the recovery sampler."}),
                    "additional_sigma_ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Multiplier of noise schedule to be applied according to additional_mode."}),
                     },
                "hidden": {"unique_id": "UNIQUE_ID"},
                }

    OUTPUT_TOOLTIPS = ("result latent", )

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Regional"

    @staticmethod
    def separated_sample(*args, **kwargs):
        # Delegates to the module-level separated_sample function (the name inside
        # this body resolves to the global, not to this method).
        return separated_sample(*args, **kwargs)

    @staticmethod
    def mask_erosion(samples, mask, grow_mask_by):
        """Resize `mask` to the latent resolution and dilate it by `grow_mask_by` pixels.

        NOTE(review): w is taken from shape[3] (width) and h from shape[2] (height),
        while interpolate's `size` expects (H, W) — the pair appears swapped, and the
        final slice indexes dims 2/3 as [:w, :h] in the same swapped order. Harmless
        for square latents; confirm against non-square inputs.
        """
        mask = mask.clone()

        w = samples['samples'].shape[3]
        h = samples['samples'].shape[2]

        # Bring the mask to NCHW and scale it to the latent size.
        mask2 = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(w, h), mode="bilinear")
        if grow_mask_by == 0:
            mask_erosion = mask2
        else:
            # Dilate with a grow_mask_by x grow_mask_by box kernel; clamp keeps it binary-ish.
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)

            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask2.round(), kernel_tensor, padding=padding), 0, 1)

        return mask_erosion[:, :, :w, :h].round()

    @staticmethod
    def doit(seed, seed_2nd, seed_2nd_mode, steps, base_only_steps, denoise, samples, base_sampler, regional_prompts, overlap_factor, restore_latent,
             additional_mode, additional_sampler, additional_sigma_ratio, unique_id=None):
        """Run base-only warmup steps, then per-step alternating base/regional sampling.

        Returns a one-tuple with the final latent (noise_mask removed).
        """
        samples = samples.copy()
        samples['samples'] = comfy.sample.fix_empty_latent_channels(base_sampler.params[0], samples['samples'])

        if restore_latent:
            # Used to composite each region's result back onto the base latent per step.
            latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']()
        else:
            latent_compositor = None

        # Union of all region masks; the inverse is the base sampler's territory.
        masks = [regional_prompt.mask.numpy() for regional_prompt in regional_prompts]
        masks = [np.ceil(mask).astype(np.int32) for mask in masks]
        combined_mask = torch.from_numpy(np.bitwise_or.reduce(masks))

        inv_mask = torch.where(combined_mask == 0, torch.tensor(1.0), torch.tensor(0.0))

        # Stretch the schedule so that only the last `steps` of it are denoised
        # (standard advanced-sampler emulation of partial denoise).
        adv_steps = int(steps / denoise)
        start_at_step = adv_steps - steps

        region_len = len(regional_prompts)
        total = steps*region_len

        leftover_noise = False
        if base_only_steps > 0:
            if seed_2nd_mode == 'ignore':
                # Keep the leftover noise so the regional stage continues from it
                # without injecting a second noise.
                leftover_noise = True

            noise = Noise_RandomNoise(seed).generate_noise(samples)

            # Let each region apply its noise variation to the shared noise.
            for rp in regional_prompts:
                noise = rp.touch_noise(noise)

            samples = base_sampler.sample_advanced(True, seed, adv_steps, samples, start_at_step, start_at_step + base_only_steps, leftover_noise, recovery_mode="DISABLE", noise=noise)

        # Derive the seed for the regional stage per seed_2nd_mode;
        # 1125899906842624 == 2**50 keeps the seed within range when wrapping.
        if seed_2nd_mode == "seed+seed_2nd":
            seed += seed_2nd
            if seed > 1125899906842624:
                seed = seed - 1125899906842624
        elif seed_2nd_mode == "seed-seed_2nd":
            seed -= seed_2nd
            if seed < 0:
                seed += 1125899906842624
        elif seed_2nd_mode != 'ignore':
            # fixed / increment / decrement / randomize all behave as 'fixed' for now.
            seed = seed_2nd

        new_latent_image = samples.copy()
        base_latent_image = None

        if not leftover_noise:
            # The base-only stage fully closed its noise; inject fresh noise for
            # the regional stage.
            add_noise = True
            noise = Noise_RandomNoise(seed).generate_noise(samples)

            for rp in regional_prompts:
                noise = rp.touch_noise(noise)
        else:
            add_noise = False
            noise = None

        # Main loop: one schedule step at a time, base region first, then each region.
        for i in range(start_at_step+base_only_steps, adv_steps):
            core.update_node_status(unique_id, f"{i}/{steps} steps | ", ((i-start_at_step)*region_len)/total)

            new_latent_image['noise_mask'] = inv_mask
            new_latent_image = base_sampler.sample_advanced(add_noise, seed, adv_steps, new_latent_image,
                                                           start_at_step=i, end_at_step=i + 1, return_with_leftover_noise=True,
                                                           recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio, noise=noise)

            if restore_latent:
                if 'noise_mask' in new_latent_image:
                    del new_latent_image['noise_mask']
                # Snapshot of the base result; regions are composited onto it.
                base_latent_image = new_latent_image.copy()

            j = 1
            for regional_prompt in regional_prompts:
                if restore_latent:
                    # Each region starts from the untouched base snapshot.
                    new_latent_image = base_latent_image.copy()

                core.update_node_status(unique_id, f"{i}/{steps} steps | {j}/{region_len}", ((i-start_at_step)*region_len + j)/total)

                # Expanded (overlapping) region mask for seam smoothing.
                region_mask = regional_prompt.get_mask_erosion(overlap_factor).squeeze(0).squeeze(0)

                new_latent_image['noise_mask'] = region_mask
                new_latent_image = regional_prompt.sampler.sample_advanced(False, seed, adv_steps, new_latent_image, i, i + 1, True,
                                                                          recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio)

                if restore_latent:
                    del new_latent_image['noise_mask']
                    base_latent_image = latent_compositor.composite(base_latent_image, new_latent_image, 0, 0, False, region_mask)[0]
                    new_latent_image = base_latent_image

                j += 1

            # Noise is only injected on the very first step of this loop.
            add_noise = False

        # finalize
        core.update_node_status(unique_id, "finalize")
        if base_latent_image is not None:
            new_latent_image = base_latent_image
        else:
            base_latent_image = new_latent_image

        # One closing base pass to remove any remaining noise outside the regions.
        new_latent_image['noise_mask'] = inv_mask
        new_latent_image = base_sampler.sample_advanced(False, seed, adv_steps, new_latent_image, adv_steps, adv_steps+1, False,
                                                       recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio)

        core.update_node_status(unique_id, f"{steps}/{steps} steps", total)
        core.update_node_status(unique_id, "", None)

        if restore_latent:
            new_latent_image = base_latent_image

        if 'noise_mask' in new_latent_image:
            del new_latent_image['noise_mask']

        return (new_latent_image, )
|
||||
|
||||
|
||||
class RegionalSamplerAdvanced:
    """Advanced variant of RegionalSampler with explicit add_noise / start_at_step /
    end_at_step / return_with_leftover_noise controls, matching the standard
    KSampler (Advanced) interface."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "add_noise": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled", "tooltip": "Whether to add noise"}),
                    "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}),
                    "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000, "tooltip": "The starting step of the sampling to be applied at this node within the range of 'steps'."}),
                    "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000, "tooltip": "The step at which sampling applied at this node will stop within the range of steps (if greater than steps, sampling will continue only up to steps)."}),
                    "overlap_factor": ("INT", {"default": 10, "min": 0, "max": 10000, "tooltip": "To smooth the seams of the region boundaries, expand the mask set in regional_prompts by the overlap_factor amount to overlap with other regions."}),
                    "restore_latent": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled", "tooltip": "At each step, restore the noise outside the mask area to its original state, as per the principle of inpainting. This option is provided for backward compatibility, and it is recommended to always set it to true."}),
                    "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled", "tooltip": "Whether to return the latent with noise remaining if the noise has not been completely removed according to the noise schedule, or to completely remove the noise before returning it."}),
                    "latent_image": ("LATENT", {"tooltip": "input latent image"}),
                    "base_sampler": ("KSAMPLER_ADVANCED", {"tooltip": "The sampler applied outside the area set by the regional_prompt."}),
                    "regional_prompts": ("REGIONAL_PROMPTS", {"tooltip": "The prompt applied to each region"}),
                    "additional_mode": (["DISABLE", "ratio additional", "ratio between"], {"default": "ratio between", "tooltip": "..._sde or uni_pc and other special samplers are used, the region is not properly denoised, and it causes a phenomenon that destroys the overall harmony. To compensate for this, a recovery operation is performed using another sampler. This requires a longer time for sampling because a second sampling is performed at each step in each region using a special sampler. 1) DISABLE: Disable this feature. 2) ratio additional: After performing the denoise amount to be performed in the step with the sampler set in the region, the recovery sampler is additionally applied by the additional_sigma_ratio. If you use this option, the total denoise amount increases by additional_sigma_ratio. 3) ratio between: The denoise amount to be performed in the step with the sampler set in the region and the denoise amount to be applied to the recovery sampler are divided by additional_sigma_ratio, and denoise is performed for each denoise amount. If you use this option, the total denoise amount does not change."}),
                    "additional_sampler": (["AUTO", "euler", "heun", "heunpp2", "dpm_2", "dpm_fast", "dpmpp_2m", "ddpm"], {"tooltip": "1) AUTO: Automatically set the recovery sampler. If the sampler is uni_pc, uni_pc_bh2, dpmpp_sde, dpmpp_sde_gpu, the dpm_fast sampler is selected If the sampler is dpmpp_2m_sde, dpmpp_2m_sde_gpu, dpmpp_3m_sde, dpmpp_3m_sde_gpu, the dpmpp_2m sampler is selected. 2) Others: Manually set the recovery sampler."}),
                    "additional_sigma_ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "Multiplier of noise schedule to be applied according to additional_mode."}),
                     },
                "hidden": {"unique_id": "UNIQUE_ID"},
                }

    OUTPUT_TOOLTIPS = ("result latent", )

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Regional"

    @staticmethod
    def doit(add_noise, noise_seed, steps, start_at_step, end_at_step, overlap_factor, restore_latent, return_with_leftover_noise, latent_image, base_sampler, regional_prompts,
             additional_mode, additional_sampler, additional_sigma_ratio, unique_id):
        """Per-step alternating base/regional sampling over [start_at_step, end_at_step).

        The last schedule step is executed separately in the finalize section so that
        return_with_leftover_noise can be honored. Returns a one-tuple latent.
        """
        new_latent_image = latent_image.copy()
        new_latent_image['samples'] = comfy.sample.fix_empty_latent_channels(base_sampler.params[0], new_latent_image['samples'])

        if restore_latent:
            # Used to composite each region's result back onto the base latent per step.
            latent_compositor = nodes.NODE_CLASS_MAPPINGS['LatentCompositeMasked']()
        else:
            latent_compositor = None

        # Union of all region masks; its inverse is the base sampler's territory.
        masks = [regional_prompt.mask.numpy() for regional_prompt in regional_prompts]
        masks = [np.ceil(mask).astype(np.int32) for mask in masks]
        combined_mask = torch.from_numpy(np.bitwise_or.reduce(masks))

        inv_mask = torch.where(combined_mask == 0, torch.tensor(1.0), torch.tensor(0.0))

        region_len = len(regional_prompts)
        end_at_step = min(steps, end_at_step)
        total = (end_at_step - start_at_step) * region_len

        base_latent_image = None
        # Cache of eroded masks, keyed by region index (erosion is loop-invariant).
        region_masks = {}

        # The final step (end_at_step-1 .. end_at_step) is handled in the finalize
        # section below, hence the -1 here.
        for i in range(start_at_step, end_at_step-1):
            # NOTE(review): `i` already starts at start_at_step, so the label
            # f"{start_at_step+i}" counts start_at_step twice — display-only; verify intended.
            core.update_node_status(unique_id, f"{start_at_step+i}/{end_at_step} steps | ", ((i-start_at_step)*region_len)/total)

            # Noise is only injected on the very first iteration, and only if requested.
            cur_add_noise = True if i == start_at_step and add_noise else False

            if cur_add_noise:
                noise = Noise_RandomNoise(noise_seed).generate_noise(new_latent_image)
                # Let each region apply its noise variation to the shared noise.
                for rp in regional_prompts:
                    noise = rp.touch_noise(noise)
            else:
                noise = None

            new_latent_image['noise_mask'] = inv_mask
            new_latent_image = base_sampler.sample_advanced(cur_add_noise, noise_seed, steps, new_latent_image, i, i + 1, True,
                                                           recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio, noise=noise)

            if restore_latent:
                del new_latent_image['noise_mask']
                # Snapshot of the base result; regions are composited onto it.
                base_latent_image = new_latent_image.copy()

            j = 1
            for regional_prompt in regional_prompts:
                if restore_latent:
                    # Each region starts from the untouched base snapshot.
                    new_latent_image = base_latent_image.copy()

                core.update_node_status(unique_id, f"{start_at_step+i}/{end_at_step} steps | {j}/{region_len}", ((i-start_at_step)*region_len + j)/total)

                if j not in region_masks:
                    # Expanded (overlapping) region mask for seam smoothing; computed once.
                    region_mask = regional_prompt.get_mask_erosion(overlap_factor).squeeze(0).squeeze(0)
                    region_masks[j] = region_mask
                else:
                    region_mask = region_masks[j]

                new_latent_image['noise_mask'] = region_mask
                new_latent_image = regional_prompt.sampler.sample_advanced(False, noise_seed, steps, new_latent_image, i, i + 1, True,
                                                                          recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio)

                if restore_latent:
                    del new_latent_image['noise_mask']
                    base_latent_image = latent_compositor.composite(base_latent_image, new_latent_image, 0, 0, False, region_mask)[0]
                    new_latent_image = base_latent_image

                j += 1

        # finalize: run the last schedule step with the caller's leftover-noise choice.
        core.update_node_status(unique_id, "finalize")
        if base_latent_image is not None:
            new_latent_image = base_latent_image
        else:
            base_latent_image = new_latent_image

        new_latent_image['noise_mask'] = inv_mask
        new_latent_image = base_sampler.sample_advanced(False, noise_seed, steps, new_latent_image, end_at_step-1, end_at_step, return_with_leftover_noise,
                                                       recovery_mode=additional_mode, recovery_sampler=additional_sampler, recovery_sigma_ratio=additional_sigma_ratio)

        core.update_node_status(unique_id, f"{end_at_step}/{end_at_step} steps", total)
        core.update_node_status(unique_id, "", None)

        if restore_latent:
            new_latent_image = base_latent_image

        if 'noise_mask' in new_latent_image:
            del new_latent_image['noise_mask']

        return (new_latent_image, )
|
||||
|
||||
|
||||
class KSamplerBasicPipe:
    """KSampler node driven by a BASIC_PIPE instead of separate model/cond inputs."""

    @classmethod
    def INPUT_TYPES(s):
        required = {
            "basic_pipe": ("BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"}),
            "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}),
            "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}),
            "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}),
            "sampler_name": (comfy.samplers.KSampler.SAMPLERS, {"tooltip": "sampler"}),
            "scheduler": (core.get_schedulers(), {"tooltip": "noise schedule"}),
            "latent_image": ("LATENT", {"tooltip": "input latent image"}),
            "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of noise to remove. This amount is the noise added at the start, and the higher it is, the more the input latent will be modified before being returned."}),
        }
        optional = {
            "scheduler_func_opt": ("SCHEDULER_FUNC", {"tooltip": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored."}),
        }
        return {"required": required, "optional": optional}

    OUTPUT_TOOLTIPS = ("passthrough input basic_pipe", "result latent", "VAE in basic_pipe")

    RETURN_TYPES = ("BASIC_PIPE", "LATENT", "VAE")
    FUNCTION = "sample"

    CATEGORY = "ImpactPack/sampling"

    @staticmethod
    def sample(basic_pipe, seed, steps, cfg, sampler_name, scheduler, latent_image, denoise=1.0, scheduler_func_opt=None):
        """Run impact_sample with the pipe's model/conditionings; pass the pipe and VAE through."""
        model, _clip, vae, positive, negative = basic_pipe
        latent = impact_sample(model, seed, steps, cfg, sampler_name, scheduler,
                               positive, negative, latent_image, denoise,
                               scheduler_func=scheduler_func_opt)
        return basic_pipe, latent, vae
|
||||
|
||||
|
||||
class KSamplerAdvancedBasicPipe:
    """KSampler (Advanced) node driven by a BASIC_PIPE instead of separate inputs."""

    @classmethod
    def INPUT_TYPES(s):
        required = {
            "basic_pipe": ("BASIC_PIPE", {"tooltip": "basic_pipe input for sampling"}),
            "add_noise": ("BOOLEAN", {"default": True, "label_on": "enable", "label_off": "disable", "tooltip": "Whether to add noise"}),
            "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Random seed to use for generating CPU noise for sampling."}),
            "steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "total sampling steps"}),
            "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "tooltip": "classifier free guidance value"}),
            "sampler_name": (comfy.samplers.KSampler.SAMPLERS, {"tooltip": "sampler"}),
            "scheduler": (core.get_schedulers(), {"tooltip": "noise schedule"}),
            "latent_image": ("LATENT", {"tooltip": "input latent image"}),
            "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000, "tooltip": "The starting step of the sampling to be applied at this node within the range of 'steps'."}),
            "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000, "tooltip": "The step at which sampling applied at this node will stop within the range of steps (if greater than steps, sampling will continue only up to steps)."}),
            "return_with_leftover_noise": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable", "tooltip": "Whether to return the latent with noise remaining if the noise has not been completely removed according to the noise schedule, or to completely remove the noise before returning it."}),
        }
        optional = {
            "scheduler_func_opt": ("SCHEDULER_FUNC", {"tooltip": "[OPTIONAL] Noise schedule generation function. If this is set, the scheduler widget will be ignored."}),
        }
        return {"required": required, "optional": optional}

    OUTPUT_TOOLTIPS = ("passthrough input basic_pipe", "result latent", "VAE in basic_pipe")

    RETURN_TYPES = ("BASIC_PIPE", "LATENT", "VAE")
    FUNCTION = "sample"

    CATEGORY = "ImpactPack/sampling"

    @staticmethod
    def sample(basic_pipe, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0, scheduler_func_opt=None):
        """Run separated_sample over [start_at_step, end_at_step) using the pipe's components.

        `denoise` is accepted for signature compatibility but not forwarded.
        """
        model, _clip, vae, positive, negative = basic_pipe

        latent = separated_sample(model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler,
                                  positive, negative, latent_image, start_at_step, end_at_step,
                                  return_with_leftover_noise, scheduler_func=scheduler_func_opt)
        return basic_pipe, latent, vae
|
||||
|
||||
|
||||
class GITSSchedulerFuncProvider:
    """Provides a SCHEDULER_FUNC that builds sigmas via the GITSScheduler node."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "coeff": ("FLOAT", {"default": 1.20, "min": 0.80, "max": 1.50, "step": 0.05, "tooltip": "coeff factor of GITS Scheduler"}),
                "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "denoise amount for noise schedule"}),
            }
        }

    OUTPUT_TOOLTIPS = ("Returns a function that generates a noise schedule using GITSScheduler. This can be used in place of a predetermined noise schedule to dynamically generate a noise schedule based on the steps.",)

    RETURN_TYPES = ("SCHEDULER_FUNC",)
    CATEGORY = "ImpactPack/sampling"

    FUNCTION = "doit"

    @staticmethod
    def doit(coeff, denoise):
        """Return a closure (model, sampler, steps) -> sigmas bound to coeff/denoise."""
        def make_sigmas(model, sampler, steps):
            # GITSScheduler only exists in sufficiently recent ComfyUI builds.
            if 'GITSScheduler' not in nodes.NODE_CLASS_MAPPINGS:
                raise Exception("[Impact Pack] ComfyUI is an outdated version. Cannot use GITSScheduler.")

            scheduler_node = nodes.NODE_CLASS_MAPPINGS['GITSScheduler']()
            return scheduler_node.get_sigmas(coeff, steps, denoise)[0]

        return (make_sigmas, )
|
||||
|
||||
|
||||
class NegativeConditioningPlaceholder:
    """Placeholder negative conditioning for models (e.g. FLUX) that do not use one."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {}}

    OUTPUT_TOOLTIPS = ("This is a Placeholder for the FLUX model that does not use Negative Conditioning.",)

    RETURN_TYPES = ("CONDITIONING",)
    CATEGORY = "ImpactPack/sampling"

    FUNCTION = "doit"

    @staticmethod
    def doit():
        """Return the sentinel value consumers recognize as 'no negative conditioning'."""
        placeholder = "NegativePlaceholder"
        return (placeholder, )
|
||||
775
custom_nodes/ComfyUI-Impact-Pack/modules/impact/util_nodes.py
Normal file
775
custom_nodes/ComfyUI-Impact-Pack/modules/impact/util_nodes.py
Normal file
@@ -0,0 +1,775 @@
|
||||
from impact.utils import any_typ, ByPassTypeTuple, make_3d_mask
|
||||
import comfy_extras.nodes_mask
|
||||
from nodes import MAX_RESOLUTION
|
||||
import torch
|
||||
import comfy
|
||||
import sys
|
||||
import nodes
|
||||
import re
|
||||
import impact.core as core
|
||||
from server import PromptServer
|
||||
import inspect
|
||||
import logging
|
||||
|
||||
|
||||
class GeneralSwitch:
    """ImpactSwitch: forwards exactly one of N dynamically-added inputs, chosen by 'select'."""

    @classmethod
    def INPUT_TYPES(s):
        # One seed slot; the frontend adds input2, input3, ... as slots get connected.
        dyn_inputs = {"input1": (any_typ, {"lazy": True, "tooltip": "Any input. When connected, one more input slot is added."}), }
        if core.is_execution_model_version_supported():
            # NOTE(review): relies on the caller two frames up being 'get_input_info'
            # (new execution model validation); in that case every dynamic slot name
            # must be accepted, so pretend every key exists.
            stack = inspect.stack()
            if stack[2].function == 'get_input_info':
                # bypass validation
                class AllContainer:
                    def __contains__(self, item):
                        return True

                    def __getitem__(self, key):
                        return any_typ, {"lazy": True}

                dyn_inputs = AllContainer()

        inputs = {"required": {
                    "select": ("INT", {"default": 1, "min": 1, "max": 999999, "step": 1, "tooltip": "The input number you want to output among the inputs"}),
                    "sel_mode": ("BOOLEAN", {"default": False, "label_on": "select_on_prompt", "label_off": "select_on_execution", "forceInput": False,
                                             "tooltip": "In the case of 'select_on_execution', the selection is dynamically determined at the time of workflow execution. 'select_on_prompt' is an option that exists for older versions of ComfyUI, and it makes the decision before the workflow execution."}),
                    },
                  "optional": dyn_inputs,
                  "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO"}
                  }

        return inputs

    RETURN_TYPES = (any_typ, "STRING", "INT")
    RETURN_NAMES = ("selected_value", "selected_label", "selected_index")
    OUTPUT_TOOLTIPS = ("Output is generated only from the input chosen by the 'select' value.", "Slot label of the selected input slot", "Outputs the select value as is")
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def check_lazy_status(self, *args, **kwargs):
        # Only the selected input slot needs to be evaluated lazily.
        selected_index = int(kwargs['select'])
        input_name = f"input{selected_index}"

        logging.info(f"SELECTED: {input_name}")

        if input_name in kwargs:
            return [input_name]
        else:
            return []

    @staticmethod
    def doit(*args, **kwargs):
        selected_index = int(kwargs['select'])
        input_name = f"input{selected_index}"

        # Fall back to the slot name when no user-defined label is found.
        selected_label = input_name
        node_id = kwargs['unique_id']

        # Resolve the user-visible slot label from the workflow metadata, if present.
        if 'extra_pnginfo' in kwargs and kwargs['extra_pnginfo'] is not None:
            nodelist = kwargs['extra_pnginfo']['workflow']['nodes']
            for node in nodelist:
                if str(node['id']) == node_id:
                    inputs = node['inputs']

                    for slot in inputs:
                        if slot['name'] == input_name and 'label' in slot:
                            selected_label = slot['label']

                    break
        else:
            # API mode has no extra_pnginfo, so the label cannot be resolved.
            logging.info("[Impact-Pack] The switch node does not guarantee proper functioning in API mode.")

        if input_name in kwargs:
            return kwargs[input_name], selected_label, selected_index
        else:
            logging.info("ImpactSwitch: invalid select index (ignored)")
            return None, "", selected_index
|
||||
|
||||
class LatentSwitch:
    """Forwards the latent chosen by 'select'; falls back to latent1 when missing."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "select": ("INT", {"default": 1, "min": 1, "max": 99999, "step": 1}),
                    "latent1": ("LATENT",),
                    },
                }

    RETURN_TYPES = ("LATENT", )

    OUTPUT_NODE = True

    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, *args, **kwargs):
        slot_name = f"latent{int(kwargs['select'])}"

        if slot_name not in kwargs:
            # Out-of-range selection: fall back to the mandatory first slot.
            logging.info("LatentSwitch: invalid select index ('latent1' is selected)")
            return (kwargs['latent1'],)

        return (kwargs[slot_name],)
|
||||
|
||||
|
||||
class ImageMaskSwitch:
    """Selects one of up to four (image, mask) pairs by index."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "select": ("INT", {"default": 1, "min": 1, "max": 4, "step": 1}),
                    "images1": ("IMAGE",),
                    },

                "optional": {
                    "mask1_opt": ("MASK",),
                    "images2_opt": ("IMAGE",),
                    "mask2_opt": ("MASK",),
                    "images3_opt": ("IMAGE",),
                    "mask3_opt": ("MASK",),
                    "images4_opt": ("IMAGE",),
                    "mask4_opt": ("MASK",),
                    },
                }

    RETURN_TYPES = ("IMAGE", "MASK",)

    OUTPUT_NODE = True

    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, select, images1, mask1_opt=None, images2_opt=None, mask2_opt=None, images3_opt=None, mask3_opt=None,
             images4_opt=None, mask4_opt=None):
        # Any select value outside 1..3 falls through to pair 4, matching the
        # original if/elif/else chain.
        pairs = {
            1: (images1, mask1_opt),
            2: (images2_opt, mask2_opt),
            3: (images3_opt, mask3_opt),
        }
        return pairs.get(select, (images4_opt, mask4_opt))
|
||||
|
||||
|
||||
class GeneralInversedSwitch:
    """Routes one input to exactly one of N outputs, chosen by 'select'.

    Non-selected outputs receive an ExecutionBlocker (new execution model) or
    None (legacy ComfyUI), so only the selected branch executes downstream.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "select": ("INT", {"default": 1, "min": 1, "max": 999999, "step": 1, "tooltip": "The output number you want to send from the input"}),
                    "input": (any_typ, {"tooltip": "Any input. When connected, one more input slot is added."}),

                    },
                "optional": {
                    "sel_mode": ("BOOLEAN", {"default": False, "label_on": "select_on_prompt", "label_off": "select_on_execution", "forceInput": False,
                                             "tooltip": "In the case of 'select_on_execution', the selection is dynamically determined at the time of workflow execution. 'select_on_prompt' is an option that exists for older versions of ComfyUI, and it makes the decision before the workflow execution."}),
                    },
                "hidden": {"prompt": "PROMPT", "unique_id": "UNIQUE_ID"},
                }

    RETURN_TYPES = ByPassTypeTuple((any_typ, ))
    OUTPUT_TOOLTIPS = ("Output occurs only from the output selected by the 'select' value.\nWhen slots are connected, additional slots are created.", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, select, prompt, unique_id, input, **kwargs):
        if core.is_execution_model_version_supported():
            from comfy_execution.graph import ExecutionBlocker
        else:
            logging.warning("[Impact Pack] InversedSwitch: ComfyUI is outdated. The 'select_on_execution' mode cannot function properly.")

        res = []

        # search max output count in prompt: scan every node's inputs for links
        # that originate from this node (a link is a [source_node_id, slot] pair)
        # and remember the highest connected output slot index.
        cnt = 0
        for x in prompt.values():
            for y in x.get('inputs', {}).values():
                if isinstance(y, list) and len(y) == 2:
                    if y[0] == unique_id:
                        cnt = max(cnt, y[1])

        # Emit the input on the selected slot; block (or None on legacy
        # ComfyUI) on every other slot.
        for i in range(0, cnt + 1):
            if select == i+1:
                res.append(input)
            elif core.is_execution_model_version_supported():
                res.append(ExecutionBlocker(None))
            else:
                res.append(None)

        return res
|
||||
|
||||
|
||||
class RemoveNoiseMask:
    """Returns a copy of a LATENT dict with its 'noise_mask' entry stripped."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",)}}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, samples):
        # Shallow-copy, then drop the mask key if present (input is not mutated).
        cleaned = dict(samples)
        cleaned.pop('noise_mask', None)
        return (cleaned, )
|
||||
|
||||
|
||||
class ImagePasteMasked:
    """Paste `source` onto `destination` at (x, y), optionally masked/resized."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "destination": ("IMAGE",),
                "source": ("IMAGE",),
                "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                "resize_source": ("BOOLEAN", {"default": False}),
            },
            "optional": {
                "mask": ("MASK",),
            }
        }
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "composite"

    CATEGORY = "image"

    def composite(self, destination, source, x, y, resize_source, mask=None):
        # composite() works channels-first, so move NHWC -> NCHW and back.
        dest_chw = destination.clone().movedim(-1, 1)
        src_chw = source.movedim(-1, 1)
        composited = comfy_extras.nodes_mask.composite(dest_chw, src_chw, x, y, mask, 1, resize_source)
        return (composited.movedim(1, -1),)
|
||||
|
||||
|
||||
from impact.utils import any_typ
|
||||
|
||||
class ImpactLogger:
    """Debug node: logs any value (and the prompt), then pushes the value back
    into this node's 'text' widget in the frontend."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "data": (any_typ,),
                    "text": ("STRING", {"multiline": True}),
                    },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"},
                }

    CATEGORY = "ImpactPack/Debug"

    OUTPUT_NODE = True

    RETURN_TYPES = ()
    FUNCTION = "doit"

    def doit(self, data, text, prompt, extra_pnginfo, unique_id):
        # Prefix the logged value with its shape when available (tensors/arrays).
        shape = ""
        if hasattr(data, "shape"):
            shape = f"{data.shape} / "

        logging.info(f"[IMPACT LOGGER]: {shape}{data}")

        logging.info(f" PROMPT: {prompt}")

        # Mirror the logged value into the node's own text widget via websocket.
        PromptServer.instance.send_sync("impact-node-feedback", {"node_id": unique_id, "widget_name": "text", "type": "TEXT", "value": f"{data}"})
        return {}
|
||||
|
||||
|
||||
class ImpactDummyInput:
    """Debug helper node that emits the constant value 'DUMMY'."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {}}

    CATEGORY = "ImpactPack/Debug"

    RETURN_TYPES = (any_typ,)
    FUNCTION = "doit"

    def doit(self):
        return ("DUMMY",)
|
||||
|
||||
|
||||
class MasksToMaskList:
    """Converts a MASK batch into a list of individual 3-D masks."""

    @classmethod
    def INPUT_TYPES(s):
        return {"optional": {
                    "masks": ("MASK", ),
                    }
                }

    RETURN_TYPES = ("MASK", )
    OUTPUT_IS_LIST = (True, )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Operation"

    def doit(self, masks):
        if masks is None:
            # Nothing connected: emit a single empty 64x64 mask.
            return ([torch.zeros((64, 64), dtype=torch.float32, device="cpu")], )

        return ([make_3d_mask(m) for m in masks], )
|
||||
|
||||
|
||||
class MaskListToMaskBatch:
    """Stacks a list of masks into a single MASK batch, rescaling mismatched sizes."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "mask": ("MASK", ),
                    }
                }

    INPUT_IS_LIST = True

    RETURN_TYPES = ("MASK", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Operation"

    def doit(self, mask):
        if len(mask) == 0:
            # Empty list: a (1, 1, 64, 64) batch holding one empty mask.
            return (torch.zeros((1, 64, 64), dtype=torch.float32, device="cpu").unsqueeze(0),)

        normalized = [make_3d_mask(m) for m in mask]
        ref_hw = normalized[0].shape[1:]

        batch_parts = []
        for m in normalized:
            if m.shape[1:] != ref_hw:
                # Expand to 3 channels so common_upscale can interpolate,
                # then keep a single channel again.
                m = m.unsqueeze(1).repeat(1, 3, 1, 1)
                m = comfy.utils.common_upscale(m, ref_hw[1], ref_hw[0], "lanczos", "center")
                m = m[:, 0, :, :]

            batch_parts.append(m)

        return (torch.cat(batch_parts, dim=0),)
|
||||
|
||||
|
||||
class ImageListToImageBatch:
    """Merges a list of images into one IMAGE batch, aligning device, size and channels."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "images": ("IMAGE", ),
                    }
                }

    INPUT_IS_LIST = True

    RETURN_TYPES = ("IMAGE", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Operation"

    def doit(self, images):
        if len(images) == 0:
            return ()

        def ensure_batched(img):
            # Guarantee a leading batch dimension (NHWC).
            return img.unsqueeze(0) if img.ndim == 3 else img

        batch = ensure_batched(images[0])
        if len(images) == 1:
            return (batch,)

        for nxt in images[1:]:
            nxt = ensure_batched(nxt)

            # Move onto the accumulator's device before concatenating.
            if nxt.device != batch.device:
                nxt = nxt.to(batch.device)

            # Rescale to the accumulator's spatial size when necessary.
            h, w = batch.shape[1], batch.shape[2]
            if nxt.shape[1] != h or nxt.shape[2] != w:
                nxt = comfy.utils.common_upscale(
                    nxt.movedim(-1, 1),
                    w,
                    h,
                    "lanczos",
                    "center"
                ).movedim(1, -1)

            # Channel mismatch: truncate both sides to the common channel count.
            if nxt.shape[3] != batch.shape[3]:
                common_c = min(batch.shape[3], nxt.shape[3])
                batch = batch[:, :, :, :common_c]
                nxt = nxt[:, :, :, :common_c]

            batch = torch.cat((batch, nxt), dim=0)

        return (batch,)
|
||||
|
||||
|
||||
class ImageBatchToImageList:
    """Splits an IMAGE batch into a list of single-image tensors."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("IMAGE",), }}

    RETURN_TYPES = ("IMAGE",)
    OUTPUT_IS_LIST = (True,)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, image):
        # Keep a batch dimension of 1 on each item so every entry stays 4-D.
        return ([image[idx].unsqueeze(0) for idx in range(image.shape[0])], )
|
||||
|
||||
|
||||
class MakeAnyList:
    """Collects all connected values into a list output (unconnected/None slots skipped)."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {},
            "optional": {"value1": (any_typ,), }
        }

    RETURN_TYPES = (any_typ,)
    OUTPUT_IS_LIST = (True,)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, **kwargs):
        return ([v for v in kwargs.values() if v is not None], )
|
||||
|
||||
|
||||
class MakeMaskList:
    """Collects all connected masks into a list output."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"mask1": ("MASK",), }}

    RETURN_TYPES = ("MASK",)
    OUTPUT_IS_LIST = (True,)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, **kwargs):
        return (list(kwargs.values()), )
|
||||
|
||||
|
||||
class NthItemOfAnyList:
    """Selects the Nth item of a list; out-of-range indices clamp to the last item."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "any_list": (any_typ,),
                    "index": ("INT", {"default": 0, "min": -sys.maxsize, "max": sys.maxsize, "step": 1, "tooltip": "The index of the item you want to select from the list. Use negative values to select from the end (e.g., -1 for last item, -2 for second to last)."}),
                    }
                }

    RETURN_TYPES = (any_typ,)
    INPUT_IS_LIST = True
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    DESCRIPTION = "Selects the Nth item from a list. If the index is out of range, it returns the last item in the list."

    def doit(self, any_list, index):
        # INPUT_IS_LIST wraps the index widget value in a list.
        idx = index[0]
        n = len(any_list)
        if -n <= idx < n:
            return (any_list[idx],)
        # Out of range in either direction: clamp to the final element.
        return (any_list[-1],)
|
||||
|
||||
|
||||
class MakeImageList:
    """Collects all connected images into a list output."""

    @classmethod
    def INPUT_TYPES(s):
        return {"optional": {"image1": ("IMAGE",), }}

    RETURN_TYPES = ("IMAGE",)
    OUTPUT_IS_LIST = (True,)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, **kwargs):
        return (list(kwargs.values()), )
|
||||
|
||||
|
||||
class MakeImageBatch:
    """Concatenates connected images (image1, image2, ...) into one IMAGE batch."""

    @classmethod
    def INPUT_TYPES(s):
        return {"optional": {"image1": ("IMAGE",), }}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, **kwargs):
        sources = list(kwargs.values())

        if len(sources) == 1:
            return (sources[0],)

        acc = sources[0]
        for nxt in sources[1:]:
            # Rescale any mismatched image to the first image's size.
            if acc.shape[1:] != nxt.shape[1:]:
                nxt = comfy.utils.common_upscale(nxt.movedim(-1, 1), acc.shape[2], acc.shape[1], "lanczos", "center").movedim(1, -1)
            acc = torch.cat((acc, nxt), dim=0)
        return (acc,)
|
||||
|
||||
|
||||
class MakeMaskBatch:
    """Concatenates connected masks (mask1, mask2, ...) into one MASK batch."""

    @classmethod
    def INPUT_TYPES(s):
        return {"optional": {"mask1": ("MASK",), }}

    RETURN_TYPES = ("MASK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, **kwargs):
        normalized = [make_3d_mask(v) for v in kwargs.values()]

        if len(normalized) == 1:
            return (normalized[0],)

        acc = normalized[0]
        for nxt in normalized[1:]:
            # Rescale any mismatched mask to the first mask's size.
            if acc.shape[1:] != nxt.shape[1:]:
                nxt = comfy.utils.common_upscale(nxt.movedim(-1, 1), acc.shape[2], acc.shape[1], "lanczos", "center").movedim(1, -1)
            acc = torch.cat((acc, nxt), dim=0)
        return (acc,)
|
||||
|
||||
|
||||
class ReencodeLatent:
    """Decodes a latent with one VAE and re-encodes it with another, optionally tiled.

    `tile_mode` controls which side(s) use the tiled codec; `overlap` is only
    forwarded when the installed ComfyUI's tiled nodes support it.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "samples": ("LATENT", ),
                    "tile_mode": (["None", "Both", "Decode(input) only", "Encode(output) only"],),
                    "input_vae": ("VAE", ),
                    "output_vae": ("VAE", ),
                    "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}),
                    },
                "optional": {
                    "overlap": ("INT", {"default": 64, "min": 0, "max": 4096, "step": 32, "tooltip": "This setting applies when 'tile_mode' is enabled."}),
                    }
                }

    CATEGORY = "ImpactPack/Util"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "doit"

    def doit(self, samples, tile_mode, input_vae, output_vae, tile_size=512, overlap=64):
        """Decode `samples` with `input_vae`, then encode the pixels with `output_vae`.

        Returns a single-element tuple containing the re-encoded LATENT.
        """
        if tile_mode in ["Both", "Decode(input) only"]:
            decoder = nodes.VAEDecodeTiled()
            # Older VAEDecodeTiled implementations have no 'overlap' parameter;
            # only forward it when the signature accepts it.
            # (Previously both branches passed overlap=..., which would raise
            # TypeError on old ComfyUI versions.)
            if 'overlap' in inspect.signature(decoder.decode).parameters:
                pixels = decoder.decode(input_vae, samples, tile_size, overlap=overlap)[0]
            else:
                pixels = decoder.decode(input_vae, samples, tile_size)[0]
        else:
            pixels = nodes.VAEDecode().decode(input_vae, samples)[0]

        if tile_mode in ["Both", "Encode(output) only"]:
            encoder = nodes.VAEEncodeTiled()
            # Same compatibility guard for the tiled encoder.
            if 'overlap' in inspect.signature(encoder.encode).parameters:
                return encoder.encode(output_vae, pixels, tile_size, overlap=overlap)
            else:
                return encoder.encode(output_vae, pixels, tile_size)
        else:
            return nodes.VAEEncode().encode(output_vae, pixels)
|
||||
|
||||
|
||||
class ReencodeLatentPipe:
    """ReencodeLatent variant that pulls both VAEs out of BASIC_PIPEs."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "samples": ("LATENT", ),
                    "tile_mode": (["None", "Both", "Decode(input) only", "Encode(output) only"],),
                    "input_basic_pipe": ("BASIC_PIPE", ),
                    "output_basic_pipe": ("BASIC_PIPE", ),
                    },
                }

    CATEGORY = "ImpactPack/Util"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "doit"

    def doit(self, samples, tile_mode, input_basic_pipe, output_basic_pipe):
        # BASIC_PIPE layout: (model, clip, vae, positive, negative) — the VAE sits at index 2.
        input_vae = input_basic_pipe[2]
        output_vae = output_basic_pipe[2]
        return ReencodeLatent().doit(samples, tile_mode, input_vae, output_vae)
|
||||
|
||||
|
||||
class StringSelector:
    """Selects one line — or, in multiline mode, one '#'-headed section — from a string."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "strings": ("STRING", {"multiline": True}),
            "multiline": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
            "select": ("INT", {"min": 0, "max": sys.maxsize, "step": 1, "default": 0}),
        }}

    RETURN_TYPES = ("STRING",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, strings, multiline, select):
        rows = strings.split('\n')

        if not multiline:
            # Simple mode: pick a single line, wrapping the index around.
            if not rows:
                return (strings, )
            return (rows[select % len(rows)], )

        # Multiline mode: group rows into sections, each starting at a '#' header.
        sections = []
        buffer = ""
        for row in rows:
            if row.startswith("#") and buffer:
                sections.append(buffer.strip())
                buffer = ""
            buffer += row + "\n"
        if buffer:
            sections.append(buffer.strip())

        if not sections:
            picked = strings
        else:
            picked = sections[select % len(sections)]

        # Drop the leading '#' of the section header from the result.
        if picked.startswith('#'):
            picked = picked[1:]

        return (picked, )
|
||||
|
||||
|
||||
class StringListToString:
    """Joins a list of strings into one string with a configurable separator."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "join_with": ("STRING", {"default": "\\n"}),
                "string_list": ("STRING", {"forceInput": True}),
            }
        }

    INPUT_IS_LIST = True
    RETURN_TYPES = ("STRING",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, join_with, string_list):
        # INPUT_IS_LIST delivers the separator as a single-item list; the widget
        # provides the literal two-character escape "\n", which we translate
        # into a real newline before joining.
        if join_with[0] == "\\n":
            join_with[0] = "\n"

        return (join_with[0].join(string_list),)
|
||||
|
||||
|
||||
class WildcardPromptFromString:
    """Builds an Impact wildcard prompt ('[LAB]' header plus one '[n] ...' row per
    entry) and the matching label list from a delimited tag string."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "string": ("STRING", {"forceInput": True}),
                "delimiter": ("STRING", {"multiline": False, "default": "\\n" }),
                "prefix_all": ("STRING", {"multiline": False}),
                "postfix_all": ("STRING", {"multiline": False}),
                "restrict_to_tags": ("STRING", {"multiline": False}),
                "exclude_tags": ("STRING", {"multiline": False})
            },
        }

    RETURN_TYPES = ("STRING", "STRING",)
    RETURN_NAMES = ("wildcard", "segs_labels",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, string, delimiter, prefix_all, postfix_all, restrict_to_tags, exclude_tags):
        """Split `string` by `delimiter`; per entry filter its ', '-separated tags,
        wrap with prefix/postfix, and return (wildcard_prompt, segs_labels)."""
        # convert \\n to newline character
        if delimiter == "\\n":
            delimiter = "\n"

        # some sanity checks and normalization for later processing
        if prefix_all is None:
            prefix_all = ""
        if postfix_all is None:
            postfix_all = ""
        if restrict_to_tags is None:
            restrict_to_tags = ""
        if exclude_tags is None:
            exclude_tags = ""

        restrict_to_tags = restrict_to_tags.split(", ")
        exclude_tags = exclude_tags.split(", ")

        # build the wildcard prompt per list entry
        output = ["[LAB]"]
        labels = []
        for x in string.split(delimiter):
            label = str(len(labels) + 1)
            labels.append(label)
            x = x.split(", ")
            # restrict to tags — order-preserving with de-duplication
            # (the previous set-intersection produced nondeterministic tag order)
            if restrict_to_tags != [""]:
                allowed = set(restrict_to_tags)
                x = list(dict.fromkeys(t for t in x if t in allowed))
            # remove tags — same deterministic treatment
            if exclude_tags != [""]:
                blocked = set(exclude_tags)
                x = list(dict.fromkeys(t for t in x if t not in blocked))
            # next row: <LABEL> <PREFIX> <TAGS> <POSTFIX>
            prompt_for_seg = f'[{label}] {prefix_all} {", ".join(x)} {postfix_all}'.strip()
            output.append(prompt_for_seg)
        output = "\n".join(output)

        # clean string: fixup double spaces, commas etc.
        output = re.sub(r' ,', ',', output)
        output = re.sub(r' +', ' ', output)
        output = re.sub(r',,+', ',', output)
        output = re.sub(r'\n, ', '\n', output)

        return output, ", ".join(labels)
|
||||
# ---- custom_nodes/ComfyUI-Impact-Pack/modules/impact/utils.py (new file, 743 lines) ----
|
||||
import torch
|
||||
import torchvision
|
||||
import cv2
|
||||
import numpy as np
|
||||
import folder_paths
|
||||
import nodes
|
||||
from . import config
|
||||
from PIL import Image
|
||||
import comfy
|
||||
import time
|
||||
import logging
|
||||
|
||||
|
||||
class TensorBatchBuilder:
    """Incrementally accumulates tensors into a single batch along dim 0."""

    def __init__(self):
        # Holds the accumulated batch; None until the first concat().
        self.tensor = None

    def concat(self, new_tensor):
        """Append `new_tensor` to the batch (it becomes the batch if it is first)."""
        if self.tensor is None:
            self.tensor = new_tensor
            return
        self.tensor = torch.concat((self.tensor, new_tensor), dim=0)
|
||||
|
||||
|
||||
def tensor_convert_rgba(image, prefer_copy=True):
    """Assumes NHWC format tensor with 1, 3 or 4 channels; returns a 4-channel tensor.

    `prefer_copy=True` materializes new memory (repeat); otherwise a broadcast
    view (expand) is returned where possible.
    """
    _tensor_check_image(image)
    n_channel = image.shape[-1]
    if n_channel == 4:
        return image

    if n_channel == 3:
        # Append a fully-opaque alpha channel.
        alpha = torch.ones((*image.shape[:-1], 1))
        return torch.cat((image, alpha), axis=-1)

    if n_channel == 1:
        # Broadcast the single channel to all 4 RGBA channels.
        # Fixes: repeat() rejects negative sizes, and the expand branch
        # previously produced 3 channels instead of 4.
        if prefer_copy:
            image = image.repeat(1, 1, 1, 4)
        else:
            image = image.expand(-1, -1, -1, 4)
        return image

    # NOTE: Similar error message as in PIL, for easier googling :P
    raise ValueError(f"illegal conversion (channels: {n_channel} -> 4)")
|
||||
|
||||
|
||||
def tensor_convert_rgb(image, prefer_copy=True):
    """Assumes NHWC format tensor with 1, 3 or 4 channels; returns a 3-channel tensor.

    `prefer_copy=True` materializes new memory; otherwise a view is returned
    where possible.
    """
    _tensor_check_image(image)
    n_channel = image.shape[-1]
    if n_channel == 3:
        return image

    if n_channel == 4:
        # Drop the alpha channel.
        image = image[..., :3]
        if prefer_copy:
            # torch tensors have .clone(), not numpy's .copy()
            image = image.clone()
        return image

    if n_channel == 1:
        # Broadcast the single channel to 3 RGB channels.
        # Fixes: repeat() rejects negative sizes; both branches previously
        # targeted the wrong channel count.
        if prefer_copy:
            image = image.repeat(1, 1, 1, 3)
        else:
            image = image.expand(-1, -1, -1, 3)
        return image

    # NOTE: Same error message as in PIL, for easier googling :P
    raise ValueError(f"illegal conversion (channels: {n_channel} -> 3)")
|
||||
|
||||
|
||||
def resize_with_padding(image, target_w: int, target_h: int):
    """Letterbox-resize an NHWC image to (target_h, target_w), preserving aspect ratio.

    Returns (image, (pad_top, pad_bottom, pad_left, pad_right)) so the padding
    can later be undone with remove_padding().
    """
    _tensor_check_image(image)
    # 'F' was referenced without ever being imported; bind it explicitly.
    F = torch.nn.functional
    b, h, w, c = image.shape
    image = image.permute(0, 3, 1, 2)  # B, C, H, W

    # Uniform scale that fits the image inside the target box.
    scale = min(target_w / w, target_h / h)
    new_w, new_h = int(w * scale), int(h * scale)

    image = F.interpolate(image, size=(new_h, new_w), mode="bilinear", align_corners=False)

    # Distribute leftover space evenly; remainder goes to bottom/right.
    pad_left = (target_w - new_w) // 2
    pad_right = target_w - new_w - pad_left
    pad_top = (target_h - new_h) // 2
    pad_bottom = target_h - new_h - pad_top

    image = F.pad(image, (pad_left, pad_right, pad_top, pad_bottom), mode='constant', value=0)

    image = image.permute(0, 2, 3, 1)  # B, H, W, C
    return image, (pad_top, pad_bottom, pad_left, pad_right)
|
||||
|
||||
|
||||
def remove_padding(image, padding):
|
||||
pad_top, pad_bottom, pad_left, pad_right = padding
|
||||
return image[:, pad_top:image.shape[1] - pad_bottom, pad_left:image.shape[2] - pad_right, :]
|
||||
|
||||
|
||||
def adjust_bbox_after_resize(bbox, original_size, target_size, padding):
    """
    Map a bbox from the original image into the letterboxed/resized image.

    bbox: (x1, y1, x2, y2) in original image
    original_size: (original_h, original_w)
    target_size: (target_h, target_w)
    padding: (pad_top, pad_bottom, pad_left, pad_right)
    """
    orig_h, orig_w = original_size
    target_h, target_w = target_size
    pad_top, _, pad_left, _ = padding

    # Same uniform scale used by resize_with_padding.
    ratio = min(target_w / orig_w, target_h / orig_h)

    offsets = (pad_left, pad_top, pad_left, pad_top)
    return tuple(int(coord * ratio + off) for coord, off in zip(bbox, offsets))
|
||||
|
||||
|
||||
def general_tensor_resize(image, w: int, h: int):
    """Bilinear-resize an NHWC image tensor to (h, w)."""
    _tensor_check_image(image)
    chw = image.permute(0, 3, 1, 2)
    chw = torch.nn.functional.interpolate(chw, size=(h, w), mode="bilinear")
    return chw.permute(0, 2, 3, 1)
|
||||
|
||||
|
||||
# TODO: Sadly, we need LANCZOS
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)


def tensor_resize(image, w: int, h: int):
    """Resize an NHWC tensor to (h, w): PIL LANCZOS for >=3 channels, bilinear otherwise."""
    _tensor_check_image(image)

    if image.shape[3] < 3:
        # PIL round-trip needs an RGB-capable image; fall back to bilinear.
        return general_tensor_resize(image, w, h)

    builder = TensorBatchBuilder()
    for frame in image:
        # Round-trip each batch item through PIL to get LANCZOS resampling.
        pil_frame = tensor2pil(frame.unsqueeze(0))
        resized = pil_frame.resize((w, h), resample=LANCZOS)
        builder.concat(pil2tensor(resized))

    return builder.tensor
|
||||
|
||||
|
||||
def tensor_get_size(image):
    """Mimicking `PIL.Image.size`: returns (width, height) of an NHWC tensor."""
    _tensor_check_image(image)
    return (image.shape[2], image.shape[1])
|
||||
|
||||
|
||||
def tensor2pil(image):
    """Convert a single-image NHWC float tensor (values in [0, 1]) to a PIL image."""
    _tensor_check_image(image)
    arr = image.cpu().numpy().squeeze(0)
    return Image.fromarray(np.clip(255. * arr, 0, 255).astype(np.uint8))
|
||||
|
||||
|
||||
def pil2tensor(image):
    """Convert a PIL image (uint8) to a 1-batch NHWC float tensor in [0, 1]."""
    arr = np.array(image).astype(np.float32) / 255.0
    return torch.from_numpy(arr).unsqueeze(0)
|
||||
|
||||
|
||||
def numpy2pil(image):
    """Convert a float numpy image (values in [0, 1], optional leading batch dim) to PIL."""
    scaled = np.clip(255. * image.squeeze(0), 0, 255)
    return Image.fromarray(scaled.astype(np.uint8))
|
||||
|
||||
|
||||
def to_pil(image):
    """Coerce a PIL / torch / numpy image into a PIL image."""
    converters = (
        (Image.Image, lambda x: x),
        (torch.Tensor, tensor2pil),
        (np.ndarray, numpy2pil),
    )
    for klass, convert in converters:
        if isinstance(image, klass):
            return convert(image)
    raise ValueError(f"Cannot convert {type(image)} to PIL.Image")
|
||||
|
||||
|
||||
def to_tensor(image):
    """Coerce a PIL / torch / numpy image into a torch.Tensor (PIL input scaled to [0, 1])."""
    if isinstance(image, torch.Tensor):
        return image
    if isinstance(image, Image.Image):
        return torch.from_numpy(np.array(image)) / 255.0
    if isinstance(image, np.ndarray):
        return torch.from_numpy(image)
    raise ValueError(f"Cannot convert {type(image)} to torch.Tensor")
|
||||
|
||||
|
||||
def to_numpy(image):
    """Coerce a PIL / torch / numpy image into a numpy.ndarray."""
    if isinstance(image, np.ndarray):
        return image
    if isinstance(image, Image.Image):
        return np.array(image)
    if isinstance(image, torch.Tensor):
        return image.numpy()
    raise ValueError(f"Cannot convert {type(image)} to numpy.ndarray")
|
||||
|
||||
def tensor_putalpha(image, mask):
    """Write the mask's single channel into the image's last (alpha) channel, in place."""
    _tensor_check_image(image)
    _tensor_check_mask(mask)
    image[..., -1] = mask[..., 0]
|
||||
|
||||
|
||||
def _tensor_check_image(image):
|
||||
if image.ndim != 4:
|
||||
raise ValueError(f"Expected NHWC tensor, but found {image.ndim} dimensions")
|
||||
if image.shape[-1] not in (1, 3, 4):
|
||||
raise ValueError(f"Expected 1, 3 or 4 channels for image, but found {image.shape[-1]} channels")
|
||||
return
|
||||
|
||||
|
||||
def _tensor_check_mask(mask):
|
||||
if mask.ndim != 4:
|
||||
raise ValueError(f"Expected NHWC tensor, but found {mask.ndim} dimensions")
|
||||
if mask.shape[-1] != 1:
|
||||
raise ValueError(f"Expected 1 channel for mask, but found {mask.shape[-1]} channels")
|
||||
return
|
||||
|
||||
|
||||
def tensor_crop(image, crop_region):
    """Crop a validated NHWC image tensor to `crop_region`; delegates to crop_ndarray4."""
    _tensor_check_image(image)
    return crop_ndarray4(image, crop_region)
|
||||
|
||||
|
||||
def tensor2numpy(image):
    """Return a validated NHWC image tensor's data as a numpy array."""
    _tensor_check_image(image)
    return image.numpy()
|
||||
|
||||
|
||||
def tensor_paste(image1, image2, left_top, mask):
    """
    Pastes image2 onto image1 at position left_top using mask.
    Supports both RGB and RGBA images.

    Mutates `image1` in place; returns None. Out-of-bounds pastes are a no-op.
    """
    _tensor_check_image(image1)
    _tensor_check_image(image2)
    _tensor_check_mask(mask)

    # Resize the mask to the source image's spatial size when they differ.
    if image2.shape[1:3] != mask.shape[1:3]:
        mask = resize_mask(mask.squeeze(dim=3), image2.shape[1:3]).unsqueeze(dim=3)

    x, y = left_top
    _, h1, w1, c1 = image1.shape
    _, h2, w2, c2 = image2.shape

    # Calculate image patch size (the part of image2 that lands inside image1)
    w = min(w1, x + w2) - x
    h = min(h1, y + h2) - y

    # If the patch is out of bound, nothing to do!
    if w <= 0 or h <= 0:
        return

    # Clip the mask to the visible patch.
    mask = mask[:, :h, :w, :]

    # Get the region to be modified
    region1 = image1[:, y:y+h, x:x+w, :]
    region2 = image2[:, :h, :w, :]

    # Handle RGB and RGBA cases
    if c1 == 3 and c2 == 3:
        # Both RGB - simple case: linear blend weighted by the mask.
        image1[:, y:y+h, x:x+w, :] = (1 - mask) * region1 + mask * region2

    elif c1 == 4 and c2 == 4:
        # Both RGBA - need to handle alpha channel separately
        # RGB channels
        image1[:, y:y+h, x:x+w, :3] = (
            (1 - mask) * region1[:, :, :, :3] +
            mask * region2[:, :, :, :3]
        )

        # Alpha channel - use "over" composition
        a1 = region1[:, :, :, 3:4]
        a2 = region2[:, :, :, 3:4] * mask
        new_alpha = a1 + a2 * (1 - a1)
        image1[:, y:y+h, x:x+w, 3:4] = new_alpha

    elif c1 == 4 and c2 == 3:
        # Target is RGBA, source is RGB - assume source is fully opaque
        image1[:, y:y+h, x:x+w, :3] = (
            (1 - mask) * region1[:, :, :, :3] +
            mask * region2
        )
        # Alpha channel - reduce alpha where mask is applied
        image1[:, y:y+h, x:x+w, 3:4] = region1[:, :, :, 3:4] * (1 - mask) + mask

    elif c1 == 3 and c2 == 4:
        # Target is RGB, source is RGBA - apply source alpha to mask
        effective_mask = mask * region2[:, :, :, 3:4]
        image1[:, y:y+h, x:x+w, :] = (
            (1 - effective_mask) * region1 +
            effective_mask * region2[:, :, :, :3]
        )

    # NOTE(review): mixed 1-channel inputs fall through all branches and leave
    # image1 unmodified — confirm whether that is intended.
    return
|
||||
|
||||
|
||||
def center_of_bbox(bbox):
    """Return the (x, y) center point of an [x1, y1, x2, y2] bounding box."""
    x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
    return (x1 + x2) / 2, (y1 + y2) / 2
|
||||
|
||||
|
||||
def combine_masks(masks):
    """Merge a list of (label, mask) pairs into one mask via bitwise OR.

    Masks whose shape differs from the first mask are silently skipped.
    Returns a torch tensor, or None when the list is empty.
    """
    if not masks:
        return None

    merged = np.array(masks[0][1])
    for item in masks[1:]:
        candidate = np.array(item[1])
        # Shape-mismatched masks are incompatible and ignored.
        if candidate.shape == merged.shape:
            merged = cv2.bitwise_or(merged, candidate)

    return torch.from_numpy(merged)
|
||||
|
||||
|
||||
def combine_masks2(masks):
    """Merge a list of raw masks (not SEG tuples) via bitwise OR as uint8.

    Masks whose shape differs from the first mask are silently skipped.
    Returns a torch tensor, or None when the list is empty.
    """
    if not masks:
        return None

    merged = np.array(masks[0]).astype(np.uint8)
    for item in masks[1:]:
        candidate = np.array(item).astype(np.uint8)
        # Shape-mismatched masks are incompatible and ignored.
        if candidate.shape == merged.shape:
            merged = cv2.bitwise_or(merged, candidate)

    return torch.from_numpy(merged)
|
||||
|
||||
|
||||
def bitwise_and_masks(mask1, mask2):
    """Combine two equal-shaped masks with cv2.bitwise_and.

    On a shape mismatch (typically a mostly-empty mask) the first mask is
    returned unchanged, moved to CPU.
    """
    mask1 = mask1.cpu()
    mask2 = mask2.cpu()
    arr1 = np.array(mask1)
    arr2 = np.array(mask2)

    if arr1.shape != arr2.shape:
        # do nothing - incompatible mask shape: mostly empty mask
        return mask1

    return torch.from_numpy(cv2.bitwise_and(arr1, arr2))
|
||||
|
||||
|
||||
def to_binary_mask(mask, threshold=0):
    """Threshold a mask to {0, 1}: values above `threshold` become 1.0."""
    mask = make_3d_mask(mask)

    binary = mask.clone().cpu()
    # NOTE(review): if threshold >= 1 the second assignment re-zeroes pixels
    # just set to 1 - presumably thresholds stay in [0, 1); confirm at callers.
    binary[binary > threshold] = 1.
    binary[binary <= threshold] = 0.
    return binary
|
||||
|
||||
|
||||
def use_gpu_opencv():
    """True unless GPU OpenCV has been disabled in the Impact Pack config."""
    cfg = config.get_config()
    return not cfg['disable_gpu_opencv']
|
||||
|
||||
|
||||
def dilate_mask(mask, dilation_factor, iter=1):
    """Dilate (positive factor) or erode (negative factor) a 2D mask.

    Args:
        mask: mask tensor/ndarray; reduced to 2D before processing.
        dilation_factor: kernel size; the sign selects dilate vs erode,
            0 is a no-op.
        iter: number of morphology iterations.

    Returns:
        The processed 2D mask (ndarray, or the UMat result pulled back to CPU).
    """
    if dilation_factor == 0:
        return make_2d_mask(mask)

    mask = make_2d_mask(mask)

    kernel = np.ones((abs(dilation_factor), abs(dilation_factor)), np.uint8)

    use_gpu = use_gpu_opencv()
    if use_gpu:
        mask = cv2.UMat(mask)
        kernel = cv2.UMat(kernel)

    # BUGFIX: `iter` must be the `iterations` keyword; passed positionally it
    # lands on cv2's `dst` parameter and the iteration count is ignored.
    if dilation_factor > 0:
        result = cv2.dilate(mask, kernel, iterations=iter)
    else:
        result = cv2.erode(mask, kernel, iterations=iter)

    # UMat results live on the GPU; .get() copies them back to an ndarray.
    return result.get() if use_gpu else result
|
||||
|
||||
|
||||
def dilate_masks(segmasks, dilation_factor, iter=1):
    """Dilate/erode every mask in a list of (bbox, mask, confidence) tuples.

    Args:
        segmasks: list of 3-tuples whose second element is a cv2 mask.
        dilation_factor: kernel size; sign selects dilate vs erode, 0 no-op.
        iter: number of morphology iterations.

    Returns:
        A new list of tuples with the masks processed; bbox and confidence
        entries are passed through untouched.
    """
    if dilation_factor == 0:
        return segmasks

    dilated_masks = []
    kernel = np.ones((abs(dilation_factor), abs(dilation_factor)), np.uint8)

    gpu = use_gpu_opencv()
    if gpu:
        kernel = cv2.UMat(kernel)

    for bbox, cv2_mask, extra in segmasks:
        if gpu:
            cv2_mask = cv2.UMat(cv2_mask)

        # BUGFIX: `iter` must be the `iterations` keyword; passed positionally
        # it lands on cv2's `dst` parameter and the iteration count is ignored.
        if dilation_factor > 0:
            dilated_mask = cv2.dilate(cv2_mask, kernel, iterations=iter)
        else:
            dilated_mask = cv2.erode(cv2_mask, kernel, iterations=iter)

        if gpu:
            dilated_mask = dilated_mask.get()

        dilated_masks.append((bbox, dilated_mask, extra))

    return dilated_masks
|
||||
|
||||
import torch.nn.functional as F
|
||||
def feather_mask(mask, thickness):
    """Feather (Gaussian-blur) the edges of an NHWC mask.

    Args:
        mask: float tensor of shape (N, H, W, 1).
        thickness: feather radius in pixels; kernel size is 2*thickness + 1.
            Must be >= 1 (sigma is derived as thickness / 3).

    Returns:
        Blurred mask with the same (N, H, W, 1) layout as the input.
    """
    thickness = int(thickness)
    mask = mask.permute(0, 3, 1, 2)  # NHWC -> NCHW for conv2d

    # Build a normalized 2D Gaussian kernel as the outer product of 1D kernels.
    kernel_size = 2 * thickness + 1
    sigma = thickness / 3  # Adjust the sigma value as needed
    coords = torch.arange(kernel_size) - kernel_size // 2
    kernel_1d = torch.exp(-coords**2 / (2 * sigma**2))
    kernel_1d = kernel_1d / kernel_1d.sum()
    kernel_2d = torch.outer(kernel_1d, kernel_1d).to(mask.device, mask.dtype)

    # BUGFIX: conv2d requires a 4D weight (out_ch, in_ch, kH, kW); the
    # original passed a 1D kernel with only two added dims, which conv2d
    # rejects. The 2D kernel above fixes the blur.
    weight = kernel_2d.unsqueeze(0).unsqueeze(0)
    blurred = F.conv2d(mask, weight, padding=thickness)

    return blurred.permute(0, 2, 3, 1)  # back to NHWC
|
||||
|
||||
def _gaussian_kernel(kernel_size, sigma):
|
||||
# Generate a 1D Gaussian kernel
|
||||
kernel = torch.exp(-(torch.arange(kernel_size) - kernel_size // 2)**2 / (2 * sigma**2))
|
||||
return kernel / kernel.sum()
|
||||
|
||||
|
||||
def tensor_gaussian_blur_mask(mask, kernel_size, sigma=10.0):
    """Return an NHWC blurred `torch.Tensor` mask from an ndim == 2, 3 or 4
    `np.ndarray` or `torch.Tensor`.

    Args:
        mask: input mask; 2D/3D inputs are expanded to NHWC.
        kernel_size: blur radius; the actual kernel is kernel_size*2+1 and is
            shrunk (kept odd) when the mask is smaller than the kernel.
        sigma: Gaussian sigma passed to torchvision's GaussianBlur.
    """
    if isinstance(mask, np.ndarray):
        mask = torch.from_numpy(mask)

    # Expand to NHWC: (H, W) -> (1, H, W, 1), (N, H, W) -> (N, H, W, 1).
    if mask.ndim == 2:
        mask = mask[None, ..., None]
    elif mask.ndim == 3:
        mask = mask[..., None]

    _tensor_check_mask(mask)

    if kernel_size <= 0:
        return mask

    kernel_size = kernel_size*2+1

    # Clamp the kernel to the mask size and keep it odd.
    shortest = min(mask.shape[1], mask.shape[2])
    if shortest <= kernel_size:
        kernel_size = int(shortest/2)
        if kernel_size % 2 == 0:
            kernel_size += 1
        if kernel_size < 3:
            return mask  # skip feathering

    prev_device = mask.device
    device = comfy.model_management.get_torch_device()
    # BUGFIX: Tensor.to() is not in-place - the result must be reassigned,
    # otherwise the mask never moves to the compute device.
    mask = mask.to(device)

    # apply gaussian blur (NHWC -> NCHW single-channel and back)
    mask = mask[:, None, ..., 0]
    blurred_mask = torchvision.transforms.GaussianBlur(kernel_size=kernel_size, sigma=sigma)(mask)
    blurred_mask = blurred_mask[:, 0, ..., None]

    # BUGFIX: reassign here too, so the result returns to the original device.
    blurred_mask = blurred_mask.to(prev_device)

    return blurred_mask
|
||||
|
||||
|
||||
def subtract_masks(mask1, mask2):
    """Subtract mask2 from mask1 with saturation, clamped back to [0, 1].

    On a shape mismatch (typically a mostly-empty mask) mask1 is returned
    unchanged, moved to CPU.
    """
    mask1 = mask1.cpu()
    mask2 = mask2.cpu()
    arr1 = np.array(mask1) * 255
    arr2 = np.array(mask2) * 255

    if arr1.shape != arr2.shape:
        # do nothing - incompatible mask shape: mostly empty mask
        return mask1

    diff = cv2.subtract(arr1, arr2)
    return torch.clamp(torch.from_numpy(diff) / 255.0, min=0, max=1)
|
||||
|
||||
|
||||
def add_masks(mask1, mask2):
    """Add two masks with saturation, clamped back to [0, 1].

    On a shape mismatch (typically a mostly-empty mask) mask1 is returned
    unchanged, moved to CPU.
    """
    mask1 = mask1.cpu()
    mask2 = mask2.cpu()
    arr1 = np.array(mask1) * 255
    arr2 = np.array(mask2) * 255

    if arr1.shape != arr2.shape:
        # do nothing - incompatible mask shape: mostly empty mask
        return mask1

    total = cv2.add(arr1, arr2)
    return torch.clamp(torch.from_numpy(total) / 255.0, min=0, max=1)
|
||||
|
||||
|
||||
def normalize_region(limit, startp, size):
    """Clamp the window [startp, startp + size) into [0, limit).

    The window is shifted back inside the bounds, keeping its size where
    possible. Returns the adjusted (start, end) as ints.
    """
    if startp < 0:
        # Window starts before 0: pin it to the left edge.
        start, end = 0, min(limit, size)
    elif startp + size > limit:
        # Window overruns the right edge: shift it back.
        start, end = max(0, limit - size), limit
    else:
        start, end = startp, min(limit, startp + size)

    return int(start), int(end)
|
||||
|
||||
|
||||
def make_crop_region(w, h, bbox, crop_factor, crop_min_size=None):
    """Expand `bbox` by `crop_factor` around its center, clamped to the image.

    Args:
        w, h: image width and height.
        bbox: [x1, y1, x2, y2] detection box.
        crop_factor: multiplier applied to the bbox size.
        crop_min_size: optional lower bound for the crop width/height.

    Returns:
        [new_x1, new_y1, new_x2, new_y2] crop rectangle inside the image.
    """
    x1, y1 = bbox[0], bbox[1]
    x2, y2 = bbox[2], bbox[3]

    bbox_w = x2 - x1
    bbox_h = y2 - y1

    crop_w = bbox_w * crop_factor
    crop_h = bbox_h * crop_factor

    if crop_min_size is not None:
        crop_w = max(crop_min_size, crop_w)
        crop_h = max(crop_min_size, crop_h)

    # Center the crop window on the bbox center.
    center_x = x1 + bbox_w / 2
    center_y = y1 + bbox_h / 2

    new_x1 = int(center_x - crop_w / 2)
    new_y1 = int(center_y - crop_h / 2)

    # Clamp the crop window into the image bounds.
    new_x1, new_x2 = normalize_region(w, new_x1, crop_w)
    new_y1, new_y2 = normalize_region(h, new_y1, crop_h)

    return [new_x1, new_y1, new_x2, new_y2]
|
||||
|
||||
|
||||
def crop_ndarray4(npimg, crop_region):
    """Crop an NHWC array to the [x1, y1, x2, y2] region, keeping all batches
    and channels."""
    x1, y1 = crop_region[0], crop_region[1]
    x2, y2 = crop_region[2], crop_region[3]
    return npimg[:, y1:y2, x1:x2, :]
|
||||
|
||||
|
||||
# torch tensors support the same slicing, so the ndarray cropper is reused.
crop_tensor4 = crop_ndarray4
|
||||
|
||||
|
||||
def crop_ndarray3(npimg, crop_region):
    """Crop an NHW (channel-less) array to the [x1, y1, x2, y2] region."""
    x1, y1 = crop_region[0], crop_region[1]
    x2, y2 = crop_region[2], crop_region[3]
    return npimg[:, y1:y2, x1:x2]
|
||||
|
||||
|
||||
def crop_ndarray2(npimg, crop_region):
    """Crop a 2D (H, W) array to the [x1, y1, x2, y2] region."""
    x1, y1 = crop_region[0], crop_region[1]
    x2, y2 = crop_region[2], crop_region[3]
    return npimg[y1:y2, x1:x2]
|
||||
|
||||
|
||||
def crop_image(image, crop_region):
    """Crop an NHWC image tensor to the [x1, y1, x2, y2] region."""
    return crop_tensor4(image, crop_region)
|
||||
|
||||
|
||||
def to_latent_image(pixels, vae, vae_tiled_encode=False):
    """VAE-encode an NHWC pixel tensor into a latent dict.

    Args:
        pixels: image tensor of shape (N, H, W, C).
        vae: ComfyUI VAE instance.
        vae_tiled_encode: when True, use tiled encoding (lower VRAM use).

    Returns:
        The latent dict produced by the VAEEncode/VAEEncodeTiled node.
    """
    # NOTE: a dead self-comparison slice (pixels sliced to its own shape) was
    # removed here; it could never change the tensor.
    start = time.time()
    if vae_tiled_encode:
        # using default tile settings
        encoded = nodes.VAEEncodeTiled().encode(vae, pixels, 512, overlap=64)[0]
        logging.info(f"[Impact Pack] vae encoded (tiled) in {time.time() - start:.1f}s")
    else:
        encoded = nodes.VAEEncode().encode(vae, pixels)[0]
        logging.info(f"[Impact Pack] vae encoded in {time.time() - start:.1f}s")

    return encoded
|
||||
|
||||
|
||||
def empty_pil_tensor(w=64, h=64):
    """Return an all-black RGB image tensor of shape (1, h, w, 3)."""
    shape = (1, h, w, 3)
    return torch.zeros(shape, dtype=torch.float32)
|
||||
|
||||
|
||||
def make_2d_mask(mask):
    """Squeeze a 2/3/4-dim mask down to 2D (H, W)."""
    dims = len(mask.shape)
    if dims == 4:
        return mask.squeeze(0).squeeze(0)
    if dims == 3:
        return mask.squeeze(0)
    return mask
|
||||
|
||||
|
||||
def make_3d_mask(mask):
    """Coerce a 2/3/4-dim mask into 3D (N, H, W)."""
    dims = len(mask.shape)
    if dims == 4:
        return mask.squeeze(0)
    if dims == 2:
        return mask.unsqueeze(0)
    return mask
|
||||
|
||||
|
||||
def make_4d_mask(mask):
    """Coerce a 2/3/4-dim mask into 4D (N, C, H, W)."""
    dims = len(mask.shape)
    if dims == 3:
        return mask.unsqueeze(0)
    if dims == 2:
        return mask.unsqueeze(0).unsqueeze(0)
    return mask
|
||||
|
||||
|
||||
def is_same_device(a, b):
    """True when `a` and `b` (torch devices or device strings) share the
    same device type and index."""
    dev_a = torch.device(a) if isinstance(a, str) else a
    dev_b = torch.device(b) if isinstance(b, str) else b
    return (dev_a.type, dev_a.index) == (dev_b.type, dev_b.index)
|
||||
|
||||
|
||||
def collect_non_reroute_nodes(node_map, links, res, node_id):
    """Follow Reroute chains from `node_id`, appending the ids of the terminal
    (non-Reroute) nodes to `res` in place."""
    node_type = node_map[node_id]['type']
    if node_type in ('Reroute', 'Reroute (rgthree)'):
        # Recurse through every outgoing link of the reroute's single output.
        for link in node_map[node_id]['outputs'][0]['links']:
            collect_non_reroute_nodes(node_map, links, res, str(links[link][2]))
    else:
        res.append(node_id)
|
||||
|
||||
|
||||
from torchvision.transforms.functional import to_pil_image
|
||||
|
||||
|
||||
def resize_mask(mask, size):
    """Bilinearly resize a mask to `size` (H, W); returns a 3D (C, H, W) tensor."""
    mask4d = make_4d_mask(mask)
    resized = torch.nn.functional.interpolate(mask4d, size=size, mode='bilinear', align_corners=False)
    return resized.squeeze(0)
|
||||
|
||||
|
||||
def apply_mask_alpha_to_pil(decoded_pil, mask):
    """Attach `mask` as the alpha channel of a PIL image, returned as RGBA."""
    rgba = decoded_pil.convert('RGBA')
    rgba.putalpha(to_pil_image(mask))
    return rgba
|
||||
|
||||
|
||||
def flatten_mask(all_masks):
    """OR-merge a list of float masks into a single uint8 mask scaled to 0-255."""
    merged = (all_masks[0] * 255).to(torch.uint8)
    for extra in all_masks[1:]:
        merged |= (extra * 255).to(torch.uint8)

    return merged
|
||||
|
||||
|
||||
def try_install_custom_node(custom_node_url, msg):
    """Ask ComfyUI-Manager to install a custom node; log `msg` if unavailable."""
    try:
        import cm_global
        cm_global.try_call(
            api='cm.try-install-custom-node',
            sender="Impact Pack", custom_node_url=custom_node_url, msg=msg)
    except Exception:
        # Manager is missing or too old to expose the install API.
        logging.info(msg)
        logging.info("[Impact Pack] ComfyUI-Manager is outdated. The custom node installation feature is not available.")
|
||||
|
||||
|
||||
# author: Trung0246 --->
|
||||
class TautologyStr(str):
    """A str whose `!=` is always False, so it matches any type label."""

    def __ne__(self, other):
        return False
|
||||
|
||||
|
||||
class ByPassTypeTuple(tuple):
    """A tuple that answers every positive index with element 0, wrapping
    string elements in TautologyStr so type checks always pass."""

    def __getitem__(self, index):
        # Redirect any positive index to the first element.
        if index > 0:
            index = 0
        element = super().__getitem__(index)
        return TautologyStr(element) if isinstance(element, str) else element
|
||||
|
||||
|
||||
class NonListIterable:
    """Wrap a sequence so it indexes like a list without being a list instance."""

    def __init__(self, data):
        # the underlying indexable sequence
        self.data = data

    def __getitem__(self, index):
        return self.data[index]
|
||||
|
||||
|
||||
def add_folder_path_and_extensions(folder_name, full_folder_paths, extensions):
    """Register model folder paths and merge in their valid file extensions.

    Args:
        folder_name: key in folder_paths.folder_names_and_paths.
        full_folder_paths: iterable of folder paths to register.
        extensions: set of file extensions to associate with the folder.
    """
    # Register every path with ComfyUI's folder-path registry.
    for path in full_folder_paths:
        folder_paths.add_model_folder_path(folder_name, path)

    if folder_name in folder_paths.folder_names_and_paths:
        # Merge the new extensions into the existing registration.
        current_paths, current_extensions = folder_paths.folder_names_and_paths[folder_name]
        merged_extensions = current_extensions | extensions
        folder_paths.folder_names_and_paths[folder_name] = (current_paths, merged_extensions)
    else:
        # add_model_folder_path normally creates the entry (one path at a
        # time, with an empty extension set); as a fallback register all the
        # paths and extensions directly.
        folder_paths.folder_names_and_paths[folder_name] = (full_folder_paths, extensions)
|
||||
# <---
|
||||
|
||||
# wildcard trick is taken from pythongossss's
|
||||
class AnyType(str):
    """Wildcard type string: `!=` is always False, so it matches any type."""

    def __ne__(self, __value: object) -> bool:
        return False
|
||||
|
||||
# Shared wildcard type instance for node inputs/outputs that accept any type.
any_typ = AnyType("*")
|
||||
1251
custom_nodes/ComfyUI-Impact-Pack/modules/impact/wildcards.py
Normal file
1251
custom_nodes/ComfyUI-Impact-Pack/modules/impact/wildcards.py
Normal file
File diff suppressed because it is too large
Load Diff
83
custom_nodes/ComfyUI-Impact-Pack/modules/thirdparty/noise_nodes.py
vendored
Normal file
83
custom_nodes/ComfyUI-Impact-Pack/modules/thirdparty/noise_nodes.py
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
# Due to the current lack of maintenance for the `ComfyUI_Noise` extension,
|
||||
# I have copied the code from the applied PR.
|
||||
# https://github.com/BlenderNeko/ComfyUI_Noise/pull/13/files
|
||||
|
||||
import comfy
|
||||
import torch
|
||||
|
||||
class Unsampler:
    """ComfyUI node that 'unsamples' a latent: runs the sampler with a
    reversed sigma schedule so the latent is walked back toward noise.

    Vendored from the ComfyUI_Noise PR referenced in the file header.
    """

    @classmethod
    def INPUT_TYPES(s):
        # Standard ComfyUI node-input declaration.
        return {"required":
                {"model": ("MODEL",),
                 "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                 "end_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                 "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
                 "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                 "scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
                 "normalize": (["disable", "enable"],),
                 "positive": ("CONDITIONING",),
                 "negative": ("CONDITIONING",),
                 "latent_image": ("LATENT",),
                 }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "unsampler"

    CATEGORY = "sampling"

    def unsampler(self, model, cfg, sampler_name, steps, end_at_step, scheduler, normalize, positive, negative,
                  latent_image):
        """Run the reverse-sampling pass and return the noised latent.

        `end_at_step` counts from the end of the schedule; when `normalize`
        is enabled the result is shifted/scaled to zero mean and unit std.
        """
        normalize = normalize == "enable"
        device = comfy.model_management.get_torch_device()
        latent = latent_image
        latent_image = latent["samples"]

        # Convert "steps from the end" into "steps from the start" of the
        # flipped schedule.
        end_at_step = min(end_at_step, steps - 1)
        end_at_step = steps - end_at_step

        # No fresh noise is injected; the reversed schedule drives the latent.
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
        noise_mask = None
        if "noise_mask" in latent:
            noise_mask = comfy.sampler_helpers.prepare_mask(latent["noise_mask"], noise.shape, device)

        noise = noise.to(device)
        latent_image = latent_image.to(device)

        conds0 = \
            {"positive": comfy.sampler_helpers.convert_cond(positive),
             "negative": comfy.sampler_helpers.convert_cond(negative)}

        # Shallow-copy each cond so downstream mutation doesn't leak back.
        conds = {}
        for k in conds0:
            conds[k] = list(map(lambda a: a.copy(), conds0[k]))

        models, inference_memory = comfy.sampler_helpers.get_additional_models(conds, model.model_dtype())

        comfy.model_management.load_models_gpu([model] + models, model.memory_required(noise.shape) + inference_memory)

        sampler = comfy.samplers.KSampler(model, steps=steps, device=device, sampler=sampler_name,
                                          scheduler=scheduler, denoise=1.0, model_options=model.model_options)

        # Flip the schedule so sampling runs from clean toward noise; the
        # small epsilon keeps the first sigma non-zero.
        sigmas = sampler.sigmas.flip(0) + 0.0001

        pbar = comfy.utils.ProgressBar(steps)

        def callback(step, x0, x, total_steps):
            pbar.update_absolute(step + 1, total_steps)

        samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image,
                                 force_full_denoise=False, denoise_mask=noise_mask, sigmas=sigmas, start_step=0,
                                 last_step=end_at_step, callback=callback)
        if normalize:
            # technically doesn't normalize because unsampling is not guaranteed to end at a std given by the schedule
            samples -= samples.mean()
            samples /= samples.std()
        samples = samples.cpu()

        comfy.sampler_helpers.cleanup_additional_models(models)

        out = latent.copy()
        out["samples"] = samples
        return (out,)
|
||||
|
||||
Reference in New Issue
Block a user