Add custom nodes, Civitai loras (LFS), and vast.ai setup script
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled

Includes 30 custom nodes committed directly, 7 Civitai-exclusive
loras stored via Git LFS, and a setup script that installs all
dependencies and downloads HuggingFace-hosted models on vast.ai.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-09 00:55:26 +00:00
parent 2b70ab9ad0
commit f09734b0ee
2274 changed files with 748556 additions and 3 deletions

View File

@@ -0,0 +1,219 @@
#credit to huchenlei for this module
#from https://github.com/huchenlei/ComfyUI-layerdiffuse
import torch
import comfy.model_management
import comfy.lora
import copy
from typing import Optional
from enum import Enum
from comfy.utils import load_torch_file
from comfy.conds import CONDRegular
from comfy_extras.nodes_compositing import JoinImageWithAlpha
# Optional import: model.py degrades gracefully when diffusers is missing, but
# guard against any import-time failure and fall back to None placeholders so
# this module can still be loaded (callers must check for None before use).
try:
    from .model import ModelPatcher, TransparentVAEDecoder, calculate_weight_adjust_channel
except Exception:  # was a bare `except:` — keep the broad best-effort fallback, but do not swallow SystemExit/KeyboardInterrupt
    ModelPatcher, TransparentVAEDecoder, calculate_weight_adjust_channel = None, None, None
from .attension_sharing import AttentionSharingPatcher
from ...config import LAYER_DIFFUSION, LAYER_DIFFUSION_DIR, LAYER_DIFFUSION_VAE
from ...libs.utils import to_lora_patch_dict, get_local_filepath, get_sd_version

# Backward-compatible alias: external callers import this name from here.
load_layer_model_state_dict = load_torch_file
class LayerMethod(Enum):
    """Layer-diffusion generation modes.

    The string values are the user-facing method names looked up in the
    LAYER_DIFFUSION config table (see apply_layer_diffusion).
    """
    FG_ONLY_ATTN = "Attention Injection"
    FG_ONLY_CONV = "Conv Injection"
    FG_TO_BLEND = "Foreground"
    FG_BLEND_TO_BG = "Foreground to Background"
    BG_TO_BLEND = "Background"
    BG_BLEND_TO_FG = "Background to Foreground"
    EVERYTHING = "Everything"
class LayerDiffuse:
    """Helper implementing LayerDiffuse transparent-image generation.

    Credit to huchenlei for the original module:
    https://github.com/huchenlei/ComfyUI-layerdiffuse
    """

    def __init__(self) -> None:
        # Transparent VAE decoder; created lazily in layer_diffusion_decode().
        self.vae_transparent_decoder = None
        # Number of stacked frames used by SD1.5 attention sharing (1-3).
        self.frames = 1

    def get_layer_diffusion_method(self, method, has_blend_latent):
        """Resolve *method* (string or LayerMethod value) to a LayerMethod.

        When a blend latent is supplied, the one-step blend modes are upgraded
        to their two-step counterparts.
        """
        method = LayerMethod(method)
        if method == LayerMethod.BG_TO_BLEND and has_blend_latent:
            method = LayerMethod.BG_BLEND_TO_FG
        elif method == LayerMethod.FG_TO_BLEND and has_blend_latent:
            method = LayerMethod.FG_BLEND_TO_BG
        return method

    def apply_layer_c_concat(self, cond, uncond, c_concat):
        """Attach *c_concat* as a `c_concat` model cond on every entry of the
        positive and negative conditioning lists.

        Entries are shallow-copied, so the caller's lists are left untouched.
        Returns (new_cond, new_uncond).
        """

        def write_c_concat(cond):
            new_cond = []
            for t in cond:
                n = [t[0], t[1].copy()]
                if "model_conds" not in n[1]:
                    n[1]["model_conds"] = {}
                n[1]["model_conds"]["c_concat"] = CONDRegular(c_concat)
                new_cond.append(n)
            return new_cond

        return (write_c_concat(cond), write_c_concat(uncond))

    def apply_layer_diffusion(self, model, method, weight, samples, blend_samples, positive, negative, image=None, additional_cond=(None, None, None)):
        """Clone *model* and patch it with layer-diffusion weights for *method*.

        Returns (patched_model, positive, negative). Raises Exception when the
        method/model combination is unsupported or the batch size does not
        match the required frame count.
        """
        control_img: Optional[torch.TensorType] = None
        sd_version = get_sd_version(model)
        model_url = LAYER_DIFFUSION[method.value][sd_version]["model_url"]
        if image is not None:
            image = image.movedim(-1, 1)
        try:
            # Best-effort: patch weight merging so "diff" patches may grow the
            # channel count (see calculate_weight_adjust_channel in model.py).
            if hasattr(comfy.lora, "calculate_weight"):
                comfy.lora.calculate_weight = calculate_weight_adjust_channel(comfy.lora.calculate_weight)
            else:
                ModelPatcher.calculate_weight = calculate_weight_adjust_channel(ModelPatcher.calculate_weight)
        except Exception:  # was a bare `except:`; keep best-effort semantics
            pass
        if method in [LayerMethod.FG_ONLY_CONV, LayerMethod.FG_ONLY_ATTN] and sd_version == 'sd1':
            self.frames = 1
        elif method in [LayerMethod.BG_TO_BLEND, LayerMethod.FG_TO_BLEND, LayerMethod.BG_BLEND_TO_FG, LayerMethod.FG_BLEND_TO_BG] and sd_version == 'sd1':
            self.frames = 2
            batch_size, _, height, width = samples['samples'].shape
            if batch_size % 2 != 0:
                raise Exception("The batch size should be a multiple of 2. 批次大小需为2的倍数")
            control_img = image
        elif method == LayerMethod.EVERYTHING and sd_version == 'sd1':
            batch_size, _, height, width = samples['samples'].shape
            self.frames = 3
            if batch_size % 3 != 0:
                raise Exception("The batch size should be a multiple of 3. 批次大小需为3的倍数")
        if model_url is None:
            raise Exception(f"{method.value} is not supported for {sd_version} model")
        model_path = get_local_filepath(model_url, LAYER_DIFFUSION_DIR)
        layer_lora_state_dict = load_layer_model_state_dict(model_path)
        work_model = model.clone()
        if sd_version == 'sd1':
            # SD1.5 uses attention sharing instead of plain LoRA patches.
            patcher = AttentionSharingPatcher(
                work_model, self.frames, use_control=control_img is not None
            )
            patcher.load_state_dict(layer_lora_state_dict, strict=True)
            if control_img is not None:
                patcher.set_control(control_img)
        else:
            layer_lora_patch_dict = to_lora_patch_dict(layer_lora_state_dict)
            work_model.add_patches(layer_lora_patch_dict, weight)
        # cond_contact
        if method in [LayerMethod.FG_ONLY_ATTN, LayerMethod.FG_ONLY_CONV]:
            samp_model = work_model
        elif sd_version == 'sdxl':
            # SDXL conditions through c_concat of the (possibly blended) latent.
            if method in [LayerMethod.BG_TO_BLEND, LayerMethod.FG_TO_BLEND]:
                c_concat = model.model.latent_format.process_in(samples["samples"])
            else:
                c_concat = model.model.latent_format.process_in(torch.cat([samples["samples"], blend_samples["samples"]], dim=1))
            samp_model, positive, negative = (work_model,) + self.apply_layer_c_concat(positive, negative, c_concat)
        elif sd_version == 'sd1':
            # SD1.5 conditions via per-frame cond_overwrite in transformer_options.
            if method in [LayerMethod.BG_TO_BLEND, LayerMethod.BG_BLEND_TO_FG]:
                additional_cond = (additional_cond[0], None)
            elif method in [LayerMethod.FG_TO_BLEND, LayerMethod.FG_BLEND_TO_BG]:
                additional_cond = (additional_cond[1], None)
            work_model.model_options.setdefault("transformer_options", {})
            work_model.model_options["transformer_options"]["cond_overwrite"] = [
                cond[0][0] if cond is not None else None
                for cond in additional_cond
            ]
            samp_model = work_model
        return samp_model, positive, negative

    def join_image_with_alpha(self, image, alpha):
        """Write *alpha* into the 4th channel of *image* ([B, H, W, C] layout),
        appending an all-ones channel first when the input is RGB."""
        out = image.movedim(-1, 1)
        if out.shape[1] == 3:  # RGB
            out = torch.cat([out, torch.ones_like(out[:, :1, :, :])], dim=1)
        for i in range(out.shape[0]):
            out[i, 3, :, :] = alpha
        return out.movedim(1, -1)

    def image_to_alpha(self, image, latent):
        """Decode transparency for *image*/*latent* via the transparent VAE
        decoder and return (rgba_images, alpha)."""
        pixel = image.movedim(-1, 1)  # [B, H, W, C] => [B, C, H, W]
        decoded = []
        # Decode in sub-batches to bound peak memory.
        sub_batch_size = 16
        for start_idx in range(0, latent.shape[0], sub_batch_size):
            decoded.append(
                self.vae_transparent_decoder.decode_pixel(
                    pixel[start_idx: start_idx + sub_batch_size],
                    latent[start_idx: start_idx + sub_batch_size],
                )
            )
        pixel_with_alpha = torch.cat(decoded, dim=0)
        # [B, C, H, W] => [B, H, W, C]
        pixel_with_alpha = pixel_with_alpha.movedim(1, -1)
        image = pixel_with_alpha[..., 1:]
        alpha = pixel_with_alpha[..., 0]
        alpha = 1.0 - alpha
        try:
            # Newer ComfyUI exposes execute(); older builds only have
            # join_image_with_alpha(). Fall back on any failure.
            new_images, = JoinImageWithAlpha().execute(image, alpha)
        except Exception:  # was a bare `except:`
            new_images, = JoinImageWithAlpha().join_image_with_alpha(image, alpha)
        return new_images, alpha

    def make_3d_mask(self, mask):
        """Normalise a mask tensor to 3 dimensions ([B, H, W])."""
        if len(mask.shape) == 4:
            return mask.squeeze(0)
        elif len(mask.shape) == 2:
            return mask.unsqueeze(0)
        return mask

    def masks_to_list(self, masks):
        """Convert *masks* into a list of 3-D masks.

        NOTE: when *masks* is None this returns a 1-tuple containing a list
        with one empty 64x64 mask — the asymmetric return type is preserved
        for backward compatibility with existing callers.
        """
        if masks is None:
            empty_mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
            return ([empty_mask],)
        return [self.make_3d_mask(mask) for mask in masks]

    def layer_diffusion_decode(self, layer_diffusion_method, latent, blend_samples, samp_images, model):
        """Post-process sampled images, extracting transparency when the
        method/model pair supports it.

        Returns (new_images, samp_images, alpha); *alpha* is an empty list
        when no transparency decode was performed.
        """
        alpha = []
        if layer_diffusion_method is not None:
            sd_version = get_sd_version(model)
            if sd_version not in ['sdxl', 'sd1']:
                raise Exception("Only SDXL and SD1.5 model supported for Layer Diffusion")
            method = self.get_layer_diffusion_method(layer_diffusion_method, blend_samples is not None)
            sd15_allow = True if sd_version == 'sd1' and method in [LayerMethod.FG_ONLY_ATTN, LayerMethod.EVERYTHING, LayerMethod.BG_TO_BLEND, LayerMethod.BG_BLEND_TO_FG] else False
            sdxl_allow = True if sd_version == 'sdxl' and method in [LayerMethod.FG_ONLY_CONV, LayerMethod.FG_ONLY_ATTN, LayerMethod.BG_BLEND_TO_FG] else False
            if sdxl_allow or sd15_allow:
                if self.vae_transparent_decoder is None:
                    # Lazily download and build the transparent VAE decoder.
                    model_url = LAYER_DIFFUSION_VAE['decode'][sd_version]["model_url"]
                    if model_url is None:
                        raise Exception(f"{method.value} is not supported for {sd_version} model")
                    decoder_file = get_local_filepath(model_url, LAYER_DIFFUSION_DIR)
                    self.vae_transparent_decoder = TransparentVAEDecoder(
                        load_torch_file(decoder_file),
                        device=comfy.model_management.get_torch_device(),
                        dtype=(torch.float16 if comfy.model_management.should_use_fp16() else torch.float32),
                    )
                if method in [LayerMethod.EVERYTHING, LayerMethod.BG_BLEND_TO_FG, LayerMethod.BG_TO_BLEND]:
                    # Multi-frame output: only every `frames`-th image carries
                    # transparency; the rest pass through unchanged.
                    new_images = []
                    sliced_samples = {"samples": latent}
                    for index in range(len(samp_images)):
                        if index % self.frames == 0:
                            img = samp_images[index::self.frames]
                            alpha_images, _alpha = self.image_to_alpha(img, sliced_samples["samples"][index::self.frames])
                            alpha.append(self.make_3d_mask(_alpha[0]))
                            new_images.append(alpha_images[0])
                        else:
                            new_images.append(samp_images[index])
                else:
                    new_images, alpha = self.image_to_alpha(samp_images, latent)
            else:
                new_images = samp_images
        else:
            new_images = samp_images
        return (new_images, samp_images, alpha)

View File

@@ -0,0 +1,359 @@
# Currently only sd15
import functools
import torch
import einops
from comfy import model_management, utils
from comfy.ldm.modules.attention import optimized_attention
# SD1.5 UNet attention modules that receive attention sharing, in hijack
# order: self-attn (attn1) then cross-attn (attn2) for each transformer block,
# walking the input blocks, then the output blocks, then the middle block.
_sd15_attn_parents = (
    [f"input_blocks.{b}.1.transformer_blocks.0" for b in (1, 2, 4, 5, 7, 8)]
    + [f"output_blocks.{b}.1.transformer_blocks.0" for b in range(3, 12)]
    + ["middle_block.1.transformer_blocks.0"]
)
module_mapping_sd15 = {
    2 * index + offset: f"{parent}.attn{offset + 1}"
    for index, parent in enumerate(_sd15_attn_parents)
    for offset in (0, 1)
}
def compute_cond_mark(cond_or_uncond, sigmas):
    """Expand the per-chunk cond/uncond flags to one mark per batch element.

    Each flag in *cond_or_uncond* is repeated once per sigma, and the result
    is returned as a tensor on the same device/dtype as *sigmas*.
    """
    repeat = int(sigmas.shape[0])
    marks = [flag for flag in cond_or_uncond for _ in range(repeat)]
    return torch.Tensor(marks).to(sigmas)
class LoRALinearLayer(torch.nn.Module):
    """A low-rank (LoRA) residual applied on top of a wrapped Linear layer.

    The effective weight is ``org.weight + up @ down``; the original layer's
    bias is reused unchanged.
    """

    def __init__(self, in_features: int, out_features: int, rank: int = 256, org=None):
        super().__init__()
        self.down = torch.nn.Linear(in_features, rank, bias=False)
        self.up = torch.nn.Linear(rank, out_features, bias=False)
        # Held in a plain list so the wrapped layer is NOT registered as a
        # submodule (its weights stay owned by the original model).
        self.org = [org]

    def forward(self, h):
        base = self.org[0]
        merged_weight = base.weight.to(h) + torch.mm(self.up.weight, self.down.weight)
        merged_bias = base.bias.to(h) if base.bias is not None else None
        return torch.nn.functional.linear(h, merged_weight, merged_bias)
class AttentionSharingUnit(torch.nn.Module):
    """Drop-in replacement for an SD1.5 attention module that lets `frames`
    stacked latent layers share information.

    Each frame gets its own LoRA-adjusted q/k/v/out projections, and a second
    attention pass (`temporal_*`) attends across the frame axis. The module
    returns a residual (output minus input) because it is installed as an
    object patch over the original attention module.
    """

    # `transformer_options` passed to the most recent BasicTransformerBlock.forward
    # call. NOTE: deliberately a class-level mutable shared by all instances;
    # it is refreshed by the hijacked BasicTransformerBlock.forward below.
    transformer_options: dict = {}

    def __init__(self, module, frames=2, use_control=True, rank=256):
        """Wrap *module* (a ComfyUI attention block) for *frames* layers.

        use_control adds per-frame conv heads that inject encoded control
        signals; rank is the LoRA rank of the per-frame projections.
        """
        super().__init__()
        self.heads = module.heads
        self.frames = frames
        # Wrapped in a list so the original module is not registered as a
        # submodule of this unit.
        self.original_module = [module]
        q_in_channels, q_out_channels = (
            module.to_q.in_features,
            module.to_q.out_features,
        )
        k_in_channels, k_out_channels = (
            module.to_k.in_features,
            module.to_k.out_features,
        )
        v_in_channels, v_out_channels = (
            module.to_v.in_features,
            module.to_v.out_features,
        )
        o_in_channels, o_out_channels = (
            module.to_out[0].in_features,
            module.to_out[0].out_features,
        )
        hidden_size = k_out_channels
        # One LoRA projection per frame for each of q/k/v/out.
        self.to_q_lora = [
            LoRALinearLayer(q_in_channels, q_out_channels, rank, module.to_q)
            for _ in range(self.frames)
        ]
        self.to_k_lora = [
            LoRALinearLayer(k_in_channels, k_out_channels, rank, module.to_k)
            for _ in range(self.frames)
        ]
        self.to_v_lora = [
            LoRALinearLayer(v_in_channels, v_out_channels, rank, module.to_v)
            for _ in range(self.frames)
        ]
        self.to_out_lora = [
            LoRALinearLayer(o_in_channels, o_out_channels, rank, module.to_out[0])
            for _ in range(self.frames)
        ]
        self.to_q_lora = torch.nn.ModuleList(self.to_q_lora)
        self.to_k_lora = torch.nn.ModuleList(self.to_k_lora)
        self.to_v_lora = torch.nn.ModuleList(self.to_v_lora)
        self.to_out_lora = torch.nn.ModuleList(self.to_out_lora)
        # Projections for the cross-frame ("temporal") attention pass.
        self.temporal_i = torch.nn.Linear(
            in_features=hidden_size, out_features=hidden_size
        )
        self.temporal_n = torch.nn.LayerNorm(
            hidden_size, elementwise_affine=True, eps=1e-6
        )
        self.temporal_q = torch.nn.Linear(
            in_features=hidden_size, out_features=hidden_size
        )
        self.temporal_k = torch.nn.Linear(
            in_features=hidden_size, out_features=hidden_size
        )
        self.temporal_v = torch.nn.Linear(
            in_features=hidden_size, out_features=hidden_size
        )
        self.temporal_o = torch.nn.Linear(
            in_features=hidden_size, out_features=hidden_size
        )
        self.control_convs = None
        if use_control:
            # Per-frame conv heads mapping 256-channel control features to the
            # attention hidden size.
            self.control_convs = [
                torch.nn.Sequential(
                    torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1),
                    torch.nn.SiLU(),
                    torch.nn.Conv2d(256, hidden_size, kernel_size=1),
                )
                for _ in range(self.frames)
            ]
            self.control_convs = torch.nn.ModuleList(self.control_convs)
        # Dict of control feature maps keyed by spatial size (h*w); set by
        # AttentionSharingPatcher.set_control.
        self.control_signals = None

    def forward(self, h, context=None, value=None):
        # `value` is unused; the signature mirrors the patched module's.
        transformer_options = self.transformer_options
        # Split the batch into per-frame groups: (b f) d c -> f b d c.
        modified_hidden_states = einops.rearrange(
            h, "(b f) d c -> f b d c", f=self.frames
        )
        if self.control_convs is not None:
            # Inject the control signal matching this block's token count.
            context_dim = int(modified_hidden_states.shape[2])
            control_outs = []
            for f in range(self.frames):
                control_signal = self.control_signals[context_dim].to(
                    modified_hidden_states
                )
                control = self.control_convs[f](control_signal)
                control = einops.rearrange(control, "b c h w -> b (h w) c")
                control_outs.append(control)
            control_outs = torch.stack(control_outs, dim=0)
            modified_hidden_states = modified_hidden_states + control_outs.to(
                modified_hidden_states
            )
        if context is None:
            # Self-attention: the frames attend over their own states.
            framed_context = modified_hidden_states
        else:
            framed_context = einops.rearrange(
                context, "(b f) d c -> f b d c", f=self.frames
            )
        # 0/1 mark per sample telling cond from uncond chunks, framed.
        framed_cond_mark = einops.rearrange(
            compute_cond_mark(
                transformer_options["cond_or_uncond"],
                transformer_options["sigmas"],
            ),
            "(b f) -> f b",
            f=self.frames,
        ).to(modified_hidden_states)
        attn_outs = []
        for f in range(self.frames):
            fcf = framed_context[f]
            if context is not None:
                # Per-frame prompt override (set via model_options by
                # apply_layer_diffusion); blended only on uncond samples.
                cond_overwrite = transformer_options.get("cond_overwrite", [])
                if len(cond_overwrite) > f:
                    cond_overwrite = cond_overwrite[f]
                else:
                    cond_overwrite = None
                if cond_overwrite is not None:
                    cond_mark = framed_cond_mark[f][:, None, None]
                    fcf = cond_overwrite.to(fcf) * (1.0 - cond_mark) + fcf * cond_mark
            q = self.to_q_lora[f](modified_hidden_states[f])
            k = self.to_k_lora[f](fcf)
            v = self.to_v_lora[f](fcf)
            o = optimized_attention(q, k, v, self.heads)
            o = self.to_out_lora[f](o)
            # Reuse the original module's dropout (to_out[1]).
            o = self.original_module[0].to_out[1](o)
            attn_outs.append(o)
        attn_outs = torch.stack(attn_outs, dim=0)
        modified_hidden_states = modified_hidden_states + attn_outs.to(
            modified_hidden_states
        )
        modified_hidden_states = einops.rearrange(
            modified_hidden_states, "f b d c -> (b f) d c", f=self.frames
        )
        # Cross-frame attention: tokens attend across the frame axis.
        x = modified_hidden_states
        x = self.temporal_n(x)
        x = self.temporal_i(x)
        d = x.shape[1]
        x = einops.rearrange(x, "(b f) d c -> (b d) f c", f=self.frames)
        q = self.temporal_q(x)
        k = self.temporal_k(x)
        v = self.temporal_v(x)
        x = optimized_attention(q, k, v, self.heads)
        x = self.temporal_o(x)
        x = einops.rearrange(x, "(b d) f c -> (b f) d c", d=d)
        modified_hidden_states = modified_hidden_states + x
        # Residual form: the object-patch mechanism adds this to the input.
        return modified_hidden_states - h

    @classmethod
    def hijack_transformer_block(cls):
        """Monkey-patch BasicTransformerBlock.forward so every call records
        its transformer_options on this class for use in forward() above."""

        def register_get_transformer_options(func):
            @functools.wraps(func)
            def forward(self, x, context=None, transformer_options={}):
                # NOTE: mutable default kept to mirror the patched signature;
                # it is only read, never mutated here.
                cls.transformer_options = transformer_options
                return func(self, x, context, transformer_options)

            return forward

        from comfy.ldm.modules.attention import BasicTransformerBlock

        BasicTransformerBlock.forward = register_get_transformer_options(
            BasicTransformerBlock.forward
        )


# Install the hijack at import time so BasicTransformerBlock exposes its
# transformer_options to all AttentionSharingUnit instances.
AttentionSharingUnit.hijack_transformer_block()
class AdditionalAttentionCondsEncoder(torch.nn.Module):
    """Encodes an RGB control image into a pyramid of 256-channel feature
    maps, keyed by their spatial token count (h*w) for lookup by the
    attention-sharing units.
    """

    def __init__(self):
        super().__init__()

        def conv_silu_stack(plan):
            # Build Conv2d(3x3, padding=1) + SiLU pairs from
            # (in_channels, out_channels, stride) tuples.
            layers = []
            for cin, cout, stride in plan:
                layers.append(
                    torch.nn.Conv2d(cin, cout, kernel_size=3, padding=1, stride=stride)
                )
                layers.append(torch.nn.SiLU())
            return torch.nn.Sequential(*layers)

        # Stem: 3 -> 256 channels, downsampling 8x.  # 64*64*256
        self.blocks_0 = conv_silu_stack(
            [
                (3, 32, 1),
                (32, 32, 1),
                (32, 64, 2),
                (64, 64, 1),
                (64, 128, 2),
                (128, 128, 1),
                (128, 256, 2),
                (256, 256, 1),
            ]
        )
        self.blocks_1 = conv_silu_stack([(256, 256, 2), (256, 256, 1)])  # 32*32*256
        self.blocks_2 = conv_silu_stack([(256, 256, 2), (256, 256, 1)])  # 16*16*256
        self.blocks_3 = conv_silu_stack([(256, 256, 2), (256, 256, 1)])  # 8*8*256
        self.blks = [self.blocks_0, self.blocks_1, self.blocks_2, self.blocks_3]

    def __call__(self, h):
        # NOTE: overrides __call__ rather than forward (bypasses nn.Module
        # hooks), matching the original implementation.
        features = {}
        for block in self.blks:
            h = block(h)
            features[int(h.shape[2]) * int(h.shape[3])] = h
        return features
class HookerLayers(torch.nn.Module):
    """Thin container registering the attention-sharing units in a ModuleList
    so they are tracked as submodules (parameters, .half() casts, etc.)."""

    def __init__(self, layer_list):
        super().__init__()
        self.layers = torch.nn.ModuleList(layer_list)
class AttentionSharingPatcher(torch.nn.Module):
    """Installs AttentionSharingUnit object patches over all 32 SD1.5
    attention modules of *unet* and optionally wires in a control-image
    encoder."""

    def __init__(self, unet, frames=2, use_control=True, rank=256):
        super().__init__()
        # Drop other clones so stale object patches cannot linger.
        model_management.unload_model_clones(unet)
        units = []
        for i in range(32):
            real_key = module_mapping_sd15[i]
            attn_module = utils.get_attr(unet.model.diffusion_model, real_key)
            u = AttentionSharingUnit(
                attn_module, frames=frames, use_control=use_control, rank=rank
            )
            units.append(u)
            # Replace the attention module via ComfyUI's object-patch system.
            unet.add_object_patch("diffusion_model." + real_key, u)
        self.hookers = HookerLayers(units)
        if use_control:
            self.kwargs_encoder = AdditionalAttentionCondsEncoder()
        else:
            self.kwargs_encoder = None
        self.dtype = torch.float32
        if model_management.should_use_fp16(model_management.get_torch_device()):
            self.dtype = torch.float16
            self.hookers.half()
        return

    def set_control(self, img):
        """Encode *img* and hand the multi-scale control signals to every
        attention-sharing unit."""
        # Map image from [0, 1] to [-1, 1] on CPU before encoding.
        img = img.cpu().float() * 2.0 - 1.0
        signals = self.kwargs_encoder(img)
        for m in self.hookers.layers:
            m.control_signals = signals
        return

View File

@@ -0,0 +1,390 @@
import torch.nn as nn
import torch
import cv2
import numpy as np
import comfy.model_management
from comfy.model_patcher import ModelPatcher
from tqdm import tqdm
from typing import Optional, Tuple
from ...libs.utils import install_package
from packaging import version
try:
install_package("diffusers", "0.27.2", True, "0.25.0")
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers import __version__
if __version__:
if version.parse(__version__) < version.parse("0.26.0"):
from diffusers.models.unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
else:
from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
import functools
def zero_module(module):
    """Zero out every parameter of *module* in place and return it."""
    with torch.no_grad():
        for param in module.parameters():
            param.zero_()
    return module
class LatentTransparencyOffsetEncoder(torch.nn.Module):
    """Encodes a 4-channel input into a 4-channel latent offset, downsampling
    8x. The final projection is zero-initialised so the offset starts as a
    no-op."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # (in_channels, out_channels, stride) for each Conv2d(3x3) + SiLU pair.
        channel_plan = [
            (4, 32, 1),
            (32, 32, 1),
            (32, 64, 2),
            (64, 64, 1),
            (64, 128, 2),
            (128, 128, 1),
            (128, 256, 2),
            (256, 256, 1),
        ]
        layers = []
        for cin, cout, stride in channel_plan:
            layers.append(
                torch.nn.Conv2d(cin, cout, kernel_size=3, padding=1, stride=stride)
            )
            layers.append(nn.SiLU())
        layers.append(
            zero_module(torch.nn.Conv2d(256, 4, kernel_size=3, padding=1, stride=1))
        )
        self.blocks = torch.nn.Sequential(*layers)

    def __call__(self, x):
        # NOTE: overrides __call__ rather than forward, as in the original.
        return self.blocks(x)
# 1024 * 1024 * 3 -> 16 * 16 * 512 -> 1024 * 1024 * 3
class UNet1024(ModelMixin, ConfigMixin):
    """Pixel-space UNet used as the transparent VAE decoder head.

    Takes a 3-channel image plus a 4-channel SD latent (injected at the
    third down block) and predicts a 4-channel output (alpha + RGB).
    Built from diffusers 2D blocks; config is captured by
    @register_to_config.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = (
            "DownBlock2D",
            "DownBlock2D",
            "DownBlock2D",
            "DownBlock2D",
            "AttnDownBlock2D",
            "AttnDownBlock2D",
            "AttnDownBlock2D",
        ),
        up_block_types: Tuple[str] = (
            "AttnUpBlock2D",
            "AttnUpBlock2D",
            "AttnUpBlock2D",
            "UpBlock2D",
            "UpBlock2D",
            "UpBlock2D",
            "UpBlock2D",
        ),
        block_out_channels: Tuple[int] = (32, 32, 64, 128, 256, 512, 512),
        layers_per_block: int = 2,
        mid_block_scale_factor: float = 1,
        downsample_padding: int = 1,
        downsample_type: str = "conv",
        upsample_type: str = "conv",
        dropout: float = 0.0,
        act_fn: str = "silu",
        attention_head_dim: Optional[int] = 8,
        norm_num_groups: int = 4,
        norm_eps: float = 1e-5,
    ):
        super().__init__()

        # input
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1)
        )
        # Zero-initialised so the latent injection starts as a no-op.
        self.latent_conv_in = zero_module(
            nn.Conv2d(4, block_out_channels[2], kernel_size=1)
        )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=None,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=(
                    attention_head_dim
                    if attention_head_dim is not None
                    else output_channel
                ),
                downsample_padding=downsample_padding,
                resnet_time_scale_shift="default",
                downsample_type=downsample_type,
                dropout=dropout,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            temb_channels=None,
            dropout=dropout,
            resnet_eps=norm_eps,
            resnet_act_fn=act_fn,
            output_scale_factor=mid_block_scale_factor,
            resnet_time_scale_shift="default",
            attention_head_dim=(
                attention_head_dim
                if attention_head_dim is not None
                else block_out_channels[-1]
            ),
            resnet_groups=norm_num_groups,
            attn_groups=None,
            add_attention=True,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[
                min(i + 1, len(block_out_channels) - 1)
            ]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block + 1,
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=None,
                add_upsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=(
                    attention_head_dim
                    if attention_head_dim is not None
                    else output_channel
                ),
                resnet_time_scale_shift="default",
                upsample_type=upsample_type,
                dropout=dropout,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        self.conv_norm_out = nn.GroupNorm(
            num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
        )
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(
            block_out_channels[0], out_channels, kernel_size=3, padding=1
        )

    def forward(self, x, latent):
        """Run the UNet on image *x* with SD latent *latent* injected at the
        third down block; returns the 4-channel prediction."""
        sample_latent = self.latent_conv_in(latent)
        sample = self.conv_in(x)
        emb = None  # no timestep embedding is used

        down_block_res_samples = (sample,)
        for i, downsample_block in enumerate(self.down_blocks):
            if i == 3:
                # Latent injection point (resolution matches conv output here).
                sample = sample + sample_latent

            sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
            down_block_res_samples += res_samples

        sample = self.mid_block(sample, emb)

        for upsample_block in self.up_blocks:
            # Consume the matching skip connections from the down path.
            res_samples = down_block_res_samples[-len(upsample_block.resnets):]
            down_block_res_samples = down_block_res_samples[
                : -len(upsample_block.resnets)
            ]
            sample = upsample_block(sample, res_samples, emb)

        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
def checkerboard(shape):
    """Return an integer array of *shape* holding a 0/1 checkerboard pattern
    (element value is the parity of the sum of its indices)."""
    axes = [np.arange(n) for n in shape]
    grids = np.meshgrid(*axes, indexing="ij")
    return sum(grids) % 2
def fill_checkerboard_bg(y: torch.Tensor) -> torch.Tensor:
    """Composite the RGB channels of *y* (layout [B, H, W, 1+3], alpha first)
    over a dimmed checkerboard background for visualisation."""
    alpha = y[..., :1]
    fg = y[..., 1:]
    B, H, W, C = fg.shape
    # 64px cells, rendered at low resolution and nearest-upscaled.
    pattern = checkerboard(shape=(H // 64, W // 64))
    pattern = cv2.resize(pattern, (W, H), interpolation=cv2.INTER_NEAREST)
    # Compress the 0/1 pattern into a faint gray range around 0.5.
    pattern = (0.5 + (pattern - 0.5) * 0.1)[None, ..., None]
    pattern = torch.from_numpy(pattern).to(fg)
    return fg * alpha + pattern * (1 - alpha)
class TransparentVAEDecoder:
    """Wraps a UNet1024 checkpoint that recovers an alpha channel from a
    decoded image plus its SD latent."""

    def __init__(self, sd, device, dtype):
        # sd: state dict for UNet1024; device/dtype control where inference runs.
        self.load_device = device
        self.dtype = dtype

        model = UNet1024(in_channels=3, out_channels=4)
        model.load_state_dict(sd, strict=True)
        model.to(self.load_device, dtype=self.dtype)
        model.eval()
        self.model = model

    @torch.no_grad()
    def estimate_single_pass(self, pixel, latent):
        """One forward pass of the decoder UNet."""
        y = self.model(pixel, latent)
        return y

    @torch.no_grad()
    def estimate_augmented(self, pixel, latent):
        """Test-time augmentation: run the 8 flip/rotation variants, undo each
        transform, and take the per-pixel median of the results."""
        # (flip, number of 90-degree rotations) for each augmented pass.
        args = [
            [False, 0],
            [False, 1],
            [False, 2],
            [False, 3],
            [True, 0],
            [True, 1],
            [True, 2],
            [True, 3],
        ]

        result = []

        for flip, rok in tqdm(args):
            feed_pixel = pixel.clone()
            feed_latent = latent.clone()

            if flip:
                feed_pixel = torch.flip(feed_pixel, dims=(3,))
                feed_latent = torch.flip(feed_latent, dims=(3,))

            feed_pixel = torch.rot90(feed_pixel, k=rok, dims=(2, 3))
            feed_latent = torch.rot90(feed_latent, k=rok, dims=(2, 3))

            eps = self.estimate_single_pass(feed_pixel, feed_latent).clip(0, 1)
            # Undo the rotation, then the flip, to re-align the prediction.
            eps = torch.rot90(eps, k=-rok, dims=(2, 3))

            if flip:
                eps = torch.flip(eps, dims=(3,))

            result += [eps]

        result = torch.stack(result, dim=0)
        median = torch.median(result, dim=0).values
        return median

    @torch.no_grad()
    def decode_pixel(
        self, pixel: torch.TensorType, latent: torch.TensorType
    ) -> torch.TensorType:
        # pixel.shape = [B, C=3, H, W]
        assert pixel.shape[1] == 3
        pixel_device = pixel.device
        pixel_dtype = pixel.dtype

        pixel = pixel.to(device=self.load_device, dtype=self.dtype)
        latent = latent.to(device=self.load_device, dtype=self.dtype)
        # y.shape = [B, C=4, H, W]
        y = self.estimate_augmented(pixel, latent)
        y = y.clip(0, 1)
        assert y.shape[1] == 4
        # Restore image to original device of input image.
        return y.to(pixel_device, dtype=pixel_dtype)
def calculate_weight_adjust_channel(func):
    """Patches ComfyUI's LoRA weight application to accept multi-channel inputs.

    Wraps *func* (the original calculate_weight) so that after it runs, any
    4-D "diff" patch whose shape exceeds the target weight's shape causes the
    target to be zero-padded up to the element-wise max shape before the diff
    is added. This lets layer-diffusion patches grow conv channel counts.
    """

    @functools.wraps(func)
    def calculate_weight(
        patches, weight: torch.Tensor, key: str, intermediate_type=torch.float32
    ) -> torch.Tensor:
        weight = func(patches, weight, key, intermediate_type)

        for p in patches:
            alpha = p[0]
            v = p[1]

            # The recursion call should be handled in the main func call.
            if isinstance(v, list):
                continue

            # Reset per patch: the original code left `patch_type` unbound (or
            # stale from a previous iteration) for tuple lengths other than
            # 1 or 2, risking an UnboundLocalError / wrong-branch reuse.
            patch_type = None
            if len(v) == 1:
                patch_type = "diff"
            elif len(v) == 2:
                patch_type = v[0]
                v = v[1]

            if patch_type == "diff":
                w1 = v[0]
                # Only act on effective 4-D diffs whose shape differs.
                if all(
                    (
                        alpha != 0.0,
                        w1.shape != weight.shape,
                        w1.ndim == weight.ndim == 4,
                    )
                ):
                    new_shape = [max(n, m) for n, m in zip(weight.shape, w1.shape)]
                    print(
                        f"Merged with {key} channel changed from {weight.shape} to {new_shape}"
                    )
                    new_diff = alpha * comfy.model_management.cast_to_device(
                        w1, weight.device, weight.dtype
                    )
                    # Zero-pad the existing weight into the enlarged tensor,
                    # then add the diff over its own extent.
                    new_weight = torch.zeros(size=new_shape).to(weight)
                    new_weight[
                        : weight.shape[0],
                        : weight.shape[1],
                        : weight.shape[2],
                        : weight.shape[3],
                    ] = weight
                    new_weight[
                        : new_diff.shape[0],
                        : new_diff.shape[1],
                        : new_diff.shape[2],
                        : new_diff.shape[3],
                    ] += new_diff
                    new_weight = new_weight.contiguous().clone()
                    weight = new_weight
        return weight

    return calculate_weight
except ImportError:
    # diffusers is unavailable (or the install attempt failed): expose None
    # placeholders so importing modules can detect the failure instead of
    # crashing at import time.
    ModelMixin = None
    ConfigMixin = None
    TransparentVAEDecoder = None
    calculate_weight_adjust_channel = None
    print("\33[33mModule 'diffusers' load failed. If you don't have it installed, do it:\033[0m")
    print("\33[33mpip install diffusers\033[0m")