Add custom nodes, Civitai loras (LFS), and vast.ai setup script
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled
Includes 30 custom nodes committed directly, 7 Civitai-exclusive loras stored via Git LFS, and a setup script that installs all dependencies and downloads HuggingFace-hosted models on vast.ai. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
1358
custom_nodes/ComfyUI-Easy-Use/py/nodes/adapter.py
Normal file
1358
custom_nodes/ComfyUI-Easy-Use/py/nodes/adapter.py
Normal file
File diff suppressed because it is too large
Load Diff
139
custom_nodes/ComfyUI-Easy-Use/py/nodes/api.py
Normal file
139
custom_nodes/ComfyUI-Easy-Use/py/nodes/api.py
Normal file
@@ -0,0 +1,139 @@
|
||||
import comfy.utils
|
||||
from ..libs.api.fluxai import fluxaiAPI
|
||||
from ..libs.api.bizyair import bizyairAPI, encode_data
|
||||
from nodes import NODE_CLASS_MAPPINGS as ALL_NODE_CLASS_MAPPINGS
|
||||
|
||||
class joyCaption2API:
    """Caption an image through the BizyAir JoyCaption2 remote endpoint.

    Builds a request payload from the node widgets, downsizes oversized
    images first, then delegates the actual call to ``bizyairAPI.joyCaption``.
    Subclasses only override ``API_URL`` to target newer JoyCaption versions.
    """

    # Endpoint path. Fixed: original used an f-string with no placeholders (F541).
    API_URL = "/supernode/joycaption2"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "do_sample": ([True, False],),
                "temperature": (
                    "FLOAT",
                    {
                        "default": 0.5,
                        "min": 0.0,
                        "max": 2.0,
                        "step": 0.01,
                        "round": 0.001,
                        "display": "number",
                    },
                ),
                "max_tokens": (
                    "INT",
                    {
                        "default": 256,
                        "min": 16,
                        "max": 512,
                        "step": 16,
                        "display": "number",
                    },
                ),
                "caption_type": (
                    [
                        "Descriptive",
                        "Descriptive (Informal)",
                        "Training Prompt",
                        "MidJourney",
                        "Booru tag list",
                        "Booru-like tag list",
                        "Art Critic",
                        "Product Listing",
                        "Social Media Post",
                    ],
                ),
                # Named lengths plus explicit word counts 20..260 in steps of 10.
                "caption_length": (
                    ["any", "very short", "short", "medium-length", "long", "very long"]
                    + [str(i) for i in range(20, 261, 10)],
                ),
                "extra_options": (
                    "STRING",
                    {
                        "placeholder": "Extra options(e.g):\nIf there is a person/character in the image you must refer to them as {name}.",
                        "tooltip": "Extra options for the model",
                        "multiline": True,
                    },
                ),
                "name_input": (
                    "STRING",
                    {
                        "default": "",
                        "tooltip": "Name input is only used if an Extra Option is selected that requires it.",
                    },
                ),
                "custom_prompt": (
                    "STRING",
                    {
                        "default": "",
                        "multiline": True,
                    },
                ),
            },
            "optional": {
                "apikey_override": ("STRING", {"default": "", "forceInput": True, "tooltip": "Override the API key in the local config"}),
            },
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("caption",)

    FUNCTION = "joycaption"
    OUTPUT_NODE = False

    CATEGORY = "EasyUse/API"

    def joycaption(
        self,
        image,
        do_sample,
        temperature,
        max_tokens,
        caption_type,
        caption_length,
        extra_options,
        name_input,
        custom_prompt,
        apikey_override=None,
    ):
        """Send *image* plus the caption settings to the API and return the caption.

        Returns a 1-tuple ``(caption,)`` as required by the node contract.
        """
        pbar = comfy.utils.ProgressBar(100)
        pbar.update_absolute(10)
        # The remote endpoint limits image size; downscale before sending.
        SIZE_LIMIT = 1536
        _, w, h, c = image.shape
        if w > SIZE_LIMIT or h > SIZE_LIMIT:
            node_class = ALL_NODE_CLASS_MAPPINGS['easy imageScaleDownToSize']
            image, = node_class().image_scale_down_to_size(image, SIZE_LIMIT, True)

        payload = {
            # The image tensor itself is attached by the API helper, not here.
            "image": None,
            # Fixed: `do_sample == True` replaced with idiomatic bool coercion.
            "do_sample": bool(do_sample),
            "temperature": temperature,
            "max_new_tokens": max_tokens,
            "caption_type": caption_type,
            "caption_length": caption_length,
            "extra_options": [extra_options],
            "name_input": name_input,
            "custom_prompt": custom_prompt,
        }

        pbar.update_absolute(30)
        caption = bizyairAPI.joyCaption(payload, image, apikey_override, API_URL=self.API_URL)

        pbar.update_absolute(100)
        return (caption,)
|
||||
|
||||
class joyCaption3API(joyCaption2API):
    """JoyCaption3 variant: identical inputs/behavior, newer remote endpoint."""

    # Fixed: original used an f-string with no placeholders (F541).
    API_URL = "/supernode/joycaption3"
|
||||
|
||||
# Registration table: maps the internal node id to its implementing class.
NODE_CLASS_MAPPINGS = {
    "easy joyCaption2API": joyCaption2API,
    "easy joyCaption3API": joyCaption3API,
}

# Human-readable labels shown in the ComfyUI node picker for the ids above.
NODE_DISPLAY_NAME_MAPPINGS = {
    "easy joyCaption2API": "JoyCaption2 (BizyAIR)",
    "easy joyCaption3API": "JoyCaption3 (BizyAIR)",
}
|
||||
521
custom_nodes/ComfyUI-Easy-Use/py/nodes/deprecated.py
Normal file
521
custom_nodes/ComfyUI-Easy-Use/py/nodes/deprecated.py
Normal file
@@ -0,0 +1,521 @@
|
||||
import numpy as np
|
||||
import os
|
||||
import json
|
||||
import torch
|
||||
import folder_paths
|
||||
import comfy
|
||||
import comfy.model_management
|
||||
from PIL import Image
|
||||
from PIL.PngImagePlugin import PngInfo
|
||||
from nodes import ConditioningSetMask, RepeatLatentBatch
|
||||
from comfy_extras.nodes_mask import LatentCompositeMasked
|
||||
from ..libs.log import log_node_info, log_node_warn
|
||||
from ..libs.adv_encode import advanced_encode
|
||||
from ..libs.utils import AlwaysEqualProxy
|
||||
any_type = AlwaysEqualProxy("*")
|
||||
|
||||
|
||||
class If:
    """Ternary selector node: outputs `if` when `any` is truthy, else `else`."""

    @classmethod
    def INPUT_TYPES(s):
        # All three sockets accept any type (wildcard proxy).
        required = {
            "any": (any_type,),
            "if": (any_type,),
            "else": (any_type,),
        }
        return {"required": required}

    RETURN_TYPES = (any_type,)
    RETURN_NAMES = ("?",)
    FUNCTION = "execute"
    CATEGORY = "EasyUse/🚫 Deprecated"
    DEPRECATED = True

    def execute(self, *args, **kwargs):
        # 'if' and 'else' are Python keywords, so inputs arrive via kwargs.
        branch = 'if' if kwargs['any'] else 'else'
        return (kwargs[branch],)
|
||||
|
||||
|
||||
class poseEditor:
    """Deprecated frontend-only pose editor; the backend produces no outputs."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("STRING", {"default": ""})}}

    FUNCTION = "output_pose"
    CATEGORY = "EasyUse/🚫 Deprecated"
    DEPRECATED = True
    RETURN_TYPES = ()
    RETURN_NAMES = ()

    def output_pose(self, image):
        # All editing happens in the browser widget; nothing to compute here.
        return ()
|
||||
|
||||
|
||||
class imageToMask:
    """Deprecated: extract one RGB channel of an image as a single-channel MASK."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "image": ("IMAGE",),
            "channel": (['red', 'green', 'blue'],),
        }
        }

    RETURN_TYPES = ("MASK",)
    FUNCTION = "convert"
    CATEGORY = "EasyUse/🚫 Deprecated"
    DEPRECATED = True

    def convert_to_single_channel(self, image, channel='red'):
        """Return an RGB PIL image whose three bands all equal *channel*."""
        from PIL import Image
        # Work in RGB so individual bands are addressable.
        image = image.convert('RGB')

        # Dispatch table replaces the original if/elif chain.
        band_index = {'red': 0, 'green': 1, 'blue': 2}
        if channel not in band_index:
            raise ValueError(
                "Invalid channel option. Please choose 'red', 'green', or 'blue'.")
        channel_img = image.split()[band_index[channel]].convert('L')

        # Replicate the greyscale band into all three RGB channels.
        channel_img = Image.merge(
            'RGB', (channel_img, channel_img, channel_img))

        return channel_img

    def convert(self, image, channel='red'):
        from ..libs.image import pil2tensor, tensor2pil
        mono = self.convert_to_single_channel(tensor2pil(image), channel)
        tensor = pil2tensor(mono)
        # Mean over the channel axis collapses the replicated bands to a mask.
        return (tensor.squeeze().mean(2),)
|
||||
|
||||
# 显示推理时间
|
||||
class showSpentTime:
    """Deprecated UI node that displays the sampling time stored in the pipe."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "pipe": ("PIPE_LINE",),
            },
            "hidden": {
                "unique_id": "UNIQUE_ID",
                "extra_pnginfo": "EXTRA_PNGINFO",
            },
        }

    FUNCTION = "notify"
    OUTPUT_NODE = True
    CATEGORY = "EasyUse/🚫 Deprecated"
    DEPRECATED = True
    RETURN_TYPES = ()
    RETURN_NAMES = ()

    def notify(self, pipe, spent_time=None, unique_id=None, extra_pnginfo=None):
        # When workflow metadata is available, persist the value onto this
        # node's widget so it survives a page reload.
        if unique_id and extra_pnginfo and "workflow" in extra_pnginfo:
            workflow = extra_pnginfo["workflow"]
            node = next((x for x in workflow["nodes"] if str(x["id"]) == unique_id), None)
            if node:
                spent_time = pipe['loader_settings'].get('spent_time', '')
                node["widgets_values"] = [spent_time]

        return {"ui": {"text": [spent_time]}, "result": {}}
|
||||
|
||||
|
||||
# Multiply a latent by the sampler sigma span.
class latentNoisy:
    """Deprecated: scale a (generated or supplied) latent by the sigma
    difference of a KSampler schedule between ``start_at_step`` and
    ``end_at_step``, normalized by the model's latent scale factor.

    Returns ``(pipe, latent, sigma)``.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
            "scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
            "steps": ("INT", {"default": 10000, "min": 0, "max": 10000}),
            "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
            "end_at_step": ("INT", {"default": 10000, "min": 1, "max": 10000}),
            "source": (["CPU", "GPU"],),
            "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
        },
            "optional": {
                "pipe": ("PIPE_LINE",),
                "optional_model": ("MODEL",),
                "optional_latent": ("LATENT",)
            }}

    RETURN_TYPES = ("PIPE_LINE", "LATENT", "FLOAT",)
    RETURN_NAMES = ("pipe", "latent", "sigma",)
    FUNCTION = "run"
    DEPRECATED = True

    CATEGORY = "EasyUse/🚫 Deprecated"

    def run(self, sampler_name, scheduler, steps, start_at_step, end_at_step, source, seed, pipe=None, optional_model=None, optional_latent=None):
        # NOTE(review): with no pipe AND no optional_model/optional_latent this
        # dereferences pipe[...] and will raise — callers are expected to
        # supply at least a pipe.
        model = optional_model if optional_model is not None else pipe["model"]
        batch_size = pipe["loader_settings"]["batch_size"]
        empty_latent_height = pipe["loader_settings"]["empty_latent_height"]
        empty_latent_width = pipe["loader_settings"]["empty_latent_width"]

        if optional_latent is not None:
            samples = optional_latent
        else:
            # Generate deterministic gaussian noise; latent is 1/8 of pixel size.
            torch.manual_seed(seed)
            if source == "CPU":
                device = "cpu"
            else:
                device = comfy.model_management.get_torch_device()
            noise = torch.randn((batch_size, 4, empty_latent_height // 8, empty_latent_width // 8), dtype=torch.float32,
                                device=device).cpu()

            samples = {"samples": noise}

        device = comfy.model_management.get_torch_device()
        # Clamp the step window into the schedule's valid range.
        end_at_step = min(steps, end_at_step)
        start_at_step = min(start_at_step, end_at_step)
        comfy.model_management.load_model_gpu(model)
        model_patcher = comfy.model_patcher.ModelPatcher(model.model, load_device=device, offload_device=comfy.model_management.unet_offload_device())
        sampler = comfy.samplers.KSampler(model_patcher, steps=steps, device=device, sampler=sampler_name,
                                          scheduler=scheduler, denoise=1.0, model_options=model.model_options)
        sigmas = sampler.sigmas
        # Sigma span over the window, in latent-format units.
        sigma = sigmas[start_at_step] - sigmas[end_at_step]
        sigma /= model.model.latent_format.scale_factor
        sigma = sigma.cpu().numpy()

        samples_out = samples.copy()

        s1 = samples["samples"]
        samples_out["samples"] = s1 * sigma

        if pipe is None:
            pipe = {}
        new_pipe = {
            **pipe,
            "samples": samples_out
        }
        del pipe

        return (new_pipe, samples_out, sigma)
|
||||
|
||||
# Masked latent composite with per-text conditioning.
class latentCompositeMaskedWithCond:
    """Deprecated: for each text in ``text_combine``, build a positive
    conditioning that blends the pipe's existing conditioning (masked by
    ``source_mask``) with a freshly encoded prompt (masked by
    ``destination_mask``), then composite ``source_latent`` onto the pipe's
    latent. Returns ``(pipe, latent, conditioning_list)``; only the
    conditioning output is a list.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "pipe": ("PIPE_LINE",),
                "text_combine": ("LIST",),
                "source_latent": ("LATENT",),
                "source_mask": ("MASK",),
                "destination_mask": ("MASK",),
                "text_combine_mode": (["add", "replace", "cover"], {"default": "add"}),
                "replace_text": ("STRING", {"default": ""})
            },
            "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"},
        }

    # Only the third output (conditioning) is emitted as a list.
    OUTPUT_IS_LIST = (False, False, True)

    RETURN_TYPES = ("PIPE_LINE", "LATENT", "CONDITIONING")
    RETURN_NAMES = ("pipe", "latent", "conditioning",)
    FUNCTION = "run"

    CATEGORY = "EasyUse/🚫 Deprecated"
    DEPRECATED = True

    def run(self, pipe, text_combine, source_latent, source_mask, destination_mask, text_combine_mode, replace_text, prompt=None, extra_pnginfo=None, my_unique_id=None):
        positive = None
        clip = pipe["clip"]
        destination_latent = pipe["samples"]

        conds = []

        for text in text_combine:
            # Build the prompt text according to the combine mode:
            # cover = use text as-is; replace = substitute replace_text in the
            # pipe's prompt; add (default) = append text to the pipe's prompt.
            if text_combine_mode == 'cover':
                positive = text
            elif text_combine_mode == 'replace' and replace_text != '':
                positive = pipe["loader_settings"]["positive"].replace(replace_text, text)
            else:
                positive = pipe["loader_settings"]["positive"] + ',' + text
            positive_token_normalization = pipe["loader_settings"]["positive_token_normalization"]
            positive_weight_interpretation = pipe["loader_settings"]["positive_weight_interpretation"]
            a1111_prompt_style = pipe["loader_settings"]["a1111_prompt_style"]
            positive_cond = pipe["positive"]

            log_node_warn("Positive encoding...")
            steps = pipe["loader_settings"]["steps"] if "steps" in pipe["loader_settings"] else 1
            positive_embeddings_final = advanced_encode(clip, positive,
                                                        positive_token_normalization,
                                                        positive_weight_interpretation, w_max=1.0,
                                                        apply_to_pooled='enable', a1111_prompt_style=a1111_prompt_style, steps=steps)

            # Mask the original conditioning to the source region and the new
            # encoding to the destination region, then merge the two.
            (cond_1,) = ConditioningSetMask().append(positive_cond, source_mask, "default", 1)
            (cond_2,) = ConditioningSetMask().append(positive_embeddings_final, destination_mask, "default", 1)
            positive_cond = cond_1 + cond_2

            conds.append(positive_cond)
        # Composite the source latent onto the destination at origin (0, 0).
        (samples,) = LatentCompositeMasked().composite(destination_latent, source_latent, 0, 0, False)

        new_pipe = {
            **pipe,
            "samples": samples,
            # Record the last prompt built in the loop.
            "loader_settings": {
                **pipe["loader_settings"],
                "positive": positive,
            }
        }

        del pipe

        return (new_pipe, samples, conds)
|
||||
|
||||
# Inject noise into a latent.
class injectNoiseToLatent:
    """Deprecated: blend a noise latent into an input latent.

    The base latent comes from (in priority order) an image encoded through
    the pipe's VAE, an explicit ``latent`` input, or a clone of the noise
    itself. Supports averaging, normalization, masked blending, and mixing in
    extra seeded gaussian noise. Returns a 1-tuple ``(latent_dict,)``.

    Raises:
        Exception: if no noise is provided directly or via the pipe.
        ValueError: if latent and noise shapes differ.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "strength": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 200.0, "step": 0.0001}),
            "normalize": ("BOOLEAN", {"default": False}),
            "average": ("BOOLEAN", {"default": False}),
        },
            "optional": {
                "pipe_to_noise": ("PIPE_LINE",),
                "image_to_latent": ("IMAGE",),
                "latent": ("LATENT",),
                "noise": ("LATENT",),
                "mask": ("MASK",),
                "mix_randn_amount": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.001}),
                "seed": ("INT", {"default": 123, "min": 0, "max": 0xffffffffffffffff, "step": 1}),
            }
        }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "inject"
    CATEGORY = "EasyUse/🚫 Deprecated"
    DEPRECATED = True

    def inject(self, strength, normalize, average, pipe_to_noise=None, noise=None, image_to_latent=None, latent=None, mix_randn_amount=0, mask=None, seed=None):
        # BUGFIX: the original evaluated pipe_to_noise["vae"] in BOTH branches
        # of the conditional expression, raising TypeError whenever
        # pipe_to_noise was None. Fall back to None instead.
        vae = pipe_to_noise["vae"] if pipe_to_noise is not None else None
        batch_size = pipe_to_noise["loader_settings"]["batch_size"] if pipe_to_noise is not None and "batch_size" in pipe_to_noise["loader_settings"] else 1
        if noise is None and pipe_to_noise is not None:
            noise = pipe_to_noise["samples"]
        elif noise is None:
            raise Exception("InjectNoiseToLatent: No noise provided")

        # Choose the base latent: encoded image > explicit latent > noise clone.
        if image_to_latent is not None and vae is not None:
            samples = {"samples": vae.encode(image_to_latent[:, :, :, :3])}
            latents = RepeatLatentBatch().repeat(samples, batch_size)[0]
        elif latent is not None:
            latents = latent
        else:
            latents = {"samples": noise["samples"].clone()}

        samples = latents.copy()
        if latents["samples"].shape != noise["samples"].shape:
            raise ValueError("InjectNoiseToLatent: Latent and noise must have the same shape")
        if average:
            # Mean of latent and noise, ignoring strength.
            noised = (samples["samples"].clone() + noise["samples"].clone()) / 2
        else:
            noised = samples["samples"].clone() + noise["samples"].clone() * strength
        if normalize:
            noised = noised / noised.std()
        if mask is not None:
            # Resize the mask to the latent's spatial size and broadcast it
            # across channels; tile along batch if it is shorter than the batch.
            mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])),
                                                   size=(noised.shape[2], noised.shape[3]), mode="bilinear")
            mask = mask.expand((-1, noised.shape[1], -1, -1))
            if mask.shape[0] < noised.shape[0]:
                mask = mask.repeat((noised.shape[0] - 1) // mask.shape[0] + 1, 1, 1, 1)[:noised.shape[0]]
            noised = mask * noised + (1 - mask) * latents["samples"]
        if mix_randn_amount > 0:
            if seed is not None:
                torch.manual_seed(seed)
            rand_noise = torch.randn_like(noised)
            # Variance-preserving blend of the result with fresh gaussian noise.
            noised = ((1 - mix_randn_amount) * noised + mix_randn_amount *
                      rand_noise) / ((mix_randn_amount ** 2 + (1 - mix_randn_amount) ** 2) ** 0.5)
        samples["samples"] = noised
        return (samples,)
|
||||
|
||||
|
||||
from ..libs.api.stability import stableAPI
|
||||
class stableDiffusion3API:
    """Deprecated: generate an image via Stability AI's SD3 / SD3-Turbo REST API."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "positive": ("STRING", {"default": "", "placeholder": "Positive", "multiline": True}),
                "negative": ("STRING", {"default": "", "placeholder": "Negative", "multiline": True}),
                "model": (["sd3", "sd3-turbo"],),
                "aspect_ratio": (['16:9', '1:1', '21:9', '2:3', '3:2', '4:5', '5:4', '9:16', '9:21'],),
                "seed": ("INT", {"default": 0, "min": 0, "max": 4294967294}),
                "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0}),
            },
            "optional": {
                "optional_image": ("IMAGE",),
            },
            "hidden": {
                "unique_id": "UNIQUE_ID",
                "extra_pnginfo": "EXTRA_PNGINFO",
            },
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)

    FUNCTION = "generate"
    OUTPUT_NODE = False

    CATEGORY = "EasyUse/🚫 Deprecated"
    DEPRECATED = True

    def generate(self, positive, negative, model, aspect_ratio, seed, denoise, optional_image=None, unique_id=None, extra_pnginfo=None):
        # Keys are resolved at call time so the node can load without config.
        stableAPI.getAPIKeys()
        # Supplying an input image switches the endpoint to img2img mode.
        mode = 'image-to-image' if optional_image is not None else 'text-to-image'
        output_image = stableAPI.generate_sd3_image(positive, negative, aspect_ratio, seed=seed, mode=mode, model=model, strength=denoise, image=optional_image)
        return (output_image,)
|
||||
|
||||
|
||||
class saveImageLazy():
    """Deprecated: save images as PNGs (with optional workflow metadata) and
    pass them through unchanged so downstream nodes can keep using them."""

    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"
        self.compress_level = 4

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE",),
                     "filename_prefix": ("STRING", {"default": "ComfyUI"}),
                     "save_metadata": ("BOOLEAN", {"default": True}),
                     },
                "optional": {},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("images",)
    OUTPUT_NODE = False
    FUNCTION = "save"

    DEPRECATED = True
    CATEGORY = "EasyUse/🚫 Deprecated"

    def save(self, images, filename_prefix, save_metadata, prompt=None, extra_pnginfo=None):
        extension = 'png'

        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
            filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])

        results = list()
        for (batch_number, image) in enumerate(images):
            # Tensor (0..1 floats) -> 8-bit PIL image.
            i = 255. * image.cpu().numpy()
            img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
            metadata = None

            filename_with_batch_num = filename.replace(
                "%batch_num%", str(batch_number))

            # Recompute the counter per image by scanning existing files on
            # disk, so concurrent/previous saves are not overwritten.
            counter = 1

            if os.path.exists(full_output_folder) and os.listdir(full_output_folder):
                # Keep only files named "<prefix>_<digits>.<ext>" for this batch.
                filtered_filenames = list(filter(
                    lambda filename: filename.startswith(
                        filename_with_batch_num + "_")
                    and filename[len(filename_with_batch_num) + 1:-4].isdigit(),
                    os.listdir(full_output_folder)
                ))

                if filtered_filenames:
                    max_counter = max(
                        int(filename[len(filename_with_batch_num) + 1:-4])
                        for filename in filtered_filenames
                    )
                    counter = max_counter + 1

            file = f"{filename_with_batch_num}_{counter:05}.{extension}"

            save_path = os.path.join(full_output_folder, file)

            if save_metadata:
                # Embed the prompt and any extra workflow info as PNG text
                # chunks so the workflow can be reloaded from the image.
                metadata = PngInfo()
                if prompt is not None:
                    metadata.add_text("prompt", json.dumps(prompt))
                if extra_pnginfo is not None:
                    for x in extra_pnginfo:
                        metadata.add_text(
                            x, json.dumps(extra_pnginfo[x]))

            img.save(save_path, pnginfo=metadata)

            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })

        # Pass the original tensors through alongside the UI preview entries.
        return {"ui": {"images": results} , "result": (images,)}
|
||||
|
||||
from .logic import saveText, showAnything

# Lazy variant of showAnything: same display logic, but not an output node,
# so it only runs when something downstream requests its output.
class showAnythingLazy(showAnything):
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {}, "optional": {"anything": (any_type, {}), },
                "hidden": {"unique_id": "UNIQUE_ID", "extra_pnginfo": "EXTRA_PNGINFO",
                           }}

    RETURN_TYPES = (any_type,)
    RETURN_NAMES = ('output',)
    INPUT_IS_LIST = True
    OUTPUT_NODE = False
    OUTPUT_IS_LIST = (False,)
    DEPRECATED = True
    # Inherits log_input's implementation from showAnything.
    FUNCTION = "log_input"
    CATEGORY = "EasyUse/🚫 Deprecated"
|
||||
|
||||
# Lazy variant of saveText: identical save_text behavior, but not an output
# node, so it only executes on demand.
class saveTextLazy(saveText):

    RETURN_TYPES = ("STRING", "IMAGE")
    RETURN_NAMES = ("text", 'image',)

    # Inherits save_text's implementation from saveText.
    FUNCTION = "save_text"
    OUTPUT_NODE = False
    DEPRECATED = True
    CATEGORY = "EasyUse/🚫 Deprecated"
|
||||
|
||||
# Registration table for the deprecated nodes: internal id -> class.
NODE_CLASS_MAPPINGS = {
    "easy if": If,
    "easy poseEditor": poseEditor,
    "easy imageToMask": imageToMask,
    "easy showSpentTime": showSpentTime,
    "easy latentNoisy": latentNoisy,
    "easy latentCompositeMaskedWithCond": latentCompositeMaskedWithCond,
    "easy injectNoiseToLatent": injectNoiseToLatent,
    "easy stableDiffusion3API": stableDiffusion3API,
    "easy saveImageLazy": saveImageLazy,
    "easy saveTextLazy": saveTextLazy,
    "easy showAnythingLazy": showAnythingLazy,
}

# Display labels; the "(🚫Deprecated)" suffix flags them in the node picker.
NODE_DISPLAY_NAME_MAPPINGS = {
    "easy if": "If (🚫Deprecated)",
    "easy poseEditor": "PoseEditor (🚫Deprecated)",
    "easy imageToMask": "ImageToMask (🚫Deprecated)",
    "easy showSpentTime": "Show Spent Time (🚫Deprecated)",
    "easy latentNoisy": "LatentNoisy (🚫Deprecated)",
    "easy latentCompositeMaskedWithCond": "LatentCompositeMaskedWithCond (🚫Deprecated)",
    "easy injectNoiseToLatent": "InjectNoiseToLatent (🚫Deprecated)",
    "easy stableDiffusion3API": "StableDiffusion3API (🚫Deprecated)",
    "easy saveImageLazy": "SaveImageLazy (🚫Deprecated)",
    "easy saveTextLazy": "SaveTextLazy (🚫Deprecated)",
    "easy showAnythingLazy": "ShowAnythingLazy (🚫Deprecated)",
}
|
||||
643
custom_nodes/ComfyUI-Easy-Use/py/nodes/fix.py
Normal file
643
custom_nodes/ComfyUI-Easy-Use/py/nodes/fix.py
Normal file
@@ -0,0 +1,643 @@
|
||||
import sys
|
||||
import time
|
||||
import comfy
|
||||
import torch
|
||||
import folder_paths
|
||||
|
||||
from comfy_extras.chainner_models import model_loading
|
||||
|
||||
from server import PromptServer
|
||||
from nodes import MAX_RESOLUTION, NODE_CLASS_MAPPINGS as ALL_NODE_CLASS_MAPPINGS
|
||||
|
||||
from ..libs.utils import easySave, get_sd_version
|
||||
from ..libs.sampler import easySampler
|
||||
from .. import easyCache, sampler
|
||||
|
||||
class hiresFix:
    """Hi-res fix: upscale an image with an ESRGAN-style model, optionally
    rescale the result, VAE-encode it back to a latent, and return an updated
    pipe plus the image and latent."""

    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos", "bislerp"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "model_name": (folder_paths.get_filename_list("upscale_models"),),
            "rescale_after_model": ([False, True], {"default": True}),
            "rescale_method": (s.upscale_methods,),
            "rescale": (["by percentage", "to Width/Height", 'to longer side - maintain aspect'],),
            "percent": ("INT", {"default": 50, "min": 0, "max": 1000, "step": 1}),
            "width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
            "height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
            "longer_side": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
            "crop": (s.crop_methods,),
            "image_output": (["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save"],{"default": "Preview"}),
            "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}),
            "save_prefix": ("STRING", {"default": "ComfyUI"}),
        },
            "optional": {
                "pipe": ("PIPE_LINE",),
                "image": ("IMAGE",),
                "vae": ("VAE",),
            },
            "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID",
                       },
        }

    RETURN_TYPES = ("PIPE_LINE", "IMAGE", "LATENT", )
    RETURN_NAMES = ('pipe', 'image', "latent", )

    FUNCTION = "upscale"
    CATEGORY = "EasyUse/Fix"
    OUTPUT_NODE = True

    def vae_encode_crop_pixels(self, pixels):
        """Center-crop pixel dims to multiples of 8 so the VAE can encode them."""
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
        return pixels

    def upscale(self, model_name, rescale_after_model, rescale_method, rescale, percent, width, height,
                longer_side, crop, image_output, link_id, save_prefix, pipe=None, image=None, vae=None, prompt=None,
                extra_pnginfo=None, my_unique_id=None):
        """Run the upscale model, optionally rescale, re-encode, and emit results.

        Explicit image/vae arguments override the pipe's; without a pipe both
        must be provided.
        """
        new_pipe = {}
        if pipe is not None:
            image = image if image is not None else pipe["images"]
            vae = vae if vae is not None else pipe.get("vae")
        elif image is None or vae is None:
            raise ValueError("pipe or image or vae missing.")
        # Load Model
        model_path = folder_paths.get_full_path("upscale_models", model_name)
        sd = comfy.utils.load_torch_file(model_path, safe_load=True)
        upscale_model = model_loading.load_state_dict(sd).eval()

        # Model upscale: run tiled so large images fit in memory.
        device = comfy.model_management.get_torch_device()
        upscale_model.to(device)
        in_img = image.movedim(-1, -3).to(device)

        tile = 128 + 64
        overlap = 8
        steps = in_img.shape[0] * comfy.utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile,
                                                                    tile_y=tile, overlap=overlap)
        pbar = comfy.utils.ProgressBar(steps)
        s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap,
                                    upscale_amount=upscale_model.scale, pbar=pbar)
        upscale_model.cpu()
        s = torch.clamp(s.movedim(-3, -1), min=0, max=1.0)

        # Post Model Rescale: optionally resize the model output again.
        # NOTE(review): "to Width/Height" uses the width/height widgets as-is.
        if rescale_after_model == True:
            samples = s.movedim(-1, 1)
            orig_height = samples.shape[2]
            orig_width = samples.shape[3]
            if rescale == "by percentage" and percent != 0:
                height = percent / 100 * orig_height
                width = percent / 100 * orig_width
                if (width > MAX_RESOLUTION):
                    width = MAX_RESOLUTION
                if (height > MAX_RESOLUTION):
                    height = MAX_RESOLUTION

                # Snap to multiples of 64 for downstream sampling.
                width = easySampler.enforce_mul_of_64(width)
                height = easySampler.enforce_mul_of_64(height)
            elif rescale == "to longer side - maintain aspect":
                longer_side = easySampler.enforce_mul_of_64(longer_side)
                if orig_width > orig_height:
                    width, height = longer_side, easySampler.enforce_mul_of_64(longer_side * orig_height / orig_width)
                else:
                    width, height = easySampler.enforce_mul_of_64(longer_side * orig_width / orig_height), longer_side

            s = comfy.utils.common_upscale(samples, width, height, rescale_method, crop)
            s = s.movedim(1, -1)

        # vae encode the final pixels back into a latent.
        pixels = self.vae_encode_crop_pixels(s)
        t = vae.encode(pixels[:, :, :, :3])

        if pipe is not None:
            # Carry the pipe forward with the new image/latent substituted.
            new_pipe = {
                "model": pipe['model'],
                "positive": pipe['positive'],
                "negative": pipe['negative'],
                "vae": vae,
                "clip": pipe['clip'],

                "samples": {"samples": t},
                "images": s,
                "seed": pipe['seed'],

                "loader_settings": {
                    **pipe["loader_settings"],
                }
            }
            del pipe
        else:
            new_pipe = {}

        results = easySave(s, save_prefix, image_output, prompt, extra_pnginfo)

        # "Sender" modes push the images to a linked node over the websocket.
        if image_output in ("Sender", "Sender&Save"):
            PromptServer.instance.send_sync("img-send", {"link_id": link_id, "images": results})

        # "Hide" modes skip the UI preview entirely.
        if image_output in ("Hide", "Hide&Save"):
            return (new_pipe, s, {"samples": t},)

        return {"ui": {"images": results},
                "result": (new_pipe, s, {"samples": t},)}
|
||||
|
||||
# Pre detail fix: validates the incoming pipe and repacks it together with
# the detailer sampling settings for a downstream `detailerFix` node.
class preDetailerFix:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "pipe": ("PIPE_LINE",),
            "guide_size": ("FLOAT", {"default": 256, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
            "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}),
            "max_size": ("FLOAT", {"default": 768, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
            "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
            "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
            "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
            "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
            # 'align_your_steps' is a virtual entry resolved to an AYS scheduler in doit().
            "scheduler": (comfy.samplers.KSampler.SCHEDULERS + ['align_your_steps'],),
            "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}),
            "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
            "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
            "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
            "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
            "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
            "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
        },
            "optional": {
                "bbox_segm_pipe": ("PIPE_LINE",),
                "sam_pipe": ("PIPE_LINE",),
                "optional_image": ("IMAGE",),
            },
        }

    RETURN_TYPES = ("PIPE_LINE",)
    RETURN_NAMES = ("pipe",)
    OUTPUT_IS_LIST = (False,)
    FUNCTION = "doit"

    CATEGORY = "EasyUse/Fix"

    def doit(self, pipe, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, denoise, feather, noise_mask, force_inpaint, drop_size, wildcard, cycle, bbox_segm_pipe=None, sam_pipe=None, optional_image=None):
        """Validate the pipe contents and return a one-tuple with a new pipe
        dict that additionally carries `detail_fix_settings`, the bbox/segm
        detector pipe and the SAM pipe.

        Raises Exception when any mandatory pipe entry is absent.
        """
        def _required(key):
            # Fail fast with a clear message when a mandatory pipe entry is absent.
            value = pipe.get(key)
            if value is None:
                raise Exception("[ERROR] pipe['%s'] is missing" % key)
            return value

        model = _required("model")
        clip = _required("clip")
        vae = _required("vae")
        # An explicitly wired image overrides the one carried by the pipe.
        if optional_image is not None:
            images = optional_image
        else:
            # Fix: the original error message named the wrong key ("image").
            images = _required("images")
        positive = _required("positive")
        negative = _required("negative")

        # The detector/SAM pipes may come either from the node inputs or from
        # the incoming pipe; the explicit input wins.
        bbox_segm_pipe = bbox_segm_pipe or (pipe.get("bbox_segm_pipe") if pipe else None)
        if bbox_segm_pipe is None:
            raise Exception("[ERROR] bbox_segm_pipe or pipe['bbox_segm_pipe'] is missing")
        sam_pipe = sam_pipe or (pipe.get("sam_pipe") if pipe else None)
        if sam_pipe is None:
            raise Exception("[ERROR] sam_pipe or pipe['sam_pipe'] is missing")

        loader_settings = pipe.get("loader_settings", {})

        # Resolve the virtual 'align_your_steps' entry to the AYS scheduler
        # matching the checkpoint family.
        if scheduler == 'align_your_steps':
            model_version = get_sd_version(model)
            if model_version == 'sdxl':
                scheduler = 'AYS SDXL'
            elif model_version == 'svd':
                scheduler = 'AYS SVD'
            else:
                scheduler = 'AYS SD1'

        new_pipe = {
            "images": images,
            "model": model,
            "clip": clip,
            "vae": vae,
            "positive": positive,
            "negative": negative,
            "seed": seed,

            "bbox_segm_pipe": bbox_segm_pipe,
            "sam_pipe": sam_pipe,

            "loader_settings": loader_settings,

            "detail_fix_settings": {
                "guide_size": guide_size,
                "guide_size_for": guide_size_for,
                "max_size": max_size,
                "seed": seed,
                "steps": steps,
                "cfg": cfg,
                "sampler_name": sampler_name,
                "scheduler": scheduler,
                "denoise": denoise,
                "feather": feather,
                "noise_mask": noise_mask,
                "force_inpaint": force_inpaint,
                "drop_size": drop_size,
                "wildcard": wildcard,
                "cycle": cycle
            }
        }

        del bbox_segm_pipe
        del sam_pipe

        return (new_pipe,)
|
||||
|
||||
# Pre mask detail fix: packs an explicit mask plus detailer settings into the
# pipe; derives a noise mask from it when the incoming latent has none.
class preMaskDetailerFix:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "pipe": ("PIPE_LINE",),
            "mask": ("MASK",),

            "guide_size": ("FLOAT", {"default": 384, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
            "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}),
            "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
            "mask_mode": ("BOOLEAN", {"default": True, "label_on": "masked only", "label_off": "whole"}),

            "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
            "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
            "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
            "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
            "scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
            "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}),

            "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
            "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}),
            "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
            "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}),
            "batch_size": ("INT", {"default": 1, "min": 1, "max": 100}),
            "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
        },
            "optional": {
                # "patch": ("INPAINT_PATCH",),
                "optional_image": ("IMAGE",),
                "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}),
            },
        }

    RETURN_TYPES = ("PIPE_LINE",)
    RETURN_NAMES = ("pipe",)
    OUTPUT_IS_LIST = (False,)
    FUNCTION = "doit"

    CATEGORY = "EasyUse/Fix"

    def doit(self, pipe, mask, guide_size, guide_size_for, max_size, mask_mode, seed, steps, cfg, sampler_name, scheduler, denoise, feather, crop_factor, drop_size, refiner_ratio, batch_size, cycle, optional_image=None, inpaint_model=False, noise_mask_feather=20):
        """Validate the pipe and return a one-tuple with a new pipe dict that
        carries the mask plus `detail_fix_settings` / `mask_settings`.

        When the incoming latent has no 'noise_mask', the masked region of the
        image is neutralized and re-encoded through the VAE to build one.
        """
        def _required(key):
            # Fail fast with a clear message when a mandatory pipe entry is absent.
            value = pipe.get(key)
            if value is None:
                raise Exception("[ERROR] pipe['%s'] is missing" % key)
            return value

        model = _required("model")
        clip = _required("clip")
        vae = _required("vae")
        # An explicitly wired image overrides the one carried by the pipe.
        if optional_image is not None:
            images = optional_image
        else:
            # Fix: the original error message named the wrong key ("image").
            images = _required("images")
        positive = _required("positive")
        negative = _required("negative")
        latent = _required("samples")

        if 'noise_mask' not in latent:
            if images is None:
                raise Exception("No Images found")
            if vae is None:
                raise Exception("No VAE found")
            # Largest width/height divisible by 8 (the VAE downscale factor).
            # NOTE(review): images appears to be NHWC (shape[1]=H, shape[2]=W,
            # last axis = channels, see the per-channel loop below) — confirm.
            x = (images.shape[1] // 8) * 8
            y = (images.shape[2] // 8) * 8
            # Resize the mask to pixel resolution before cropping/encoding.
            mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])),
                                                   size=(images.shape[1], images.shape[2]), mode="bilinear")

            pixels = images.clone()
            if pixels.shape[1] != x or pixels.shape[2] != y:
                # Center-crop image and mask to the 8-aligned size.
                x_offset = (pixels.shape[1] % 8) // 2
                y_offset = (pixels.shape[2] % 8) // 2
                pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
                mask = mask[:, :, x_offset:x + x_offset, y_offset:y + y_offset]

            mask_erosion = mask

            # Neutralize the masked region to 0.5 grey before VAE-encoding.
            m = (1.0 - mask.round()).squeeze(1)
            for i in range(3):
                pixels[:, :, :, i] -= 0.5
                pixels[:, :, :, i] *= m
                pixels[:, :, :, i] += 0.5
            t = vae.encode(pixels)

            # NOTE(review): this freshly built latent is never stored in
            # new_pipe (no "samples" key) — matches the original behavior;
            # confirm that dropping it is intended.
            latent = {"samples": t, "noise_mask": (mask_erosion[:, :, :x, :y].round())}
        # when patch was linked
        # if patch is not None:
        #     worker = InpaintWorker(node_name="easy kSamplerInpainting")
        #     model, = worker.patch(model, latent, patch)

        loader_settings = pipe.get("loader_settings", {})

        new_pipe = {
            "images": images,
            "model": model,
            "clip": clip,
            "vae": vae,
            "positive": positive,
            "negative": negative,
            "seed": seed,
            "mask": mask,

            "loader_settings": loader_settings,

            "detail_fix_settings": {
                "guide_size": guide_size,
                "guide_size_for": guide_size_for,
                "max_size": max_size,
                "seed": seed,
                "steps": steps,
                "cfg": cfg,
                "sampler_name": sampler_name,
                "scheduler": scheduler,
                "denoise": denoise,
                "feather": feather,
                "crop_factor": crop_factor,
                "drop_size": drop_size,
                "refiner_ratio": refiner_ratio,
                "batch_size": batch_size,
                "cycle": cycle
            },

            "mask_settings": {
                "mask_mode": mask_mode,
                "inpaint_model": inpaint_model,
                "noise_mask_feather": noise_mask_feather
            }
        }

        del pipe

        return (new_pipe,)
|
||||
|
||||
# Detail fix: runs Impact Pack's MaskDetailerPipe (when an explicit mask was
# prepared) or FaceDetailer over the images carried by the pipe.
class detailerFix:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "pipe": ("PIPE_LINE",),
            "image_output": (["Hide", "Preview", "Save", "Hide&Save", "Sender", "Sender&Save"], {"default": "Preview"}),
            "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}),
            "save_prefix": ("STRING", {"default": "ComfyUI"}),
        },
            "optional": {
                "model": ("MODEL",),
            },
            "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID", }
        }

    RETURN_TYPES = ("PIPE_LINE", "IMAGE", "IMAGE", "IMAGE")
    RETURN_NAMES = ("pipe", "image", "cropped_refined", "cropped_enhanced_alpha")
    OUTPUT_NODE = True
    OUTPUT_IS_LIST = (False, False, True, True)
    FUNCTION = "doit"

    CATEGORY = "EasyUse/Fix"

    def doit(self, pipe, image_output, link_id, save_prefix, model=None, prompt=None, extra_pnginfo=None, my_unique_id=None):
        """Run the detailer described by pipe['detail_fix_settings'] and
        return the refined images plus an updated pipe.

        Requires Impact Pack ('MaskDetailerPipe' / 'FaceDetailer' nodes).
        """
        # Clean loaded_objects
        easyCache.update_loaded_objects(prompt)

        my_unique_id = int(my_unique_id)

        # An explicitly wired model overrides the one carried by the pipe.
        model = model or pipe.get("model")
        if model is None:
            raise Exception("[ERROR] model or pipe['model'] is missing")

        detail_fix_settings = pipe.get("detail_fix_settings")
        if detail_fix_settings is None:
            raise Exception("[ERROR] detail_fix_settings or pipe['detail_fix_settings'] is missing")

        mask = pipe.get("mask")

        image = pipe["images"]
        clip = pipe["clip"]
        vae = pipe["vae"]
        seed = pipe["seed"]
        positive = pipe["positive"]
        negative = pipe["negative"]
        loader_settings = pipe.get("loader_settings", {})

        settings = detail_fix_settings
        guide_size = settings.get("guide_size", 256)
        guide_size_for = settings.get("guide_size_for", True)
        max_size = settings.get("max_size", 768)
        steps = settings.get("steps", 20)
        cfg = settings.get("cfg", 1.0)
        sampler_name = settings.get("sampler_name")
        scheduler = settings.get("scheduler")
        denoise = settings.get("denoise", 0.5)
        feather = settings.get("feather", 5)
        crop_factor = settings.get("crop_factor", 3.0)
        drop_size = settings.get("drop_size", 10)
        # Fix: the original looked refiner_ratio up on `pipe` instead of the
        # settings dict, so the stored value was always ignored.
        refiner_ratio = settings.get("refiner_ratio", 0.2)
        batch_size = settings.get("batch_size", 1)
        noise_mask = settings.get("noise_mask")
        force_inpaint = settings.get("force_inpaint", False)
        wildcard = settings.get("wildcard", "")
        cycle = settings.get("cycle", 1)

        bbox_segm_pipe = pipe.get("bbox_segm_pipe") if pipe else None
        sam_pipe = pipe.get("sam_pipe")

        # Detailer start time (ms) for the spent-time badge shown in the UI.
        start_time = int(time.time() * 1000)
        if "mask_settings" in pipe:
            mask_settings = pipe["mask_settings"]
            # Fix: mask_mode was gated on the "inpaint_model" key, so a stored
            # mask_mode could be silently replaced by the default.
            mask_mode = mask_settings.get("mask_mode", True)
            inpaint_model = mask_settings.get("inpaint_model", False)
            noise_mask_feather = mask_settings.get("noise_mask_feather", 20)
            # Fix: membership must be checked before indexing the mapping,
            # otherwise a missing Impact Pack raised KeyError instead of this hint.
            if "MaskDetailerPipe" not in ALL_NODE_CLASS_MAPPINGS:
                raise Exception("[ERROR] To use MaskDetailerPipe, you need to install 'Impact Pack'")
            cls = ALL_NODE_CLASS_MAPPINGS["MaskDetailerPipe"]
            basic_pipe = (model, clip, vae, positive, negative)
            result_img, result_cropped_enhanced, result_cropped_enhanced_alpha, basic_pipe, refiner_basic_pipe_opt = cls().doit(
                image, mask, basic_pipe, guide_size, guide_size_for, max_size, mask_mode,
                seed, steps, cfg, sampler_name, scheduler, denoise,
                feather, crop_factor, drop_size, refiner_ratio, batch_size, cycle=1,
                refiner_basic_pipe_opt=None, detailer_hook=None, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather)
            result_mask = mask
            result_cnet_images = ()
        else:
            if bbox_segm_pipe is None:
                raise Exception("[ERROR] bbox_segm_pipe or pipe['bbox_segm_pipe'] is missing")
            if sam_pipe is None:
                raise Exception("[ERROR] sam_pipe or pipe['sam_pipe'] is missing")
            bbox_detector_opt, bbox_threshold, bbox_dilation, bbox_crop_factor, segm_detector_opt = bbox_segm_pipe
            sam_model_opt, sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, sam_mask_hint_use_negative = sam_pipe
            if "FaceDetailer" not in ALL_NODE_CLASS_MAPPINGS:
                raise Exception("[ERROR] To use FaceDetailer, you need to install 'Impact Pack'")
            cls = ALL_NODE_CLASS_MAPPINGS["FaceDetailer"]

            result_img, result_cropped_enhanced, result_cropped_enhanced_alpha, result_mask, pipe, result_cnet_images = cls().doit(
                image, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name,
                scheduler,
                positive, negative, denoise, feather, noise_mask, force_inpaint,
                bbox_threshold, bbox_dilation, bbox_crop_factor,
                sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold,
                sam_mask_hint_use_negative, drop_size, bbox_detector_opt, wildcard, cycle, sam_model_opt,
                segm_detector_opt,
                detailer_hook=None)

        # Detailer end time (ms).
        end_time = int(time.time() * 1000)

        spent_time = 'Fix:' + str((end_time - start_time) / 1000) + '"'

        results = easySave(result_img, save_prefix, image_output, prompt, extra_pnginfo)
        sampler.update_value_by_id("results", my_unique_id, results)

        # Clean loaded_objects
        easyCache.update_loaded_objects(prompt)

        new_pipe = {
            "samples": None,
            "images": result_img,
            "model": model,
            "clip": clip,
            "vae": vae,
            "seed": seed,
            "positive": positive,
            "negative": negative,
            "wildcard": wildcard,
            "bbox_segm_pipe": bbox_segm_pipe,
            "sam_pipe": sam_pipe,

            "loader_settings": {
                **loader_settings,
                "spent_time": spent_time
            },
            "detail_fix_settings": detail_fix_settings
        }
        # NOTE(review): in the FaceDetailer branch `pipe` was rebound to the
        # detailer's returned pipe, so this checks the new pipe — kept as-is.
        if "mask_settings" in pipe:
            new_pipe["mask_settings"] = pipe["mask_settings"]

        sampler.update_value_by_id("pipe_line", my_unique_id, new_pipe)

        del bbox_segm_pipe
        del sam_pipe
        del pipe

        if image_output in ("Hide", "Hide&Save"):
            return (new_pipe, result_img, result_cropped_enhanced, result_cropped_enhanced_alpha, result_mask, result_cnet_images)

        if image_output in ("Sender", "Sender&Save"):
            PromptServer.instance.send_sync("img-send", {"link_id": link_id, "images": results})

        return {"ui": {"images": results}, "result": (new_pipe, result_img, result_cropped_enhanced, result_cropped_enhanced_alpha, result_mask, result_cnet_images)}
|
||||
|
||||
class ultralyticsDetectorForDetailerFix:
    """Loads an Ultralytics bbox/segm detector and packs it, together with
    its thresholds, into a PIPE_LINE tuple for `detailerFix`."""

    @classmethod
    def INPUT_TYPES(s):
        # Offer both bbox and segm checkpoints, prefixed with their kind.
        bboxs = ["bbox/" + name for name in folder_paths.get_filename_list("ultralytics_bbox")]
        segms = ["segm/" + name for name in folder_paths.get_filename_list("ultralytics_segm")]
        return {
            "required": {
                "model_name": (bboxs + segms,),
                "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                "bbox_dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
                "bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}),
            }
        }

    RETURN_TYPES = ("PIPE_LINE",)
    RETURN_NAMES = ("bbox_segm_pipe",)
    FUNCTION = "doit"

    CATEGORY = "EasyUse/Fix"

    def doit(self, model_name, bbox_threshold, bbox_dilation, bbox_crop_factor):
        """Return a one-tuple with (bbox_detector, threshold, dilation,
        crop_factor, segm_detector)."""
        # The detector provider ships with the Impact Pack extension.
        if 'UltralyticsDetectorProvider' not in ALL_NODE_CLASS_MAPPINGS:
            raise Exception("[ERROR] To use UltralyticsDetectorProvider, you need to install 'Impact Pack'")
        provider = ALL_NODE_CLASS_MAPPINGS['UltralyticsDetectorProvider']
        bbox_detector, segm_detector = provider().doit(model_name)
        return ((bbox_detector, bbox_threshold, bbox_dilation, bbox_crop_factor, segm_detector),)
|
||||
|
||||
class samLoaderForDetailerFix:
    """Loads a SAM model and packs it, together with its detection settings,
    into a PIPE_LINE tuple for `detailerFix`."""

    @classmethod
    def INPUT_TYPES(cls):
        detection_hints = ["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points",
                           "mask-point-bbox", "none"]
        return {
            "required": {
                "model_name": (folder_paths.get_filename_list("sams"),),
                "device_mode": (["AUTO", "Prefer GPU", "CPU"], {"default": "AUTO"}),
                "sam_detection_hint": (detection_hints,),
                "sam_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
                "sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}),
                "sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}),
                "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}),
                "sam_mask_hint_use_negative": (["False", "Small", "Outter"],),
            }
        }

    RETURN_TYPES = ("PIPE_LINE",)
    RETURN_NAMES = ("sam_pipe",)
    FUNCTION = "doit"

    CATEGORY = "EasyUse/Fix"

    def doit(self, model_name, device_mode, sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, sam_mask_hint_use_negative):
        """Return a one-tuple with (sam_model, hint, dilation, threshold,
        bbox_expansion, mask_hint_threshold, mask_hint_use_negative)."""
        # SAMLoader ships with the Impact Pack extension.
        if 'SAMLoader' not in ALL_NODE_CLASS_MAPPINGS:
            raise Exception("[ERROR] To use SAMLoader, you need to install 'Impact Pack'")
        loader = ALL_NODE_CLASS_MAPPINGS['SAMLoader']
        (sam_model,) = loader().load_model(model_name, device_mode)
        return ((sam_model, sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion,
                 sam_mask_hint_threshold, sam_mask_hint_use_negative),)
|
||||
|
||||
|
||||
# Registration table: ComfyUI node type id -> implementing class.
NODE_CLASS_MAPPINGS = {
    "easy hiresFix": hiresFix,
    "easy preDetailerFix": preDetailerFix,
    "easy preMaskDetailerFix": preMaskDetailerFix,
    "easy ultralyticsDetectorPipe": ultralyticsDetectorForDetailerFix,
    "easy samLoaderPipe": samLoaderForDetailerFix,
    "easy detailerFix": detailerFix
}

# Human-readable names shown in the ComfyUI node menu.
NODE_DISPLAY_NAME_MAPPINGS = {
    "easy hiresFix": "HiresFix",
    "easy preDetailerFix": "PreDetailerFix",
    "easy preMaskDetailerFix": "preMaskDetailerFix",
    "easy ultralyticsDetectorPipe": "UltralyticsDetector (Pipe)",
    "easy samLoaderPipe": "SAMLoader (Pipe)",
    "easy detailerFix": "DetailerFix",
}
|
||||
2205
custom_nodes/ComfyUI-Easy-Use/py/nodes/image.py
Normal file
2205
custom_nodes/ComfyUI-Easy-Use/py/nodes/image.py
Normal file
File diff suppressed because it is too large
Load Diff
356
custom_nodes/ComfyUI-Easy-Use/py/nodes/inpaint.py
Normal file
356
custom_nodes/ComfyUI-Easy-Use/py/nodes/inpaint.py
Normal file
@@ -0,0 +1,356 @@
|
||||
import re
|
||||
import torch
|
||||
import comfy
|
||||
from comfy_extras.nodes_mask import GrowMask
|
||||
from nodes import VAEEncodeForInpaint, NODE_CLASS_MAPPINGS as ALL_NODE_CLASS_MAPPINGS
|
||||
from ..libs.utils import get_local_filepath
|
||||
from ..libs.log import log_node_info
|
||||
from ..libs import cache as backend_cache
|
||||
from ..config import *
|
||||
|
||||
# FooocusInpaint
class applyFooocusInpaint:
    """Patches a cloned model with the Fooocus inpaint head + LoRA patch."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL",),
                "latent": ("LATENT",),
                "head": (list(FOOOCUS_INPAINT_HEAD.keys()),),
                "patch": (list(FOOOCUS_INPAINT_PATCH.keys()),),
            },
        }

    RETURN_TYPES = ("MODEL",)
    RETURN_NAMES = ("model",)
    CATEGORY = "EasyUse/Inpaint"
    FUNCTION = "apply"

    def apply(self, model, latent, head, patch):
        """Return a one-tuple with a clone of `model` patched for inpainting."""
        from ..modules.fooocus import InpaintHead, InpaintWorker

        # Download (if needed) and load the inpaint head weights.
        # NOTE(review): torch.load without weights_only — trusted checkpoints only.
        head_path = get_local_filepath(FOOOCUS_INPAINT_HEAD[head]["model_url"], INPAINT_DIR)
        head_model = InpaintHead()
        head_model.load_state_dict(torch.load(head_path, map_location='cpu'))

        # Download (if needed) and load the LoRA-style patch weights.
        patch_path = get_local_filepath(FOOOCUS_INPAINT_PATCH[patch]["model_url"], INPAINT_DIR)
        lora_weights = comfy.utils.load_torch_file(patch_path, safe_load=True)

        worker = InpaintWorker(node_name="easy kSamplerInpainting")
        patched_model, = worker.patch(model.clone(), latent, (head_model, lora_weights))
        return (patched_model,)
|
||||
|
||||
# brushnet
from ..modules.brushnet import BrushNet

class applyBrushNet:
    """Attaches a BrushNet inpainting model to the pipe and updates its
    model/conditioning/latent accordingly."""

    # Fix: declared as @staticmethod — it was a bare function only callable
    # through class attribute access; behavior for `s.get_files_with_extension()`
    # callers is unchanged.
    @staticmethod
    def get_files_with_extension(folder='inpaint', extensions='.safetensors'):
        """Return the model files in `folder` whose names end with `extensions`."""
        return [file for file in folder_paths.get_filename_list(folder) if file.endswith(extensions)]

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "pipe": ("PIPE_LINE",),
                "image": ("IMAGE",),
                "mask": ("MASK",),
                "brushnet": (s.get_files_with_extension(),),
                "dtype": (['float16', 'bfloat16', 'float32', 'float64'], ),
                "scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0}),
                "start_at": ("INT", {"default": 0, "min": 0, "max": 10000}),
                "end_at": ("INT", {"default": 10000, "min": 0, "max": 10000}),
            },
        }

    RETURN_TYPES = ("PIPE_LINE",)
    RETURN_NAMES = ("pipe",)
    CATEGORY = "EasyUse/Inpaint"
    FUNCTION = "apply"

    def apply(self, pipe, image, mask, brushnet, dtype, scale, start_at, end_at):
        """Load (or reuse a cached) brushnet checkpoint and return a one-tuple
        with a new pipe carrying the patched model and inpaint latent."""
        model = pipe['model']
        vae = pipe['vae']
        positive = pipe['positive']
        negative = pipe['negative']
        cls = BrushNet()
        # Reuse an already-loaded brushnet checkpoint when possible.
        if brushnet in backend_cache.cache:
            log_node_info("easy brushnetApply", f"Using {brushnet} Cached")
            _, brushnet_model = backend_cache.cache[brushnet][1]
        else:
            # Fix: os.path.join() with a single argument was a no-op wrapper.
            brushnet_file = folder_paths.get_full_path("inpaint", brushnet)
            brushnet_model, = cls.load_brushnet_model(brushnet_file, dtype)
            backend_cache.update_cache(brushnet, 'brushnet', (False, brushnet_model))
        m, positive, negative, latent = cls.brushnet_model_update(model=model, vae=vae, image=image, mask=mask,
                                                                  brushnet=brushnet_model, positive=positive,
                                                                  negative=negative, scale=scale, start_at=start_at,
                                                                  end_at=end_at)
        new_pipe = {
            **pipe,
            "model": m,
            "positive": positive,
            "negative": negative,
            "samples": latent,
        }
        del pipe
        return (new_pipe,)
|
||||
|
||||
# #powerpaint
class applyPowerPaint:
    """Attaches a PowerPaint inpainting model + clip to the pipe and updates
    its model/conditioning/latent accordingly."""

    # Fix: declared as @staticmethod — it was a bare function only callable
    # through class attribute access; callers are unchanged.
    @staticmethod
    def get_files_with_extension(folder='inpaint', extensions='.safetensors'):
        """Return the model files in `folder` whose names end with `extensions`."""
        return [file for file in folder_paths.get_filename_list(folder) if file.endswith(extensions)]

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "pipe": ("PIPE_LINE",),
                "image": ("IMAGE",),
                "mask": ("MASK",),
                "powerpaint_model": (s.get_files_with_extension(),),
                "powerpaint_clip": (s.get_files_with_extension(extensions='.bin'),),
                "dtype": (['float16', 'bfloat16', 'float32', 'float64'],),
                "fitting": ("FLOAT", {"default": 1.0, "min": 0.3, "max": 1.0}),
                "function": (['text guided', 'shape guided', 'object removal', 'context aware', 'image outpainting'],),
                "scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0}),
                "start_at": ("INT", {"default": 0, "min": 0, "max": 10000}),
                "end_at": ("INT", {"default": 10000, "min": 0, "max": 10000}),
                "save_memory": (['none', 'auto', 'max'],),
            },
        }

    RETURN_TYPES = ("PIPE_LINE",)
    RETURN_NAMES = ("pipe",)
    CATEGORY = "EasyUse/Inpaint"
    FUNCTION = "apply"

    def apply(self, pipe, image, mask, powerpaint_model, powerpaint_clip, dtype, fitting, function, scale, start_at, end_at, save_memory='none'):
        """Load (or reuse cached) PowerPaint model and clip, then return a
        one-tuple with a new pipe carrying the patched model and latent."""
        model = pipe['model']
        vae = pipe['vae']
        positive = pipe['positive']
        negative = pipe['negative']

        cls = BrushNet()
        # load powerpaint clip (reused from the backend cache when possible)
        if powerpaint_clip in backend_cache.cache:
            log_node_info("easy powerpaintApply", f"Using {powerpaint_clip} Cached")
            _, ppclip = backend_cache.cache[powerpaint_clip][1]
        else:
            model_url = POWERPAINT_MODELS['base_fp16']['model_url']
            base_clip = get_local_filepath(model_url, os.path.join(folder_paths.models_dir, 'clip'))
            # Fix: os.path.join() with a single argument was a no-op wrapper.
            ppclip, = cls.load_powerpaint_clip(base_clip, folder_paths.get_full_path("inpaint", powerpaint_clip))
            backend_cache.update_cache(powerpaint_clip, 'ppclip', (False, ppclip))
        # load powerpaint model (reused from the backend cache when possible)
        if powerpaint_model in backend_cache.cache:
            log_node_info("easy powerpaintApply", f"Using {powerpaint_model} Cached")
            _, powerpaint = backend_cache.cache[powerpaint_model][1]
        else:
            # Fix: os.path.join() with a single argument was a no-op wrapper.
            powerpaint_file = folder_paths.get_full_path("inpaint", powerpaint_model)
            powerpaint, = cls.load_brushnet_model(powerpaint_file, dtype)
            backend_cache.update_cache(powerpaint_model, 'powerpaint', (False, powerpaint))
        m, positive, negative, latent = cls.powerpaint_model_update(model=model, vae=vae, image=image, mask=mask, powerpaint=powerpaint,
                                                                    clip=ppclip, positive=positive,
                                                                    negative=negative, fitting=fitting, function=function,
                                                                    scale=scale, start_at=start_at, end_at=end_at, save_memory=save_memory)
        new_pipe = {
            **pipe,
            "model": m,
            "positive": positive,
            "negative": negative,
            "samples": latent,
        }
        del pipe
        return (new_pipe,)
|
||||
|
||||
from node_helpers import conditioning_set_values
|
||||
class applyInpaint:
|
||||
    @classmethod
    def INPUT_TYPES(s):
        # Input schema for the unified inpaint node.
        return {
            "required": {
                "pipe": ("PIPE_LINE",),
                "image": ("IMAGE",),
                "mask": ("MASK",),
                # Which inpainting backend to apply.
                "inpaint_mode": (('normal', 'fooocus_inpaint', 'brushnet_random', 'brushnet_segmentation', 'powerpaint'),),
                # How the image+mask pair is encoded into the latent.
                "encode": (('none', 'vae_encode_inpaint', 'inpaint_model_conditioning', 'different_diffusion'), {"default": "none"}),
                "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),
                "dtype": (['float16', 'bfloat16', 'float32', 'float64'],),
                # fitting/function are used by the powerpaint path; scale/start_at/
                # end_at by the brushnet/powerpaint paths (see applyBrushNet/applyPowerPaint).
                "fitting": ("FLOAT", {"default": 1.0, "min": 0.3, "max": 1.0}),
                "function": (['text guided', 'shape guided', 'object removal', 'context aware', 'image outpainting'],),
                "scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0}),
                "start_at": ("INT", {"default": 0, "min": 0, "max": 10000}),
                "end_at": ("INT", {"default": 10000, "min": 0, "max": 10000}),
            },
            "optional":{
                "noise_mask": ("BOOLEAN", {"default": True})
            }
        }

    RETURN_TYPES = ("PIPE_LINE",)
    RETURN_NAMES = ("pipe",)
    CATEGORY = "EasyUse/Inpaint"
    FUNCTION = "apply"
|
||||
|
||||
    def inpaint_model_conditioning(self, pipe, image, vae, mask, grow_mask_by, noise_mask=True):
        """Encode image+mask for an inpaint-conditioned model.

        Expands the mask, VAE-encodes both the untouched image (used as the
        sampling latent) and a grey-masked copy (used as the concat
        conditioning latent), and writes the resulting conditioning and
        latent back into ``pipe``. Returns the mutated pipe.
        """
        if grow_mask_by >0:
            mask, = GrowMask().expand_mask(mask, grow_mask_by, False)
        positive, negative, = pipe['positive'], pipe['negative']

        pixels = image
        # Largest width/height divisible by 8 (the VAE downscale factor).
        # NOTE(review): image appears to be NHWC (shape[1]=H, shape[2]=W,
        # last axis = channels, see the per-channel loop below) — confirm.
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        # Resize the mask to pixel resolution before cropping/encoding.
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])),
                                               size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        orig_pixels = pixels
        pixels = orig_pixels.clone()
        if pixels.shape[1] != x or pixels.shape[2] != y:
            # Center-crop both the image and the mask to the 8-aligned size.
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
            mask = mask[:, :, x_offset:x + x_offset, y_offset:y + y_offset]

        # Neutralize masked pixels to 0.5 grey so the VAE sees no content there.
        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:, :, :, i] -= 0.5
            pixels[:, :, :, i] *= m
            pixels[:, :, :, i] += 0.5
        concat_latent = vae.encode(pixels)
        orig_latent = vae.encode(orig_pixels)

        out_latent = {}

        out_latent["samples"] = orig_latent
        if noise_mask:
            out_latent["noise_mask"] = mask

        # Attach the masked latent + mask to both conditionings.
        out = []
        for conditioning in [positive, negative]:
            c = conditioning_set_values(conditioning, {"concat_latent_image": concat_latent,
                                                       "concat_mask": mask})
            out.append(c)

        pipe['positive'] = out[0]
        pipe['negative'] = out[1]
        pipe['samples'] = out_latent

        return pipe
|
||||
|
||||
def get_brushnet_model(self, type, model):
|
||||
model_type = 'sdxl' if isinstance(model.model.model_config, comfy.supported_models.SDXL) else 'sd1'
|
||||
if type == 'brushnet_random':
|
||||
brush_model = BRUSHNET_MODELS['random_mask'][model_type]['model_url']
|
||||
if model_type == 'sdxl':
|
||||
pattern = 'brushnet.random.mask.sdxl.*.(safetensors|bin)$'
|
||||
else:
|
||||
pattern = 'brushnet.random.mask.*.(safetensors|bin)$'
|
||||
elif type == 'brushnet_segmentation':
|
||||
brush_model = BRUSHNET_MODELS['segmentation_mask'][model_type]['model_url']
|
||||
if model_type == 'sdxl':
|
||||
pattern = 'brushnet.segmentation.mask.sdxl.*.(safetensors|bin)$'
|
||||
else:
|
||||
pattern = 'brushnet.segmentation.mask.*.(safetensors|bin)$'
|
||||
|
||||
|
||||
brushfile = [e for e in folder_paths.get_filename_list('inpaint') if re.search(pattern, e, re.IGNORECASE)]
|
||||
brushname = brushfile[0] if brushfile else None
|
||||
if not brushname:
|
||||
from urllib.parse import urlparse
|
||||
get_local_filepath(brush_model, INPAINT_DIR)
|
||||
parsed_url = urlparse(brush_model)
|
||||
brushname = os.path.basename(parsed_url.path)
|
||||
return brushname
|
||||
|
||||
def get_powerpaint_model(self, model):
|
||||
model_type = 'sdxl' if isinstance(model.model.model_config, comfy.supported_models.SDXL) else 'sd1'
|
||||
if model_type == 'sdxl':
|
||||
raise Exception("Powerpaint not supported for SDXL models")
|
||||
|
||||
powerpaint_model = POWERPAINT_MODELS['v2.1']['model_url']
|
||||
powerpaint_clip = POWERPAINT_MODELS['v2.1']['clip_url']
|
||||
|
||||
from urllib.parse import urlparse
|
||||
get_local_filepath(powerpaint_model, os.path.join(INPAINT_DIR, 'powerpaint'))
|
||||
model_parsed_url = urlparse(powerpaint_model)
|
||||
clip_parsed_url = urlparse(powerpaint_clip)
|
||||
model_name = os.path.join("powerpaint",os.path.basename(model_parsed_url.path))
|
||||
clip_name = os.path.join("powerpaint",os.path.basename(clip_parsed_url.path))
|
||||
return model_name, clip_name
|
||||
|
||||
    def apply(self, pipe, image, mask, inpaint_mode, encode, grow_mask_by, dtype, fitting, function, scale, start_at, end_at, noise_mask=True):
        """Dispatch the selected inpaint method and encode strategy onto a copy of the pipe.

        Args:
            pipe: incoming PIPE_LINE dict (shallow-copied; the input is not mutated).
            image, mask: inputs to the chosen inpaint/encode path.
            inpaint_mode: 'brushnet_random' | 'brushnet_segmentation' | 'powerpaint'
                | 'fooocus_inpaint' | others (passed through).
            encode: 'none' | 'vae_encode_inpaint' | 'inpaint_model_conditioning'
                | 'different_diffusion'.
            grow_mask_by: mask dilation in pixels for the encode step.
            dtype, fitting, function, scale, start_at, end_at: forwarded to the
                BrushNet / PowerPaint appliers.
            noise_mask: forwarded to inpaint_model_conditioning.

        Returns:
            One-tuple with the updated pipe.

        Raises:
            Exception: when 'different_diffusion' is requested but the
                DifferentialDiffusion node is not registered in this ComfyUI.
        """
        # Shallow copy so the caller's pipe is left untouched.
        new_pipe = {
            **pipe,
        }
        del pipe
        # Step 1: model-patching inpaint methods (BrushNet / PowerPaint).
        if inpaint_mode in ['brushnet_random', 'brushnet_segmentation']:
            brushnet = self.get_brushnet_model(inpaint_mode, new_pipe['model'])
            new_pipe, = applyBrushNet().apply(new_pipe, image, mask, brushnet, dtype, scale, start_at, end_at)
        elif inpaint_mode == 'powerpaint':
            powerpaint_model, powerpaint_clip = self.get_powerpaint_model(new_pipe['model'])
            new_pipe, = applyPowerPaint().apply(new_pipe, image, mask, powerpaint_model, powerpaint_clip, dtype, fitting, function, scale, start_at, end_at)

        # Step 2: latent/conditioning encode strategy. Fooocus inpaint patches the
        # model and, for 'none', relies on the pipe's existing samples.
        vae = new_pipe['vae']
        if encode == 'none':
            if inpaint_mode == 'fooocus_inpaint':
                model, = applyFooocusInpaint().apply(new_pipe['model'], new_pipe['samples'],
                                                     list(FOOOCUS_INPAINT_HEAD.keys())[0],
                                                     list(FOOOCUS_INPAINT_PATCH.keys())[0])
                new_pipe['model'] = model
        elif encode == 'vae_encode_inpaint':
            latent, = VAEEncodeForInpaint().encode(vae, image, mask, grow_mask_by)
            new_pipe['samples'] = latent
            if inpaint_mode == 'fooocus_inpaint':
                model, = applyFooocusInpaint().apply(new_pipe['model'], new_pipe['samples'],
                                                     list(FOOOCUS_INPAINT_HEAD.keys())[0],
                                                     list(FOOOCUS_INPAINT_PATCH.keys())[0])
                new_pipe['model'] = model
        elif encode == 'inpaint_model_conditioning':
            if inpaint_mode == 'fooocus_inpaint':
                # Mask growth already happened in the VAE-encode step, so the
                # conditioning pass uses grow_mask_by=0 to avoid dilating twice.
                latent, = VAEEncodeForInpaint().encode(vae, image, mask, grow_mask_by)
                new_pipe['samples'] = latent
                model, = applyFooocusInpaint().apply(new_pipe['model'], new_pipe['samples'],
                                                     list(FOOOCUS_INPAINT_HEAD.keys())[0],
                                                     list(FOOOCUS_INPAINT_PATCH.keys())[0])
                new_pipe['model'] = model
                new_pipe = self.inpaint_model_conditioning(new_pipe, image, vae, mask, 0, noise_mask=noise_mask)
            else:
                new_pipe = self.inpaint_model_conditioning(new_pipe, image, vae, mask, grow_mask_by, noise_mask=noise_mask)
        elif encode == 'different_diffusion':
            # Same as 'inpaint_model_conditioning', then additionally patch the
            # model with DifferentialDiffusion (a core ComfyUI node).
            if inpaint_mode == 'fooocus_inpaint':
                latent, = VAEEncodeForInpaint().encode(vae, image, mask, grow_mask_by)
                new_pipe['samples'] = latent
                model, = applyFooocusInpaint().apply(new_pipe['model'], new_pipe['samples'],
                                                     list(FOOOCUS_INPAINT_HEAD.keys())[0],
                                                     list(FOOOCUS_INPAINT_PATCH.keys())[0])
                new_pipe['model'] = model
                new_pipe = self.inpaint_model_conditioning(new_pipe, image, vae, mask, 0, noise_mask=noise_mask)
            else:
                new_pipe = self.inpaint_model_conditioning(new_pipe, image, vae, mask, grow_mask_by, noise_mask=noise_mask)
            cls = ALL_NODE_CLASS_MAPPINGS['DifferentialDiffusion']
            if cls is not None:
                # Entry-point name differs across ComfyUI versions: try the newer
                # execute() first, fall back to apply().
                try:
                    model, = cls().execute(new_pipe['model'])
                except Exception:
                    model, = cls().apply(new_pipe['model'])
                new_pipe['model'] = model
            else:
                raise Exception("Differential Diffusion not found,please update comfyui")

        return (new_pipe,)
|
||||
|
||||
# Registration table consumed by ComfyUI: node identifier -> implementing class.
NODE_CLASS_MAPPINGS = {
    "easy applyFooocusInpaint": applyFooocusInpaint,
    "easy applyBrushNet": applyBrushNet,
    "easy applyPowerPaint": applyPowerPaint,
    "easy applyInpaint": applyInpaint
}
|
||||
|
||||
# Human-readable titles shown in the ComfyUI node picker, keyed by node identifier.
NODE_DISPLAY_NAME_MAPPINGS = {
    "easy applyFooocusInpaint": "Easy Apply Fooocus Inpaint",
    "easy applyBrushNet": "Easy Apply BrushNet",
    "easy applyPowerPaint": "Easy Apply PowerPaint",
    "easy applyInpaint": "Easy Apply Inpaint"
}
|
||||
1565
custom_nodes/ComfyUI-Easy-Use/py/nodes/loaders.py
Normal file
1565
custom_nodes/ComfyUI-Easy-Use/py/nodes/loaders.py
Normal file
File diff suppressed because it is too large
Load Diff
2013
custom_nodes/ComfyUI-Easy-Use/py/nodes/logic.py
Executable file
2013
custom_nodes/ComfyUI-Easy-Use/py/nodes/logic.py
Executable file
File diff suppressed because it is too large
Load Diff
778
custom_nodes/ComfyUI-Easy-Use/py/nodes/pipe.py
Normal file
778
custom_nodes/ComfyUI-Easy-Use/py/nodes/pipe.py
Normal file
@@ -0,0 +1,778 @@
|
||||
import os
|
||||
import folder_paths
|
||||
import comfy.samplers, comfy.supported_models
|
||||
|
||||
from nodes import LatentFromBatch, RepeatLatentBatch
|
||||
from ..config import MAX_SEED_NUM
|
||||
|
||||
from ..libs.log import log_node_warn
|
||||
from ..libs.utils import get_sd_version
|
||||
from ..libs.conditioning import prompt_to_cond, set_cond
|
||||
|
||||
from .. import easyCache
|
||||
|
||||
# Pipe bundle input node
|
||||
class pipeIn:
    """Assemble or update a PIPE_LINE dict from individually wired inputs.

    Explicitly connected inputs take precedence over the corresponding entries
    of the incoming pipe; missing components are reported via log_node_warn.
    """

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {},
            "optional": {
                "pipe": ("PIPE_LINE",),
                "model": ("MODEL",),
                "pos": ("CONDITIONING",),
                "neg": ("CONDITIONING",),
                "latent": ("LATENT",),
                "vae": ("VAE",),
                "clip": ("CLIP",),
                "image": ("IMAGE",),
                "xyPlot": ("XYPLOT",),
            },
            "hidden": {"my_unique_id": "UNIQUE_ID"},
        }

    RETURN_TYPES = ("PIPE_LINE",)
    RETURN_NAMES = ("pipe",)
    FUNCTION = "flush"

    CATEGORY = "EasyUse/Pipe"

    def flush(self, pipe=None, model=None, pos=None, neg=None, latent=None, vae=None, clip=None, image=None, xyplot=None, my_unique_id=None):
        """Merge explicit inputs over the incoming pipe and return the new pipe.

        A supplied latent wins over a supplied image; a supplied image (without a
        latent) is VAE-encoded and repeated to the pipe's batch_size.
        """
        # Guard every pipe lookup against pipe=None so a partially wired node
        # warns instead of raising AttributeError.
        model = model if model is not None else (pipe.get("model") if pipe is not None else None)
        if model is None:
            log_node_warn(f'pipeIn[{my_unique_id}]', "Model missing from pipeLine")
        pos = pos if pos is not None else (pipe.get("positive") if pipe is not None else None)
        if pos is None:
            log_node_warn(f'pipeIn[{my_unique_id}]', "Pos Conditioning missing from pipeLine")
        neg = neg if neg is not None else (pipe.get("negative") if pipe is not None else None)
        if neg is None:
            log_node_warn(f'pipeIn[{my_unique_id}]', "Neg Conditioning missing from pipeLine")
        vae = vae if vae is not None else (pipe.get("vae") if pipe is not None else None)
        if vae is None:
            log_node_warn(f'pipeIn[{my_unique_id}]', "VAE missing from pipeLine")
        clip = clip if clip is not None else pipe.get("clip") if pipe is not None and "clip" in pipe else None
        # if clip is None:
        #     log_node_warn(f'pipeIn[{my_unique_id}]', "Clip missing from pipeLine")
        if latent is not None:
            samples = latent
        elif image is None:
            samples = pipe.get("samples") if pipe is not None else None
            image = pipe.get("images") if pipe is not None else None
        else:
            # An image (and no latent) was supplied: encode it to a latent batch.
            if pipe is None:
                batch_size = 1
            else:
                batch_size = pipe["loader_settings"]["batch_size"] if "batch_size" in pipe["loader_settings"] else 1
            samples = {"samples": vae.encode(image[:, :, :, :3])}
            samples = RepeatLatentBatch().repeat(samples, batch_size)[0]

        if pipe is None:
            pipe = {"loader_settings": {"positive": "", "negative": "", "xyplot": None}}

        # BUGFIX: the original tested `xyplot in pipe['loader_settings']`, i.e. it
        # looked the VALUE (None at that point) up as a dict key, so a stored
        # xyplot was never recovered from the pipe. Test the key name instead.
        xyplot = xyplot if xyplot is not None else pipe['loader_settings']['xyplot'] if 'xyplot' in pipe['loader_settings'] else None

        new_pipe = {
            **pipe,
            "model": model,
            "positive": pos,
            "negative": neg,
            "vae": vae,
            "clip": clip,

            "samples": samples,
            "images": image,
            "seed": pipe.get('seed') if pipe is not None and "seed" in pipe else None,

            "loader_settings": {
                **pipe["loader_settings"],
                "xyplot": xyplot
            }
        }
        del pipe

        return (new_pipe,)
|
||||
|
||||
# Pipe bundle output node
|
||||
class pipeOut:
    """Split a PIPE_LINE back into its individual components."""

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "pipe": ("PIPE_LINE",),
            },
            "hidden": {"my_unique_id": "UNIQUE_ID"},
        }

    RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT",)
    RETURN_NAMES = ("pipe", "model", "pos", "neg", "latent", "vae", "clip", "image", "seed",)
    FUNCTION = "flush"

    CATEGORY = "EasyUse/Pipe"

    def flush(self, pipe, my_unique_id=None):
        """Return the pipe itself followed by each component; missing entries are None."""
        component_keys = ("model", "positive", "negative", "samples",
                          "vae", "clip", "images", "seed")
        return (pipe, *(pipe.get(key) for key in component_keys))
|
||||
|
||||
# Edit the pipe bundle
|
||||
class pipeEdit:
    """Edit an existing PIPE_LINE: override individual components and/or re-encode
    the positive/negative prompts, merging new conditioning with the old one
    according to conditioning_mode."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "clip_skip": ("INT", {"default": -1, "min": -24, "max": 0, "step": 1}),

                "optional_positive": ("STRING", {"default": "", "multiline": True}),
                "positive_token_normalization": (["none", "mean", "length", "length+mean"],),
                "positive_weight_interpretation": (["comfy", "A1111", "comfy++", "compel", "fixed attention"],),

                "optional_negative": ("STRING", {"default": "", "multiline": True}),
                "negative_token_normalization": (["none", "mean", "length", "length+mean"],),
                "negative_weight_interpretation": (["comfy", "A1111", "comfy++", "compel", "fixed attention"],),

                "a1111_prompt_style": ("BOOLEAN", {"default": False}),
                "conditioning_mode": (['replace', 'concat', 'combine', 'average', 'timestep'], {"default": "replace"}),
                "average_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "old_cond_start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "old_cond_end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "new_cond_start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                "new_cond_end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
            },
            "optional": {
                "pipe": ("PIPE_LINE",),
                "model": ("MODEL",),
                "pos": ("CONDITIONING",),
                "neg": ("CONDITIONING",),
                "latent": ("LATENT",),
                "vae": ("VAE",),
                "clip": ("CLIP",),
                "image": ("IMAGE",),
            },
            "hidden": {"my_unique_id": "UNIQUE_ID", "prompt": "PROMPT"},
        }

    RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE")
    RETURN_NAMES = ("pipe", "model", "pos", "neg", "latent", "vae", "clip", "image")
    FUNCTION = "edit"

    CATEGORY = "EasyUse/Pipe"

    def edit(self, clip_skip, optional_positive, positive_token_normalization, positive_weight_interpretation, optional_negative, negative_token_normalization, negative_weight_interpretation, a1111_prompt_style, conditioning_mode, average_strength, old_cond_start, old_cond_end, new_cond_start, new_cond_end, pipe=None, model=None, pos=None, neg=None, latent=None, vae=None, clip=None, image=None, my_unique_id=None, prompt=None):
        """Merge explicit inputs over the pipe, optionally re-encoding prompts.

        NOTE(review): unlike pipeIn, these lookups are not guarded against
        pipe=None — a missing pipe with missing components raises AttributeError
        before the `if pipe is None` fallback below is reached; verify intent.
        """
        model = model if model is not None else pipe.get("model")
        if model is None:
            log_node_warn(f'pipeIn[{my_unique_id}]', "Model missing from pipeLine")
        vae = vae if vae is not None else pipe.get("vae")
        if vae is None:
            log_node_warn(f'pipeIn[{my_unique_id}]', "VAE missing from pipeLine")
        clip = clip if clip is not None else pipe.get("clip")
        if clip is None:
            log_node_warn(f'pipeIn[{my_unique_id}]', "Clip missing from pipeLine")
        if image is None:
            image = pipe.get("images") if pipe is not None else None
            samples = latent if latent is not None else pipe.get("samples")
            if samples is None:
                log_node_warn(f'pipeIn[{my_unique_id}]', "Latent missing from pipeLine")
        else:
            # A fresh image was wired in: encode it to a latent batch.
            batch_size = pipe["loader_settings"]["batch_size"] if "batch_size" in pipe["loader_settings"] else 1
            samples = {"samples": vae.encode(image[:, :, :, :3])}
            samples = RepeatLatentBatch().repeat(samples, batch_size)[0]

        pipe_lora_stack = pipe.get("lora_stack") if pipe is not None and "lora_stack" in pipe else []

        steps = pipe["loader_settings"]["steps"] if "steps" in pipe["loader_settings"] else 1
        # Re-encode the positive prompt only when no explicit conditioning was
        # wired in AND prompt text was provided; otherwise fall back to the pipe's.
        if pos is None and optional_positive != '':
            pos, positive_wildcard_prompt, model, clip = prompt_to_cond('positive', model, clip, clip_skip,
                pipe_lora_stack, optional_positive, positive_token_normalization, positive_weight_interpretation,
                a1111_prompt_style, my_unique_id, prompt, easyCache, True, steps)
            # Merge the freshly encoded conditioning with the pipe's old one.
            pos = set_cond(pipe['positive'], pos, conditioning_mode, average_strength, old_cond_start, old_cond_end, new_cond_start, new_cond_end)
            pipe['loader_settings']['positive'] = positive_wildcard_prompt
            pipe['loader_settings']['positive_token_normalization'] = positive_token_normalization
            pipe['loader_settings']['positive_weight_interpretation'] = positive_weight_interpretation
            if a1111_prompt_style:
                pipe['loader_settings']['a1111_prompt_style'] = True
        else:
            pos = pipe.get("positive")
            if pos is None:
                log_node_warn(f'pipeIn[{my_unique_id}]', "Pos Conditioning missing from pipeLine")

        if neg is None and optional_negative != '':
            neg, negative_wildcard_prompt, model, clip = prompt_to_cond("negative", model, clip, clip_skip, pipe_lora_stack, optional_negative,
                negative_token_normalization, negative_weight_interpretation,
                a1111_prompt_style, my_unique_id, prompt, easyCache, True, steps)
            neg = set_cond(pipe['negative'], neg, conditioning_mode, average_strength, old_cond_start, old_cond_end, new_cond_start, new_cond_end)
            pipe['loader_settings']['negative'] = negative_wildcard_prompt
            pipe['loader_settings']['negative_token_normalization'] = negative_token_normalization
            pipe['loader_settings']['negative_weight_interpretation'] = negative_weight_interpretation
            if a1111_prompt_style:
                pipe['loader_settings']['a1111_prompt_style'] = True
        else:
            neg = pipe.get("negative")
            if neg is None:
                log_node_warn(f'pipeIn[{my_unique_id}]', "Neg Conditioning missing from pipeLine")
        if pipe is None:
            pipe = {"loader_settings": {"positive": "", "negative": "", "xyplot": None}}

        new_pipe = {
            **pipe,
            "model": model,
            "positive": pos,
            "negative": neg,
            "vae": vae,
            "clip": clip,

            "samples": samples,
            "images": image,
            "seed": pipe.get('seed') if pipe is not None and "seed" in pipe else None,
            "loader_settings": {
                **pipe["loader_settings"]
            }
        }
        del pipe

        # NOTE(review): returns the raw `latent` input (possibly None) rather
        # than the computed `samples` stored in the pipe — confirm this is the
        # intended contract for the "latent" output socket.
        return (new_pipe, model, pos, neg, latent, vae, clip, image)
|
||||
|
||||
# Edit the pipe bundle's prompts
|
||||
class pipeEditPrompt:
    """Replace the positive/negative conditioning of a PIPE_LINE from prompt text,
    using the pipe's stored encoding settings (clip_skip, normalization, etc.)."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "pipe": ("PIPE_LINE",),
                "positive": ("STRING", {"default": "", "multiline": True}),
                "negative": ("STRING", {"default": "", "multiline": True}),
            },
            "hidden": {"my_unique_id": "UNIQUE_ID", "prompt": "PROMPT"},
        }

    RETURN_TYPES = ("PIPE_LINE",)
    RETURN_NAMES = ("pipe",)
    FUNCTION = "edit"

    CATEGORY = "EasyUse/Pipe"

    def edit(self, pipe, positive, negative, my_unique_id=None, prompt=None):
        """Encode *positive*/*negative* and return a pipe with the new conditioning.

        Kolors (ChatGLM3-based SDXL variants) use their own text encoder; every
        other model goes through the generic prompt_to_cond path with the
        settings previously stored in pipe['loader_settings'].
        """
        model = pipe.get("model")
        if model is None:
            log_node_warn(f'pipeEdit[{my_unique_id}]', "Model missing from pipeLine")

        from ..modules.kolors.loader import is_kolors_model
        model_type = get_sd_version(model)
        if model_type == 'sdxl' and is_kolors_model(model):
            from ..modules.kolors.text_encode import chatglm3_adv_text_encode
            auto_clean_gpu = pipe["loader_settings"]["auto_clean_gpu"] if "auto_clean_gpu" in pipe["loader_settings"] else False
            chatglm3_model = pipe["chatglm3_model"] if "chatglm3_model" in pipe else None
            # text encode
            # NOTE(review): log_node_warn is called with a single argument here
            # but with (tag, message) elsewhere in this file — confirm signature.
            log_node_warn("Positive encoding...")
            positive_embeddings_final = chatglm3_adv_text_encode(chatglm3_model, positive, auto_clean_gpu)
            log_node_warn("Negative encoding...")
            negative_embeddings_final = chatglm3_adv_text_encode(chatglm3_model, negative, auto_clean_gpu)
        else:
            # Recover the encoding settings captured when the pipe was loaded,
            # falling back to safe defaults when absent.
            clip_skip = pipe["loader_settings"]["clip_skip"] if "clip_skip" in pipe["loader_settings"] else -1
            lora_stack = pipe.get("lora_stack") if pipe is not None and "lora_stack" in pipe else []
            clip = pipe.get("clip") if pipe is not None and "clip" in pipe else None
            positive_token_normalization = pipe["loader_settings"]["positive_token_normalization"] if "positive_token_normalization" in pipe["loader_settings"] else "none"
            positive_weight_interpretation = pipe["loader_settings"]["positive_weight_interpretation"] if "positive_weight_interpretation" in pipe["loader_settings"] else "comfy"
            negative_token_normalization = pipe["loader_settings"]["negative_token_normalization"] if "negative_token_normalization" in pipe["loader_settings"] else "none"
            negative_weight_interpretation = pipe["loader_settings"]["negative_weight_interpretation"] if "negative_weight_interpretation" in pipe["loader_settings"] else "comfy"
            a1111_prompt_style = pipe["loader_settings"]["a1111_prompt_style"] if "a1111_prompt_style" in pipe["loader_settings"] else False
            # Prompt to Conditioning
            positive_embeddings_final, positive_wildcard_prompt, model, clip = prompt_to_cond('positive', model, clip,
                                                                                              clip_skip, lora_stack,
                                                                                              positive,
                                                                                              positive_token_normalization,
                                                                                              positive_weight_interpretation,
                                                                                              a1111_prompt_style,
                                                                                              my_unique_id, prompt,
                                                                                              easyCache,
                                                                                              model_type=model_type)
            negative_embeddings_final, negative_wildcard_prompt, model, clip = prompt_to_cond('negative', model, clip,
                                                                                              clip_skip, lora_stack,
                                                                                              negative,
                                                                                              negative_token_normalization,
                                                                                              negative_weight_interpretation,
                                                                                              a1111_prompt_style,
                                                                                              my_unique_id, prompt,
                                                                                              easyCache,
                                                                                              model_type=model_type)
        new_pipe = {
            **pipe,
            "model": model,
            "positive": positive_embeddings_final,
            "negative": negative_embeddings_final,
        }
        del pipe

        return (new_pipe,)
|
||||
|
||||
|
||||
# Pipe bundle to basic pipe (pipe to ComfyUI-Impact-Pack's basic_pipe)
|
||||
class pipeToBasicPipe:
    """Convert an EasyUse PIPE_LINE into an Impact-Pack BASIC_PIPE tuple."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "pipe": ("PIPE_LINE",),
            },
            "hidden": {"my_unique_id": "UNIQUE_ID"},
        }

    RETURN_TYPES = ("BASIC_PIPE",)
    RETURN_NAMES = ("basic_pipe",)
    FUNCTION = "doit"

    CATEGORY = "EasyUse/Pipe"

    def doit(self, pipe, my_unique_id=None):
        """BASIC_PIPE layout is (model, clip, vae, positive, negative)."""
        basic_keys = ('model', 'clip', 'vae', 'positive', 'negative')
        basic_pipe = tuple(pipe.get(key) for key in basic_keys)
        del pipe
        return (basic_pipe,)
|
||||
|
||||
# Batch index selection
|
||||
class pipeBatchIndex:
    """Select a slice of the latent batch stored in a PIPE_LINE."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"pipe": ("PIPE_LINE",),
                             "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                             "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                             },
                "hidden": {"my_unique_id": "UNIQUE_ID"},}

    RETURN_TYPES = ("PIPE_LINE",)
    RETURN_NAMES = ("pipe",)
    FUNCTION = "doit"

    CATEGORY = "EasyUse/Pipe"

    def doit(self, pipe, batch_index, length, my_unique_id=None):
        """Replace pipe['samples'] with the [batch_index, batch_index+length) slice."""
        sliced, = LatentFromBatch().frombatch(pipe["samples"], batch_index, length)
        updated_pipe = dict(pipe, samples=sliced)
        del pipe
        return (updated_pipe,)
|
||||
|
||||
# pipeXYPlot
|
||||
class pipeXYPlot:
    """Attach a simple XY-plot configuration (axis name + value list) to a pipe.

    The class body builds, at import time, the dropdown option list from the
    locally installed checkpoints/loras/vaes plus the sampler parameters.
    """
    # Candidate values for the loader-side axes.
    lora_list = ["None"] + folder_paths.get_filename_list("loras")
    lora_strengths = {"min": -4.0, "max": 4.0, "step": 0.01}
    token_normalization = ["none", "mean", "length", "length+mean"]
    weight_interpretation = ["comfy", "A1111", "compel", "comfy++"]

    loader_dict = {
        "ckpt_name": folder_paths.get_filename_list("checkpoints"),
        "vae_name": ["Baked-VAE"] + folder_paths.get_filename_list("vae"),
        "clip_skip": {"min": -24, "max": -1, "step": 1},
        "lora_name": lora_list,
        "lora_model_strength": lora_strengths,
        "lora_clip_strength": lora_strengths,
        "positive": [],
        "negative": [],
    }

    sampler_dict = {
        "steps": {"min": 1, "max": 100, "step": 1},
        "cfg": {"min": 0.0, "max": 100.0, "step": 1.0},
        "sampler_name": comfy.samplers.KSampler.SAMPLERS,
        "scheduler": comfy.samplers.KSampler.SCHEDULERS,
        "denoise": {"min": 0.0, "max": 1.0, "step": 0.01},
        "seed": {"min": 0, "max": MAX_SEED_NUM},
    }

    plot_dict = {**sampler_dict, **loader_dict}

    # Dropdown entries: "None", then sampler params, then loader params,
    # separated by divider rows.
    plot_values = ["None", ]
    plot_values.append("---------------------")
    for k in sampler_dict:
        plot_values.append(f'preSampling: {k}')
    plot_values.append("---------------------")
    for k in loader_dict:
        plot_values.append(f'loader: {k}')

    def __init__(self):
        pass

    # Axis selections that mean "no axis".
    rejected = ["None", "---------------------", "Nothing"]

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "grid_spacing": ("INT", {"min": 0, "max": 500, "step": 5, "default": 0, }),
                "output_individuals": (["False", "True"], {"default": "False"}),
                "flip_xy": (["False", "True"], {"default": "False"}),
                "x_axis": (pipeXYPlot.plot_values, {"default": 'None'}),
                "x_values": (
                    "STRING", {"default": '', "multiline": True, "placeholder": 'insert values seperated by "; "'}),
                "y_axis": (pipeXYPlot.plot_values, {"default": 'None'}),
                "y_values": (
                    "STRING", {"default": '', "multiline": True, "placeholder": 'insert values seperated by "; "'}),
            },
            "optional": {
                "pipe": ("PIPE_LINE",)
            },
            "hidden": {
                "plot_dict": (pipeXYPlot.plot_dict,),
            },
        }

    RETURN_TYPES = ("PIPE_LINE",)
    RETURN_NAMES = ("pipe",)
    FUNCTION = "plot"

    CATEGORY = "EasyUse/Pipe"

    def plot(self, grid_spacing, output_individuals, flip_xy, x_axis, x_values, y_axis, y_values, pipe=None, font_path=None):
        """Parse the axis value strings and store the xyplot config in the pipe.

        NOTE(review): if pipe is None the final `return (new_pipe, xy_plot,)`
        raises NameError (new_pipe unbound), and the two-element return does not
        match the single declared RETURN_TYPES entry — confirm both are intended.
        """
        def clean_values(values):
            # Split on "; ", strip stray semicolons/whitespace, drop empties,
            # and coerce numeric-looking entries to int/float.
            original_values = values.split("; ")
            cleaned_values = []

            for value in original_values:
                # Strip the semi-colon
                cleaned_value = value.strip(';').strip()

                if cleaned_value == "":
                    continue

                # Try to convert the cleaned_value back to int or float if possible
                try:
                    cleaned_value = int(cleaned_value)
                except ValueError:
                    try:
                        cleaned_value = float(cleaned_value)
                    except ValueError:
                        pass

                # Append the cleaned_value to the list
                cleaned_values.append(cleaned_value)

            return cleaned_values

        if x_axis in self.rejected:
            x_axis = "None"
            x_values = []
        else:
            x_values = clean_values(x_values)

        if y_axis in self.rejected:
            y_axis = "None"
            y_values = []
        else:
            y_values = clean_values(y_values)

        if flip_xy == "True":
            x_axis, y_axis = y_axis, x_axis
            x_values, y_values = y_values, x_values


        xy_plot = {"x_axis": x_axis,
                   "x_vals": x_values,
                   "y_axis": y_axis,
                   "y_vals": y_values,
                   "custom_font": font_path,
                   "grid_spacing": grid_spacing,
                   "output_individuals": output_individuals}

        if pipe is not None:
            new_pipe = pipe.copy()
            new_pipe['loader_settings'] = {
                **pipe['loader_settings'],
                "xyplot": xy_plot
            }
            del pipe
        return (new_pipe, xy_plot,)
|
||||
|
||||
# pipeXYPlotAdvanced
import platform

class pipeXYPlotAdvanced:
    """Advanced XY-plot pipe node.

    Translates the rich "advanced:" axis payloads (Lora/Checkpoint stacks,
    Seeds++ batches, prompt S/R, ControlNet stacks, conditioning stacks,
    ModelMergeBlocks) into `loader_settings` on a copy of the pipe, serializes
    the axis values to "; "-joined strings, then delegates to pipeXYPlot.plot.
    """

    # Platform-specific font directories used to populate the `font` combo.
    if platform.system() == "Windows":
        system_root = os.environ.get("SystemRoot")
        user_root = os.environ.get("USERPROFILE")
        font_dir = os.path.join(system_root, "Fonts") if system_root else None
        user_font_dir = os.path.join(user_root, "AppData", "Local", "Microsoft", "Windows", "Fonts") if user_root else None
    # Default debian-based Linux & MacOS font dirs
    elif platform.system() == "Linux":
        font_dir = "/usr/share/fonts/truetype"
        user_font_dir = None
    elif platform.system() == "Darwin":
        font_dir = "/System/Library/Fonts"
        user_font_dir = None
    else:
        font_dir = None
        user_font_dir = None

    @classmethod
    def INPUT_TYPES(s):
        # Collect .ttf files from the system and per-user font directories.
        files_list = []
        if s.font_dir and os.path.exists(s.font_dir):
            files_list = files_list + [f for f in os.listdir(s.font_dir) if os.path.isfile(os.path.join(s.font_dir, f)) and f.lower().endswith(".ttf")]
        if s.user_font_dir and os.path.exists(s.user_font_dir):
            files_list = files_list + [f for f in os.listdir(s.user_font_dir) if os.path.isfile(os.path.join(s.user_font_dir, f)) and f.lower().endswith(".ttf")]

        return {
            "required": {
                "pipe": ("PIPE_LINE",),
                "grid_spacing": ("INT", {"min": 0, "max": 500, "step": 5, "default": 0, }),
                "output_individuals": (["False", "True"], {"default": "False"}),
                "flip_xy": (["False", "True"], {"default": "False"}),
            },
            "optional": {
                "X": ("X_Y",),
                "Y": ("X_Y",),
                "font": (["None"] + files_list,)
            },
            "hidden": {"my_unique_id": "UNIQUE_ID"}
        }

    RETURN_TYPES = ("PIPE_LINE",)
    RETURN_NAMES = ("pipe",)
    FUNCTION = "plot"

    CATEGORY = "EasyUse/Pipe"

    def _resolve_font_path(self, font):
        """Resolve a font file name to an absolute path, or None.

        Fix: the original built `os.path.join(self.font_dir, font)` without
        guarding against `font is None` (the parameter's default) or a None
        `font_dir`/`user_font_dir` (non-Windows platforms, missing env vars),
        which raised TypeError. Falls back to the user font dir (without an
        existence check, matching the original behavior) when the system
        candidate is missing.
        """
        if not font or font == "None":
            return None
        if self.font_dir:
            candidate = os.path.join(self.font_dir, font)
            if os.path.exists(candidate):
                return candidate
        if self.user_font_dir:
            return os.path.join(self.user_font_dir, font)
        return None

    def _search_replace(self, base, entries):
        """Apply one (search, replace, replace_all) S/R entry list to `base`,
        returning the list of resulting prompt strings."""
        results = []
        for search_txt, replace_txt, replace_all in entries:
            if replace_all:
                results.append(replace_txt if replace_txt is not None else base)
            else:
                results.append(base.replace(search_txt, replace_txt, 1) if replace_txt is not None else base)
        return results

    def _apply_axis(self, axis, values, data, pipe, new_pipe, positive, negative):
        """Process one plot axis (X or Y).

        Mutates `new_pipe['loader_settings']` for the stack-style axes and
        returns the axis values serialized as a "; "-joined string (or the
        original list when no branch applies). Each loader_settings write
        spreads the ORIGINAL pipe's settings, matching the source behavior
        (a later axis overwrites an earlier axis's additions).
        """
        if axis == 'advanced: ModelMergeBlocks':
            models = data.get('models')
            vae_use = data.get('vae_use')
            if models is None:
                raise Exception("models is not found")
            new_pipe['loader_settings'] = {
                **pipe['loader_settings'],
                "models": models,
                "vae_use": vae_use
            }

        if axis in ['advanced: Lora', 'advanced: Checkpoint']:
            lora_stack = data.get('lora_stack')
            _lora_stack = []
            if lora_stack is not None:
                for lora in lora_stack:
                    _lora_stack.append(
                        {"lora_name": lora[0], "model": pipe['model'], "clip": pipe['clip'],
                         "model_strength": lora[1], "clip_strength": lora[2]})
            values = "; ".join(values)
            merged_stack = pipe['lora_stack'] + _lora_stack if 'lora_stack' in pipe else _lora_stack
            new_pipe['loader_settings'] = {
                **pipe['loader_settings'],
                "lora_stack": merged_stack,
            }

        if axis == 'advanced: Seeds++ Batch':
            if new_pipe['seed']:
                # `values` arrives as a count; expand to sequential seeds.
                count = values
                values = "; ".join(str(new_pipe['seed'] + index) for index in range(count))

        if axis == 'advanced: Positive Prompt S/R':
            if positive:
                values = "; ".join(self._search_replace(positive, values))

        if axis == 'advanced: Negative Prompt S/R':
            if negative:
                values = "; ".join(self._search_replace(negative, values))

        if "advanced: ControlNet" in axis:
            # Replace the values with their indices; the payloads go on the pipe.
            cnet = list(values)
            values = "; ".join(str(index) for index in range(len(cnet)))
            new_pipe['loader_settings'] = {
                **pipe['loader_settings'],
                "cnet_stack": cnet,
            }

        if "advanced: Pos Condition" in axis:
            values = "; ".join(values)
            new_pipe['loader_settings'] = {
                **pipe['loader_settings'],
                "positive_cond_stack": data.get('cond'),
            }

        if "advanced: Neg Condition" in axis:
            values = "; ".join(values)
            new_pipe['loader_settings'] = {
                **pipe['loader_settings'],
                "negative_cond_stack": data.get('cond'),
            }

        return values

    def plot(self, pipe, grid_spacing, output_individuals, flip_xy, X=None, Y=None, font=None, my_unique_id=None):
        """Prepare the pipe for XY plotting and delegate to pipeXYPlot.plot."""
        font_path = self._resolve_font_path(font)

        if X is not None:
            x_axis = X.get('axis')
            x_values = X.get('values')
        else:
            x_axis = "Nothing"
            x_values = [""]
        if Y is not None:
            y_axis = Y.get('axis')
            y_values = Y.get('values')
        else:
            y_axis = "Nothing"
            y_values = [""]

        if pipe is not None:
            new_pipe = pipe.copy()
            positive = pipe["loader_settings"]["positive"] if "positive" in pipe["loader_settings"] else ""
            negative = pipe["loader_settings"]["negative"] if "negative" in pipe["loader_settings"] else ""
            # X first, then Y — a Y-axis stack write overwrites X's, as before.
            x_values = self._apply_axis(x_axis, x_values, X, pipe, new_pipe, positive, negative)
            y_values = self._apply_axis(y_axis, y_values, Y, pipe, new_pipe, positive, negative)

        del pipe
        return pipeXYPlot().plot(grid_spacing, output_individuals, flip_xy, x_axis, x_values, y_axis, y_values, new_pipe, font_path)
|
||||
|
||||
|
||||
# Registry consumed by ComfyUI: maps this module's node ids to their classes.
NODE_CLASS_MAPPINGS = {
    "easy pipeIn": pipeIn,
    "easy pipeOut": pipeOut,
    "easy pipeEdit": pipeEdit,
    "easy pipeEditPrompt": pipeEditPrompt,
    "easy pipeToBasicPipe": pipeToBasicPipe,
    "easy pipeBatchIndex": pipeBatchIndex,
    "easy XYPlot": pipeXYPlot,
    "easy XYPlotAdvanced": pipeXYPlotAdvanced
}

# Human-readable labels shown in the ComfyUI node menu for each node id above.
NODE_DISPLAY_NAME_MAPPINGS = {
    "easy pipeIn": "Pipe In",
    "easy pipeOut": "Pipe Out",
    "easy pipeEdit": "Pipe Edit",
    "easy pipeEditPrompt": "Pipe Edit Prompt",
    "easy pipeBatchIndex": "Pipe Batch Index",
    "easy pipeToBasicPipe": "Pipe -> BasicPipe",
    "easy XYPlot": "XY Plot",
    "easy XYPlotAdvanced": "XY Plot Advanced"
}
|
||||
1002
custom_nodes/ComfyUI-Easy-Use/py/nodes/preSampling.py
Normal file
1002
custom_nodes/ComfyUI-Easy-Use/py/nodes/preSampling.py
Normal file
File diff suppressed because it is too large
Load Diff
790
custom_nodes/ComfyUI-Easy-Use/py/nodes/prompt.py
Normal file
790
custom_nodes/ComfyUI-Easy-Use/py/nodes/prompt.py
Normal file
@@ -0,0 +1,790 @@
|
||||
import json
|
||||
import os
|
||||
from urllib.request import urlopen
|
||||
import folder_paths
|
||||
|
||||
from .. import easyCache
|
||||
from ..config import FOOOCUS_STYLES_DIR, MAX_SEED_NUM, PROMPT_TEMPLATE, RESOURCES_DIR
|
||||
from ..libs.log import log_node_info
|
||||
from ..libs.wildcards import WildcardProcessor, get_wildcard_list, process
|
||||
|
||||
from comfy_api.latest import io
|
||||
|
||||
|
||||
# Positive prompt node: a simple pass-through for the positive prompt text.
class positivePrompt(io.ComfyNode):

    @classmethod
    def define_schema(cls):
        """One multiline string in, the same string out."""
        positive_input = io.String.Input("positive", default="", multiline=True, placeholder="Positive")
        positive_output = io.String.Output(id="output_positive", display_name="positive")
        return io.Schema(
            node_id="easy positive",
            category="EasyUse/Prompt",
            inputs=[positive_input],
            outputs=[positive_output],
        )

    @classmethod
    def execute(cls, positive):
        # Identity node: forward the prompt text downstream unchanged.
        return io.NodeOutput(positive)
|
||||
|
||||
# Wildcards prompt node: expands wildcard syntax in the prompt text.
class wildcardsPrompt(io.ComfyNode):

    @classmethod
    def define_schema(cls):
        """Text input plus helper combos (handled client-side) and a seed for wildcard expansion."""
        lora_options = ["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras")
        wildcard_options = ["Select the Wildcard to add to the text"] + get_wildcard_list()
        return io.Schema(
            node_id="easy wildcards",
            category="EasyUse/Prompt",
            inputs=[
                io.String.Input("text", default="", multiline=True, dynamic_prompts=False, placeholder="(Support wildcard)"),
                io.Combo.Input("Select to add LoRA", options=lora_options),
                io.Combo.Input("Select to add Wildcard", options=wildcard_options),
                io.Int.Input("seed", default=0, min=0, max=MAX_SEED_NUM),
                io.Boolean.Input("multiline_mode", default=False),
            ],
            outputs=[
                io.String.Output(id="output_text", display_name="text", is_output_list=True),
                io.String.Output(id="populated_text", display_name="populated_text", is_output_list=True),
            ],
            hidden=[
                io.Hidden.prompt,
                io.Hidden.extra_pnginfo,
                io.Hidden.unique_id,
            ],
        )

    @classmethod
    def execute(cls, text, seed, multiline_mode, **kwargs):
        """Return the raw text and its wildcard-expanded form, both as lists."""
        graph_prompt = cls.hidden.prompt

        # Clean loaded_objects: drop cached objects no longer used by the graph.
        if graph_prompt:
            easyCache.update_loaded_objects(graph_prompt)

        if multiline_mode:
            # Expand each line independently with the same seed.
            lines = text.split("\n")
            populated_text = [process(line, seed) for line in lines]
            text = lines
        else:
            populated_text = [process(text, seed)]
            text = [text]
        return io.NodeOutput(text, populated_text, ui={"value": [seed]})
|
||||
|
||||
# Wildcard prompt matrix: enumerates, in order, every prompt the wildcards in
# the text can produce.
class wildcardsPromptMatrix(io.ComfyNode):

    @classmethod
    def define_schema(cls):
        """Text input, an offset into the combination space, and an output limit (-1 = all)."""
        wildcard_list = get_wildcard_list()
        return io.Schema(
            node_id="easy wildcardsMatrix",
            category="EasyUse/Prompt",
            inputs=[
                io.String.Input("text", default="", multiline=True, dynamic_prompts=False, placeholder="(Support Lora Block Weight and wildcard)"),
                io.Combo.Input("Select to add LoRA", options=["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras")),
                io.Combo.Input("Select to add Wildcard", options=["Select the Wildcard to add to the text"] + wildcard_list),
                io.Int.Input("offset", default=0, min=0, max=MAX_SEED_NUM, step=1, control_after_generate=True),
                # Fix: tooltip typo "Probilities" -> "Probabilities".
                io.Int.Input("output_limit", default=1, min=-1, step=1, tooltip="Output All Probabilities", optional=True),
            ],
            outputs=[
                io.String.Output("populated_text", is_output_list=True),
                io.Int.Output("total"),
                io.Int.Output("factors", is_output_list=True),
            ],
            hidden=[
                io.Hidden.prompt,
                io.Hidden.extra_pnginfo,
                io.Hidden.unique_id,
            ],
        )

    @classmethod
    def execute(cls, text, offset, output_limit=1, **kwargs):
        """Return `limit` expanded prompts starting at `offset`, the total count,
        and the per-placeholder choice counts."""
        graph_prompt = cls.hidden.prompt
        # Clean loaded_objects: drop cached objects no longer used by the graph.
        if graph_prompt:
            easyCache.update_loaded_objects(graph_prompt)

        processor = WildcardProcessor(text)
        total = processor.total()
        # -1 (or a limit larger than the space) means "emit everything from the start".
        limit = total if output_limit > total or output_limit == -1 else output_limit
        offset = 0 if output_limit == -1 else offset
        populated_text = processor.getmany(limit, offset) if output_limit != 1 else [processor.getn(offset)]
        return io.NodeOutput(populated_text, processor.total(), list(processor.placeholder_choices.values()), ui={"value": [offset]})
|
||||
|
||||
# Negative prompt node: a simple pass-through for the negative prompt text.
class negativePrompt(io.ComfyNode):

    @classmethod
    def define_schema(cls):
        """One multiline string in, the same string out."""
        negative_input = io.String.Input("negative", default="", multiline=True, placeholder="Negative")
        negative_output = io.String.Output(id="output_negative", display_name="negative")
        return io.Schema(
            node_id="easy negative",
            category="EasyUse/Prompt",
            inputs=[negative_input],
            outputs=[negative_output],
        )

    @classmethod
    def execute(cls, negative):
        # Identity node: forward the prompt text downstream unchanged.
        return io.NodeOutput(negative)
|
||||
|
||||
# Styles prompt selector: merges the selected style templates into the
# positive/negative prompt strings.
class stylesPromptSelector(io.ComfyNode):

    @classmethod
    def define_schema(cls):
        """List the available style files: the bundled fooocus_styles plus any
        other *.json found in the user styles directory."""
        styles = ["fooocus_styles"]
        styles_dir = FOOOCUS_STYLES_DIR
        for file_name in os.listdir(styles_dir):
            file = os.path.join(styles_dir, file_name)
            if os.path.isfile(file) and file_name.endswith(".json") and file_name != "fooocus_styles.json":
                styles.append(file_name.split(".")[0])

        return io.Schema(
            node_id="easy stylesSelector",
            category="EasyUse/Prompt",
            inputs=[
                io.Combo.Input("styles", options=styles, default="fooocus_styles"),
                io.String.Input("positive", default="", force_input=True, optional=True),
                io.String.Input("negative", default="", force_input=True, optional=True),
                io.Custom(io_type="EASY_PROMPT_STYLES").Input("select_styles", optional=True),
            ],
            outputs=[
                io.String.Output(id="output_positive", display_name="positive"),
                io.String.Output(id="output_negative", display_name="negative"),
            ],
            hidden=[
                io.Hidden.prompt,
                io.Hidden.extra_pnginfo,
                io.Hidden.unique_id,
            ],
        )

    @classmethod
    def execute(cls, styles, positive='', negative='', select_styles=None, **kwargs):
        """Apply the selected styles to the incoming prompts.

        A style with "{prompt}" in its template absorbs the user's positive
        prompt (first such style wins); other styles are comma-appended.
        Negative templates are always comma-appended.
        """
        positive_prompt, negative_prompt = '', negative

        # Prefer the user's styles dir; fall back to the bundled resource file
        # when no custom fooocus_styles.json exists.
        fooocus_custom_dir = os.path.join(FOOOCUS_STYLES_DIR, 'fooocus_styles.json')
        if styles == "fooocus_styles" and not os.path.exists(fooocus_custom_dir):
            file = os.path.join(RESOURCES_DIR, styles + '.json')
        else:
            file = os.path.join(FOOOCUS_STYLES_DIR, styles + '.json')
        # Fix: use a context manager so the handle is closed even if json.load raises
        # (the original open()/close() pair leaked the handle on a parse error).
        with open(file, 'r', encoding='utf-8') as f:
            data = json.load(f)
        all_styles = {d['name']: d for d in data}

        if isinstance(select_styles, str):
            values = select_styles.split(',')
        else:
            values = select_styles if select_styles else []

        if len(values) == 0:
            # Nothing selected: pass the prompts through untouched.
            return io.NodeOutput(positive, negative)

        has_prompt = False
        for val in values:
            if val not in all_styles:
                continue
            style = all_styles[val]
            if 'prompt' in style:
                if "{prompt}" in style['prompt'] and not has_prompt:
                    # First template with a {prompt} slot absorbs the user prompt.
                    positive_prompt = style['prompt'].replace('{prompt}', positive)
                    has_prompt = True
                elif "{prompt}" in style['prompt']:
                    # Later templated styles: append with the slot stripped.
                    positive_prompt += ', ' + style['prompt'].replace(', {prompt}', '').replace('{prompt}', '')
                else:
                    positive_prompt = style['prompt'] if positive_prompt == '' else positive_prompt + ', ' + style['prompt']
            if 'negative_prompt' in style:
                negative_prompt += ', ' + style['negative_prompt'] if negative_prompt else style['negative_prompt']

        if not has_prompt and positive:
            # No template consumed the user prompt: prepend it verbatim.
            positive_prompt = positive + positive_prompt + ', '

        return io.NodeOutput(positive_prompt, negative_prompt)
|
||||
|
||||
#prompt
# Prompt builder node: the template combos are handled client-side (selecting an
# entry appends it to `text`); the backend simply passes the text through.
class prompt(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="easy prompt",
            category="EasyUse/Prompt",
            inputs=[
                io.String.Input("text", default="", multiline=True, placeholder="Prompt"),
                # Each combo's first entry is its placeholder/default.
                io.Combo.Input("prefix", options=["Select the prefix add to the text"] + PROMPT_TEMPLATE["prefix"], default="Select the prefix add to the text"),
                io.Combo.Input("subject", options=["👤Select the subject add to the text"] + PROMPT_TEMPLATE["subject"], default="👤Select the subject add to the text"),
                io.Combo.Input("action", options=["🎬Select the action add to the text"] + PROMPT_TEMPLATE["action"], default="🎬Select the action add to the text"),
                io.Combo.Input("clothes", options=["👚Select the clothes add to the text"] + PROMPT_TEMPLATE["clothes"], default="👚Select the clothes add to the text"),
                io.Combo.Input("environment", options=["☀️Select the illumination environment add to the text"] + PROMPT_TEMPLATE["environment"], default="☀️Select the illumination environment add to the text"),
                io.Combo.Input("background", options=["🎞️Select the background add to the text"] + PROMPT_TEMPLATE["background"], default="🎞️Select the background add to the text"),
                # Fix: the default previously carried an emoji variation selector
                # (U+FE0F) that the option string lacks, so the default value was
                # not a member of its own options list.
                io.Combo.Input("nsfw", options=["🔞Select the nsfw add to the text"] + PROMPT_TEMPLATE["nsfw"], default="🔞Select the nsfw add to the text"),
            ],
            outputs=[
                io.String.Output("prompt"),
            ],
            hidden=[
                io.Hidden.prompt,
                io.Hidden.extra_pnginfo,
                io.Hidden.unique_id,
            ],
        )

    @classmethod
    def execute(cls, text, **kwargs):
        """Return the assembled prompt text unchanged (the combos act purely client-side)."""
        return io.NodeOutput(text)
|
||||
|
||||
#promptList
# Collects up to five prompt strings (plus an optional incoming list) into one list.
class promptList(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        text_inputs = [
            io.String.Input("prompt_1", multiline=True, default=""),
            io.String.Input("prompt_2", multiline=True, default=""),
            io.String.Input("prompt_3", multiline=True, default=""),
            io.String.Input("prompt_4", multiline=True, default=""),
            io.String.Input("prompt_5", multiline=True, default=""),
        ]
        return io.Schema(
            node_id="easy promptList",
            category="EasyUse/Prompt",
            inputs=text_inputs + [io.Custom(io_type="LIST").Input("optional_prompt_list", optional=True)],
            outputs=[
                io.Custom(io_type="LIST").Output("prompt_list"),
                io.String.Output("prompt_strings", is_output_list=True),
            ],
        )

    @classmethod
    def execute(cls, prompt_1="", prompt_2="", prompt_3="", prompt_4="", prompt_5="", optional_prompt_list=None, **kwargs):
        """Return the merged prompt list (incoming list first, then non-empty fields)."""
        collected = list(optional_prompt_list) if optional_prompt_list else []

        # Append each non-empty individual prompt in field order.
        collected.extend(
            candidate
            for candidate in (prompt_1, prompt_2, prompt_3, prompt_4, prompt_5)
            if isinstance(candidate, str) and candidate != ''
        )

        return io.NodeOutput(collected, collected)
|
||||
|
||||
#promptLine
# Slices a multiline prompt into a window of rows starting at start_index.
class promptLine(io.ComfyNode):

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="easy promptLine",
            category="EasyUse/Prompt",
            inputs=[
                io.String.Input("prompt", multiline=True, default="text"),
                io.Int.Input("start_index", default=0, min=0, max=9999),
                io.Int.Input("max_rows", default=1000, min=1, max=9999),
                io.Boolean.Input("remove_empty_lines", default=True),
            ],
            outputs=[
                io.String.Output("STRING", is_output_list=True),
                io.Combo.Output("COMBO", is_output_list=True),
            ],
            hidden=[
                io.Hidden.prompt,
                io.Hidden.unique_id,
            ],
        )

    @classmethod
    def execute(cls, prompt, start_index, max_rows, remove_empty_lines=True, **kwargs):
        """Return up to max_rows lines of the prompt, starting at start_index (clamped)."""
        all_rows = prompt.split('\n')
        if remove_empty_lines:
            all_rows = [row for row in all_rows if row.strip()]

        # Clamp the window to the available rows.
        first = max(0, min(start_index, len(all_rows) - 1))
        last = min(first + max_rows, len(all_rows))
        window = all_rows[first:last]

        return io.NodeOutput(window, window)
|
||||
|
||||
import comfy.utils
|
||||
from server import PromptServer
|
||||
from ..libs.messages import MessageCancelled, Message
|
||||
class promptAwait(io.ComfyNode):
    """Pauses graph execution until the user responds via the frontend await
    dialog, then forwards either the current or previous value plus the
    (possibly edited) prompt, a continue flag, and a seed."""

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="easy promptAwait",
            category="EasyUse/Prompt",
            inputs=[
                io.AnyType.Input("now"),
                io.String.Input("prompt", multiline=True, default="", placeholder="Enter a prompt or use voice to enter to text"),
                io.Custom(io_type="EASY_PROMPT_AWAIT_BAR").Input("toolbar"),
                io.AnyType.Input("prev", optional=True),
            ],
            outputs=[
                io.AnyType.Output(id="output", display_name="output"),
                io.String.Output(id="output_prompt", display_name="prompt"),
                io.Boolean.Output("continue"),
                io.Int.Output("seed"),
            ],
            hidden=[
                io.Hidden.prompt,
                io.Hidden.unique_id,
                io.Hidden.extra_pnginfo,
            ],
        )

    @classmethod
    def execute(cls, now, prompt, toolbar, prev=None, **kwargs):
        # Normalize the unique id: take the last dotted segment and strip any
        # ":suffix" so it matches the id the frontend dialog reports back with.
        id = cls.hidden.unique_id
        id = id.split('.')[len(id.split('.')) - 1] if "." in id else id
        if ":" in id:
            id = id.split(":")[0]
        pbar = comfy.utils.ProgressBar(100)
        pbar.update_absolute(30)
        # Ask the frontend to open the await dialog for this node.
        PromptServer.instance.send_sync('easyuse_prompt_await', {"id": id})
        try:
            # Block until the user responds (or the wait is cancelled).
            res = Message.waitForMessage(id, asList=False)
            if res is None or res == "-1":
                # No answer / dismissed: pass `now` through and signal "do not continue".
                result = (now, prompt, False, 0)
            else:
                # User chose which input to forward; res['result'] == -1 means stop.
                input = now if res['select'] == 'now' or prev is None else prev
                result = (input, res['prompt'], False if res['result'] == -1 else True, res['seed'] if res['unlock'] else res['last_seed'])
            pbar.update_absolute(100)
            return io.NodeOutput(*result)
        except MessageCancelled:
            pbar.update_absolute(100)
            # NOTE(review): only `comfy.utils` is imported above — this assumes
            # `comfy.model_management` has been loaded elsewhere by the host
            # application; confirm it is reachable here.
            raise comfy.model_management.InterruptProcessingException()
|
||||
|
||||
# Joins two prompt strings with a separator.
class promptConcat(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="easy promptConcat",
            category="EasyUse/Prompt",
            inputs=[
                io.String.Input("prompt1", multiline=False, default="", force_input=True, optional=True),
                io.String.Input("prompt2", multiline=False, default="", force_input=True, optional=True),
                io.String.Input("separator", multiline=False, default="", optional=True),
            ],
            outputs=[
                io.String.Output("prompt"),
            ],
        )

    @classmethod
    def execute(cls, prompt1="", prompt2="", separator=""):
        """Return prompt1 + separator + prompt2."""
        merged = separator.join((prompt1, prompt2))
        return io.NodeOutput(merged)
|
||||
|
||||
# Applies up to three find/replace substitutions to the prompt, in order.
class promptReplace(io.ComfyNode):

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="easy promptReplace",
            category="EasyUse/Prompt",
            inputs=[
                io.String.Input("prompt", multiline=True, default="", force_input=True),
                io.String.Input("find1", multiline=False, default="", optional=True),
                io.String.Input("replace1", multiline=False, default="", optional=True),
                io.String.Input("find2", multiline=False, default="", optional=True),
                io.String.Input("replace2", multiline=False, default="", optional=True),
                io.String.Input("find3", multiline=False, default="", optional=True),
                io.String.Input("replace3", multiline=False, default="", optional=True),
            ],
            outputs=[
                io.String.Output(id="output_prompt",display_name="prompt"),
            ],
        )

    @classmethod
    def execute(cls, prompt, find1="", replace1="", find2="", replace2="", find3="", replace3=""):
        """Run the three substitution pairs sequentially over the prompt text."""
        for needle, replacement in ((find1, replace1), (find2, replace2), (find3, replace3)):
            prompt = prompt.replace(needle, replacement)

        return io.NodeOutput(prompt)
|
||||
|
||||
|
||||
# 肖像大师
|
||||
# Created by AI Wiz Art (Stefano Flore)
|
||||
# Version: 2.2
|
||||
# https://stefanoflore.it
|
||||
# https://ai-wiz.art
|
||||
class portraitMaster(io.ComfyNode):
|
||||
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
max_float_value = 1.95
|
||||
prompt_path = os.path.join(RESOURCES_DIR, 'portrait_prompt.json')
|
||||
if not os.path.exists(prompt_path):
|
||||
response = urlopen('https://raw.githubusercontent.com/yolain/ComfyUI-Easy-Use/main/resources/portrait_prompt.json')
|
||||
temp_prompt = json.loads(response.read())
|
||||
prompt_serialized = json.dumps(temp_prompt, indent=4)
|
||||
with open(prompt_path, "w") as f:
|
||||
f.write(prompt_serialized)
|
||||
del response, temp_prompt
|
||||
# Load local
|
||||
with open(prompt_path, 'r') as f:
|
||||
data = json.load(f)
|
||||
|
||||
inputs = []
|
||||
# Shot
|
||||
inputs.append(io.Combo.Input("shot", options=['-'] + data['shot_list']))
|
||||
inputs.append(io.Float.Input("shot_weight", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
# Gender and age
|
||||
inputs.append(io.Combo.Input("gender", options=['-'] + data['gender_list'], default="Woman"))
|
||||
inputs.append(io.Int.Input("age", default=30, min=18, max=90, step=1, display_mode=io.NumberDisplay.slider))
|
||||
# Nationality
|
||||
inputs.append(io.Combo.Input("nationality_1", options=['-'] + data['nationality_list'], default="Chinese"))
|
||||
inputs.append(io.Combo.Input("nationality_2", options=['-'] + data['nationality_list']))
|
||||
inputs.append(io.Float.Input("nationality_mix", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
# Body
|
||||
inputs.append(io.Combo.Input("body_type", options=['-'] + data['body_type_list']))
|
||||
inputs.append(io.Float.Input("body_type_weight", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Combo.Input("model_pose", options=['-'] + data['model_pose_list']))
|
||||
inputs.append(io.Combo.Input("eyes_color", options=['-'] + data['eyes_color_list']))
|
||||
# Face
|
||||
inputs.append(io.Combo.Input("facial_expression", options=['-'] + data['face_expression_list']))
|
||||
inputs.append(io.Float.Input("facial_expression_weight", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Combo.Input("face_shape", options=['-'] + data['face_shape_list']))
|
||||
inputs.append(io.Float.Input("face_shape_weight", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Float.Input("facial_asymmetry", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
# Hair
|
||||
inputs.append(io.Combo.Input("hair_style", options=['-'] + data['hair_style_list']))
|
||||
inputs.append(io.Combo.Input("hair_color", options=['-'] + data['hair_color_list']))
|
||||
inputs.append(io.Float.Input("disheveled", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Combo.Input("beard", options=['-'] + data['beard_list']))
|
||||
# Skin details
|
||||
inputs.append(io.Float.Input("skin_details", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Float.Input("skin_pores", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Float.Input("dimples", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Float.Input("freckles", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Float.Input("moles", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Float.Input("skin_imperfections", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Float.Input("skin_acne", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Float.Input("tanned_skin", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
# Eyes
|
||||
inputs.append(io.Float.Input("eyes_details", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Float.Input("iris_details", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Float.Input("circular_iris", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
inputs.append(io.Float.Input("circular_pupil", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
# Light
|
||||
inputs.append(io.Combo.Input("light_type", options=['-'] + data['light_type_list']))
|
||||
inputs.append(io.Combo.Input("light_direction", options=['-'] + data['light_direction_list']))
|
||||
inputs.append(io.Float.Input("light_weight", default=0, step=0.05, min=0, max=max_float_value, display_mode=io.NumberDisplay.slider))
|
||||
# Additional
|
||||
inputs.append(io.Combo.Input("photorealism_improvement", options=["enable", "disable"]))
|
||||
inputs.append(io.String.Input("prompt_start", multiline=True, default="raw photo, (realistic:1.5)"))
|
||||
inputs.append(io.String.Input("prompt_additional", multiline=True, default=""))
|
||||
inputs.append(io.String.Input("prompt_end", multiline=True, default=""))
|
||||
inputs.append(io.String.Input("negative_prompt", multiline=True, default=""))
|
||||
|
||||
return io.Schema(
|
||||
node_id="easy portraitMaster",
|
||||
category="EasyUse/Prompt",
|
||||
inputs=inputs,
|
||||
outputs=[
|
||||
io.String.Output("positive"),
|
||||
io.String.Output("negative"),
|
||||
],
|
||||
)
|
||||
|
||||
@classmethod
def execute(cls, shot="-", shot_weight=1, gender="-", body_type="-", body_type_weight=0, eyes_color="-",
            facial_expression="-", facial_expression_weight=0, face_shape="-", face_shape_weight=0,
            nationality_1="-", nationality_2="-", nationality_mix=0.5, age=30, hair_style="-", hair_color="-",
            disheveled=0, dimples=0, freckles=0, skin_pores=0, skin_details=0, moles=0, skin_imperfections=0,
            wrinkles=0, tanned_skin=0, eyes_details=1, iris_details=1, circular_iris=1, circular_pupil=1,
            facial_asymmetry=0, prompt_additional="", prompt_start="", prompt_end="", light_type="-",
            light_direction="-", light_weight=0, negative_prompt="", photorealism_improvement="disable", beard="-",
            model_pose="-", skin_acne=0):
    """Assemble a weighted portrait prompt from the selected attributes.

    Options left at "-" or with a zero weight are skipped; each active
    attribute becomes a "(text:weight)" fragment. Fragments are joined with
    commas and lower-cased. Returns io.NodeOutput(positive, negative).
    """
    prompt = []

    # Under-25 subjects are re-worded as girl/boy instead of Woman/Man.
    if gender == "-":
        gender = ""
    else:
        if age <= 25 and gender == 'Woman':
            gender = 'girl'
        if age <= 25 and gender == 'Man':
            gender = 'boy'
        gender = " " + gender + " "

    # Two nationalities use "[a:b:mix]" prompt-editing syntax; one is used as-is.
    if nationality_1 != '-' and nationality_2 != '-':
        nationality = f"[{nationality_1}:{nationality_2}:{round(nationality_mix, 2)}]"
    elif nationality_1 != '-':
        nationality = nationality_1 + " "
    elif nationality_2 != '-':
        nationality = nationality_2 + " "
    else:
        nationality = ""

    if prompt_start != "":
        prompt.append(f"{prompt_start}")

    if shot != "-" and shot_weight > 0:
        prompt.append(f"({shot}:{round(shot_weight, 2)})")

    prompt.append(f"({nationality}{gender}{round(age)}-years-old:1.5)")

    if body_type != "-" and body_type_weight > 0:
        prompt.append(f"({body_type}, {body_type} body:{round(body_type_weight, 2)})")

    if model_pose != "-":
        prompt.append(f"({model_pose}:1.5)")

    if eyes_color != "-":
        prompt.append(f"({eyes_color} eyes:1.25)")

    if facial_expression != "-" and facial_expression_weight > 0:
        prompt.append(
            f"({facial_expression}, {facial_expression} expression:{round(facial_expression_weight, 2)})")

    if face_shape != "-" and face_shape_weight > 0:
        prompt.append(f"({face_shape} shape face:{round(face_shape_weight, 2)})")

    if hair_style != "-":
        prompt.append(f"({hair_style} hairstyle:1.25)")

    if hair_color != "-":
        prompt.append(f"({hair_color} hair:1.25)")

    if beard != "-":
        prompt.append(f"({beard}:1.15)")

    # disheveled is a float slider; the original also compared it to "-",
    # which is always True for a number, so the check is dropped.
    if disheveled > 0:
        prompt.append(f"(disheveled:{round(disheveled, 2)})")

    if prompt_additional != "":
        prompt.append(f"{prompt_additional}")

    if skin_details > 0:
        prompt.append(f"(skin details, skin texture:{round(skin_details, 2)})")

    if skin_pores > 0:
        prompt.append(f"(skin pores:{round(skin_pores, 2)})")

    if skin_imperfections > 0:
        prompt.append(f"(skin imperfections:{round(skin_imperfections, 2)})")

    if skin_acne > 0:
        prompt.append(f"(acne, skin with acne:{round(skin_acne, 2)})")

    # FIX: the original emitted "skin imperfections" for the wrinkles slider
    # and "skin pores" for the moles slider (copy-paste defects), so those
    # sliders just duplicated other attributes instead of adding their own.
    if wrinkles > 0:
        prompt.append(f"(wrinkles:{round(wrinkles, 2)})")

    if tanned_skin > 0:
        prompt.append(f"(tanned skin:{round(tanned_skin, 2)})")

    if dimples > 0:
        prompt.append(f"(dimples:{round(dimples, 2)})")

    if freckles > 0:
        prompt.append(f"(freckles:{round(freckles, 2)})")

    if moles > 0:
        prompt.append(f"(moles:{round(moles, 2)})")

    if eyes_details > 0:
        prompt.append(f"(eyes details:{round(eyes_details, 2)})")

    if iris_details > 0:
        prompt.append(f"(iris details:{round(iris_details, 2)})")

    if circular_iris > 0:
        prompt.append(f"(circular iris:{round(circular_iris, 2)})")

    if circular_pupil > 0:
        prompt.append(f"(circular pupil:{round(circular_pupil, 2)})")

    if facial_asymmetry > 0:
        prompt.append(f"(facial asymmetry, face asymmetry:{round(facial_asymmetry, 2)})")

    if light_type != '-' and light_weight > 0:
        if light_direction != '-':
            prompt.append(f"({light_type} {light_direction}:{round(light_weight, 2)})")
        else:
            prompt.append(f"({light_type}:{round(light_weight, 2)})")

    if prompt_end != "":
        prompt.append(f"{prompt_end}")

    prompt = ", ".join(prompt)
    prompt = prompt.lower()

    # Merged the two identical photorealism_improvement checks into one.
    if photorealism_improvement == "enable":
        prompt = prompt + ", (professional photo, balanced photo, balanced exposure:1.2), (film grain:1.15)"
        negative_prompt = negative_prompt + ", (shinny skin, reflections on the skin, skin reflections:1.25)"

    log_node_info("Portrait Master has generated the prompt:", prompt)

    return io.NodeOutput(prompt, negative_prompt)
|
||||
|
||||
# Multi-angle camera prompt generator
class multiAngle(io.ComfyNode):
    """Turn a list of camera-angle dicts into view-description prompt strings."""

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="easy multiAngle",
            category="EasyUse/Prompt",
            inputs=[
                io.Custom(io_type="EASY_MULTI_ANGLE").Input("multi_angle", optional=True),
            ],
            outputs=[
                io.String.Output("prompt", is_output_list=True),
                io.Custom(io_type="EASY_MULTI_ANGLE").Output("params"),
            ],
        )

    @classmethod
    def execute(cls, multi_angle=None, **kwargs):
        """Map each {rotate, vertical, zoom, add_angle_prompt} entry to a prompt.

        Returns one prompt string per entry plus the (parsed) params pass-through.
        """
        if multi_angle is None:
            return io.NodeOutput([""])

        if isinstance(multi_angle, str):
            try:
                multi_angle = json.loads(multi_angle)
            # FIX: was a bare `except:` which also swallowed KeyboardInterrupt etc.
            except json.JSONDecodeError as e:
                raise Exception(f"Invalid multi angle: {multi_angle}") from e

        prompts = []
        for angle_data in multi_angle:
            rotate = angle_data.get("rotate", 0)
            vertical = angle_data.get("vertical", 0)
            zoom = angle_data.get("zoom", 5)
            add_angle_prompt = angle_data.get("add_angle_prompt", True)

            # Clamp inputs to their valid ranges.
            rotate = max(0, min(360, int(rotate)))
            vertical = max(-90, min(90, int(vertical)))
            zoom = max(0.0, min(10.0, float(zoom)))

            h_angle = rotate % 360

            # Horizontal direction: eight 45-degree sectors around the subject.
            h_suffix = "" if add_angle_prompt else " quarter"
            if h_angle < 22.5 or h_angle >= 337.5:
                h_direction = "front view"
            elif h_angle < 67.5:
                h_direction = f"front-right{h_suffix} view"
            elif h_angle < 112.5:
                h_direction = "right side view"
            elif h_angle < 157.5:
                h_direction = f"back-right{h_suffix} view"
            elif h_angle < 202.5:
                h_direction = "back view"
            elif h_angle < 247.5:
                h_direction = f"back-left{h_suffix} view"
            elif h_angle < 292.5:
                h_direction = "left side view"
            else:
                h_direction = f"front-left{h_suffix} view"

            # Vertical direction mapping; the two branches use different
            # vocabularies depending on add_angle_prompt.
            if add_angle_prompt:
                if vertical == -90:
                    v_direction = "bottom-looking-up perspective, extreme worm's eye view, focus subject bottom"
                elif vertical < -75:
                    v_direction = "bottom-looking-up perspective, extreme worm's eye view"
                elif vertical < -45:
                    v_direction = "ultra-low angle"
                elif vertical < -15:
                    v_direction = "low angle"
                elif vertical < 15:
                    v_direction = "eye level"
                elif vertical < 45:
                    v_direction = "high angle"
                elif vertical < 75:
                    v_direction = "bird's eye view"
                elif vertical < 90:
                    v_direction = "top-down perspective, looking straight down at the top of the subject"
                else:
                    v_direction = "top-down perspective, looking straight down at the top of the subject, face not visible, focus on subject head"
            else:
                if vertical < -15:
                    v_direction = "low-angle shot"
                elif vertical < 15:
                    v_direction = "eye-level shot"
                elif vertical < 45:
                    v_direction = "elevated shot"
                elif vertical < 75:
                    v_direction = "high-angle shot"
                elif vertical < 90:
                    v_direction = "top-down perspective, looking straight down at the top of the subject"
                else:
                    v_direction = "top-down perspective, looking straight down at the top of the subject, face not visible, focus on subject head"

            # Distance/zoom mapping. The original duplicated this identical
            # table in both branches of `if add_angle_prompt`; collapsed.
            if zoom < 2:
                distance = "extreme wide shot"
            elif zoom < 4:
                distance = "wide shot"
            elif zoom < 6:
                distance = "medium shot"
            elif zoom < 8:
                distance = "close-up"
            else:
                distance = "extreme close-up"

            # Build the final prompt for this angle.
            if add_angle_prompt:
                prompt = f"{h_direction}, {v_direction}, {distance} (horizontal: {rotate}, vertical: {vertical}, zoom: {zoom:.1f})"
            else:
                prompt = f"{h_direction} {v_direction} {distance}"

            prompts.append(prompt)

        return io.NodeOutput(prompts, multi_angle)
|
||||
|
||||
|
||||
# Registration table for this module: ComfyUI node id -> implementing class.
NODE_CLASS_MAPPINGS = {
    "easy positive": positivePrompt,
    "easy negative": negativePrompt,
    "easy wildcards": wildcardsPrompt,
    "easy wildcardsMatrix": wildcardsPromptMatrix,
    "easy prompt": prompt,
    "easy promptList": promptList,
    "easy promptLine": promptLine,
    "easy promptAwait": promptAwait,
    "easy promptConcat": promptConcat,
    "easy promptReplace": promptReplace,
    "easy stylesSelector": stylesPromptSelector,
    "easy portraitMaster": portraitMaster,
    "easy multiAngle": multiAngle,
}

# Human-readable titles shown in the ComfyUI node picker for the ids above.
NODE_DISPLAY_NAME_MAPPINGS = {
    "easy positive": "Positive",
    "easy negative": "Negative",
    "easy wildcards": "Wildcards",
    "easy wildcardsMatrix": "Wildcards Matrix",
    "easy prompt": "Prompt",
    "easy promptList": "PromptList",
    "easy promptLine": "PromptLine",
    "easy promptAwait": "PromptAwait",
    "easy promptConcat": "PromptConcat",
    "easy promptReplace": "PromptReplace",
    "easy stylesSelector": "Styles Selector",
    "easy portraitMaster": "Portrait Master",
    "easy multiAngle": "Multi Angle",
}
|
||||
1360
custom_nodes/ComfyUI-Easy-Use/py/nodes/samplers.py
Normal file
1360
custom_nodes/ComfyUI-Easy-Use/py/nodes/samplers.py
Normal file
File diff suppressed because it is too large
Load Diff
106
custom_nodes/ComfyUI-Easy-Use/py/nodes/seed.py
Normal file
106
custom_nodes/ComfyUI-Easy-Use/py/nodes/seed.py
Normal file
@@ -0,0 +1,106 @@
|
||||
from ..config import MAX_SEED_NUM
|
||||
import hashlib
|
||||
import random
|
||||
|
||||
class easySeed:
    """Pass-through seed node: exposes an INT seed widget and returns it unchanged."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "seed": ("INT", {"default": 0, "min": 0, "max": MAX_SEED_NUM}),
            },
            "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"},
        }

    RETURN_TYPES = ("INT",)
    RETURN_NAMES = ("seed",)
    FUNCTION = "doit"

    CATEGORY = "EasyUse/Seed"

    def doit(self, seed=0, prompt=None, extra_pnginfo=None, my_unique_id=None):
        """Return the configured seed as a one-tuple."""
        return (seed,)
|
||||
|
||||
class seedList:
    """Produce a list of seeds (random, incrementing, or decrementing)."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "min_num": ("INT", {"default": 0, "min": 0, "max": MAX_SEED_NUM}),
                "max_num": ("INT", {"default": MAX_SEED_NUM, "max": MAX_SEED_NUM, "min": 0}),
                "method": (["random", "increment", "decrement"], {"default": "random"}),
                "total": ("INT", {"default": 1, "min": 1, "max": 100000}),
                "seed": ("INT", {"default": 0, "min": 0, "max": MAX_SEED_NUM}),
            },
            "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"},
        }

    RETURN_TYPES = ("INT", "INT")
    RETURN_NAMES = ("seed", "total")
    FUNCTION = "doit"
    DESCRIPTION = "Random number seed that can be used in a for loop, by connecting index and easy indexAny node to realize different seed values in the loop."

    CATEGORY = "EasyUse/Seed"

    def doit(self, min_num, max_num, method, total, seed=0, prompt=None, extra_pnginfo=None, my_unique_id=None):
        """Return (seed_list, total).

        `seed` initialises the RNG so the "random" method is reproducible.
        Increment/decrement runs clamp at the range boundary.
        """
        random.seed(seed)

        # Tolerate swapped bounds.
        if min_num > max_num:
            min_num, max_num = max_num, min_num

        seed_list = []
        for i in range(total):
            if method == 'random':
                s = random.randint(min_num, max_num)
            elif method == 'increment':
                s = min(min_num + i, max_num)
            elif method == 'decrement':
                s = max(max_num - i, min_num)
            else:
                # FIX: the original fell through with `s` unbound on an
                # unknown method; fail loudly instead.
                raise ValueError(f"Unknown seed method: {method}")
            seed_list.append(s)
        return (seed_list, total)

    @classmethod
    def IS_CHANGED(cls, seed, **kwargs):
        """Change-detection hash for the scheduler cache.

        FIX: hashlib's update() requires bytes; the original passed the raw
        int seed, which raised TypeError on every call.
        """
        m = hashlib.sha256()
        m.update(str(seed).encode('utf-8'))
        return m.digest().hex()
|
||||
|
||||
# Global seed controller (UI-side node)
class globalSeed:
    """Output node whose seed logic is handled entirely by the frontend widget."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": ("INT", {"default": 0, "min": 0, "max": MAX_SEED_NUM}),
                "mode": ("BOOLEAN", {"default": True, "label_on": "control_before_generate", "label_off": "control_after_generate"}),
                "action": (["fixed", "increment", "decrement", "randomize",
                            "increment for each node", "decrement for each node", "randomize for each node"], ),
                "last_seed": ("STRING", {"default": ""}),
            }
        }

    RETURN_TYPES = ()
    FUNCTION = "doit"

    CATEGORY = "EasyUse/Seed"

    OUTPUT_NODE = True

    def doit(self, **kwargs):
        """No server-side work: the widget rewrites seeds in the browser."""
        return {}
|
||||
|
||||
|
||||
# Registration table for the seed nodes: ComfyUI node id -> implementing class.
NODE_CLASS_MAPPINGS = {
    "easy seed": easySeed,
    "easy seedList": seedList,
    "easy globalSeed": globalSeed,
}

# Human-readable titles shown in the ComfyUI node picker for the ids above.
NODE_DISPLAY_NAME_MAPPINGS = {
    "easy seed": "EasySeed",
    "easy seedList": "EasySeedList",
    "easy globalSeed": "EasyGlobalSeed",
}
|
||||
142
custom_nodes/ComfyUI-Easy-Use/py/nodes/util.py
Normal file
142
custom_nodes/ComfyUI-Easy-Use/py/nodes/util.py
Normal file
@@ -0,0 +1,142 @@
|
||||
import os
|
||||
import folder_paths
|
||||
from ..libs.utils import AlwaysEqualProxy
|
||||
|
||||
class showLoaderSettingsNames:
    """Display (and output) the ckpt/vae/lora names stored in a pipe's loader settings."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "pipe": ("PIPE_LINE",),
            },
            "hidden": {
                "unique_id": "UNIQUE_ID",
                "extra_pnginfo": "EXTRA_PNGINFO",
            },
        }

    RETURN_TYPES = ("STRING", "STRING", "STRING",)
    RETURN_NAMES = ("ckpt_name", "vae_name", "lora_name")

    FUNCTION = "notify"
    OUTPUT_NODE = True

    CATEGORY = "EasyUse/Util"

    def notify(self, pipe, names=None, unique_id=None, extra_pnginfo=None):
        """Extract basenames from pipe['loader_settings'] and push them to the UI.

        FIX: the original only assigned ckpt_name/vae_name/lora_name (and
        `names`) inside the node-lookup branch, so a missing unique_id or
        workflow node raised NameError in the return statement.
        """
        ckpt_name = vae_name = lora_name = ''
        if unique_id and extra_pnginfo and "workflow" in extra_pnginfo:
            workflow = extra_pnginfo["workflow"]
            node = next((x for x in workflow["nodes"] if str(x["id"]) == unique_id), None)
            if node:
                settings = pipe['loader_settings']
                ckpt_name = settings.get('ckpt_name', '')
                vae_name = settings.get('vae_name', '')
                lora_name = settings.get('lora_name', '')

                # Strip directories and extensions for display.
                if ckpt_name:
                    ckpt_name = os.path.basename(os.path.splitext(ckpt_name)[0])
                if vae_name:
                    vae_name = os.path.basename(os.path.splitext(vae_name)[0])
                if lora_name:
                    lora_name = os.path.basename(os.path.splitext(lora_name)[0])

                names = "ckpt_name: " + ckpt_name + '\n' + "vae_name: " + vae_name + '\n' + "lora_name: " + lora_name
                node["widgets_values"] = names

        if names is None:
            names = ''
        return {"ui": {"text": [names]}, "result": (ckpt_name, vae_name, lora_name)}
|
||||
|
||||
class sliderControl:
    """Read this node's own 'values' widget back out of the prompt graph."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "mode": (['ipadapter layer weights'],),
                "model_type": (['sdxl', 'sd1'],),
            },
            "hidden": {
                "prompt": "PROMPT",
                "my_unique_id": "UNIQUE_ID",
                "extra_pnginfo": "EXTRA_PNGINFO",
            },
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("layer_weights",)

    FUNCTION = "control"

    CATEGORY = "EasyUse/Util"

    def control(self, mode, model_type, prompt=None, my_unique_id=None, extra_pnginfo=None):
        """Return the node's 'values' input as a string (empty when absent).

        FIX: the original tested `my_unique_id in prompt` without guarding
        against prompt=None, raising TypeError when the hidden input was
        not supplied.
        """
        values = ''
        if prompt and my_unique_id in prompt:
            inputs = prompt[my_unique_id]["inputs"]
            if 'values' in inputs:
                values = inputs['values']
        return (values,)
|
||||
|
||||
class setCkptName:
    """Checkpoint picker: outputs the chosen filename as a wildcard-typed value."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "ckpt_name": (folder_paths.get_filename_list("checkpoints"),),
            },
        }

    RETURN_TYPES = (AlwaysEqualProxy('*'),)
    RETURN_NAMES = ("ckpt_name",)
    FUNCTION = "set_name"
    CATEGORY = "EasyUse/Util"

    def set_name(self, ckpt_name):
        """Pass the selected checkpoint name straight through."""
        return (ckpt_name,)
|
||||
|
||||
class setControlName:
    """ControlNet picker: outputs the chosen filename as a wildcard-typed value."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "controlnet_name": (folder_paths.get_filename_list("controlnet"),),
            },
        }

    RETURN_TYPES = (AlwaysEqualProxy('*'),)
    RETURN_NAMES = ("controlnet_name",)
    FUNCTION = "set_name"
    CATEGORY = "EasyUse/Util"

    def set_name(self, controlnet_name):
        """Pass the selected ControlNet name straight through."""
        return (controlnet_name,)
|
||||
|
||||
class setLoraName:
    """LoRA picker: outputs the chosen filename as a wildcard-typed value."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "lora_name": (folder_paths.get_filename_list("loras"),),
            },
        }

    RETURN_TYPES = (AlwaysEqualProxy('*'),)
    RETURN_NAMES = ("lora_name",)
    FUNCTION = "set_name"
    CATEGORY = "EasyUse/Util"

    def set_name(self, lora_name):
        """Pass the selected LoRA name straight through."""
        return (lora_name,)
|
||||
|
||||
|
||||
# Registration table for the util nodes: ComfyUI node id -> implementing class.
NODE_CLASS_MAPPINGS = {
    "easy showLoaderSettingsNames": showLoaderSettingsNames,
    "easy sliderControl": sliderControl,
    "easy ckptNames": setCkptName,
    "easy controlnetNames": setControlName,
    "easy loraNames": setLoraName,
}

# Human-readable titles shown in the ComfyUI node picker for the ids above.
NODE_DISPLAY_NAME_MAPPINGS = {
    "easy showLoaderSettingsNames": "Show Loader Settings Names",
    "easy sliderControl": "Easy Slider Control",
    "easy ckptNames": "Ckpt Names",
    "easy controlnetNames": "ControlNet Names",
    "easy loraNames": "Lora Names",
}
|
||||
699
custom_nodes/ComfyUI-Easy-Use/py/nodes/xyplot.py
Normal file
699
custom_nodes/ComfyUI-Easy-Use/py/nodes/xyplot.py
Normal file
@@ -0,0 +1,699 @@
|
||||
import os
|
||||
import json
|
||||
import comfy
|
||||
import folder_paths
|
||||
from ..config import RESOURCES_DIR
|
||||
from ..libs.utils import getMetadata
|
||||
def load_preset(filename):
    """Read a preset file from RESOURCES_DIR and return its stripped lines.

    Returns an empty list when the file does not exist.
    """
    path = os.path.abspath(os.path.join(RESOURCES_DIR, filename))
    if not os.path.exists(path):
        return []
    # Presets ship with the extension; read them as UTF-8 explicitly rather
    # than relying on the platform default encoding.
    with open(path, 'r', encoding='utf-8') as file:
        return [line.strip() for line in file]
|
||||
def generate_floats(batch_count, first_float, last_float):
    """Return batch_count floats evenly spaced from first_float to last_float.

    Values are rounded to 3 decimals and joined into a "; "-separated string;
    batch_count == 0 yields an empty string.
    """
    if batch_count > 1:
        step = (last_float - first_float) / (batch_count - 1)
        parts = [str(round(first_float + idx * step, 3)) for idx in range(batch_count)]
    elif batch_count == 1:
        parts = [str(first_float)]
    else:
        parts = []
    return "; ".join(parts)
|
||||
|
||||
def generate_ints(batch_count, first_int, last_int):
    """Return batch_count ints evenly spaced from first_int to last_int.

    Intermediate values are truncated toward zero; the result is a
    "; "-separated string (empty for batch_count == 0). Duplicates are kept.
    """
    if batch_count > 1:
        step = (last_int - first_int) / (batch_count - 1)
        parts = [str(int(first_int + idx * step)) for idx in range(batch_count)]
    elif batch_count == 1:
        parts = [str(first_int)]
    else:
        parts = []
    return "; ".join(parts)
|
||||
|
||||
# Seed++ Batch
class XYplot_SeedsBatch:
    """X/Y axis input that repeats the plot over batch_count fresh seeds."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "batch_count": ("INT", {"default": 3, "min": 1, "max": 50}),
            },
        }

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, batch_count):
        """Package the batch count as a Seeds++ Batch axis descriptor."""
        return ({"axis": "advanced: Seeds++ Batch", "values": batch_count},)
|
||||
|
||||
# Step Values
class XYplot_Steps:
    """X/Y axis input sweeping steps, start_at_step or end_at_step."""
    parameters = ["steps", "start_at_step", "end_at_step"]

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "target_parameter": (cls.parameters,),
                "batch_count": ("INT", {"default": 3, "min": 0, "max": 50}),
                "first_step": ("INT", {"default": 10, "min": 1, "max": 10000}),
                "last_step": ("INT", {"default": 20, "min": 1, "max": 10000}),
                "first_start_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
                "last_start_step": ("INT", {"default": 10, "min": 0, "max": 10000}),
                "first_end_step": ("INT", {"default": 10, "min": 0, "max": 10000}),
                "last_end_step": ("INT", {"default": 20, "min": 0, "max": 10000}),
            }
        }

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, target_parameter, batch_count, first_step, last_step, first_start_step, last_start_step,
                 first_end_step, last_end_step):
        """Map the chosen parameter to an axis label plus an int range string."""
        table = {
            "steps": ("advanced: Steps", first_step, last_step),
            "start_at_step": ("advanced: StartStep", first_start_step, last_start_step),
            "end_at_step": ("advanced: EndStep", first_end_step, last_end_step),
        }
        axis, first, last = table.get(target_parameter, (None, None, None))

        values = generate_ints(batch_count, first, last)
        return ({"axis": axis, "values": values},) if values is not None else (None,)
|
||||
|
||||
class XYplot_CFG:
    """X/Y axis input sweeping the CFG scale across batch_count values."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "batch_count": ("INT", {"default": 3, "min": 0, "max": 50}),
                "first_cfg": ("FLOAT", {"default": 7.0, "min": 0.0, "max": 100.0}),
                "last_cfg": ("FLOAT", {"default": 9.0, "min": 0.0, "max": 100.0}),
            }
        }

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, batch_count, first_cfg, last_cfg):
        """Return an axis descriptor with evenly spaced CFG values."""
        series = generate_floats(batch_count, first_cfg, last_cfg)
        return ({"axis": "advanced: CFG Scale", "values": series},) if series else (None,)
|
||||
|
||||
class XYplot_FluxGuidance:
    """X/Y axis input sweeping the Flux guidance value across batch_count steps."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "batch_count": ("INT", {"default": 3, "min": 0, "max": 50}),
                "first_guidance": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
                "last_guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0}),
            }
        }

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, batch_count, first_guidance, last_guidance):
        """Return an axis descriptor with evenly spaced guidance values."""
        series = generate_floats(batch_count, first_guidance, last_guidance)
        return ({"axis": "advanced: Flux Guidance", "values": series},) if series else (None,)
|
||||
|
||||
# Sampler / Scheduler values
class XYplot_Sampler_Scheduler:
    """X/Y axis input selecting sampler and/or scheduler combinations."""
    parameters = ["sampler", "scheduler", "sampler & scheduler"]

    @classmethod
    def INPUT_TYPES(cls):
        samplers = ["None"] + comfy.samplers.KSampler.SAMPLERS
        schedulers = ["None"] + comfy.samplers.KSampler.SCHEDULERS
        inputs = {
            "required": {
                "target_parameter": (cls.parameters,),
                "input_count": ("INT", {"default": 1, "min": 1, "max": 30, "step": 1})
            }
        }
        # Thirty sampler/scheduler slot pairs; only input_count are consumed.
        for i in range(1, 31):
            inputs["required"][f"sampler_{i}"] = (samplers,)
            inputs["required"][f"scheduler_{i}"] = (schedulers,)
        return inputs

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, target_parameter, input_count, **kwargs):
        """Collect the first input_count sampler/scheduler picks.

        "None" entries are dropped for single-parameter axes; the combined
        axis pairs every slot as "sampler,scheduler" joined by "; ".
        """
        def picks(prefix):
            return [kwargs.get(f"{prefix}_{i}") for i in range(1, input_count + 1)]

        if target_parameter == "scheduler":
            axis = "advanced: Scheduler"
            values = [s for s in picks("scheduler") if s != "None"]
        elif target_parameter == "sampler":
            axis = "advanced: Sampler"
            values = [s for s in picks("sampler") if s != "None"]
        else:
            axis = "advanced: Sampler&Scheduler"
            pairs = zip(picks("sampler"), picks("scheduler"))
            values = "; ".join(f"{s or 'None'},{sch or 'None'}" for s, sch in pairs)
        return ({"axis": axis, "values": values},) if values else (None,)
|
||||
|
||||
class XYplot_Denoise:
    """X/Y axis input sweeping the denoise strength across batch_count values."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "batch_count": ("INT", {"default": 3, "min": 0, "max": 50}),
                "first_denoise": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.1}),
                "last_denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.1}),
            }
        }

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, batch_count, first_denoise, last_denoise):
        """Return an axis descriptor with evenly spaced denoise values."""
        series = generate_floats(batch_count, first_denoise, last_denoise)
        return ({"axis": "advanced: Denoise", "values": series},) if series else (None,)
|
||||
|
||||
# Prompt Search/Replace
class XYplot_PromptSR:
    """X/Y axis input: search a prompt substring and try several replacements."""

    @classmethod
    def INPUT_TYPES(cls):
        inputs = {
            "required": {
                "target_prompt": (["positive", "negative"],),
                "search_txt": ("STRING", {"default": "", "multiline": False}),
                "replace_all_text": ("BOOLEAN", {"default": False}),
                "replace_count": ("INT", {"default": 3, "min": 1, "max": 30 - 1}),
            }
        }
        # Slots replace_1 .. replace_29; only replace_count are consumed.
        for i in range(1, 30):
            key = f"replace_{i}"
            inputs["required"][key] = ("STRING", {"default": "", "multiline": False, "placeholder": key})
        return inputs

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, target_prompt, search_txt, replace_all_text, replace_count, **kwargs):
        """Return (search, replacement, replace_all) tuples.

        The first entry keeps the original text (replacement None); one more
        entry is added per configured replacement slot.
        """
        axis = {
            "positive": "advanced: Positive Prompt S/R",
            "negative": "advanced: Negative Prompt S/R",
        }.get(target_prompt)

        values = [(search_txt, None, replace_all_text)]
        values += [(search_txt, kwargs.get(f"replace_{i}"), replace_all_text)
                   for i in range(1, replace_count + 1)]
        return ({"axis": axis, "values": values},)
|
||||
|
||||
# XYPlot Pos Condition
class XYplot_Positive_Cond:
    """X/Y axis input collecting up to four positive conditionings."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "optional": {
                "positive_1": ("CONDITIONING",),
                "positive_2": ("CONDITIONING",),
                "positive_3": ("CONDITIONING",),
                "positive_4": ("CONDITIONING",),
            }
        }

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, positive_1=None, positive_2=None, positive_3=None, positive_4=None):
        """Collect connected conditionings; values are their slot indices.

        NOTE(review): indices are strings here but ints in the Neg Condition
        node — presumably the consumer tolerates both; preserved as-is.
        """
        values, cond = [], []
        for slot, c in enumerate((positive_1, positive_2, positive_3, positive_4)):
            if c is not None:
                values.append(str(slot))
                cond.append(c)

        return ({"axis": "advanced: Pos Condition", "values": values, "cond": cond},)
|
||||
|
||||
# XYPlot Neg Condition
class XYplot_Negative_Cond:
    """X/Y axis input collecting up to four negative conditionings."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "optional": {
                "negative_1": ("CONDITIONING",),
                "negative_2": ("CONDITIONING",),
                "negative_3": ("CONDITIONING",),
                "negative_4": ("CONDITIONING",),
            }
        }

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, negative_1=None, negative_2=None, negative_3=None, negative_4=None):
        """Collect connected conditionings; values are their slot indices.

        NOTE(review): indices are ints here but strings in the Pos Condition
        node — preserved as-is to avoid changing downstream behavior.
        """
        values, cond = [], []
        for slot, c in enumerate((negative_1, negative_2, negative_3, negative_4)):
            if c is not None:
                values.append(slot)
                cond.append(c)

        return ({"axis": "advanced: Neg Condition", "values": values, "cond": cond},)
|
||||
|
||||
# XYPlot Pos Condition List
class XYplot_Positive_Cond_List:
    """X/Y axis input: one axis entry per conditioning in the incoming list."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "positive": ("CONDITIONING",),
            }
        }

    INPUT_IS_LIST = True
    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, positive):
        """Label each list element with its string index."""
        cond = list(positive)
        values = [str(i) for i in range(len(cond))]

        return ({"axis": "advanced: Pos Condition", "values": values, "cond": cond},)
|
||||
|
||||
# XYPlot Neg Condition List
class XYplot_Negative_Cond_List:
    """X/Y axis input: one axis entry per conditioning in the incoming list."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "negative": ("CONDITIONING",),
            }
        }

    INPUT_IS_LIST = True
    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, negative):
        """Label each list element with its integer index (ints, unlike the Pos list node)."""
        cond = list(negative)
        values = list(range(len(cond)))

        return ({"axis": "advanced: Neg Condition", "values": values, "cond": cond},)
|
||||
|
||||
# XY Plot: ControlNet
class XYplot_Control_Net:
    """XY-plot input node that sweeps one ControlNet parameter (strength or the
    start/end application percentage) linearly across a batch."""

    parameters = ["strength", "start_percent", "end_percent"]

    @classmethod
    def INPUT_TYPES(cls):
        def get_file_list(filenames):
            # Hide the placeholder file and lllite models (not usable here).
            return [file for file in filenames if file != "put_models_here.txt" and "lllite" not in file]

        return {
            "required": {
                "control_net_name": (get_file_list(folder_paths.get_filename_list("controlnet")),),
                "image": ("IMAGE",),
                "target_parameter": (cls.parameters,),
                "batch_count": ("INT", {"default": 3, "min": 1, "max": 30}),
                "first_strength": ("FLOAT", {"default": 0.0, "min": 0.00, "max": 10.0, "step": 0.01}),
                "last_strength": ("FLOAT", {"default": 1.0, "min": 0.00, "max": 10.0, "step": 0.01}),
                "first_start_percent": ("FLOAT", {"default": 0.0, "min": 0.00, "max": 1.0, "step": 0.01}),
                "last_start_percent": ("FLOAT", {"default": 1.0, "min": 0.00, "max": 1.0, "step": 0.01}),
                "first_end_percent": ("FLOAT", {"default": 0.0, "min": 0.00, "max": 1.0, "step": 0.01}),
                "last_end_percent": ("FLOAT", {"default": 1.0, "min": 0.00, "max": 1.0, "step": 0.01}),
                "strength": ("FLOAT", {"default": 1.0, "min": 0.00, "max": 10.0, "step": 0.01}),
                "start_percent": ("FLOAT", {"default": 0.0, "min": 0.00, "max": 1.0, "step": 0.01}),
                "end_percent": ("FLOAT", {"default": 1.0, "min": 0.00, "max": 1.0, "step": 0.01}),
            },
            "optional": {
                "control_net": ("CONTROL_NET",),
            },
        }

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    @staticmethod
    def _interpolate(first, last, batch_count):
        """Yield batch_count values linearly spaced from first to last.

        batch_count == 1 yields only `first`; otherwise the first and last
        values are included and the interior points are evenly spaced —
        exactly the sequence the original three copy-pasted branches built.
        """
        yield first
        step = (last - first) / (batch_count - 1) if batch_count > 1 else 0
        for i in range(1, batch_count - 1):
            yield first + i * step
        if batch_count > 1:
            yield last

    def xy_value(self, control_net_name, image, target_parameter, batch_count, first_strength, last_strength, first_start_percent,
                 last_start_percent, first_end_percent, last_end_percent, strength, start_percent, end_percent, control_net=None):
        """Build an X/Y axis whose values sweep the chosen ControlNet parameter.

        Each axis value is a single-element list holding a
        (name, image, strength, start_percent, end_percent, control_net)
        tuple; only the swept slot varies, the other two come from the
        fixed `strength` / `start_percent` / `end_percent` inputs.
        """
        if target_parameter == "strength":
            axis = "advanced: ControlNetStrength"
            values = [[(control_net_name, image, s, start_percent, end_percent, control_net)]
                      for s in self._interpolate(first_strength, last_strength, batch_count)]
        elif target_parameter == "start_percent":
            axis = "advanced: ControlNetStart%"
            values = [[(control_net_name, image, strength, sp, end_percent, control_net)]
                      for sp in self._interpolate(first_start_percent, last_start_percent, batch_count)]
        elif target_parameter == "end_percent":
            axis = "advanced: ControlNetEnd%"
            values = [[(control_net_name, image, strength, start_percent, ep, control_net)]
                      for ep in self._interpolate(first_end_percent, last_end_percent, batch_count)]
        else:
            # Unreachable from the UI dropdown; matches the original fallthrough.
            axis, values = None, []

        return ({"axis": axis, "values": values},)
|
||||
|
||||
|
||||
#Checkpoints
class XYplot_Checkpoint:
    """XY-plot input node selecting up to 10 checkpoints, optionally with
    per-checkpoint clip-skip and VAE depending on the chosen input mode."""

    modes = ["Ckpt Names", "Ckpt Names+ClipSkip", "Ckpt Names+ClipSkip+VAE"]

    @classmethod
    def INPUT_TYPES(cls):
        checkpoints = ["None"] + folder_paths.get_filename_list("checkpoints")
        vaes = ["Baked VAE"] + folder_paths.get_filename_list("vae")

        inputs = {
            "required": {
                "input_mode": (cls.modes,),
                "ckpt_count": ("INT", {"default": 3, "min": 0, "max": 10, "step": 1}),
            }
        }

        # Always declare all 10 slots; ckpt_count decides how many are read.
        for i in range(1, 10 + 1):
            inputs["required"][f"ckpt_name_{i}"] = (checkpoints,)
            inputs["required"][f"clip_skip_{i}"] = ("INT", {"default": -1, "min": -24, "max": -1, "step": 1})
            inputs["required"][f"vae_name_{i}"] = (vaes,)

        inputs["optional"] = {
            "optional_lora_stack": ("LORA_STACK",)
        }
        return inputs

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, input_mode, ckpt_count, **kwargs):
        """Build an X/Y axis of "checkpoint,clip_skip,vae" strings.

        Commas inside file names are replaced with '*' so downstream code
        can split entries on ',' unambiguously. Clip-skip / VAE columns are
        forced to 'None' when the input mode does not include them.
        """
        axis = "advanced: Checkpoint"

        checkpoints = [kwargs.get(f"ckpt_name_{i}") for i in range(1, ckpt_count + 1)]
        clip_skips = [kwargs.get(f"clip_skip_{i}") for i in range(1, ckpt_count + 1)]
        vaes = [kwargs.get(f"vae_name_{i}") for i in range(1, ckpt_count + 1)]

        # Mask out clip-skip and/or VAE when the mode does not use them.
        for i in range(ckpt_count):
            if "ClipSkip" not in input_mode:
                clip_skips[i] = 'None'
            if "VAE" not in input_mode:
                vaes[i] = 'None'

        # Skip the "None" placeholder AND missing (None) slots: the original
        # filter only excluded the string "None", so an absent ckpt_name_{i}
        # kwarg crashed on None.replace(...).
        values = [checkpoint.replace(',', '*') + ',' + str(clip_skip) + ',' + vae.replace(',', '*')
                  for checkpoint, clip_skip, vae in zip(checkpoints, clip_skips, vaes)
                  if checkpoint not in (None, "None")]

        # `or []` also normalizes an explicitly-None optional input to a list.
        optional_lora_stack = kwargs.get("optional_lora_stack") or []

        xy_values = {"axis": axis, "values": values, "lora_stack": optional_lora_stack}
        return (xy_values,)
|
||||
|
||||
#Loras
class XYplot_Lora:
    """XY-plot input node selecting up to 10 loras with model/clip strengths,
    optionally appending each lora's most frequent training tag as a trigger word."""

    modes = ["Lora Names", "Lora Names+Weights"]

    @classmethod
    def INPUT_TYPES(cls):
        loras = ["None"] + folder_paths.get_filename_list("loras")

        inputs = {
            "required": {
                "input_mode": (cls.modes,),
                "lora_count": ("INT", {"default": 3, "min": 0, "max": 10, "step": 1}),
                "model_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                "clip_strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
            }
        }

        # Always declare all 10 slots; lora_count decides how many are read.
        for i in range(1, 10 + 1):
            inputs["required"][f"lora_name_{i}"] = (loras,)
            inputs["required"][f"model_str_{i}"] = ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01})
            inputs["required"][f"clip_str_{i}"] = ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01})

        inputs["optional"] = {
            "optional_lora_stack": ("LORA_STACK",),
            # Fixed: the widget option key must be "default" (it was mistakenly
            # the widget's own name, so the default was never applied).
            "display_trigger_word": ("BOOLEAN", {"default": True, "tooltip": "Trigger words showing lora model pass through the model's metadata, but not necessarily accurately."}),
        }
        return inputs

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def sort_tags_by_frequency(self, meta_tags):
        """Return training tags from ss_tag_frequency metadata, most frequent first.

        Returns [] when metadata is missing or has no ss_tag_frequency key.
        """
        if meta_tags is None:
            return []
        if "ss_tag_frequency" not in meta_tags:
            return []
        counts = {}
        # ss_tag_frequency is a JSON string: {dataset: {tag: count, ...}, ...};
        # aggregate counts for the same tag across datasets.
        for dataset in json.loads(meta_tags["ss_tag_frequency"]).values():
            for tag, count in dataset.items():
                tag = str(tag).strip()
                counts[tag] = counts.get(tag, 0) + count
        # Most seen tags first.
        return sorted(counts, key=counts.get, reverse=True)

    def get_trigger_words(self, lora_name, display=False):
        """Return the lora's top training tag prefixed with a space, or ''.

        Short-circuits to '' when display is falsy so no file I/O happens.
        """
        if not display:
            return ""

        file_path = folder_paths.get_full_path('loras', lora_name)
        if not file_path:
            return ''
        header = getMetadata(file_path)
        header_json = json.loads(header)
        meta = header_json["__metadata__"] if "__metadata__" in header_json else None
        tags = self.sort_tags_by_frequency(meta)
        return ' ' + tags[0] if len(tags) > 0 else ''

    def xy_value(self, input_mode, lora_count, model_strength, clip_strength, display_trigger_words=True, **kwargs):
        """Build an X/Y axis of "lora,model_str,clip_str,trigger" strings.

        Fix: the UI input is named "display_trigger_word" (singular) while the
        signature parameter is "display_trigger_words", so the toggle used to
        land unused in **kwargs and the default was always taken; we now read
        it from kwargs, falling back to the old parameter for compatibility.
        """
        axis = "advanced: Lora"
        display = kwargs.get("display_trigger_word", display_trigger_words)

        # Extract the per-slot values actually in use.
        loras = [kwargs.get(f"lora_name_{i}") for i in range(1, lora_count + 1)]
        model_strs = [kwargs.get(f"model_str_{i}", model_strength) for i in range(1, lora_count + 1)]
        clip_strs = [kwargs.get(f"clip_str_{i}", clip_strength) for i in range(1, lora_count + 1)]

        # Without "+Weights" mode the global strengths override per-slot ones.
        if "Weights" not in input_mode:
            for i in range(lora_count):
                model_strs[i] = model_strength
                clip_strs[i] = clip_strength

        # Commas inside lora names become '*' so entries split cleanly on ','.
        values = [lora.replace(',', '*') + ',' + str(model_str) + ',' + str(clip_str) + ',' + self.get_trigger_words(lora, display)
                  for lora, model_str, clip_str in zip(loras, model_strs, clip_strs) if lora != "None"]

        optional_lora_stack = kwargs.get("optional_lora_stack") or []

        xy_values = {"axis": axis, "values": values, "lora_stack": optional_lora_stack}
        return (xy_values,)
|
||||
|
||||
# Model merge (block-weighted merge of two checkpoints)
class XYplot_ModelMergeBlocks:
    """XY-plot input node for merging two checkpoints with per-block weight vectors."""

    @classmethod
    def INPUT_TYPES(cls):
        checkpoints = folder_paths.get_filename_list("checkpoints")
        vae = ["Use Model 1", "Use Model 2"] + folder_paths.get_filename_list("vae")

        preset = ["Preset"]  # 20
        preset += load_preset("mmb-preset.txt")
        preset += load_preset("mmb-preset.custom.txt")

        default_vectors = "1,0,0; \n0,1,0; \n0,0,1; \n1,1,0; \n1,0,1; \n0,1,1; "
        return {
            "required": {
                "ckpt_name_1": (checkpoints,),
                "ckpt_name_2": (checkpoints,),
                "vae_use": (vae, {"default": "Use Model 1"}),
                # Fixed: default must match a list entry exactly ("Preset", not "preset").
                "preset": (preset, {"default": "Preset"}),
                "values": ("STRING", {"default": default_vectors, "multiline": True, "placeholder": 'Support 2 methods:\n\n1.input, middle, out in same line and insert values seperated by "; "\n\n2.model merge block number seperated by ", " in same line and insert values seperated by "; "'}),
            },
            "hidden": {"my_unique_id": "UNIQUE_ID"}
        }

    RETURN_TYPES = ("X_Y",)
    RETURN_NAMES = ("X or Y",)
    FUNCTION = "xy_value"
    CATEGORY = "EasyUse/XY Inputs"

    def xy_value(self, ckpt_name_1, ckpt_name_2, vae_use, preset, values, my_unique_id=None):
        """Package the two checkpoint names, VAE choice and raw weight-vector
        string into an X/Y axis descriptor; parsing of `values` happens downstream.

        Raises:
            Exception: if either checkpoint name is missing.
        """
        axis = "advanced: ModelMergeBlocks"
        if ckpt_name_1 is None:
            raise Exception("ckpt_name_1 is not found")
        if ckpt_name_2 is None:
            raise Exception("ckpt_name_2 is not found")

        models = (ckpt_name_1, ckpt_name_2)

        xy_values = {"axis": axis, "values": values, "models": models, "vae_use": vae_use}
        return (xy_values,)
|
||||
|
||||
|
||||
# Registry mapping ComfyUI node identifiers to their implementing classes.
# NOTE(review): several referenced classes (e.g. XYplot_SeedsBatch,
# XYplot_Steps, XYplot_PromptSR) are defined earlier in this file, outside
# the visible chunk.
NODE_CLASS_MAPPINGS = {
    "easy XYInputs: Seeds++ Batch": XYplot_SeedsBatch,
    "easy XYInputs: Steps": XYplot_Steps,
    "easy XYInputs: CFG Scale": XYplot_CFG,
    "easy XYInputs: FluxGuidance": XYplot_FluxGuidance,
    "easy XYInputs: Sampler/Scheduler": XYplot_Sampler_Scheduler,
    "easy XYInputs: Denoise": XYplot_Denoise,
    "easy XYInputs: Checkpoint": XYplot_Checkpoint,
    "easy XYInputs: Lora": XYplot_Lora,
    "easy XYInputs: ModelMergeBlocks": XYplot_ModelMergeBlocks,
    "easy XYInputs: PromptSR": XYplot_PromptSR,
    "easy XYInputs: ControlNet": XYplot_Control_Net,
    "easy XYInputs: PositiveCond": XYplot_Positive_Cond,
    "easy XYInputs: PositiveCondList": XYplot_Positive_Cond_List,
    "easy XYInputs: NegativeCond": XYplot_Negative_Cond,
    "easy XYInputs: NegativeCondList": XYplot_Negative_Cond_List,
}
|
||||
|
||||
# Human-readable display names for the nodes registered above; keys must
# match NODE_CLASS_MAPPINGS exactly.
NODE_DISPLAY_NAME_MAPPINGS = {
    "easy XYInputs: Seeds++ Batch": "XY Inputs: Seeds++ Batch //EasyUse",
    "easy XYInputs: Steps": "XY Inputs: Steps //EasyUse",
    "easy XYInputs: CFG Scale": "XY Inputs: CFG Scale //EasyUse",
    "easy XYInputs: FluxGuidance": "XY Inputs: Flux Guidance //EasyUse",
    "easy XYInputs: Sampler/Scheduler": "XY Inputs: Sampler/Scheduler //EasyUse",
    "easy XYInputs: Denoise": "XY Inputs: Denoise //EasyUse",
    "easy XYInputs: Checkpoint": "XY Inputs: Checkpoint //EasyUse",
    "easy XYInputs: Lora": "XY Inputs: Lora //EasyUse",
    "easy XYInputs: ModelMergeBlocks": "XY Inputs: ModelMergeBlocks //EasyUse",
    "easy XYInputs: PromptSR": "XY Inputs: PromptSR //EasyUse",
    "easy XYInputs: ControlNet": "XY Inputs: Controlnet //EasyUse",
    "easy XYInputs: PositiveCond": "XY Inputs: PosCond //EasyUse",
    "easy XYInputs: PositiveCondList": "XY Inputs: PosCondList //EasyUse",
    "easy XYInputs: NegativeCond": "XY Inputs: NegCond //EasyUse",
    "easy XYInputs: NegativeCondList": "XY Inputs: NegCondList //EasyUse",
}
|
||||
# (web-scrape residue removed: "Reference in New Issue" / "Block a user" —
# Gitea UI footer text, not part of this module)