Add custom nodes, Civitai loras (LFS), and vast.ai setup script
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled
Includes 30 custom nodes committed directly, 7 Civitai-exclusive loras stored via Git LFS, and a setup script that installs all dependencies and downloads HuggingFace-hosted models on vast.ai. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,246 @@
|
||||
"""
|
||||
Tests a common workflow for UltimateSDUpscale.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import pathlib
|
||||
import torch
|
||||
|
||||
from setup_utils import execute
|
||||
from tensor_utils import img_tensor_mae, blur
|
||||
from io_utils import save_image, load_image
|
||||
from configs import DirectoryConfig
|
||||
from fixtures_images import EXT
|
||||
|
||||
# Image file names for the reference/sample outputs of each workflow variant.
# CATEGORY mirrors this test module's name (without the "test_" prefix) so the
# generated and reference images live in a per-module subdirectory.
CATEGORY = pathlib.Path(pathlib.Path(__file__).stem.removeprefix("test_"))
IMAGE_1 = CATEGORY / f"main1_sd15_upscaled{EXT}"
IMAGE_2 = CATEGORY / f"main2_sd15_upscaled{EXT}"
NO_UPSCALE_IMAGE_1 = CATEGORY / f"main1_sd15_upscaled_no_upscale{EXT}"
NO_UPSCALE_IMAGE_2 = CATEGORY / f"main2_sd15_upscaled_no_upscale{EXT}"
CUSTOM_SAMPLER_IMAGE_1 = CATEGORY / f"main1_sd15_upscaled_custom_sampler{EXT}"
CUSTOM_SAMPLER_IMAGE_2 = CATEGORY / f"main2_sd15_upscaled_custom_sampler{EXT}"
|
||||
|
||||
|
||||
class TestMainWorkflow:
    """Integration tests for the main upscaling workflow.

    Each test runs one UltimateSDUpscale node variant over the shared
    ``base_image`` fixture, saves the two resulting images, reloads them
    (so lossy compression affects both sides equally), and compares them
    against stored reference images by mean absolute error of blurred
    tensors.
    """

    # This tolerance is enough to handle both cpu and gpu as the device,
    # as well as jpg compression differences.
    TOLERANCE = 0.05

    @staticmethod
    def _tiling_kwargs(seed):
        """Return the sampler/tiling keyword arguments shared by every
        workflow variant, so the three tests stay in sync."""
        return dict(
            seed=seed,
            steps=10,
            cfg=8,
            sampler_name="euler",
            scheduler="normal",
            denoise=0.2,
            mode_type="Chess",
            tile_width=512,
            tile_height=512,
            mask_blur=8,
            tile_padding=32,
            seam_fix_mode="None",
            seam_fix_denoise=1.0,
            seam_fix_width=64,
            seam_fix_mask_blur=8,
            seam_fix_padding=16,
            force_uniform_tiles=True,
            tiled_decode=False,
        )

    def _save_and_verify(self, upscaled, test_dirs, image_1, image_2, logger_name, label):
        """Save both upscaled images, reload them to account for
        compression, and assert each matches its reference image.

        Args:
            upscaled: Batched image tensor; index 0 and 1 are compared.
            test_dirs: DirectoryConfig with sample/reference image dirs.
            image_1: Relative path of the first image (sample and reference).
            image_2: Relative path of the second image.
            logger_name: Name for the per-test logger.
            label: Prefix used in the assertion messages, e.g. "Upscaled Image".
        """
        # Save images
        sample_dir = test_dirs.sample_images
        upscaled_img1_path = sample_dir / image_1
        upscaled_img2_path = sample_dir / image_2
        save_image(upscaled[0], upscaled_img1_path)
        save_image(upscaled[1], upscaled_img2_path)
        # Load to account for compression
        upscaled = torch.cat(
            [load_image(upscaled_img1_path), load_image(upscaled_img2_path)]
        )
        # Verify results against the stored reference images
        test_image_dir = test_dirs.test_images
        test_im1_upscaled = load_image(test_image_dir / image_1)
        test_im2_upscaled = load_image(test_image_dir / image_2)

        # Blur before comparing so small per-pixel sampler jitter is ignored.
        diff1 = img_tensor_mae(blur(upscaled[0]), blur(test_im1_upscaled))
        diff2 = img_tensor_mae(blur(upscaled[1]), blur(test_im2_upscaled))

        logger = logging.getLogger(logger_name)
        logger.info(f"Diff1: {diff1}, Diff2: {diff2}")
        assert diff1 < self.TOLERANCE, f"{label} 1 doesn't match its test image."
        assert diff2 < self.TOLERANCE, f"{label} 2 doesn't match its test image."

    def test_upscale(
        self,
        base_image,
        loaded_checkpoint,
        upscale_model,
        node_classes,
        seed,
        test_dirs: DirectoryConfig,
    ):
        """Generate upscaled images using standard workflow."""
        image, positive, negative = base_image
        model, _clip, vae = loaded_checkpoint  # clip is unused by this workflow

        with torch.inference_mode():
            usdu = node_classes["UltimateSDUpscale"]
            (upscaled,) = usdu().upscale(
                image=image,
                model=model,
                positive=positive,
                negative=negative,
                vae=vae,
                upscale_by=2.00000004,  # Test small float difference doesn't add extra tiles
                upscale_model=upscale_model,
                **self._tiling_kwargs(seed),
            )
        self._save_and_verify(
            upscaled, test_dirs, IMAGE_1, IMAGE_2, "test_upscale", "Upscaled Image"
        )

    def test_upscale_no_upscale(
        self,
        base_image,
        loaded_checkpoint,
        node_classes,
        seed,
        test_dirs: DirectoryConfig,
    ):
        """Generate upscaled images using standard workflow using the no upscale node."""
        image, positive, negative = base_image
        model, _clip, vae = loaded_checkpoint  # clip is unused by this workflow
        # Pre-upscale the image ourselves; the "no upscale" node expects an
        # already-enlarged input.
        (image,) = execute(
            node_classes["ImageScaleBy"],
            image=image,
            upscale_method="lanczos",
            scale_by=2.0,
        )

        with torch.inference_mode():
            usdu = node_classes["UltimateSDUpscaleNoUpscale"]
            (upscaled,) = usdu().upscale(
                upscaled_image=image,
                model=model,
                positive=positive,
                negative=negative,
                vae=vae,
                **self._tiling_kwargs(seed),
            )
        self._save_and_verify(
            upscaled,
            test_dirs,
            NO_UPSCALE_IMAGE_1,
            NO_UPSCALE_IMAGE_2,
            "test_upscale_no_upscale",
            "No Upscale Image",
        )

    def test_upscale_with_custom_sampler(
        self,
        base_image,
        loaded_checkpoint,
        upscale_model,
        node_classes,
        seed,
        test_dirs: DirectoryConfig,
    ):
        """Generate upscaled images using standard workflow using the custom sampler node."""
        image, positive, negative = base_image
        model, _clip, vae = loaded_checkpoint  # clip is unused by this workflow

        with torch.inference_mode():
            # Setup custom scheduler and sampler
            custom_scheduler = node_classes["KarrasScheduler"]
            (sigmas,) = execute(custom_scheduler, 20, 14.614642, 0.0291675, 7.0)
            # Keep only the tail of the schedule (denoise-equivalent 0.15).
            (_, sigmas) = execute(node_classes["SplitSigmasDenoise"], sigmas, 0.15)

            custom_sampler = node_classes["KSamplerSelect"]
            (sampler,) = execute(custom_sampler, "dpmpp_2m")

            # Run upscale
            usdu = node_classes["UltimateSDUpscaleCustomSample"]
            (upscaled,) = usdu().upscale(
                image=image,
                model=model,
                positive=positive,
                negative=negative,
                vae=vae,
                upscale_by=2.0,
                upscale_model=upscale_model,
                custom_sampler=sampler,
                custom_sigmas=sigmas,
                **self._tiling_kwargs(seed),
            )
        self._save_and_verify(
            upscaled,
            test_dirs,
            CUSTOM_SAMPLER_IMAGE_1,
            CUSTOM_SAMPLER_IMAGE_2,
            "test_upscale_with_custom_sampler",
            "Upscaled Image",
        )
|
||||
Reference in New Issue
Block a user