Add custom nodes, Civitai loras (LFS), and vast.ai setup script
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled

Includes 30 custom nodes committed directly, 7 Civitai-exclusive
loras stored via Git LFS, and a setup script that installs all
dependencies and downloads HuggingFace-hosted models on vast.ai.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-09 00:55:26 +00:00
parent 2b70ab9ad0
commit f09734b0ee
2274 changed files with 748556 additions and 3 deletions

View File

@@ -0,0 +1 @@
sample_images/

View File

@@ -0,0 +1,49 @@
# Running Tests
This directory contains tests for ComfyUI_UltimateSDUpscale.
## Prerequisites
- These tests assume that ComfyUI is installed using a virtual environment
- Activate the ComfyUI virtual environment before running tests
- The checkpoint `v1-5-pruned-emaonly-fp16.safetensors` is available
- The upscale model `4x-UltraSharp.pth` is available
## Running Tests
### Using the convenience scripts (works from repo root or test directory):
**Linux/Mac (Bash):**
```bash
./test/run_tests.sh # From repo root
./run_tests.sh # From test directory
```
`run_tests.sh` forwards any additional arguments directly to pytest.
### Using pytest directly (must be in test directory):
```bash
cd test
pytest # Run all tests
pytest -v # Verbose
```
### Common pytest options:
- `-v` - Verbose output
- `-s` - Show print statements
- `--log-cli-level=INFO` - Show info-level logs
- `-k PATTERN` - Run tests matching pattern
- `--lf` - Run last failed tests
## Test Structure
- `conftest.py` - Pytest configuration, fixtures, and path setup
- `sample_images/` - Generated test images for visual inspection
- `test_images/` - Reference images used as inputs or expected outputs
## Troubleshooting
If you encounter import errors:
1. Make sure you're running from the `test/` directory
2. Verify the virtual environment is activated

View File

@@ -0,0 +1,9 @@
import pathlib
class DirectoryConfig:
    """Helper class for test directories.

    Attributes:
        test_images: Directory of reference images used as inputs or
            expected outputs.
        sample_images: Directory where generated sample images are written.
    """

    def __init__(self, test_images: pathlib.Path, sample_images: pathlib.Path):
        self.test_images = test_images
        self.sample_images = sample_images

    def __repr__(self) -> str:
        # Aids debugging when a fixture receives unexpected paths.
        return (
            f"{type(self).__name__}(test_images={self.test_images!r}, "
            f"sample_images={self.sample_images!r})"
        )

View File

@@ -0,0 +1,174 @@
"""
Setup for the ComfyUI engine and shared test fixtures.
"""
import os
import sys
from pathlib import Path
import pytest
import asyncio
import logging
from setup_utils import SilenceLogs, execute
from hf_downloader import download_test_images
from configs import DirectoryConfig
# Because of manipulations to sys.path, non-packaged imports should be delayed to avoid import issues
#
# # Configuration
#
# Expected model filenames; tests skip if they are not installed.
TEST_CHECKPOINT = "v1-5-pruned-emaonly-fp16.safetensors"
TEST_UPSCALE_MODEL = "4x-UltraSharp.pth"
# Subdirectory names under the test/ directory.
SAMPLE_IMAGE_SUBDIR = "sample_images"
TEST_IMAGE_SUBDIR = "test_images"
# conftest.py is in repo_root/test/ directory
REPO_ROOT = Path(__file__).parent.parent.resolve()
# Presumably the repo lives at ComfyUI/custom_nodes/<repo>, so two levels up
# is the ComfyUI root — TODO confirm for non-standard installs.
COMFYUI_ROOT = REPO_ROOT.parent.parent.resolve()
# Make sure the repo root is in sys.path for imports
# Ensure submodule root is in path for test imports
if str(REPO_ROOT) not in sys.path:
    sys.path.insert(0, str(REPO_ROOT))
# Ensure ComfyUI path is set up
if str(COMFYUI_ROOT) not in sys.path:
    sys.path.insert(0, str(COMFYUI_ROOT))
# Session scoped fixtures
# Re-exported so pytest discovers the base_image fixture from this conftest.
from fixtures_images import base_image
def pytest_configure(config):
    """Called before test collection begins.

    Fetches the reference images and restricts ComfyUI to loading only this
    repository's custom node.
    """
    # Pull the reference/test images from the HuggingFace dataset first.
    download_test_images(
        repo_id="ssitu/ultimatesdupscale_test",
        save_dir=(REPO_ROOT / "test" / "test_images").resolve(),
        repo_folder="test_images",
    )
    # Configure ComfyUI's CLI args before any nodes are initialized.
    from comfy.cli_args import args

    # Optional overrides kept around for local debugging:
    #   args.cpu = True         # Force CPU mode for tests
    #   args.force_fp16 = True  # Force float16 mode for tests
    args.disable_all_custom_nodes = True
    # Assumes the name of the custom node folder is ComfyUI_UltimateSDUpscale
    args.whitelist_custom_nodes = ["ComfyUI_UltimateSDUpscale"]
#
# # Path Setup
#
def _setup_comfyui_paths():
    """Configure ComfyUI folder paths for testing."""
    # comfy/utils.py would shadow ComfyUI's top-level utils/ package when
    # utils.extra_config is imported, so strip every occurrence of the comfy
    # directory from sys.path before touching main.
    comfy_dir = str(COMFYUI_ROOT / "comfy")
    sys.path[:] = [entry for entry in sys.path if entry != comfy_dir]
    # ComfyUI itself must remain importable.
    if str(COMFYUI_ROOT) not in sys.path:
        sys.path.insert(0, str(COMFYUI_ROOT))
    # Apply custom paths.
    # main.py may warn that torch was already imported (probably by pytest);
    # that has not caused problems so far.
    from main import apply_custom_paths

    apply_custom_paths()
#
# # Fixtures
#
@pytest.fixture(scope="session")
def comfyui_initialized():
    """Initialize ComfyUI nodes once per test session."""
    from nodes import init_extra_nodes

    _setup_comfyui_paths()

    async def _bootstrap():
        # Node registration is chatty; keep test output clean.
        with SilenceLogs():
            await init_extra_nodes(init_api_nodes=False)

    asyncio.run(_bootstrap())
    yield True
@pytest.fixture(scope="session")
def node_classes(comfyui_initialized):
    """Get ComfyUI node class mappings."""
    # Imported lazily: NODE_CLASS_MAPPINGS is only populated after the
    # comfyui_initialized fixture has run.
    from nodes import NODE_CLASS_MAPPINGS

    return NODE_CLASS_MAPPINGS
@pytest.fixture(scope="session")
def test_checkpoint():
    """Find and return a valid test checkpoint."""
    import folder_paths

    # TODO: Should probably use a hash instead of matching the filename
    available = folder_paths.get_filename_list("checkpoints")
    if TEST_CHECKPOINT in available:
        return TEST_CHECKPOINT
    pytest.skip(f"No test checkpoint found. Please add {TEST_CHECKPOINT}")
@pytest.fixture(scope="session")
def loaded_checkpoint(comfyui_initialized, test_checkpoint, node_classes):
    """Load checkpoint and return (model, clip, vae) tuple."""
    import torch

    loader = node_classes["CheckpointLoaderSimple"]
    # Load under inference mode, matching how the tests run the model.
    with torch.inference_mode():
        model, clip, vae = execute(loader, test_checkpoint)
    return model, clip, vae
@pytest.fixture(scope="session")
def upscale_model(comfyui_initialized, node_classes):
    """Load the test upscale model, skipping if it is not installed."""
    import torch
    import folder_paths

    UpscaleModelLoader = node_classes["UpscaleModelLoader"]
    upscale_models = folder_paths.get_filename_list("upscale_models")
    # TODO: Should probably use a hash instead of matching the filename
    if TEST_UPSCALE_MODEL not in upscale_models:
        pytest.skip(f"No test upscale model found. Please add {TEST_UPSCALE_MODEL}")
    # Bug fix: this previously loaded upscale_models[0], which is only the
    # test model when it happens to sort first. Load TEST_UPSCALE_MODEL
    # explicitly so reference-image comparisons are deterministic.
    with torch.inference_mode():
        (model,) = execute(UpscaleModelLoader, TEST_UPSCALE_MODEL)
    return model
@pytest.fixture(scope="session")
def test_dirs():
    """Return paths to test and sample image directories."""
    base = REPO_ROOT / "test"
    sample_dir = base / SAMPLE_IMAGE_SUBDIR
    # Generated samples are written here; make sure it exists.
    sample_dir.mkdir(exist_ok=True)
    return DirectoryConfig(
        test_images=base / TEST_IMAGE_SUBDIR,
        sample_images=sample_dir,
    )
@pytest.fixture(scope="session")
def seed():
    """Fixed seed so generated images are reproducible across runs."""
    return 1

View File

@@ -0,0 +1,70 @@
"""
Fixtures for base images.
"""
import pathlib
import pytest
import torch
from setup_utils import execute
from io_utils import save_image, load_image
from configs import DirectoryConfig
# Image file names
# Extension used for every generated/reference image in the test suite.
EXT = ".jpg"
# Subdirectory (under both the sample and test image dirs) for base images.
CATEGORY = pathlib.Path("base_images")
BASE_IMAGE_1_NAME = "main1_sd15" + EXT
BASE_IMAGE_2_NAME = "main2_sd15" + EXT
# Prepend category path
BASE_IMAGE_1 = CATEGORY / BASE_IMAGE_1_NAME
BASE_IMAGE_2 = CATEGORY / BASE_IMAGE_2_NAME
@pytest.fixture(scope="session")
def base_image(loaded_checkpoint, seed, test_dirs: DirectoryConfig, node_classes):
    """Generate a base image for upscaling tests."""
    empty_latent_node = node_classes["EmptyLatentImage"]
    encode_node = node_classes["CLIPTextEncode"]
    sampler_node = node_classes["KSampler"]
    decode_node = node_classes["VAEDecode"]
    model, clip, vae = loaded_checkpoint
    with torch.inference_mode():
        # Two 512x512 latents so tests have a pair of distinct images.
        (latent,) = execute(empty_latent_node, width=512, height=512, batch_size=2)
        (positive,) = execute(
            encode_node,
            text="beautiful scenery nature glass bottle landscape, , purple galaxy bottle,",
            clip=clip,
        )
        (negative,) = execute(encode_node, text="text, watermark", clip=clip)
        (samples,) = execute(
            sampler_node,
            model=model,
            positive=positive,
            negative=negative,
            latent_image=latent,
            seed=seed,
            steps=10,
            cfg=8,
            sampler_name="dpmpp_2m",
            scheduler="karras",
            denoise=1.0,
        )
        (decoded,) = execute(decode_node, samples=samples, vae=vae)
        # Persist the base images so they can be inspected manually.
        sample_dir = test_dirs.sample_images
        img1_path = sample_dir / BASE_IMAGE_1
        img2_path = sample_dir / BASE_IMAGE_2
        save_image(decoded[0:1], img1_path)
        save_image(decoded[1:2], img2_path)
        # Round-trip through disk so downstream comparisons include jpg
        # compression effects, matching the stored reference images.
        image = torch.cat([load_image(img1_path), load_image(img2_path)])
    return image, positive, negative

View File

@@ -0,0 +1,85 @@
import logging
import re
import urllib.parse
import urllib.request
from pathlib import Path
logging.basicConfig(level=logging.INFO)
def _fetch_hf_html(repo_id: str, folder_path: str) -> str:
    """Fetch HTML from HuggingFace tree page."""
    tree_url = f"https://huggingface.co/datasets/{repo_id}/tree/main/{folder_path}"
    # urlopen is a context manager; the connection closes on exit.
    with urllib.request.urlopen(tree_url) as response:
        body = response.read()
    return body.decode("utf-8")
def list_hf_subfolders(repo_id: str, folder_path: str) -> list[str]:
    """List subfolders in a HuggingFace dataset folder."""
    # Matches tree links one level below folder_path (no trailing / or query).
    pattern = rf'/datasets/{repo_id}/tree/main/({folder_path}/[^"/?]+)'
    try:
        matches = re.findall(pattern, _fetch_hf_html(repo_id, folder_path))
    except Exception as e:
        # Best-effort: callers treat [] as "nothing found".
        logging.error(f"Failed to list subfolders in {folder_path}: {e}")
        return []
    return sorted(set(matches))
def list_hf_files(
    repo_id: str,
    folder_path: str,
    extensions: tuple = (".jpg", ".jpeg", ".png", ".webp"),
) -> list[str]:
    """List image files in a HuggingFace dataset folder.

    Args:
        repo_id: HuggingFace dataset repo id, e.g. "user/dataset".
        folder_path: Folder inside the repo to list.
        extensions: File suffixes to match (case-sensitive).

    Returns:
        URL-decoded repo-relative file paths, or [] on any failure.
    """
    try:
        html = _fetch_hf_html(repo_id, folder_path)
        # Bug fix: extensions must be regex-escaped — otherwise the "." in
        # ".jpg" matches any character (e.g. "photo_jpg" would match).
        ext_pattern = "|".join(re.escape(e) for e in extensions)
        pattern = rf'/datasets/{repo_id}/blob/main/({folder_path}/[^"]+?({ext_pattern}))'
        return [urllib.parse.unquote(match[0]) for match in re.findall(pattern, html)]
    except Exception as e:
        # Best-effort: callers treat [] as "nothing found".
        logging.error(f"Failed to list files in {folder_path}: {e}")
        return []
def download_test_images(save_dir: Path, repo_folder: str, repo_id: str) -> Path:
    """Download the test_images/ folder from the HF test dataset repo"""
    # Discover all subfolders and collect files
    subfolders = list_hf_subfolders(repo_id, repo_folder)
    if not subfolders:
        logging.warning(f"No subfolders found in {repo_folder}")
        return save_dir
    all_files = [f for folder in subfolders for f in list_hf_files(repo_id, folder)]
    if not all_files:
        logging.warning(f"No image files found in {repo_folder}")
        return save_dir
    logging.info(f"Found {len(all_files)} files from {len(subfolders)} folders")
    # Download files, preserving folder structure
    dest_root = Path(save_dir)
    downloaded = 0
    skipped = 0
    for remote_path in all_files:
        relative_path = Path(remote_path).relative_to(repo_folder)
        target = dest_root / relative_path
        # Existing files are kept; delete locally to force a re-download.
        if target.exists():
            logging.info(f"Skipping {relative_path} (already exists)")
            skipped += 1
            continue
        target.parent.mkdir(parents=True, exist_ok=True)
        file_url = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{remote_path}"
        logging.info(f"Downloading {relative_path}...")
        urllib.request.urlretrieve(file_url, target)
        downloaded += 1
    logging.info(f"Downloaded {downloaded} files, skipped {skipped} existing files")
    return dest_root
if __name__ == "__main__":
    # Allow running this module directly to pre-fetch the test images.
    # NOTE(review): basicConfig was already called at import time above, so
    # this second call is a no-op.
    logging.basicConfig(level=logging.INFO)
    download_test_images(
        repo_id="ssitu/ultimatesdupscale_test",
        save_dir=Path("./test/test_images/"),
        repo_folder="test_images",
    )

View File

@@ -0,0 +1,20 @@
import pathlib
from PIL import Image
import usdu_utils
def save_image(tensor, path: pathlib.Path):
    """The goto function to save a tensor image to the sampled images directory."""
    assert tensor.ndim == 3 or (tensor.ndim == 4 and tensor.shape[0] == 1), (
        f"Expected a 3D tensor (H, W, C) or (1, H, W, C), got {tensor.ndim=}"
    )
    # Normalize to a single-image batch before converting.
    batched = tensor if tensor.ndim == 4 else tensor.unsqueeze(0)
    pil_image = usdu_utils.tensor_to_pil(batched.cpu())
    # Create parent directories on demand so callers don't have to.
    path.parent.mkdir(parents=True, exist_ok=True)
    # Moderate jpg quality keeps sample files small.
    pil_image.save(path, quality=75, optimize=True)
def load_image(path: pathlib.Path, device=None):
    """Load an image from disk and convert it to a tensor."""
    pil_image = Image.open(path)
    # device=None leaves the tensor on its current (CPU) device.
    return usdu_utils.pil_to_tensor(pil_image).to(device=device)

View File

@@ -0,0 +1,9 @@
[pytest]
# Filter out warnings that are unavoidable or from external libraries
filterwarnings =
# Ignore CUDA compatibility warnings (hardware limitation)
ignore::UserWarning:torch.cuda
# Ignore Swig type warnings from importlib by pytest
ignore:builtin type.*has no __module__ attribute:DeprecationWarning:importlib._bootstrap:488
ignore:builtin type.*has no __module__ attribute:DeprecationWarning:sys:0

View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Run the test suite with pytest.
# Can be run from either the repo root or the test directory.
# Example usage: ./run_tests.sh [additional pytest arguments]
# NOTE: requires bash (uses BASH_SOURCE and [[ ]]); do not invoke with plain sh.
set -euo pipefail

# Get the script directory
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Determine the test directory relative to the script location
if [[ "$(basename "$SCRIPT_DIR")" == "test" ]]; then
    # Script is in test directory
    TEST_DIR="$SCRIPT_DIR"
else
    # Script is in repo root
    TEST_DIR="$SCRIPT_DIR/test"
fi

# set -e makes a failed cd abort instead of running pytest in the wrong place.
cd "$TEST_DIR"
exec python -m pytest "$@"

View File

@@ -0,0 +1,20 @@
import logging
class SilenceLogs:
    """Context manager to temporarily silence logging."""

    def __enter__(self):
        # Suppress every record up to and including CRITICAL.
        logging.disable(logging.CRITICAL)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore normal logging even if the body raised.
        logging.disable(logging.NOTSET)
def execute(node, *args, **kwargs):
    """Execute a ComfyUI node, handling both V3 and legacy schemas."""
    # V3 nodes expose an `execute` attribute on the class; legacy nodes name
    # their entry point via the FUNCTION attribute and need an instance.
    try:
        runner = node.execute
    except AttributeError:
        return getattr(node(), node.FUNCTION)(*args, **kwargs)
    return runner(*args, **kwargs)

View File

@@ -0,0 +1,27 @@
import torchvision.transforms.functional as TF
def img_tensor_mae(tensor1, tensor2):
    """Calculate the mean absolute difference between two image tensors."""
    # Drop any leading batch dimension and move to CPU before comparing.
    tensor1, tensor2 = (t.squeeze(0).cpu() for t in (tensor1, tensor2))
    if tensor1.shape != tensor2.shape:
        raise ValueError(
            f"Tensors must have the same shape for comparison. Got {tensor1.shape=} and {tensor2.shape=}."
        )
    return (tensor1 - tensor2).abs().mean().item()
def blur(tensor, kernel_size=9, sigma=None):
    """Apply Gaussian blur to an image tensor."""
    # torchvision expects channels-first; convert from [..., H, W, C].
    if tensor.ndim == 3:
        chw = tensor.permute(2, 0, 1).unsqueeze(0)
    elif tensor.ndim == 4:
        chw = tensor.permute(0, 3, 1, 2)
    else:
        raise ValueError(f"Expected a 3D or 4D tensor, got {tensor.ndim=}")
    blurred = TF.gaussian_blur(chw, kernel_size=kernel_size, sigma=sigma)  # type: ignore
    # Back to channels-last: [1, C, H, W] -> [1, H, W, C].
    return blurred.permute(0, 2, 3, 1)

View File

@@ -0,0 +1,31 @@
"""
Tests for base image generation.
"""
import logging
from configs import DirectoryConfig
from tensor_utils import img_tensor_mae, blur
from io_utils import load_image
from fixtures_images import BASE_IMAGE_1, BASE_IMAGE_2
def test_base_image_matches_reference(base_image, test_dirs: DirectoryConfig):
    """
    Verify generated base images match reference images.
    This is a sanity check that the checkpoint and generation pipeline behave
    as expected for the tests that depend on them.
    """
    logger = logging.getLogger("test_base_image_matches_reference")
    generated, _, _ = base_image
    ref_dir = test_dirs.test_images
    ref1 = load_image(ref_dir / BASE_IMAGE_1)
    ref2 = load_image(ref_dir / BASE_IMAGE_2)
    # Blur both sides to damp high-frequency noise differences; a perceptual
    # metric would probably be overkill here.
    diff1 = img_tensor_mae(blur(generated[0:1]), blur(ref1))
    diff2 = img_tensor_mae(blur(generated[1:2]), blur(ref2))
    logger.info(f"Base Image Diff1: {diff1}, Diff2: {diff2}")
    assert diff1 < 0.05, "Image 1 does not match its test image."
    assert diff2 < 0.05, "Image 2 does not match its test image."

View File

@@ -0,0 +1,96 @@
"""
Test using controlnet in the upscaling workflow.
"""
import logging
import pathlib
import pytest
import torch
from setup_utils import execute
from tensor_utils import img_tensor_mae, blur
from io_utils import save_image, load_image
from configs import DirectoryConfig
from fixtures_images import EXT
CATEGORY = pathlib.Path(pathlib.Path(__file__).stem.removeprefix("test_"))
CONTROLNET_TILE_OUTPUT_IMAGE = "controlnet_tile" + EXT
TEST_CONTROLNET_TILE_MODEL = "control_v11f1e_sd15_tile.pth"
class TestControlNet:
    """Integration tests for the upscaling workflow with ControlNet."""

    @pytest.fixture(scope="class")
    def controlnet_upscaled_image(
        self,
        base_image,
        loaded_checkpoint,
        upscale_model,
        node_classes,
        seed,
        test_dirs,
    ):
        """Generate upscaled images using ControlNet."""
        import folder_paths

        # Consistency fix: skip (like the checkpoint/upscale-model fixtures do)
        # instead of erroring when the ControlNet model is not installed.
        # TODO: Should probably use a hash instead of matching the filename
        if TEST_CONTROLNET_TILE_MODEL not in folder_paths.get_filename_list("controlnet"):
            pytest.skip(f"No ControlNet model found. Please add {TEST_CONTROLNET_TILE_MODEL}")
        image, positive, negative = base_image
        model, clip, vae = loaded_checkpoint
        # Only the first image of the pair is needed for this test.
        image = image[0:1]
        with torch.inference_mode():
            # Consistency fix: load/apply the ControlNet inside inference_mode,
            # matching how every other model loader in the suite is invoked.
            (controlnet_tile_model,) = execute(
                node_classes["ControlNetLoader"], TEST_CONTROLNET_TILE_MODEL
            )
            (positive,) = execute(
                node_classes["ControlNetApply"], positive, controlnet_tile_model, image, 1.0
            )
            # Run upscale with ControlNet
            usdu = node_classes["UltimateSDUpscale"]
            (upscaled,) = usdu().upscale(
                image=image,
                model=model,
                positive=positive,
                negative=negative,
                vae=vae,
                upscale_by=2.0,
                seed=seed,
                steps=5,
                cfg=8,
                sampler_name="euler",
                scheduler="normal",
                denoise=1.0,
                upscale_model=None,
                mode_type="Chess",
                tile_width=512,
                tile_height=512,
                mask_blur=8,
                tile_padding=32,
                seam_fix_mode="None",
                seam_fix_denoise=1.0,
                seam_fix_width=64,
                seam_fix_mask_blur=8,
                seam_fix_padding=16,
                force_uniform_tiles=True,
                tiled_decode=False,
            )
        # Save and reload the sample image so comparisons include jpg compression.
        sample_dir = test_dirs.sample_images
        filename = CATEGORY / CONTROLNET_TILE_OUTPUT_IMAGE
        save_image(upscaled[0], sample_dir / filename)
        upscaled = load_image(sample_dir / filename)
        return upscaled

    def test_controlnet_upscaled_image_matches_reference(
        self, controlnet_upscaled_image, test_dirs: DirectoryConfig
    ):
        """
        Verify ControlNet upscaled images match reference images.
        """
        logger = logging.getLogger("test_controlnet_upscaled_image_matches_reference")
        test_img_dir = test_dirs.test_images
        test_img = load_image(test_img_dir / CATEGORY / CONTROLNET_TILE_OUTPUT_IMAGE)
        # Reduce high-frequency noise differences with gaussian blur
        diff = img_tensor_mae(blur(controlnet_upscaled_image), blur(test_img))
        logger.info(f"ControlNet Upscaled Image Diff: {diff}")
        assert diff < 0.05, "ControlNet upscaled image does not match its test image."

View File

@@ -0,0 +1,246 @@
"""
Tests a common workflow for UltimateSDUpscale.
"""
import logging
import pathlib
import torch
from setup_utils import execute
from tensor_utils import img_tensor_mae, blur
from io_utils import save_image, load_image
from configs import DirectoryConfig
from fixtures_images import EXT
# Image file names
CATEGORY = pathlib.Path(pathlib.Path(__file__).stem.removeprefix("test_"))
IMAGE_1 = CATEGORY / ("main1_sd15_upscaled" + EXT)
IMAGE_2 = CATEGORY / ("main2_sd15_upscaled" + EXT)
NO_UPSCALE_IMAGE_1 = CATEGORY / ("main1_sd15_upscaled_no_upscale" + EXT)
NO_UPSCALE_IMAGE_2 = CATEGORY / ("main2_sd15_upscaled_no_upscale" + EXT)
CUSTOM_SAMPLER_IMAGE_1 = CATEGORY / ("main1_sd15_upscaled_custom_sampler" + EXT)
CUSTOM_SAMPLER_IMAGE_2 = CATEGORY / ("main2_sd15_upscaled_custom_sampler" + EXT)
class TestMainWorkflow:
    """Integration tests for the main upscaling workflow."""

    def _save_reload_and_verify(self, upscaled, paths, test_dirs, logger_name, label):
        """Save both outputs, reload them (to include jpg compression), and
        compare against the stored reference images.

        Extracted because the three workflow tests previously triplicated this
        save/reload/compare logic verbatim.
        """
        path1, path2 = paths
        sample_dir = test_dirs.sample_images
        save_image(upscaled[0], sample_dir / path1)
        save_image(upscaled[1], sample_dir / path2)
        # Load to account for compression
        reloaded = torch.cat(
            [load_image(sample_dir / path1), load_image(sample_dir / path2)]
        )
        test_image_dir = test_dirs.test_images
        ref1 = load_image(test_image_dir / path1)
        ref2 = load_image(test_image_dir / path2)
        diff1 = img_tensor_mae(blur(reloaded[0]), blur(ref1))
        diff2 = img_tensor_mae(blur(reloaded[1]), blur(ref2))
        # This tolerance is enough to handle both cpu and gpu as the device, as well as jpg compression differences.
        logging.getLogger(logger_name).info(f"Diff1: {diff1}, Diff2: {diff2}")
        assert diff1 < 0.05, f"{label} Image 1 doesn't match its test image."
        assert diff2 < 0.05, f"{label} Image 2 doesn't match its test image."

    def test_upscale(
        self,
        base_image,
        loaded_checkpoint,
        upscale_model,
        node_classes,
        seed,
        test_dirs: DirectoryConfig,
    ):
        """Generate upscaled images using standard workflow."""
        image, positive, negative = base_image
        model, clip, vae = loaded_checkpoint
        with torch.inference_mode():
            usdu = node_classes["UltimateSDUpscale"]
            (upscaled,) = usdu().upscale(
                image=image,
                model=model,
                positive=positive,
                negative=negative,
                vae=vae,
                upscale_by=2.00000004,  # Test small float difference doesn't add extra tiles
                seed=seed,
                steps=10,
                cfg=8,
                sampler_name="euler",
                scheduler="normal",
                denoise=0.2,
                upscale_model=upscale_model,
                mode_type="Chess",
                tile_width=512,
                tile_height=512,
                mask_blur=8,
                tile_padding=32,
                seam_fix_mode="None",
                seam_fix_denoise=1.0,
                seam_fix_width=64,
                seam_fix_mask_blur=8,
                seam_fix_padding=16,
                force_uniform_tiles=True,
                tiled_decode=False,
            )
        self._save_reload_and_verify(
            upscaled, (IMAGE_1, IMAGE_2), test_dirs, "test_upscale", "Upscaled"
        )

    def test_upscale_no_upscale(
        self,
        base_image,
        loaded_checkpoint,
        node_classes,
        seed,
        test_dirs: DirectoryConfig,
    ):
        """Generate upscaled images using standard workflow using the no upscale node."""
        image, positive, negative = base_image
        model, clip, vae = loaded_checkpoint
        # Pre-upscale the input since the NoUpscale node expects an already
        # upscaled image.
        (image,) = execute(
            node_classes["ImageScaleBy"],
            image=image,
            upscale_method="lanczos",
            scale_by=2.0,
        )
        with torch.inference_mode():
            usdu = node_classes["UltimateSDUpscaleNoUpscale"]
            (upscaled,) = usdu().upscale(
                upscaled_image=image,
                model=model,
                positive=positive,
                negative=negative,
                vae=vae,
                seed=seed,
                steps=10,
                cfg=8,
                sampler_name="euler",
                scheduler="normal",
                denoise=0.2,
                mode_type="Chess",
                tile_width=512,
                tile_height=512,
                mask_blur=8,
                tile_padding=32,
                seam_fix_mode="None",
                seam_fix_denoise=1.0,
                seam_fix_width=64,
                seam_fix_mask_blur=8,
                seam_fix_padding=16,
                force_uniform_tiles=True,
                tiled_decode=False,
            )
        self._save_reload_and_verify(
            upscaled,
            (NO_UPSCALE_IMAGE_1, NO_UPSCALE_IMAGE_2),
            test_dirs,
            "test_upscale_no_upscale",
            "No Upscale",
        )

    def test_upscale_with_custom_sampler(
        self,
        base_image,
        loaded_checkpoint,
        upscale_model,
        node_classes,
        seed,
        test_dirs: DirectoryConfig,
    ):
        """Generate upscaled images using standard workflow using the custom sampler node."""
        image, positive, negative = base_image
        model, clip, vae = loaded_checkpoint
        with torch.inference_mode():
            # Setup custom scheduler and sampler
            custom_scheduler = node_classes["KarrasScheduler"]
            (sigmas,) = execute(custom_scheduler, 20, 14.614642, 0.0291675, 7.0)
            (_, sigmas) = execute(node_classes["SplitSigmasDenoise"], sigmas, 0.15)
            custom_sampler = node_classes["KSamplerSelect"]
            (sampler,) = execute(custom_sampler, "dpmpp_2m")
            # Run upscale
            usdu = node_classes["UltimateSDUpscaleCustomSample"]
            (upscaled,) = usdu().upscale(
                image=image,
                model=model,
                positive=positive,
                negative=negative,
                vae=vae,
                upscale_by=2.0,
                seed=seed,
                steps=10,
                cfg=8,
                sampler_name="euler",
                scheduler="normal",
                denoise=0.2,
                upscale_model=upscale_model,
                mode_type="Chess",
                tile_width=512,
                tile_height=512,
                mask_blur=8,
                tile_padding=32,
                seam_fix_mode="None",
                seam_fix_denoise=1.0,
                seam_fix_width=64,
                seam_fix_mask_blur=8,
                seam_fix_padding=16,
                force_uniform_tiles=True,
                tiled_decode=False,
                custom_sampler=sampler,
                custom_sigmas=sigmas,
            )
        self._save_reload_and_verify(
            upscaled,
            (CUSTOM_SAMPLER_IMAGE_1, CUSTOM_SAMPLER_IMAGE_2),
            test_dirs,
            "test_upscale_with_custom_sampler",
            "Upscaled",
        )

View File

@@ -0,0 +1,69 @@
"""
Test for other settings included in the upscaling nodes.
"""
import logging
import pathlib
import pytest
import torch
from tensor_utils import img_tensor_mae, blur
from io_utils import save_image, load_image
from configs import DirectoryConfig
from fixtures_images import EXT
# Image file names
CATEGORY = pathlib.Path(pathlib.Path(__file__).stem.removeprefix("test_"))
def test_minimal_tile_sizes(
    base_image, loaded_checkpoint, node_classes, seed, test_dirs: DirectoryConfig
):
    """Test upscaling with minimal tile sizes."""
    # NOTE(review): the parameters actually exercise small paddings with
    # force_uniform_tiles=False (filename "non_uniform_tiles"), not minimal
    # tile sizes — confirm the intended scenario and rename accordingly.
    filename = "non_uniform_tiles"
    image, positive, negative = base_image
    model, clip, vae = loaded_checkpoint
    with torch.inference_mode():
        usdu = node_classes["UltimateSDUpscale"]
        (upscaled,) = usdu().upscale(
            image=image[0:1],
            model=model,
            positive=positive,
            negative=negative,
            vae=vae,
            upscale_by=1.5,
            seed=seed,
            steps=5,
            cfg=8,
            sampler_name="euler",
            scheduler="normal",
            denoise=0.15,
            upscale_model=None,
            mode_type="Chess",
            tile_width=512,
            tile_height=512,
            mask_blur=8,
            tile_padding=8,
            seam_fix_mode="None",
            seam_fix_denoise=1.0,
            seam_fix_width=16,
            seam_fix_mask_blur=8,
            seam_fix_padding=4,
            force_uniform_tiles=False,
            tiled_decode=False,
        )
    # Save and reload sample image
    sample_dir = test_dirs.sample_images
    filename_path = CATEGORY / (filename + EXT)
    save_image(upscaled[0], sample_dir / filename_path)
    upscaled = load_image(sample_dir / filename_path)
    # Compare with reference
    test_image_dir = test_dirs.test_images
    test_image = load_image(test_image_dir / filename_path)
    diff = img_tensor_mae(blur(upscaled), blur(test_image))
    logger = logging.getLogger(__name__)
    # Bug fix: the log and assert messages contained a leftover "(unknown)"
    # placeholder; report the actual test image name instead.
    logger.info(f"{filename} MAE: {diff}")
    assert diff < 0.05, f"{filename} output doesn't match reference"

View File

@@ -0,0 +1,134 @@
"""
Tests for different upscaling modes and seam fix modes.
"""
import logging
import pathlib
import pytest
import torch
from tensor_utils import img_tensor_mae, blur
from io_utils import save_image, load_image
from configs import DirectoryConfig
from fixtures_images import EXT
# Image file names
CATEGORY = pathlib.Path(pathlib.Path(__file__).stem.removeprefix("test_"))
def image_name_format(prefix: str, mode: str) -> str:
    """Helper for the image name format for the tests below."""
    # e.g. ("seamfix", "Band Pass") -> "seamfix_band_pass.jpg"
    normalized = mode.lower().replace(" ", "_")
    return f"{prefix}_{normalized}{EXT}"
class TestTilingModes:
    """Parametrized tests covering tiling mode types and seam fix modes.

    Each variant runs the full upscale workflow on one base image and
    compares the (blurred) result against a stored reference image.
    """

    def _test_upscale_variant(
        self,
        base_image,
        loaded_checkpoint,
        node_classes,
        seed,
        test_dirs: DirectoryConfig,
        mode_type,
        seam_fix_mode,
        seam_fix_denoise,
        filename_prefix,
    ):
        """Helper method to test upscale variants with different parameters."""
        logger = logging.getLogger(f"test_{filename_prefix}")
        image, positive, negative = base_image
        model, clip, vae = loaded_checkpoint
        with torch.inference_mode():
            usdu = node_classes["UltimateSDUpscale"]
            # Few steps and no upscale model: keeps the run fast; only the
            # tiling/seam-fix behavior is under test here.
            (upscaled,) = usdu().upscale(
                image=image[0:1],
                model=model,
                positive=positive,
                negative=negative,
                vae=vae,
                upscale_by=2.0,
                seed=seed,
                steps=3,
                cfg=8,
                sampler_name="euler",
                scheduler="normal",
                denoise=0.2,
                upscale_model=None,
                mode_type=mode_type,
                tile_width=512,
                tile_height=512,
                mask_blur=8,
                tile_padding=32,
                seam_fix_mode=seam_fix_mode,
                seam_fix_denoise=seam_fix_denoise,
                seam_fix_width=64,
                seam_fix_mask_blur=8,
                seam_fix_padding=16,
                force_uniform_tiles=True,
                tiled_decode=False,
            )
        # Save and reload sample image
        sample_dir = test_dirs.sample_images
        filename = CATEGORY / filename_prefix
        save_image(upscaled[0], sample_dir / filename)
        upscaled = load_image(sample_dir / filename)
        # Compare with reference
        test_image_dir = test_dirs.test_images
        test_image = load_image(test_image_dir / filename)
        diff = img_tensor_mae(blur(upscaled), blur(test_image))
        logger.info(f"{filename_prefix} MAE: {diff}")
        assert diff < 0.05, f"{filename_prefix} output doesn't match reference"

    # "Chess" is tested in the main workflow test
    @pytest.mark.parametrize("mode_type", ["Linear", "None"])
    def test_mode_types(
        self,
        base_image,
        loaded_checkpoint,
        node_classes,
        seed,
        mode_type,
        test_dirs: DirectoryConfig,
    ):
        """Test different tiling mode types."""
        filename = image_name_format("mode", mode_type)
        self._test_upscale_variant(
            base_image,
            loaded_checkpoint,
            node_classes,
            seed,
            test_dirs,
            mode_type=mode_type,
            seam_fix_mode="None",
            seam_fix_denoise=1.0,
            filename_prefix=filename,
        )

    @pytest.mark.parametrize(
        "seam_fix_mode", ["None", "Band Pass", "Half Tile", "Half Tile + Intersections"]
    )
    def test_seam_fix_modes(
        self,
        base_image,
        loaded_checkpoint,
        node_classes,
        seed,
        seam_fix_mode,
        test_dirs: DirectoryConfig,
    ):
        """Test different seam fix modes."""
        filename = image_name_format("seamfix", seam_fix_mode)
        self._test_upscale_variant(
            base_image,
            loaded_checkpoint,
            node_classes,
            seed,
            test_dirs,
            mode_type="None",
            seam_fix_mode=seam_fix_mode,
            seam_fix_denoise=0.5,
            filename_prefix=filename,
        )