Files
ComfyUI/setup-vastai.sh
jaidaken 700d6ead21
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled
Add --upgrade to pip installs in setup script
Ensures packages are updated if already present on vast.ai instances.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-09 01:58:21 +00:00

182 lines
8.4 KiB
Bash
Executable File

#!/usr/bin/env bash
set -euo pipefail
# ── ComfyUI vast.ai Setup Script ──────────────────────────────
# Run from inside the cloned ComfyUI directory.
# Usage: git clone <repo> && cd ComfyUI && bash setup-vastai.sh
#
# Expects: NVIDIA GPU, Python 3.10+, git, wget
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
MODELS_DIR="$SCRIPT_DIR/models"
# ── Helpers ───────────────────────────────────────────────────
# log/err print a colored tag plus the message; err goes to stderr.
# printf is used instead of `echo -e`: echo -e would also interpret
# backslash escapes inside the message text and is not portable.
log() { printf '\033[1;32m[setup]\033[0m %s\n' "$*"; }
err() { printf '\033[1;31m[error]\033[0m %s\n' "$*" >&2; }
dl() {
  # Download $1 (url) to $2 (dest), skipping files already on disk.
  local url="$1" dest="$2"
  if [ -f "$dest" ]; then
    log " Already exists: $(basename "$dest")"
    return 0
  fi
  mkdir -p "$(dirname "$dest")"
  log " Downloading: $(basename "$dest")"
  # Fetch to a temp name and rename only on success: a failed or
  # interrupted wget would otherwise leave a partial file at $dest,
  # which the existence check above would mistake for a complete
  # download on the next run.
  if ! wget -q --show-progress -O "$dest.part" "$url"; then
    rm -f -- "$dest.part"
    err " Download failed: $url"
    return 1
  fi
  mv -- "$dest.part" "$dest"
}
cd "$SCRIPT_DIR"
# ── Git LFS ──────────────────────────────────────────────────
# Install git-lfs only if it is missing, then run install/pull exactly
# once (the previous version duplicated those two commands in both
# branches of the conditional).
if ! command -v git-lfs &>/dev/null && ! git lfs version &>/dev/null; then
  log "git-lfs not found, installing..."
  apt-get update -qq && apt-get install -y -qq git-lfs > /dev/null 2>&1
fi
log "Pulling LFS files (Civitai loras)..."
git lfs install > /dev/null 2>&1
git lfs pull
# ── Python Environment ───────────────────────────────────────
# Create (first run only) and activate a venv, then install PyTorch and
# the ComfyUI requirements into it. Order matters: torch must be present
# before requirements.txt deps that build against it.
log "Setting up Python environment..."
if [ ! -d "$SCRIPT_DIR/venv" ]; then
python3 -m venv venv
fi
source venv/bin/activate
# Install PyTorch with CUDA
# NOTE(review): cu124 wheel index assumes the instance's NVIDIA driver
# supports CUDA 12.4 — confirm against the vast.ai base image.
log "Installing PyTorch..."
pip install --quiet --upgrade torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu124
# Install ComfyUI requirements
# --upgrade ensures already-present packages are refreshed (see commit msg).
log "Installing ComfyUI requirements..."
pip install --quiet --upgrade -r requirements.txt
# ── Custom Node Requirements ─────────────────────────────────
# Best-effort install of each custom node's Python dependencies; a
# failure in one node must not abort the whole setup.
log "Installing custom node requirements..."
for req in custom_nodes/*/requirements.txt; do
  # If the glob matched nothing, the literal pattern remains — skip it.
  if [ ! -f "$req" ]; then
    continue
  fi
  node_name="$(basename "$(dirname "$req")")"
  log " $node_name"
  # Some nodes pin conflicting deps; tolerate partial failures here.
  if ! pip install --quiet --upgrade -r "$req" 2>/dev/null; then
    err " Failed some deps in $node_name (may be OK)"
  fi
done
# ── Download Models ───────────────────────────────────────────
log "Downloading models from HuggingFace..."
# Checkpoints
log "Checkpoints..."
dl "https://huggingface.co/Comfy-Org/flux1-dev/resolve/main/flux1-dev-fp8.safetensors" \
"$MODELS_DIR/checkpoints/flux1-dev-fp8.safetensors"
# CLIP / Text Encoders
log "CLIP & Text Encoders..."
dl "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/clip_l.safetensors" \
"$MODELS_DIR/clip/clip_l.safetensors"
dl "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp8_e4m3fn.safetensors" \
"$MODELS_DIR/clip/t5xxl_fp8_e4m3fn.safetensors"
dl "https://huggingface.co/QuanSun/EVA-CLIP/resolve/main/EVA02_CLIP_L_336_psz14_s6B.pt" \
"$MODELS_DIR/clip/EVA02_CLIP_L_336_psz14_s6B.pt"
# Duplicate clip/text_encoders (same files, different paths)
mkdir -p "$MODELS_DIR/text_encoders"
ln -sf "$MODELS_DIR/clip/clip_l.safetensors" "$MODELS_DIR/text_encoders/clip_l.safetensors"
ln -sf "$MODELS_DIR/clip/t5xxl_fp8_e4m3fn.safetensors" "$MODELS_DIR/text_encoders/t5xxl_fp8_e4m3fn.safetensors"
# CLIP Vision
log "CLIP Vision..."
dl "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors" \
"$MODELS_DIR/clip_vision/sigclip_vision_patch14_384.safetensors"
# ControlNet
log "ControlNet..."
dl "https://huggingface.co/Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro/resolve/main/diffusion_pytorch_model.safetensors" \
"$MODELS_DIR/controlnet/flux_controlnet_pro.safetensors"
# VAE
log "VAE..."
dl "https://huggingface.co/black-forest-labs/FLUX.1-schnell/resolve/main/ae.safetensors" \
"$MODELS_DIR/vae/ae.safetensors"
# Upscale
log "Upscale models..."
dl "https://huggingface.co/uwg/upscaler/resolve/main/ESRGAN/4x_NMKD-Siax_200k.pth" \
"$MODELS_DIR/upscale_models/4x_NMKD-Siax_200k.pth"
# PuLID
log "PuLID..."
dl "https://huggingface.co/guozinan/PuLID/resolve/main/pulid_flux_v0.9.0.safetensors" \
"$MODELS_DIR/pulid/pulid_flux_v0.9.0.safetensors"
# SAM / SAM2
log "SAM models..."
dl "https://huggingface.co/Kijai/sam2-safetensors/resolve/main/sam2_hiera_base_plus.safetensors" \
"$MODELS_DIR/sam2/sam2_hiera_base_plus.safetensors"
dl "https://huggingface.co/datasets/Gourieff/ReActor/resolve/main/models/sams/sam_vit_b_01ec64.pth" \
"$MODELS_DIR/sams/sam_vit_b_01ec64.pth"
# Facexlib
log "Facexlib..."
dl "https://huggingface.co/leonelhs/facexlib/resolve/main/detection_Resnet50_Final.pth" \
"$MODELS_DIR/facexlib/detection_Resnet50_Final.pth"
dl "https://huggingface.co/leonelhs/facexlib/resolve/main/parsing_bisenet.pth" \
"$MODELS_DIR/facexlib/parsing_bisenet.pth"
# InsightFace (antelopev2)
log "InsightFace (antelopev2)..."
mkdir -p "$MODELS_DIR/insightface/models/antelopev2"
dl "https://huggingface.co/DIAMONIK7777/antelopev2/resolve/main/1k3d68.onnx" \
"$MODELS_DIR/insightface/models/antelopev2/1k3d68.onnx"
dl "https://huggingface.co/DIAMONIK7777/antelopev2/resolve/main/2d106det.onnx" \
"$MODELS_DIR/insightface/models/antelopev2/2d106det.onnx"
dl "https://huggingface.co/DIAMONIK7777/antelopev2/resolve/main/genderage.onnx" \
"$MODELS_DIR/insightface/models/antelopev2/genderage.onnx"
dl "https://huggingface.co/DIAMONIK7777/antelopev2/resolve/main/glintr100.onnx" \
"$MODELS_DIR/insightface/models/antelopev2/glintr100.onnx"
dl "https://huggingface.co/DIAMONIK7777/antelopev2/resolve/main/scrfd_10g_bnkps.onnx" \
"$MODELS_DIR/insightface/models/antelopev2/scrfd_10g_bnkps.onnx"
# Ultralytics (YOLO)
log "Ultralytics..."
dl "https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8m.pt" \
"$MODELS_DIR/ultralytics/face_yolov8m.pt"
dl "https://huggingface.co/Bingsu/adetailer/resolve/main/hand_yolov8s.pt" \
"$MODELS_DIR/ultralytics/hand_yolov8s.pt"
dl "https://huggingface.co/Bingsu/adetailer/resolve/main/person_yolov8m-seg.pt" \
"$MODELS_DIR/ultralytics/person_yolov8m-seg.pt"
# USO (LoRA + projector)
log "USO models..."
dl "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/loras/uso-flux1-dit-lora-v1.safetensors" \
"$MODELS_DIR/loras/uso-flux1-dit-lora-v1.safetensors"
dl "https://huggingface.co/Comfy-Org/USO_1.0_Repackaged/resolve/main/split_files/model_patches/uso-flux1-projector-v1.safetensors" \
"$MODELS_DIR/model_patches/uso-flux1-projector-v1.safetensors"
# Florence-2-base (LLM)
log "Florence-2-base..."
mkdir -p "$MODELS_DIR/LLM/Florence-2-base"
FLORENCE_BASE="https://huggingface.co/microsoft/Florence-2-base/resolve/main"
for f in model.safetensors pytorch_model.bin config.json configuration_florence2.py \
modeling_florence2.py preprocessor_config.json processing_florence2.py \
tokenizer.json tokenizer_config.json vocab.json; do
dl "$FLORENCE_BASE/$f" "$MODELS_DIR/LLM/Florence-2-base/$f"
done
# ── Create Symlinks (matching local layout) ──────────────────
# Expose the single downloaded checkpoint/controlnet under every
# directory name the various workflows expect, without duplicating data.
log "Creating model symlinks..."
mkdir -p "$MODELS_DIR/diffusion_models/FLUX" "$MODELS_DIR/unet" "$MODELS_DIR/xlabs/controlnets"
ckpt_src="$MODELS_DIR/checkpoints/flux1-dev-fp8.safetensors"
cnet_src="$MODELS_DIR/controlnet/flux_controlnet_pro.safetensors"
for ckpt_link in \
  "$MODELS_DIR/diffusion_models/flux1-dev-fp8.safetensors" \
  "$MODELS_DIR/diffusion_models/FLUX/flux1-dev-fp8.safetensors" \
  "$MODELS_DIR/unet/flux1-dev-fp8.safetensors"; do
  ln -sf "$ckpt_src" "$ckpt_link"
done
ln -sf "$cnet_src" "$MODELS_DIR/diffusion_models/flux_controlnet_pro.safetensors"
ln -sf "$cnet_src" "$MODELS_DIR/xlabs/controlnets/flux_controlnet_pro.safetensors"
# ── Done ──────────────────────────────────────────────────────
# Final operator instructions only; nothing below changes any state.
log "Setup complete!"
log ""
log "To start ComfyUI:"
log " cd $SCRIPT_DIR"
log " source venv/bin/activate"
# --lowvram + fp8 text encoder keep VRAM usage down on rented GPUs.
# NOTE(review): flags must match ComfyUI's main.py CLI — verify on upgrade.
log " python main.py --listen 0.0.0.0 --lowvram --fp8_e4m3fn-text-enc"