Add custom nodes, Civitai loras (LFS), and vast.ai setup script
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.10, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.11, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-stable (12.1, , linux, 3.12, [self-hosted Linux], stable) (push) Has been cancelled
Full Comfy CI Workflow Runs / test-unix-nightly (12.1, , linux, 3.11, [self-hosted Linux], nightly) (push) Has been cancelled
Execution Tests / test (macos-latest) (push) Has been cancelled
Execution Tests / test (ubuntu-latest) (push) Has been cancelled
Execution Tests / test (windows-latest) (push) Has been cancelled
Test server launches without errors / test (push) Has been cancelled
Unit Tests / test (macos-latest) (push) Has been cancelled
Unit Tests / test (ubuntu-latest) (push) Has been cancelled
Unit Tests / test (windows-2022) (push) Has been cancelled
Includes 30 custom nodes committed directly, 7 Civitai-exclusive loras stored via Git LFS, and a setup script that installs all dependencies and downloads HuggingFace-hosted models on vast.ai.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
3
custom_nodes/ComfyUI-Crystools/general/__init__.py
Normal file
3
custom_nodes/ComfyUI-Crystools/general/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .monitor import *
|
||||
from .hdd import *
|
||||
from .gpu import *
|
||||
300
custom_nodes/ComfyUI-Crystools/general/gpu.py
Normal file
300
custom_nodes/ComfyUI-Crystools/general/gpu.py
Normal file
@@ -0,0 +1,300 @@
|
||||
import torch
|
||||
import comfy.model_management
|
||||
from ..core import logger
|
||||
import os
|
||||
import platform
|
||||
|
||||
def is_jetson() -> bool:
    """
    Return True when the Python environment is running on an NVIDIA Jetson device.

    Detection strategy:
      1. Read ``/proc/device-tree/model`` and look for "NVIDIA" in it.
      2. If that file is unavailable (the common case on x86), fall back to
         checking for "tegra" in ``platform.release()``.
    """
    try:
        with open('/proc/device-tree/model', 'r') as f:
            # Device-tree strings are NUL-terminated; str.strip() only removes
            # whitespace, so drop the trailing '\x00' explicitly as well.
            model = f.read().replace('\x00', '').strip()
        logger.info(f"Device model: {model}")
        return "NVIDIA" in model
    except (OSError, UnicodeDecodeError):
        # /proc/device-tree/model does not exist on non-Jetson systems;
        # fall back to the kernel release string.
        platform_release = platform.release()
        logger.info(f"Platform release: {platform_release}")
        if 'tegra' in platform_release.lower():
            logger.info("Detected 'tegra' in platform release. Assuming Jetson device.")
            return True
        logger.info("JETSON: Not detected.")
        return False


# Evaluated once at import time; used to pick the GPU monitoring backend.
IS_JETSON = is_jetson()
|
||||
|
||||
class CGPUInfo:
    """
    Collects GPU identity and live status (utilization, VRAM, temperature).

    Backends:
      - ``pynvml`` on regular NVIDIA machines,
      - ``jtop`` on NVIDIA Jetson boards (selected via the module-level IS_JETSON).

    If neither backend initializes, ``getStatus()`` degrades gracefully to
    CPU / -1 placeholder values.
    """
    # Scalar defaults only — these are immutable, so class-level sharing is safe.
    cuda = False
    pynvmlLoaded = False
    jtopLoaded = False
    cudaAvailable = False
    torchDevice = 'cpu'
    cudaDevice = 'cpu'
    cudaDevicesFound = 0
    switchGPU = True
    switchVRAM = True
    switchTemperature = True

    def __init__(self):
        # Per-instance containers. These used to be class attributes, meaning
        # every instance appended into the same shared lists — moved here so
        # each instance owns its own state.
        self.gpus = []
        self.gpusUtilization = []
        self.gpusVRAM = []
        self.gpusTemperature = []
        # Always defined so close() is safe even if jtop never initialized.
        self.jtopInstance = None

        if IS_JETSON:
            # Jetson: use jtop (pynvml is not functional on Tegra).
            try:
                from jtop import jtop
                self.jtopInstance = jtop()
                self.jtopInstance.start()
                self.jtopLoaded = True
                logger.info('jtop initialized on Jetson device.')
            except ImportError as e:
                logger.error('jtop is not installed. ' + str(e))
            except Exception as e:
                logger.error('Could not initialize jtop. ' + str(e))
        else:
            # Non-Jetson: use NVIDIA's NVML bindings.
            try:
                import pynvml
                self.pynvml = pynvml
                self.pynvml.nvmlInit()
                self.pynvmlLoaded = True
                logger.info('pynvml (NVIDIA) initialized.')
            except ImportError as e:
                logger.error('pynvml is not installed. ' + str(e))
            except Exception as e:
                logger.error('Could not init pynvml (NVIDIA). ' + str(e))

        self.anygpuLoaded = self.pynvmlLoaded or self.jtopLoaded

        try:
            self.torchDevice = comfy.model_management.get_torch_device_name(comfy.model_management.get_torch_device())
        except Exception as e:
            logger.error('Could not pick default device. ' + str(e))

        # NVML can initialize successfully on a machine with zero GPUs;
        # in that case disable GPU monitoring entirely.
        if self.pynvmlLoaded and not self.jtopLoaded and not self.deviceGetCount():
            logger.warning('No GPU detected, disabling GPU monitoring.')
            self.anygpuLoaded = False
            self.pynvmlLoaded = False
            self.jtopLoaded = False

        if self.anygpuLoaded:
            deviceCount = self.deviceGetCount()  # hoisted: was queried twice
            if deviceCount > 0:
                self.cudaDevicesFound = deviceCount

                logger.info("GPU/s:")  # was an f-string with no placeholders

                for deviceIndex in range(self.cudaDevicesFound):
                    deviceHandle = self.deviceGetHandleByIndex(deviceIndex)
                    gpuName = self.deviceGetName(deviceHandle, deviceIndex)

                    logger.info(f"{deviceIndex}) {gpuName}")

                    self.gpus.append({
                        'index': deviceIndex,
                        'name': gpuName,
                    })

                    # Same index as gpus, with default values (per-GPU enable flags).
                    self.gpusUtilization.append(True)
                    self.gpusVRAM.append(True)
                    self.gpusTemperature.append(True)

                self.cuda = True
                logger.info(self.systemGetDriverVersion())
            else:
                logger.warning('No GPU with CUDA detected.')
        else:
            logger.warning('No GPU monitoring libraries available.')

        self.cudaDevice = 'cpu' if self.torchDevice == 'cpu' else 'cuda'
        self.cudaAvailable = torch.cuda.is_available()

        if self.cuda and self.cudaAvailable and self.torchDevice == 'cpu':
            logger.warning('CUDA is available, but torch is using CPU.')

    def getInfo(self):
        """Return the static list of detected GPUs ({'index', 'name'} dicts)."""
        logger.debug('Getting GPUs info...')
        return self.gpus

    def getStatus(self):
        """
        Return a snapshot of GPU status.

        Returns a dict: {'device_type': 'cpu'|'cuda', 'gpus': [per-GPU dicts]}.
        Any metric that is disabled or fails to read is reported as -1.
        A failing metric also switches itself off for subsequent calls.
        """
        gpuUtilization = -1
        gpuTemperature = -1
        vramUsed = -1
        vramTotal = -1
        vramPercent = -1

        gpuType = ''
        gpus = []

        if self.cudaDevice == 'cpu':
            gpuType = 'cpu'
            gpus.append({
                'gpu_utilization': -1,
                'gpu_temperature': -1,
                'vram_total': -1,
                'vram_used': -1,
                'vram_used_percent': -1,
            })
        else:
            gpuType = self.cudaDevice

            if self.anygpuLoaded and self.cuda and self.cudaAvailable:
                for deviceIndex in range(self.cudaDevicesFound):
                    deviceHandle = self.deviceGetHandleByIndex(deviceIndex)

                    # Reset per-device so a failure on one GPU does not leak
                    # stale values into the next one.
                    gpuUtilization = -1
                    vramPercent = -1
                    vramUsed = -1
                    vramTotal = -1
                    gpuTemperature = -1

                    # GPU Utilization
                    if self.switchGPU and self.gpusUtilization[deviceIndex]:
                        try:
                            gpuUtilization = self.deviceGetUtilizationRates(deviceHandle)
                        except Exception as e:
                            logger.error('Could not get GPU utilization. ' + str(e))
                            logger.error('Monitor of GPU is turning off.')
                            self.switchGPU = False

                    # VRAM
                    if self.switchVRAM and self.gpusVRAM[deviceIndex]:
                        try:
                            memory = self.deviceGetMemoryInfo(deviceHandle)
                            vramUsed = memory['used']
                            vramTotal = memory['total']

                            # Check if vramTotal is not zero or None
                            if vramTotal and vramTotal != 0:
                                vramPercent = vramUsed / vramTotal * 100
                        except Exception as e:
                            logger.error('Could not get GPU memory info. ' + str(e))
                            self.switchVRAM = False

                    # Temperature
                    if self.switchTemperature and self.gpusTemperature[deviceIndex]:
                        try:
                            gpuTemperature = self.deviceGetTemperature(deviceHandle)
                        except Exception as e:
                            logger.error('Could not get GPU temperature. Turning off this feature. ' + str(e))
                            self.switchTemperature = False

                    gpus.append({
                        'gpu_utilization': gpuUtilization,
                        'gpu_temperature': gpuTemperature,
                        'vram_total': vramTotal,
                        'vram_used': vramUsed,
                        'vram_used_percent': vramPercent,
                    })

        return {
            'device_type': gpuType,
            'gpus': gpus,
        }

    def deviceGetCount(self):
        """Return the number of GPUs visible to the active backend (0 if none)."""
        if self.pynvmlLoaded:
            return self.pynvml.nvmlDeviceGetCount()
        elif self.jtopLoaded:
            # For Jetson devices, we assume there's one GPU
            return 1
        else:
            return 0

    def deviceGetHandleByIndex(self, index):
        """Return a backend-specific handle for GPU ``index``."""
        if self.pynvmlLoaded:
            return self.pynvml.nvmlDeviceGetHandleByIndex(index)
        elif self.jtopLoaded:
            return index  # On Jetson, index acts as handle
        else:
            return 0

    def deviceGetName(self, deviceHandle, deviceIndex):
        """Return a human-readable GPU name, or a placeholder on failure."""
        if self.pynvmlLoaded:
            gpuName = 'Unknown GPU'

            try:
                gpuName = self.pynvml.nvmlDeviceGetName(deviceHandle)
                # Older pynvml returns bytes; newer returns str.
                try:
                    gpuName = gpuName.decode('utf-8', errors='ignore')
                except AttributeError:
                    pass

            except UnicodeDecodeError as e:
                gpuName = 'Unknown GPU (decoding error)'
                logger.error(f"UnicodeDecodeError: {e}")

            return gpuName
        elif self.jtopLoaded:
            # Access the GPU name from self.jtopInstance.gpu
            try:
                gpu_info = self.jtopInstance.gpu
                gpu_name = next(iter(gpu_info.keys()))
                return gpu_name
            except Exception as e:
                logger.error('Could not get GPU name. ' + str(e))
                return 'Unknown GPU'
        else:
            return ''

    def systemGetDriverVersion(self):
        """Return a printable driver-version string for the active backend."""
        if self.pynvmlLoaded:
            return f'NVIDIA Driver: {self.pynvml.nvmlSystemGetDriverVersion()}'
        elif self.jtopLoaded:
            # No direct method to get driver version from jtop
            return 'NVIDIA Driver: unknown'
        else:
            return 'Driver unknown'

    def deviceGetUtilizationRates(self, deviceHandle):
        """Return GPU utilization in percent, or -1/0 when unavailable."""
        if self.pynvmlLoaded:
            return self.pynvml.nvmlDeviceGetUtilizationRates(deviceHandle).gpu
        elif self.jtopLoaded:
            # GPU utilization from jtop stats
            try:
                gpu_util = self.jtopInstance.stats.get('GPU', -1)
                return gpu_util
            except Exception as e:
                logger.error('Could not get GPU utilization. ' + str(e))
                return -1
        else:
            return 0

    def deviceGetMemoryInfo(self, deviceHandle):
        """Return {'total': bytes, 'used': bytes} for the device's memory.

        NOTE(review): on Jetson this reports system RAM ('RAM' key), since
        the GPU shares memory with the CPU — confirm that is the intent.
        """
        if self.pynvmlLoaded:
            mem = self.pynvml.nvmlDeviceGetMemoryInfo(deviceHandle)
            return {'total': mem.total, 'used': mem.used}
        elif self.jtopLoaded:
            mem_data = self.jtopInstance.memory['RAM']
            total = mem_data['tot']
            used = mem_data['used']
            return {'total': total, 'used': used}
        else:
            return {'total': 1, 'used': 1}

    def deviceGetTemperature(self, deviceHandle):
        """Return GPU temperature in degrees Celsius, or -1/0 when unavailable."""
        if self.pynvmlLoaded:
            return self.pynvml.nvmlDeviceGetTemperature(deviceHandle, self.pynvml.NVML_TEMPERATURE_GPU)
        elif self.jtopLoaded:
            try:
                temperature = self.jtopInstance.stats.get('Temp gpu', -1)
                return temperature
            except Exception as e:
                logger.error('Could not get GPU temperature. ' + str(e))
                return -1
        else:
            return 0

    def close(self):
        """Release the jtop session, if one was started."""
        if self.jtopLoaded and self.jtopInstance is not None:
            self.jtopInstance.close()
|
||||
132
custom_nodes/ComfyUI-Crystools/general/hardware.py
Normal file
132
custom_nodes/ComfyUI-Crystools/general/hardware.py
Normal file
@@ -0,0 +1,132 @@
|
||||
import platform
|
||||
import re
|
||||
import cpuinfo
|
||||
from cpuinfo import DataSource
|
||||
import psutil
|
||||
from .gpu import CGPUInfo
|
||||
from .hdd import getDrivesInfo
|
||||
|
||||
from ..core import logger
|
||||
|
||||
|
||||
class CHardwareInfo:
    """
    Single entry point for hardware information (CPU, RAM, HDD, GPU/VRAM),
    intended to be shared with other software.

    GPU/VRAM queries are delegated to an owned CGPUInfo instance; the
    ``switchGPU`` / ``switchVRAM`` properties proxy its enable flags.
    """
    switchCPU = False
    switchHDD = False
    switchRAM = False
    # Partition whose usage is reported by getStatus().
    # NOTE(review): original comment said "breaks linux", but '/' is the
    # standard root mountpoint on Linux — confirm what the intent was.
    whichHDD = '/'  # breaks linux

    @property
    def switchGPU(self):
        return self.GPUInfo.switchGPU

    @switchGPU.setter
    def switchGPU(self, value):
        self.GPUInfo.switchGPU = value

    @property
    def switchVRAM(self):
        return self.GPUInfo.switchVRAM

    @switchVRAM.setter
    def switchVRAM(self, value):
        self.GPUInfo.switchVRAM = value

    def __init__(self, switchCPU=False, switchGPU=False, switchHDD=False, switchRAM=False, switchVRAM=False):
        self.switchCPU = switchCPU
        self.switchHDD = switchHDD
        self.switchRAM = switchRAM

        self.print_sys_info()

        # GPUInfo must exist before the switchGPU/switchVRAM property setters run.
        self.GPUInfo = CGPUInfo()
        self.switchGPU = switchGPU
        self.switchVRAM = switchVRAM

    def print_sys_info(self):
        """Log CPU brand, architecture and OS, using the cheapest source first."""
        brand = None
        if DataSource.is_windows:  # Windows
            brand = DataSource.winreg_processor_brand().strip()
        elif DataSource.has_proc_cpuinfo():  # Linux
            return_code, output = DataSource.cat_proc_cpuinfo()
            if return_code == 0 and output is not None:
                for line in output.splitlines():
                    r = re.search(r'model name\s*:\s*(.+)', line)
                    if r:
                        brand = r.group(1)
                        break
        elif DataSource.has_sysctl():  # macOS
            return_code, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
            if return_code == 0 and output is not None:
                for line in output.splitlines():
                    r = re.search(r'machdep\.cpu\.brand_string\s*:\s*(.+)', line)
                    if r:
                        brand = r.group(1)
                        break

        # fallback to use cpuinfo.get_cpu_info() (slower, but portable)
        if not brand:
            brand = cpuinfo.get_cpu_info().get('brand_raw', "Unknown")

        arch_string_raw = 'Arch unknown'

        try:
            arch_string_raw = DataSource.arch_string_raw
        except Exception:  # was a bare except: — never swallow SystemExit/KeyboardInterrupt
            pass

        specName = 'CPU: ' + brand
        specArch = 'Arch: ' + arch_string_raw
        specOs = 'OS: ' + str(platform.system()) + ' ' + str(platform.release())
        logger.info(f"{specName} - {specArch} - {specOs}")

    def getHDDsInfo(self):
        """Return the list of mounted partition mountpoints."""
        return getDrivesInfo()

    def getGPUInfo(self):
        """Return the static GPU list from CGPUInfo."""
        return self.GPUInfo.getInfo()

    def getStatus(self):
        """
        Return a combined status snapshot.

        Keys: cpu_utilization, ram_total/used/used_percent,
        hdd_total/used/used_percent, device_type, gpus.
        Disabled or failing metrics are reported as -1.
        """
        cpu = -1
        ramTotal = -1
        ramUsed = -1
        ramUsedPercent = -1
        hddTotal = -1
        hddUsed = -1
        hddUsedPercent = -1

        if self.switchCPU:
            cpu = psutil.cpu_percent()

        if self.switchRAM:
            ram = psutil.virtual_memory()
            ramTotal = ram.total
            ramUsed = ram.used
            ramUsedPercent = ram.percent

        if self.switchHDD:
            try:
                hdd = psutil.disk_usage(self.whichHDD)
                hddTotal = hdd.total
                hddUsed = hdd.used
                hddUsedPercent = hdd.percent
            except Exception as e:
                # e.g. whichHDD points at an unmounted/invalid path
                logger.error(f"Error getting disk usage for {self.whichHDD}: {e}")
                hddTotal = -1
                hddUsed = -1
                hddUsedPercent = -1

        getStatus = self.GPUInfo.getStatus()

        return {
            'cpu_utilization': cpu,
            'ram_total': ramTotal,
            'ram_used': ramUsed,
            'ram_used_percent': ramUsedPercent,
            'hdd_total': hddTotal,
            'hdd_used': hddUsed,
            'hdd_used_percent': hddUsedPercent,
            'device_type': getStatus['device_type'],
            'gpus': getStatus['gpus'],
        }
|
||||
10
custom_nodes/ComfyUI-Crystools/general/hdd.py
Normal file
10
custom_nodes/ComfyUI-Crystools/general/hdd.py
Normal file
@@ -0,0 +1,10 @@
|
||||
import psutil
|
||||
from ..core import logger
|
||||
|
||||
def getDrivesInfo():
    """Return the mountpoints of all mounted disk partitions (list of str)."""
    logger.debug('Getting HDDs info...')
    # Comprehension instead of the manual append loop (same order, same values).
    return [partition.mountpoint for partition in psutil.disk_partitions()]
|
||||
67
custom_nodes/ComfyUI-Crystools/general/monitor.py
Normal file
67
custom_nodes/ComfyUI-Crystools/general/monitor.py
Normal file
@@ -0,0 +1,67 @@
|
||||
import asyncio
|
||||
import server
|
||||
import time
|
||||
import threading
|
||||
from .hardware import CHardwareInfo
|
||||
|
||||
from ..core import logger
|
||||
|
||||
# Guards creation of the monitor thread.
lock = threading.Lock()


class CMonitor:
    """
    Background hardware monitor.

    Spawns a daemon thread that polls CHardwareInfo.getStatus() every
    ``rate`` seconds and pushes the snapshot to the ComfyUI frontend via
    PromptServer's 'crystools.monitor' event. A rate of 0 disables the loop.
    """
    monitorThread = None
    threadController = threading.Event()
    rate = 0
    hardwareInfo = None

    def __init__(self, rate=5, switchCPU=False, switchGPU=False, switchHDD=False, switchRAM=False, switchVRAM=False):
        self.rate = rate
        self.hardwareInfo = CHardwareInfo(switchCPU, switchGPU, switchHDD, switchRAM, switchVRAM)

        self.startMonitor()

    async def send_message(self, data) -> None:
        # I'm not sure if it is ok, but works ¯\_(ツ)_/¯
        # I tried to use async with send_json, but eventually that don't send the message
        server.PromptServer.instance.send_sync('crystools.monitor', data)

    def startMonitorLoop(self):
        """Thread entry point: runs the async polling loop to completion."""
        asyncio.run(self.MonitorLoop())

    async def MonitorLoop(self):
        """Poll and broadcast until the rate is 0 or threadController is set."""
        while self.rate > 0 and not self.threadController.is_set():
            data = self.hardwareInfo.getStatus()
            await self.send_message(data)
            await asyncio.sleep(self.rate)

    def startMonitor(self):
        """Start (or request a restart of) the monitor thread.

        NOTE(review): on restart the stop event is set and immediately
        cleared without joining the old thread; if the old loop has not
        observed the event yet, the is_alive() guard below skips starting
        a new thread — confirm whether a join() is wanted here.
        """
        if self.monitorThread is not None:
            self.stopMonitor()
            logger.debug('Restarting monitor...')
        else:
            if self.rate == 0:
                logger.debug('Monitor rate is 0, not starting monitor.')
                return None

            logger.debug('Starting monitor...')

        self.threadController.clear()

        if self.monitorThread is None or not self.monitorThread.is_alive():
            # Context manager instead of manual acquire/release: the lock is
            # released even if Thread() raises.
            with lock:
                self.monitorThread = threading.Thread(target=self.startMonitorLoop)
            self.monitorThread.daemon = True
            self.monitorThread.start()

    def stopMonitor(self):
        """Signal the polling loop to exit (does not join the thread)."""
        logger.debug('Stopping monitor...')
        self.threadController.set()


# Module-level singleton: starts monitoring everything at 1s intervals on import.
cmonitor = CMonitor(1, True, True, True, True, True)
|
||||
|
||||
Reference in New Issue
Block a user