Skip fp4 matrix mult on devices that don't support it. (#11677)

This commit is contained in:
comfyanonymous
2026-01-06 15:07:26 -08:00
committed by GitHub
parent 6e9ee55cdd
commit 2c03884f5f
2 changed files with 27 additions and 4 deletions

View File

@@ -1504,6 +1504,16 @@ def supports_fp8_compute(device=None):
return True
def supports_nvfp4_compute(device=None):
    """Return True when *device* can run NVFP4 (4-bit float) matrix compute.

    Only NVIDIA GPUs whose CUDA compute capability major version is 10 or
    higher qualify; any non-NVIDIA backend is rejected outright.
    """
    if not is_nvidia():
        return False
    # Compute capability 10.x+ is required for nvfp4 support.
    return torch.cuda.get_device_properties(device).major >= 10
def extended_fp16_support():
# TODO: check why some models work with fp16 on newer torch versions but not on older
if torch_version_numeric < (2, 7):