Skip to content

Commit 1d69245

Browse files
Enable pinned memory by default on Nvidia. (#10656)
Removed the --fast pinned_memory flag. You can use --disable-pinned-memory to disable pinned memory. Please report if it causes any issues.
1 parent 97f198e commit 1d69245

File tree

2 files changed

+11
-14
lines changed

2 files changed

+11
-14
lines changed

comfy/cli_args.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -145,10 +145,11 @@ class PerformanceFeature(enum.Enum):
145145
Fp8MatrixMultiplication = "fp8_matrix_mult"
146146
CublasOps = "cublas_ops"
147147
AutoTune = "autotune"
148-
PinnedMem = "pinned_memory"
149148

150149
parser.add_argument("--fast", nargs="*", type=PerformanceFeature, help="Enable some untested and potentially quality deteriorating optimizations. This is used to test new features so using it might crash your comfyui. --fast with no arguments enables everything. You can pass a list specific optimizations if you only want to enable specific ones. Current valid optimizations: {}".format(" ".join(map(lambda c: c.value, PerformanceFeature))))
151150

151+
parser.add_argument("--disable-pinned-memory", action="store_true", help="Disable pinned memory use.")
152+
152153
parser.add_argument("--mmap-torch-files", action="store_true", help="Use mmap when loading ckpt/pt files.")
153154
parser.add_argument("--disable-mmap", action="store_true", help="Don't use mmap when loading safetensors.")
154155

comfy/model_management.py

Lines changed: 9 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1085,22 +1085,21 @@ def cast_to_device(tensor, device, dtype, copy=False):
10851085

10861086
PINNED_MEMORY = {}
10871087
TOTAL_PINNED_MEMORY = 0
1088-
if PerformanceFeature.PinnedMem in args.fast:
1089-
if WINDOWS:
1090-
MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.45 # Windows limit is apparently 50%
1091-
else:
1092-
MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.95
1093-
else:
1094-
MAX_PINNED_MEMORY = -1
1088+
MAX_PINNED_MEMORY = -1
1089+
if not args.disable_pinned_memory:
1090+
if is_nvidia():
1091+
if WINDOWS:
1092+
MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.45 # Windows limit is apparently 50%
1093+
else:
1094+
MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.95
1095+
logging.info("Enabled pinned memory {}".format(MAX_PINNED_MEMORY // (1024 * 1024)))
1096+
10951097

10961098
def pin_memory(tensor):
10971099
global TOTAL_PINNED_MEMORY
10981100
if MAX_PINNED_MEMORY <= 0:
10991101
return False
11001102

1101-
if not is_nvidia():
1102-
return False
1103-
11041103
if not is_device_cpu(tensor.device):
11051104
return False
11061105

@@ -1121,9 +1120,6 @@ def unpin_memory(tensor):
11211120
if MAX_PINNED_MEMORY <= 0:
11221121
return False
11231122

1124-
if not is_nvidia():
1125-
return False
1126-
11271123
if not is_device_cpu(tensor.device):
11281124
return False
11291125

0 commit comments

Comments
 (0)