Mirror of https://github.com/comfyanonymous/ComfyUI.git
Add a force argument to soft_empty_cache to force a cache empty.
parent 7746bdf7b0
commit 1938f5c5fe
@@ -323,7 +323,7 @@ class CrossAttentionDoggettx(nn.Module):
                 break
             except model_management.OOM_EXCEPTION as e:
                 if first_op_done == False:
-                    model_management.soft_empty_cache()
+                    model_management.soft_empty_cache(True)
                     if cleared_cache == False:
                         cleared_cache = True
                         print("out of memory error, emptying cache and trying again")
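For context, a minimal sketch of the retry pattern this call sits in, assuming the comfy package is importable. The chunked attention work is replaced by a hypothetical run_chunks callable, so this illustrates the control flow rather than the exact ComfyUI code:

from comfy import model_management  # assumes the ComfyUI tree is on the path

def run_with_oom_retry(run_chunks):
    # Hypothetical helper: on the first OOM, force-empty the cache and retry
    # once; a second OOM is re-raised to the caller.
    cleared_cache = False
    while True:
        try:
            return run_chunks()  # stands in for the chunked attention ops
        except model_management.OOM_EXCEPTION:
            if not cleared_cache:
                cleared_cache = True
                # force=True empties the CUDA cache even when is_nvidia() is False
                model_management.soft_empty_cache(True)
                print("out of memory error, emptying cache and trying again")
                continue
            raise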
@@ -186,6 +186,7 @@ def slice_attention(q, k, v):
                 del s2
             break
         except model_management.OOM_EXCEPTION as e:
+            model_management.soft_empty_cache(True)
             steps *= 2
             if steps > 128:
                 raise e
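Similarly, a hedged sketch of where the added call lands in slice_attention's step-doubling backoff; compute_in_slices is a hypothetical stand-in for the sliced attention work:

from comfy import model_management  # assumes the ComfyUI tree is on the path

def sliced_with_backoff(compute_in_slices):
    # Hypothetical outline: each OOM now force-empties the cache before the
    # slice count is doubled; past 128 steps the error is re-raised.
    steps = 1
    while True:
        try:
            return compute_in_slices(steps)
        except model_management.OOM_EXCEPTION as e:
            model_management.soft_empty_cache(True)  # new in this commit
            steps *= 2
            if steps > 128:
                raise e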
@@ -639,14 +639,14 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
 
     return True
 
-def soft_empty_cache():
+def soft_empty_cache(force=False):
     global cpu_state
     if cpu_state == CPUState.MPS:
         torch.mps.empty_cache()
     elif is_intel_xpu():
         torch.xpu.empty_cache()
     elif torch.cuda.is_available():
-        if is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda
+        if force or is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda
             torch.cuda.empty_cache()
             torch.cuda.ipc_collect()
 
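In short, force=True lets the OOM handlers above bypass the is_nvidia() guard, while the default call keeps the old behaviour. A usage sketch, assuming the comfy package is importable:

from comfy import model_management  # assumes the ComfyUI tree is on the path

# Default: with CUDA available, the cache is only emptied when is_nvidia()
# is true (unchanged, since emptying seems to make things worse on ROCm).
model_management.soft_empty_cache()

# Forced: used by the OOM handlers above, so torch.cuda.empty_cache() and
# torch.cuda.ipc_collect() run even on non-NVIDIA CUDA backends such as ROCm.
model_management.soft_empty_cache(True)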