mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2025-02-28 22:51:45 +00:00
Better memory estimation for ROCm cards that support memory-efficient attention.
There is no way to check whether the card actually supports it, so support is assumed if you use --use-pytorch-cross-attention with your card.
This commit is contained in:
parent
1d5d6586f3
commit
8773ccf74d
@ -909,6 +909,8 @@ def pytorch_attention_flash_attention():
|
||||
return True
|
||||
if is_ascend_npu():
|
||||
return True
|
||||
if is_amd():
|
||||
return True #if you have pytorch attention enabled on AMD it probably supports at least mem efficient attention
|
||||
return False
|
||||
|
||||
def mac_version():
|
||||
|
Loading…
Reference in New Issue
Block a user