Mirror of https://github.com/comfyanonymous/ComfyUI.git
pytorch xpu should be flash or mem efficient attention?

parent 20447e9ec9
commit b1fd26fe9e
@@ -693,6 +693,8 @@ def pytorch_attention_flash_attention():
         #TODO: more reliable way of checking for flash attention?
         if is_nvidia(): #pytorch flash attention only works on Nvidia
             return True
+        if is_intel_xpu():
+            return True
     return False
 
 def force_upcast_attention_dtype():
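For context, the patched function reads roughly as below. The is_nvidia() and is_intel_xpu() helpers shown here are hypothetical stand-ins for the repository's own device checks in comfy/model_management.py, which this hunk does not show; in the actual file the checks may also sit behind an ENABLE_PYTORCH_ATTENTION gate.

import torch

# Hypothetical stand-ins for ComfyUI's own device checks (not part of this hunk);
# shown only to illustrate what the new branch tests.
def is_nvidia():
    return torch.cuda.is_available() and torch.version.cuda is not None

def is_intel_xpu():
    return hasattr(torch, "xpu") and torch.xpu.is_available()

def pytorch_attention_flash_attention():
    #TODO: more reliable way of checking for flash attention?
    if is_nvidia(): #pytorch flash attention only works on Nvidia
        return True
    if is_intel_xpu():
        # Added by this commit: treat PyTorch scaled_dot_product_attention on
        # Intel XPU as flash / memory efficient capable as well.
        return True
    return False

Callers can then use pytorch_attention_flash_attention() to decide whether the PyTorch SDP attention path should be treated as flash or memory-efficient attention on the current device.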