ankandrew committed
Commit e8a0cf2 · Parent: f17ef4c

Try different flash attention version

Files changed (1)
app.py +1 -1
app.py CHANGED
@@ -10,7 +10,7 @@ from qwen_vl_utils import process_vision_info
 from transformers.utils import is_flash_attn_2_available
 
 subprocess.run(
-    "pip install 'flash-attn>=2.2.0' --no-build-isolation",
+    "pip install 'flash-attn==2.2.0' --no-build-isolation",
     env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
     shell=True,
 )
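
The hunk above also shows the import of is_flash_attn_2_available, which in a Space like this is typically used to pick the attention backend once the runtime pip install has finished. A minimal sketch of that pattern follows; the checkpoint name, dtype, and fallback backend are illustrative assumptions, not part of this commit.

# Hypothetical usage sketch (not in this diff): select the attention
# implementation depending on whether flash-attn installed successfully.
import torch
from transformers import Qwen2VLForConditionalGeneration
from transformers.utils import is_flash_attn_2_available

attn_impl = "flash_attention_2" if is_flash_attn_2_available() else "sdpa"

model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-7B-Instruct",  # assumed checkpoint, for illustration only
    torch_dtype=torch.bfloat16,   # assumed dtype
    attn_implementation=attn_impl,
)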