Sofia Casadei committed on
Commit
65c9279
·
1 Parent(s): 3375ee2

install flash attention

Browse files
Files changed (1) hide show
  1. Dockerfile +3 -3
Dockerfile CHANGED
@@ -45,10 +45,10 @@ COPY --chown=user requirements.txt .
45
  RUN --mount=type=cache,target=$UV_CACHE_DIR,uid=1000,gid=1000 \
46
  /uv pip install -r requirements.txt
47
 
48
- # Conditionally install flash-attn if CUDA is available
49
  RUN --mount=type=cache,target=$UV_CACHE_DIR,uid=1000,gid=1000 \
50
- python -c "import torch; exit(0 if torch.cuda.is_available() else 1)" && \
51
- /uv pip install flash-attn --no-build-isolation || echo "CUDA not available, skipping flash-attn installation"
52
 
53
  # Copy application code
54
  COPY --chown=user . .
 
45
  RUN --mount=type=cache,target=$UV_CACHE_DIR,uid=1000,gid=1000 \
46
  /uv pip install -r requirements.txt
47
 
48
+ # Try to install flash-attn, but don't fail the build if it errors
49
  RUN --mount=type=cache,target=$UV_CACHE_DIR,uid=1000,gid=1000 \
50
+ echo "Attempting to install flash-attn..." && \
51
+ (/uv pip install flash-attn --no-build-isolation || echo "Failed, continuing build...")
52
 
53
  # Copy application code
54
  COPY --chown=user . .