Update Dockerfile
Dockerfile  (+26 -10)  CHANGED
@@ -1,8 +1,9 @@
 # Use a Python 3.10 base image with Debian Bookworm
 FROM python:3.10-bookworm
 
-# Set
-
+# Set environment variables to avoid Python buffering and ensure non-interactive installs
+ENV PYTHONUNBUFFERED=1 \
+    DEBIAN_FRONTEND=noninteractive
 
 # Install system dependencies, including OpenBLAS, Git, and build tools
 RUN apt-get update && apt-get install -y \
@@ -12,8 +13,22 @@ RUN apt-get update && apt-get install -y \
     libopenblas-dev \
     && rm -rf /var/lib/apt/lists/*
 
+# Create a non-root user with configurable UID/GID
+ARG USER_ID=1000
+ARG GROUP_ID=1000
+RUN groupadd -g ${GROUP_ID} appuser && \
+    useradd -m -u ${USER_ID} -g ${GROUP_ID} -s /bin/bash appuser
+
+# Set up Hugging Face cache directory with proper permissions
+RUN mkdir -p /home/appuser/.cache/huggingface && \
+    chown -R appuser:appuser /home/appuser/.cache
+
+# Set working directory
+WORKDIR /app
+
 # Clone llama-cpp-python repository with submodules
-RUN git clone --recursive https://github.com/abetlen/llama-cpp-python.git /app/llama-cpp-python
+RUN git clone --recursive https://github.com/abetlen/llama-cpp-python.git /app/llama-cpp-python && \
+    chown -R appuser:appuser /app/llama-cpp-python
 
 # Set working directory to llama-cpp-python
 WORKDIR /app/llama-cpp-python
@@ -25,20 +40,21 @@ RUN git submodule update --remote vendor/llama.cpp
 ENV FORCE_CMAKE=1
 ENV CMAKE_ARGS="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DLLAMA_CURL=OFF"
 
-# Install llama-cpp-python from source
-
+# Install llama-cpp-python from source as appuser
+USER appuser
+RUN pip install . --user --upgrade --force-reinstall --no-cache-dir
 
 # Copy your application code
-COPY app.py /app/
+COPY --chown=appuser:appuser app.py /app/
 
-# Install additional Python dependencies for your app
-RUN pip install gradio huggingface_hub
+# Install additional Python dependencies for your app as appuser
+RUN pip install --user gradio huggingface_hub
 
 # Set working directory for the application
 WORKDIR /app
 
-# Expose port for Gradio
+# Expose port for Gradio
 EXPOSE 7860
 
-#
+# Run the application as appuser
 CMD ["python", "app.py"]
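With the new USER_ID/GROUP_ID build arguments, the image can be built locally so that appuser's UID/GID match the host user. A usage sketch (the llama-gradio tag is illustrative, not part of this commit):

    # build with the host user's UID/GID baked into appuser
    docker build \
        --build-arg USER_ID="$(id -u)" \
        --build-arg GROUP_ID="$(id -g)" \
        -t llama-gradio .
    # run the Gradio app on the exposed port
    docker run --rm -p 7860:7860 llama-gradio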
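Because the Dockerfile pre-creates /home/appuser/.cache/huggingface with appuser ownership, a host-side Hugging Face cache can be bind-mounted there to reuse downloaded models across runs. A sketch under the same illustrative tag, assuming the build args above made the container UID match the host (otherwise the mounted files may not be writable by appuser):

    # reuse the host's Hugging Face cache inside the container
    docker run --rm -p 7860:7860 \
        -v "$HOME/.cache/huggingface:/home/appuser/.cache/huggingface" \
        llama-gradio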
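To check that the container really runs as the non-root user and that the from-source llama-cpp-python build is importable, the CMD can be overridden (a sketch; it assumes the llama_cpp package exposes __version__, which current releases do):

    # should print: appuser
    docker run --rm llama-gradio id -un
    # import check for the OpenBLAS-enabled build
    docker run --rm llama-gradio python -c "import llama_cpp; print(llama_cpp.__version__)"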