# ggml-mllm / Dockerfile
FROM ubuntu:22.04
ENV DEBIAN_FRONTEND=noninteractive
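# Noninteractive frontend keeps apt from prompting (e.g. for tzdata) during the image build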
# Update and install necessary dependencies
RUN apt update && \
    apt install --no-install-recommends -y \
        build-essential \
        python3 \
        python3-pip \
        wget \
        curl \
        git \
        cmake \
        zlib1g-dev \
        libblas-dev && \
    apt clean && \
    rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Download ggml and mmproj models from HuggingFace
RUN wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/ggml-model-q4_k.gguf && \
    wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/mmproj-model-f16.gguf
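# ggml-model-q4_k.gguf is the Q4_K-quantized BakLLaVA language model;
# mmproj-model-f16.gguf is the f16 multimodal (CLIP) projector the server pairs with it.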
# Clone and build llama.cpp; its server example provides the LLaVA-style multimodal endpoint
RUN git clone https://github.com/ggerganov/llama.cpp.git && \
    cd llama.cpp && \
    git submodule init && \
    git submodule update && \
    make
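# A plain `make` builds CPU-only binaries; in the llama.cpp revisions targeted here the
# HTTP server example is produced at the repo root as `server`, which the CMD below runs.
# (BLAS/CUDA acceleration would require additional build flags and is not enabled here.)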
# Create a non-root user for security reasons and copy the models into its home directory
RUN useradd -m -u 1000 user && \
    mkdir -p /home/user/app && \
    cp /app/ggml-model-q4_k.gguf /home/user/app && \
    cp /app/mmproj-model-f16.gguf /home/user/app
RUN chown user:user /home/user/app/ggml-model-q4_k.gguf && \
    chown user:user /home/user/app/mmproj-model-f16.gguf
USER user
ENV HOME=/home/user
WORKDIR $HOME/app
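# The server starts from /home/user/app, so the relative --model/--mmproj paths
# in CMD resolve to the model files copied above.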
# Expose the server's HTTP port
EXPOSE 8080
# Start the llama.cpp server with the model and multimodal projector
CMD ["/app/llama.cpp/server", "--model", "ggml-model-q4_k.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0", "--threads", "10"]
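# Example usage (a sketch; the image tag and prompt are placeholders, and the /completion
# endpoint with "prompt"/"n_predict" fields is the llama.cpp server API of this era):
#   docker build -t ggml-mllm .
#   docker run -p 8080:8080 ggml-mllm
#   curl http://localhost:8080/completion -H "Content-Type: application/json" \
#     -d '{"prompt": "Describe the image in one sentence.", "n_predict": 64}'
# (Multimodal requests additionally attach base64 image_data; see the llama.cpp server README.)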