# syntax=docker/dockerfile:1

# Build image for llava-cpp-server: clones the upstream repo (which vendors
# llama.cpp as a git submodule), downloads the LLaVA v1.5 7B model weights,
# compiles the server, and runs it as the container's main process.
FROM python:3.11.6-bullseye

# Shallow-clone with submodules into an explicit absolute path so the layer
# stays small and the build does not depend on the base image's default cwd.
# NOTE(review): the clone is unpinned — builds are not reproducible; consider
# pinning a commit or tag once a known-good revision is confirmed.
RUN git clone --depth 1 --recurse-submodules --shallow-submodules \
        https://github.com/trzy/llava-cpp-server /llava-cpp-server

# Absolute WORKDIR (hadolint DL3000). Equivalent to the original relative
# `llava-cpp-server`, since the base image's default working directory is /.
WORKDIR /llava-cpp-server

# Fetch model weights in separate layers so a change to one download does not
# re-download the other (each file is multi-GB). -nv keeps build logs readable.
# NOTE(review): downloads are not checksum-verified — add sha256 checks once
# the expected digests are confirmed.
RUN wget -nv https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/mmproj-model-f16.gguf
RUN wget -nv https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/ggml-model-q4_k.gguf

# Compile the server (produces bin/llava-server); parallelize across cores.
RUN make -j"$(nproc)"

# Exec-form CMD: llava-server runs as PID 1 and receives SIGTERM on
# `docker stop`. Paths are relative to WORKDIR (/llava-cpp-server).
CMD ["bin/llava-server", "-m", "ggml-model-q4_k.gguf", "--mmproj", "mmproj-model-f16.gguf"]