# syntax=docker/dockerfile:1
# Image wrapping the official vLLM OpenAI-compatible server with a custom
# entrypoint script. Build-time knob: VLLM_TAG selects the base-image tag.
#
# Pin VLLM_TAG to a release (e.g. v0.5.3.post1) for reproducible builds;
# "latest" is kept as the default only for backward compatibility.
ARG VLLM_TAG=latest
FROM vllm/vllm-openai:${VLLM_TAG}

# Install debugging tools (procps for `ps`, vim for in-container edits).
# --no-install-recommends keeps the layer small; the apt lists are removed
# in the same layer so they never persist in the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        procps \
        vim \
    && rm -rf /var/lib/apt/lists/*

# SECURITY: do NOT bake HUGGING_FACE_HUB_TOKEN into the image with ENV —
# anything set via ENV/ARG is recoverable from `docker history`.
# Supply it at runtime instead:
#   docker run -e HUGGING_FACE_HUB_TOKEN=... <image>
# or, if needed during build, use a BuildKit secret mount:
#   RUN --mount=type=secret,id=hf_token ...

# Redirect every cache/config location into /tmp so the container can run
# with a read-only root filesystem and without a writable $HOME.
ENV HF_HOME="/tmp/huggingface" \
    XDG_CACHE_HOME="/tmp/cache" \
    XDG_CONFIG_HOME="/tmp/config" \
    NUMBA_CACHE_DIR="/tmp/numba_cache" \
    OUTLINES_CACHE_DIR="/tmp/outlines_cache" \
    VLLM_USE_MODELSCOPE="false" \
    VLLM_DISABLE_USAGE_STATS="true"

# Ensure PATH includes common system locations ahead of whatever the base
# image set, so the entrypoint script resolves python/binaries predictably.
ENV PATH="/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin:$PATH"

WORKDIR /app

# Copy the entrypoint executable in one step; --chmod avoids a follow-up
# `RUN chmod` layer that would duplicate the file's bytes in the image.
COPY --chmod=0755 entrypoint.sh /app/entrypoint.sh

# Documentation only: the vLLM OpenAI-compatible API listens on 8000.
EXPOSE 8000

# Exec form so the script is PID 1 and receives SIGTERM from `docker stop`.
# The script should end with `exec ...` so the server replaces the shell.
ENTRYPOINT ["/app/entrypoint.sh"]