# LLMServer / Dockerfile
# Author: AurelioAguirre
# Commit 5de8cee: "Fixing model download issue v9"
# Use Python 3.10 as base image for better compatibility with ML libraries.
# The -slim variant keeps the image small while retaining glibc (needed by
# most ML wheels, unlike alpine/musl).
FROM python:3.10-slim
# Set working directory; WORKDIR creates /app if it does not exist, so no
# explicit mkdir is required.
WORKDIR /app
# Install git and required system dependencies in a single layer:
# update + install must share one RUN so a cached, stale package index is
# never reused (hadolint DL3009), and the apt lists are removed in the
# same layer so they never persist in the image.
# --no-install-recommends avoids pulling in unneeded recommended packages
# (hadolint DL3015) and keeps the image smaller.
# NOTE(review): git is presumably needed for pip VCS requirements or model
# tooling — confirm it is actually required at build/runtime.
RUN apt-get update && \
    apt-get install -y --no-install-recommends git && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# Create cache directory and set permissions.
# chmod 777 is presumably the workaround for Hugging Face Spaces running
# the container under an arbitrary non-root UID — TODO confirm; a dedicated
# user plus targeted ownership would be least-privilege.
RUN mkdir -p /app/.cache/huggingface && \
chmod 777 /app/.cache/huggingface
# Set environment variables for cache: point both the legacy variable
# (TRANSFORMERS_CACHE) and the current one (HF_HOME) at the writable
# directory created above so runtime downloads land there.
ENV TRANSFORMERS_CACHE=/app/.cache/huggingface/hub
ENV HF_HOME=/app/.cache/huggingface
# Dependency layer: copy only the manifest so this (slow) install step is
# reused from cache whenever application code changes but requirements.txt
# does not.
COPY requirements.txt ./
# Install pinned Python dependencies; skip pip's wheel cache so it is
# never baked into the image layer (hadolint DL3042).
RUN pip install -r requirements.txt --no-cache-dir
# Create checkpoints directory with proper permissions.
# World-writable (777) presumably because Hugging Face Spaces runs the
# container under an arbitrary UID that must write here — TODO confirm;
# prefer chown to a dedicated user over 777 if the UID is known.
RUN mkdir -p /app/main/checkpoints && \
chmod 777 /app/main/checkpoints
# The token will be passed during build time:
#   docker build --build-arg HF_TOKEN=hf_xxx .
# SECURITY: the token is intentionally NOT exported with ENV — the original
# `ENV HF_TOKEN=${HF_TOKEN}` baked the secret into the runtime image, where
# `docker inspect` exposes it. Note that --build-arg values still appear in
# `docker history`; prefer a BuildKit secret mount
# (RUN --mount=type=secret,id=hf_token ...) when the builder supports it.
# NOTE(review): if the app reads HF_TOKEN at runtime, supply it via
# `docker run -e HF_TOKEN=...` instead of baking it in — confirm.
ARG HF_TOKEN
# Download model using litgpt command line with correct checkpoint path.
# Fail fast with an accurate message when no token is given: the original
# said "Models will need to be downloaded separately" yet still exited 1,
# which contradicted itself — the build cannot succeed without the model.
RUN if [ -n "$HF_TOKEN" ]; then \
        litgpt download mistralai/Mistral-7B-Instruct-v0.3 \
            --access_token "$HF_TOKEN" \
            --checkpoint_dir /app/main/checkpoints; \
    else \
        echo "ERROR: HF_TOKEN build arg is required to download the model." >&2; \
        exit 1; \
    fi
# Copy the rest of the application after the dependency layers so source
# changes do not invalidate the cached pip install above.
# NOTE(review): ensure a .dockerignore exists (.git, caches, .env, logs) so
# secrets and junk are not swept into the image by this broad copy.
COPY . .
# Runtime configuration for the LLM engine, grouped into a single ENV
# instruction: bind address, serving port (7860 is the Hugging Face Spaces
# convention), and the path of the model downloaded at build time.
ENV LLM_ENGINE_HOST=0.0.0.0 \
    LLM_ENGINE_PORT=7860 \
    MODEL_PATH=/app/main/checkpoints/mistralai/Mistral-7B-Instruct-v0.3
# Expose port 7860 for Hugging Face Spaces (documentation only — EXPOSE
# does not publish the port; it matches LLM_ENGINE_PORT above).
EXPOSE 7860
# Command to run the application. Exec (JSON-array) form keeps Python as
# PID 1 so it receives SIGTERM directly from `docker stop`.
CMD ["python", "main/main.py"]