# llm_host / Dockerfile  (commit a52b206, "Update Dockerfile" by joe4ai)
# syntax=docker/dockerfile:1

# Use the official Python 3.9 image.
# NOTE(review): python:3.9-slim would be much smaller; kept full image in
# case the app relies on build tools present only in the fat variant.
FROM python:3.9

# Root-owned working directory for dependency installation.
WORKDIR /code

# Install system dependencies in one layer; --no-install-recommends keeps
# the image small, and the apt list cleanup must stay in the SAME layer
# to actually reduce image size. Packages sorted alphabetically.
RUN apt-get update && apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
        git \
    && rm -rf /var/lib/apt/lists/*

# Install Ollama (must run as root; the installer writes to /usr/local).
# NOTE(review): piping a remote script to sh is unauditable and unpinned —
# consider downloading a pinned release and verifying its checksum.
RUN curl -fsSL https://ollama.ai/install.sh | sh

# Copy only the requirements manifest first so the pip layer stays cached
# until requirements.txt itself changes (not on every source edit).
COPY ./requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Create the non-root runtime user with UID 1000 (the UID expected by
# Hugging Face Spaces).
RUN useradd -m -u 1000 user

# Everything below runs unprivileged.
USER user

# Runtime environment for the non-root user.
# NOTE(review): OLLAMA_HOME is not a documented Ollama variable — the model
# store is controlled by OLLAMA_MODELS (default ~/.ollama/models). With the
# current setup the build-time pull below lands in /home/user/.ollama,
# which is what persists in the image; confirm before "fixing" this.
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH \
    OLLAMA_HOME="/tmp/ollama_cache"

# Application working directory (created automatically).
WORKDIR $HOME/app

# Copy application files owned by the runtime user; explicit user:group
# form avoids relying on the implicit group resolution of a bare --chown.
COPY --chown=user:user . $HOME/app

# Ensure Ollama has a writable scratch directory.
RUN mkdir -p /tmp/ollama_cache

# Pre-pull the model at build time so container startup does not download
# it. `ollama pull` needs a running server, so start one in the background
# and give it a moment to come up. NOTE(review): the bare "llama3.2" tag is
# unpinned — pin a specific version tag for reproducible builds.
RUN ollama serve > /tmp/ollama.log 2>&1 & \
    sleep 5 && ollama pull llama3.2

# Documentation only (does not publish the port): the API listens on 7860,
# the Hugging Face Spaces default.
EXPOSE 7860

# Start Ollama in the background, then `exec` uvicorn so the app replaces
# the shell, becomes PID 1, and receives SIGTERM from `docker stop`.
# JSON form with an explicit sh -c keeps the intent visible.
CMD ["/bin/sh", "-c", "ollama serve > /tmp/ollama.log 2>&1 & sleep 5 && exec uvicorn main:app --host 0.0.0.0 --port 7860"]