# llm_host / Dockerfile
# Author: Bahodir Nematjonov — commit a4ac1ab ("debuging docker"), 482 bytes.
# (Header reconstructed from web-viewer residue; the original lines were not
# valid Dockerfile syntax and would fail the build.)
# syntax=docker/dockerfile:1
# Use the official Python 3.9 image.
# NOTE(review): consider pinning a patch version or digest
# (e.g. python:3.9.19-slim-bookworm@sha256:...) for reproducible builds;
# the full image is kept because the Ollama installer needs curl, which
# -slim variants do not ship.
FROM python:3.9

# Fail the curl|sh pipeline below when curl itself fails (hadolint DL4006);
# the default /bin/sh -c would mask a download error.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Set the working directory (created automatically if missing).
WORKDIR /code

# Copy only the dependency manifest first so the pip layer stays cached
# until requirements.txt changes.
COPY ./requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Install Ollama (needs root; done before switching USER).
# NOTE(review): this runs an unpinned remote script — pin a specific
# release and verify its checksum for an auditable, reproducible build.
RUN curl -fsSL https://ollama.com/install.sh | sh

# Copy the application source. The original image only copied
# requirements.txt, so `uvicorn main:app` could not resolve `main`
# without an external bind mount.
COPY . /code

# Run as a non-root user; ollama stores models under $HOME/.ollama,
# so the user needs a writable home directory.
RUN useradd --create-home --uid 1000 appuser \
    && chown -R appuser:appuser /code
USER appuser
ENV HOME=/home/appuser

# Document FastAPI's port (EXPOSE does not publish it by itself).
EXPOSE 7860

# Start Ollama in the background, then `exec` uvicorn so the API server
# becomes PID 1 and receives SIGTERM from `docker stop` (shell-form CMD
# without exec would leave the shell as PID 1 and swallow signals).
CMD ["/bin/bash", "-c", "ollama serve & exec uvicorn main:app --host 0.0.0.0 --port 7860"]