# syntax=docker/dockerfile:1
# Ollama server image — appears intended for Hugging Face Spaces (serves on port 7860).
# Single-stage image: Ubuntu base with interactive tooling plus the Ollama server.
FROM ubuntu:22.04

# The `curl … | sh` installers below are pipelines: without pipefail, a failed
# download would be silently masked by the exit status of `sh`.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Install OS packages in one layer. --no-install-recommends keeps the image lean
# (ca-certificates is now listed explicitly — curl needs it for HTTPS), and the
# apt lists are removed in the same layer so they never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    coreutils \
    curl \
    git \
    iputils-ping \
    nano \
    neovim \
    openssh-client \
    procps \
    sudo \
    vim \
    wget \
    && rm -rf /var/lib/apt/lists/*

# Install sshx (terminal sharing).
# NOTE(review): unpinned `curl | sh` installer — not reproducible and trusts the
# remote host; consider pinning a release version + checksum.
RUN curl -sSf https://sshx.io/get | sh

# Install Ollama. -fsSL makes curl fail hard on HTTP errors instead of piping
# an error page into sh, and follows redirects.
# NOTE(review): same unpinned-installer caveat as above.
RUN curl -fsSL https://ollama.ai/install.sh | sh
# Pinned-release workaround kept for reference (embedding bug in newer builds):
# RUN curl -fsSL https://ollama.com/install.sh | sed 's#https://ollama.com/download#https://github.com/jmorganca/ollama/releases/download/v0.1.29#' | sh

# Ollama state/model directory. 777 is deliberate: Hugging Face Spaces runs the
# container with an arbitrary non-root UID that must be able to write here.
RUN mkdir -p /.ollama && chmod 777 /.ollama
WORKDIR /.ollama

# Entry point script (should end with `exec "$@"` so ollama becomes PID 1 and
# receives SIGTERM on `docker stop` — TODO confirm against entrypoint.sh).
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

# Model selection: ARG gives the build-time default (--build-arg model=…),
# ENV carries it into the runtime environment (overridable with -e model=…).
# Without the ARG, ${model} would always expand to the empty string.
ARG model=
ENV model=${model}

ENTRYPOINT ["/entrypoint.sh"]
CMD ["ollama", "serve"]

# Documentation only (does not publish the port); Spaces routes traffic to 7860.
EXPOSE 7860