# syntax=docker/dockerfile:1
# Dockerfile for a FastAPI app served by uvicorn on Hugging Face Spaces.

# 1. Slim variant keeps the image small; bump the tag to upgrade Python.
FROM python:3.12-slim

# 2. Absolute working directory; created automatically if missing.
WORKDIR /code

# 3. System libs required by OpenCV (cv2 imports libGL / glib even headless).
#    --no-install-recommends + list cleanup in the SAME layer keep it small.
RUN apt-get update && apt-get install -y --no-install-recommends \
      libgl1 \
      libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

# 4. Install Python dependencies BEFORE copying the source tree so this
#    layer stays cached until requirements.txt itself changes.
#    Single install step (the original ran pip twice on the same file).
COPY requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir -r /code/requirements.txt

# 5. Writable cache for Hugging Face model downloads.
#    HF_HOME supersedes the deprecated TRANSFORMERS_CACHE; set both for
#    compatibility with older transformers versions.
ENV HF_HOME=/code/.cache \
    TRANSFORMERS_CACHE=/code/.cache

# 6. Run as a non-root user instead of chmod -R 777.
#    Spaces executes containers as UID 1000, so match that.
RUN useradd --create-home --uid 1000 appuser \
    && mkdir -p /code/.cache \
    && chown -R appuser:appuser /code
USER appuser

# 7. Application code last — edits here don't invalidate the deps layer.
COPY --chown=appuser:appuser . .

# 8. Documentation only (doesn't publish); Spaces expects port 7860.
EXPOSE 7860

# 9. Exec form: uvicorn runs as PID 1 and receives SIGTERM on `docker stop`.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]