# interview-ai-detector — Dockerfile (Hugging Face Space; commit 933b7b6 "un-gcp-ize")
# Base image: official PyTorch runtime with CUDA 12.1 / cuDNN 8
# (tag is pinned — keep it pinned for reproducible builds).
FROM pytorch/pytorch:2.1.2-cuda12.1-cudnn8-runtime

# All subsequent relative paths resolve under /app.
WORKDIR /app

# Hugging Face cache/home directory. Made world-writable because the
# container presumably runs as a non-root user at runtime (HF Spaces) —
# NOTE(review): confirm the runtime UID; a targeted chown would be tighter.
# The directory is freshly created and empty, so a plain chmod suffices
# (no need for a recursive -R 777).
ENV HF_HOME=/app/.hf_home
RUN mkdir .hf_home && chmod 777 .hf_home

# Install Python dependencies BEFORE copying the full source tree so this
# expensive layer stays cached until requirements.txt itself changes.
COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application source into the image.
# (Keep a .dockerignore excluding .git, .env, caches, etc.)
COPY . /app
# Download the Gemma model weights if they are not already present.
# Dockerfile.d/gemma_check.py (not visible here) presumably reads the
# credentials from the mounted secrets — verify against that script.
# BuildKit secret mounts expose the secret at /run/secrets/<id> for this
# RUN step only; the token is never written into an image layer.
# if building locally, use the flag: --secret id=dotenv,src=.env
# if using HF Spaces, define HUGGINGFACE_TOKEN in Settings -> Variables and secrets
RUN --mount=type=secret,id=dotenv \
--mount=type=secret,id=HUGGINGFACE_TOKEN \
python Dockerfile.d/gemma_check.py
# Download NLTK data straight into /nltk_data (instead of downloading to
# /root/nltk_data and mv-ing it out of root's home, which a non-root
# runtime user could not read). NLTK_DATA makes the location explicit so
# the runtime lookup does not depend on NLTK's built-in search paths.
ENV NLTK_DATA=/nltk_data
RUN python -m nltk.downloader -d /nltk_data punkt wordnet averaged_perceptron_tagger

# Unzip wordnet (some NLTK versions do not auto-extract this corpus).
# Use the Python stdlib zipfile module rather than apt-installing `unzip`,
# which avoided-apt keeps the image smaller and drops the layer that would
# otherwise carry stale /var/lib/apt/lists data.
RUN python -c "import zipfile; zipfile.ZipFile('/nltk_data/corpora/wordnet.zip').extractall('/nltk_data/corpora/')"
# Run uvicorn serving the FastAPI/ASGI app `app` from prediction.py.
# EXPOSE is documentation only; 7860 is the port HF Spaces routes to.
EXPOSE 7860
# Exec-form CMD: uvicorn runs as PID 1 and receives SIGTERM directly on stop.
CMD ["uvicorn", "prediction:app", "--host", "0.0.0.0", "--port", "7860"]