# Dockerfile for a Hugging Face Space (FastAPI app served on port 7860)
FROM python:3.10-slim

# System packages: ffmpeg for audio processing, gcc for building wheels from
# requirements.txt, wget/unzip to fetch the Vosk model below, curl kept for the
# (currently commented-out) Ollama install.
# --no-install-recommends keeps the layer minimal (hadolint DL3015); the apt
# list cache is removed in the same layer so it never persists in the image.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        curl \
        ffmpeg \
        gcc \
        unzip \
        wget \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /app
# HF_HOME points Hugging Face libraries at a writable in-image cache directory.
ENV HF_HOME=/app/cache
# Create cache directory and set permissions.
# NOTE(review): chmod -R 777 grants world-write; presumably required because
# Hugging Face Spaces runs the container under an arbitrary non-root UID that
# must be able to write model caches (/app/cache) and the local DB (/app/db)
# — confirm before tightening to least-privilege permissions.
RUN mkdir -p /app/cache /app/db && chmod -R 777 /app/cache /app/db
# Install Python dependencies from the manifest FIRST, so this (slow) layer is
# cached until requirements.txt itself changes instead of being invalidated by
# every source-code edit. --no-cache-dir keeps pip's wheel cache out of the
# image layer (hadolint DL3042).
COPY requirements.txt ./
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt
# Copy the application source into the container after dependencies are baked.
COPY . .
# CPU-only PyTorch wheels, pinned; installed after requirements.txt so these
# exact "+cpu" builds win even if requirements pulled in a different torch.
# NOTE(review): the -f torch_stable index is needed to resolve the "+cpu"
# local-version tags — confirm these pins match what the app actually imports.
RUN pip install --no-cache-dir torch==2.2.0+cpu torchvision==0.17.0+cpu torchaudio==2.2.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
# Install spaCy model: bake the large English model into the image so the app
# does not download it at container startup.
RUN python -m spacy download en_core_web_lg
# Smaller/faster alternative, kept for reference:
# RUN python -m spacy download en_core_web_sm
# Download the small English Vosk speech-recognition model, unpack it into
# /app (the current WORKDIR), and delete the archive in the same layer so the
# zip never persists in the image.
# NOTE(review): the download is not checksum-verified — consider pinning a
# sha256 for reproducibility if alphacephei.com ever republishes the file.
RUN set -eux; \
    wget https://alphacephei.com/vosk/models/vosk-model-small-en-us-0.15.zip; \
    unzip vosk-model-small-en-us-0.15.zip; \
    rm vosk-model-small-en-us-0.15.zip
# Optional Ollama install, currently disabled — left here as documentation of
# the intended local-LLM setup.
# RUN curl -fsSL https://ollama.com/install.sh | sh
# Pull the Ollama model
# RUN ollama serve & sleep 5 && ollama pull gemma:2b
# Expose the port FastAPI will run on.
# EXPOSE is documentation only (it does not publish the port); 7860 is the
# Hugging Face Spaces convention.
EXPOSE 7860
# Start the API. Exec (JSON-array) form so uvicorn runs as PID 1 and receives
# SIGTERM directly on container stop. (A stray " |" scrape artifact was
# removed from the end of this line — it was not valid Dockerfile syntax.)
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]