# Use an official Python 3.10 runtime as the parent image
FROM python:3.10-slim

# Create a non-root user and change ownership
RUN useradd -m -u 1000 user
WORKDIR /app
COPY --chown=user . /app

# Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Reinstall transformers with its PyTorch extra, plus TensorFlow/Keras and the remaining runtime dependencies
RUN pip uninstall -y transformers
RUN pip install --no-cache-dir "transformers[torch]" tensorflow tf-keras python-dotenv astrapy langchain-community

# Create a script to download the model
RUN printf '%s\n' \
    'from transformers import AutoTokenizer, AutoModelForSequenceClassification' \
    'tokenizer = AutoTokenizer.from_pretrained("ProsusAI/finbert")' \
    'model = AutoModelForSequenceClassification.from_pretrained("ProsusAI/finbert")' \
    > /download_model.py

# Run the script to cache the model during build
RUN python /download_model.py

# Remove the download script (optional)
RUN rm /download_model.py

# Create the data directory and set permissions
RUN mkdir -p /app/data && chmod 777 /app/data

# Make port 7860 available to the world outside this container
EXPOSE 7860

# Define environment variables for the Flask application
ENV FLASK_APP=app
ENV FLASK_RUN_HOST=0.0.0.0
ENV FLASK_RUN_PORT=7860
ENV FLASK_ENV=development

# Run uvicorn when the container launches (the earlier gunicorn command is kept for reference)
# CMD ["gunicorn", "-t", "120", "-w", "4", "app:app"]
CMD ["uvicorn", "app:asgi_app", "--host", "0.0.0.0", "--port", "7860", "--timeout-keep-alive", "120"]