playgr committed
Commit 53d2856 · 1 Parent(s): bff6d5b

first test
Files changed (2)
  1. Dockerfile +17 -12
  2. run.py +7 -1
Dockerfile CHANGED
@@ -1,21 +1,26 @@
 # 1) Base image with PyTorch + CUDA 11.7 (T4 compatible)
 FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime
 
-# 2) Working directory
+# 2) Redirect the HF/transformers cache to a dir inside /app
+ENV HF_HOME=/app/.cache \
+    TRANSFORMERS_CACHE=/app/.cache \
+    XDG_CACHE_HOME=/app/.cache
+
 WORKDIR /app
 
-# 3) Copy the entire repo contents (run.py, test.jpg, etc.)
+# 3) Copy the whole repo (includes test.jpg, run.py, etc.)
 COPY . .
 
-# 4) Install the required Python dependencies
-RUN pip install --no-cache-dir \
+# 4) Create the cache folder and install deps
+RUN mkdir -p /app/.cache && \
+    pip install --no-cache-dir \
     sam2 \
     torch \
     transformers \
     diffusers \
     huggingface_hub \
     numpy \
     Pillow
 
-# 5) Define the ENTRYPOINT so run.py runs on container start
+# 5) On startup, run run.py (uses test.jpg and the default prompt)
 ENTRYPOINT ["python", "run.py"]
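
Note on the cache redirection: HF_HOME moves the whole Hugging Face cache (hub downloads included), TRANSFORMERS_CACHE is the older transformers-specific override, and XDG_CACHE_HOME is the generic fallback huggingface_hub consults when HF_HOME is unset; exporting all three keeps every download under /app/.cache no matter which lookup path a library takes. Below is a minimal Python sketch for checking the redirection from inside the container; check_cache.py is a hypothetical helper, not part of this commit.

    # check_cache.py -- hypothetical helper, not shipped in this repo
    import os

    # The Dockerfile's ENV block exports all three of these variables
    for var in ("HF_HOME", "TRANSFORMERS_CACHE", "XDG_CACHE_HOME"):
        print(f"{var} = {os.environ.get(var, '<unset>')}")

    # After the first from_pretrained() call, downloads should land here
    cache_root = os.environ.get("HF_HOME", "/app/.cache")
    print("cache contents:", os.listdir(cache_root) if os.path.isdir(cache_root) else "<missing>")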
run.py CHANGED
@@ -7,6 +7,8 @@ import uuid
 import numpy as np
 from PIL import Image, ImageFilter
 
+os.makedirs(os.environ["TRANSFORMERS_CACHE"], exist_ok=True)
+
 # Dependencies
 try:
     import torch
@@ -79,7 +81,11 @@ def main():
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
     # CLIP: load
-    clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
+    clip_model = CLIPModel.from_pretrained(
+        "openai/clip-vit-base-patch32",
+        from_tf=True,
+        cache_dir=os.environ["TRANSFORMERS_CACHE"]
+    ).to(device)
     clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
     text_inputs = clip_processor(text=CLOTHING_LABELS, return_tensors="pt", padding=True).to(device)
     with torch.no_grad():
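
Two caveats about the run.py change. The new os.makedirs(os.environ["TRANSFORMERS_CACHE"], ...) line raises KeyError whenever the script runs outside this image, since it assumes the Dockerfile's ENV block is present. And from_tf=True tells transformers to load and convert the TensorFlow checkpoint, which requires TensorFlow at runtime, yet the Dockerfile's pip list does not install it. The sketch below is a more defensive alternative under those assumptions, not the code this commit ships; openai/clip-vit-base-patch32 also publishes PyTorch weights, so the default load path needs no TensorFlow.

    # Sketch: defensive variant of the CLIP load (an alternative, not the commit's code)
    import os
    from transformers import CLIPModel, CLIPProcessor

    # Fall back to the Dockerfile's path when the env var is absent (e.g. local runs)
    cache = os.environ.get("TRANSFORMERS_CACHE", "/app/.cache")
    os.makedirs(cache, exist_ok=True)

    # Default (PyTorch) weights: no from_tf, so TensorFlow is not needed
    clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", cache_dir=cache)
    clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32", cache_dir=cache)

If the TensorFlow conversion path is genuinely intended, adding tensorflow-cpu to the RUN pip install list would at least make the image self-consistent.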