Segizu committed
Commit 8d88e43 · 1 Parent(s): 752c0fd

metadata v12

Files changed (3)
  1. app.py +23 -42
  2. metadata.csv +0 -0
  3. metadata.py +2 -2
app.py CHANGED
@@ -7,21 +7,22 @@ import os
 import pickle
 from pathlib import Path
 import gc
-import requests
-from io import BytesIO
 
-# 🔑 Token de autenticación
+# 🔐 Token automático (si es necesario)
 HF_TOKEN = os.getenv("HF_TOKEN")
-if not HF_TOKEN:
-    raise ValueError("⚠️ Por favor, configura la variable de entorno HF_TOKEN para acceder al dataset privado")
 
 # 📁 Directorio para embeddings
 EMBEDDINGS_DIR = Path("embeddings")
 EMBEDDINGS_DIR.mkdir(exist_ok=True)
 EMBEDDINGS_FILE = EMBEDDINGS_DIR / "embeddings.pkl"
 
-# ✅ Cargar dataset desde metadata.csv (con URLs absolutas)
-dataset = load_dataset("csv", data_files="metadata.csv")
+# ✅ Cargar dataset directamente desde Hugging Face Hub
+dataset = load_dataset(
+    path="Segizu/facial-recognition",
+    data_files="metadata.csv",
+    token=HF_TOKEN
+)
+dataset = dataset["train"].cast_column("image", HfImage())
 
 # 🔄 Preprocesar imagen para Facenet
 def preprocess_image(img: Image.Image) -> np.ndarray:
@@ -40,38 +41,21 @@ def build_database():
     database = []
     batch_size = 10
 
-    # Get the train split
-    train_dataset = dataset["train"]
-
-    # Debug: Print dataset structure
-    print("Dataset structure:", train_dataset.features)
-    print("First item structure:", train_dataset[0])
-    print("Dataset type:", type(train_dataset))
-    print("Dataset item type:", type(train_dataset[0]))
-
-    for i in range(0, len(train_dataset), batch_size):
-        batch = train_dataset[i:i + batch_size]
-        print(f"📦 Procesando lote {i // batch_size + 1}/{(len(train_dataset) + batch_size - 1) // batch_size}")
+    for i in range(0, len(dataset), batch_size):
+        batch = dataset[i:i + batch_size]
+        print(f"📦 Procesando lote {i // batch_size + 1}/{(len(dataset) + batch_size - 1) // batch_size}")
 
         for j, item in enumerate(batch):
             try:
-                print(f"Debug - Processing item {i+j}")
-                print(f"Debug - Item type: {type(item)}")
-                print(f"Debug - Item content: {item}")
-
-                # Get the image URL
-                image_url = item["image"]
-                if not isinstance(image_url, str) or not image_url.startswith("http"):
-                    print(f"⚠️ Skipping item {i+j} - Invalid URL format")
+                if not isinstance(item, dict) or "image" not in item:
+                    print(f"⚠️ Saltando item {i+j} - estructura inválida: {item}")
                     continue
-
-                # Download and process the image
-                response = requests.get(image_url, timeout=10)
-                response.raise_for_status()
-                img = Image.open(BytesIO(response.content))
-
-                # Ensure image is in RGB mode
-                img = img.convert("RGB")
+
+                img = item["image"]
+                if not isinstance(img, Image.Image):
+                    print(f"⚠️ Saltando item {i+j} - no es imagen: {type(img)}")
+                    continue
+
                 img_processed = preprocess_image(img)
                 embedding = DeepFace.represent(
                     img_path=img_processed,
@@ -80,19 +64,16 @@ def build_database():
                 )[0]["embedding"]
 
                 database.append((f"image_{i+j}", img, embedding))
-                print(f"✅ Procesada imagen {i+j+1}/{len(train_dataset)}")
+                print(f"✅ Procesada imagen {i+j+1}/{len(dataset)}")
 
                 del img_processed
                 gc.collect()
 
             except Exception as e:
-                print(f"❌ No se pudo procesar imagen {i+j}: {str(e)}")
-                print(f"Error details: {type(e).__name__}")
-                import traceback
-                print(traceback.format_exc())
+                print(f"❌ Error al procesar imagen {i+j}: {str(e)}")
                 continue
 
-        # 💾 Guardar después de cada batch
+        # Guardar después de cada lote
         if database:
             print("💾 Guardando progreso...")
             with open(EMBEDDINGS_FILE, 'wb') as f:
@@ -135,7 +116,7 @@ def find_similar_faces(uploaded_image: Image.Image):
 
     return gallery_items, text_summary
 
-# ⚙️ Inicializar
+# ⚙️ Iniciar la aplicación
 print("🚀 Iniciando aplicación...")
 database = build_database()
 print(f"✅ Base de datos cargada con {len(database)} imágenes")
metadata.csv CHANGED
The diff for this file is too large to render. See raw diff
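
The raw diff is not rendered here, but the metadata.py change below sets base_url to an empty string, so the image column is now expected to hold repo-relative .jpg paths rather than full resolve/main URLs. A hedged local sanity check, assuming the regenerated metadata.csv sits next to the scripts and uses the image header that app.py reads:

import csv

# Peek at the regenerated metadata.csv
with open("metadata.csv", newline="") as f:
    rows = list(csv.DictReader(f))

print(f"{len(rows)} rows")
if rows:
    first = rows[0].get("image", "")
    print("first entry:", first)
    print("absolute URL?", first.startswith("http"))  # expected False after this commit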
 
metadata.py CHANGED
@@ -2,14 +2,14 @@ from huggingface_hub import HfApi
 import csv
 import os
 
-HF_TOKEN = os.getenv("HF_TOKEN") or "hf_token"
+HF_TOKEN = os.getenv("HF_TOKEN") or ""
 repo_id = "Segizu/facial-recognition"
 
 api = HfApi()
 files = api.list_repo_files(repo_id=repo_id, repo_type="dataset", token=HF_TOKEN)
 
 # Generar URLs completas
-base_url = f"https://huggingface.co/datasets/{repo_id}/resolve/main/"
+base_url = f""
 image_urls = [base_url + f for f in files if f.lower().endswith(".jpg")]
 
 # Escribir nuevo metadata.csv
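
The hunk above covers only the token default and the URL-building change; the part of metadata.py that actually writes the CSV lies outside the diff context. A sketch of the full flow under stated assumptions (a single image header column, output file metadata.csv), not the committed script:

import csv
import os
from huggingface_hub import HfApi

HF_TOKEN = os.getenv("HF_TOKEN") or ""
repo_id = "Segizu/facial-recognition"

# List every file in the dataset repo and keep only the .jpg images
api = HfApi()
files = api.list_repo_files(repo_id=repo_id, repo_type="dataset", token=HF_TOKEN)

# base_url is now empty, so the CSV stores repo-relative paths
base_url = ""
image_urls = [base_url + f for f in files if f.lower().endswith(".jpg")]

# Write metadata.csv with an "image" header (column name assumed from app.py's item["image"])
with open("metadata.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["image"])
    for path in image_urls:
        writer.writerow([path])

print(f"metadata.csv written with {len(image_urls)} entries")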