Commit dded47d
Wkatir committed
1 Parent(s): 081b432

fix: an issue with the format
Browse files:
- pages/1_Cloudinary_AI.py (+28 -15)
- pages/2_Cloudinary_Crop.py (+26 -17)
- pages/3_Image_Compression_Tool.py (+28 -28)
pages/1_Cloudinary_AI.py
CHANGED
@@ -42,14 +42,20 @@ def cleanup_cloudinary():


def process_image(image, width, height):
+    """
+    Procesa la imagen usando Cloudinary. Se asegura de reiniciar el puntero del archivo.
+    Retorna una tupla (imagen_procesada_bytes, extension_del_archivo).
+    """
    if not st.session_state.get('cloudinary_initialized', False):
        st.error("Cloudinary no está inicializado correctamente")
-        return None
+        return None, None

    try:
+        # Reiniciar el puntero para garantizar la lectura completa
+        image.seek(0)
        if not check_file_size(image, 10):
-            st.error(f"La imagen
-            return None
+            st.error(f"La imagen excede el límite de 10MB")
+            return None, None

        image_content = image.read()

@@ -68,10 +74,11 @@ def process_image(image, width, height):

        processed_url = response['secure_url']
        processed_image = requests.get(processed_url).content
-
+        file_format = response.get('format', 'jpg')  # Se obtiene el formato real procesado
+        return processed_image, file_format
    except Exception as e:
        st.error(f"Error procesando imagen: {e}")
-        return None
+        return None, None


def main():
@@ -125,9 +132,13 @@ def main():
    if uploaded_files:
        st.header("Imágenes Originales")
        cols = st.columns(3)
+        # Guardar el contenido original para evitar que se consuma el stream
+        original_images = []
        for idx, file in enumerate(uploaded_files):
+            file_bytes = file.getvalue()
+            original_images.append((file.name, file_bytes))
            with cols[idx % 3]:
-                st.image(
+                st.image(file_bytes)

        if st.button("Procesar Imágenes"):
            if not st.session_state.get('cloudinary_initialized', False):
@@ -137,24 +148,26 @@ def main():
            processed_images = []
            progress_bar = st.progress(0)

-            for idx,
-
-
+            for idx, (name, img_bytes) in enumerate(original_images):
+                # Crear un nuevo objeto BytesIO para cada imagen
+                img_io = io.BytesIO(img_bytes)
+                with st.spinner(f'Procesando imagen {idx + 1}/{len(original_images)}...'):
+                    processed, file_format = process_image(img_io, width, height)
                if processed:
-                    processed_images.append(processed)
-                    progress_bar.progress((idx + 1) / len(
+                    processed_images.append((processed, file_format))
+                progress_bar.progress((idx + 1) / len(original_images))

            if processed_images:
                st.header("Imágenes Procesadas")
                cols = st.columns(3)
-                for idx, img_bytes in enumerate(processed_images):
+                for idx, (img_bytes, file_format) in enumerate(processed_images):
                    with cols[idx % 3]:
                        st.image(img_bytes)

                zip_buffer = io.BytesIO()
                with zipfile.ZipFile(zip_buffer, 'w') as zip_file:
-                    for idx, img_bytes in enumerate(processed_images):
-                        zip_file.writestr(f'imagen_procesada_{idx}.
+                    for idx, (img_bytes, file_format) in enumerate(processed_images):
+                        zip_file.writestr(f'imagen_procesada_{idx}.{file_format}', img_bytes)

                st.download_button(
                    label="Descargar todas las imágenes",
@@ -165,4 +178,4 @@ def main():


if __name__ == "__main__":
-    main()
+    main()
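In short, `process_image` now returns a `(bytes, format)` tuple and every upload is copied out with `getvalue()` and re-wrapped in a fresh `io.BytesIO`, so the Streamlit upload stream is never consumed twice and the zip entries get the extension the processor actually reports. A minimal sketch of that caller pattern outside Streamlit and Cloudinary, with a hypothetical `fake_process` standing in for `process_image` and made-up image bytes:

```python
import io
import zipfile

def fake_process(img_io):
    """Stand-in for process_image: returns (processed_bytes, file_format)."""
    img_io.seek(0)            # reset the pointer before reading, as the real function now does
    data = img_io.read()
    return data, "jpg"        # format assumed for this sketch

# Hypothetical uploads as (name, bytes) pairs, mirroring original_images in the page
uploads = [("a.png", b"fake-png-bytes"), ("b.jpg", b"fake-jpg-bytes")]

processed = []
for name, raw in uploads:
    img_io = io.BytesIO(raw)  # a fresh buffer per image instead of reusing the upload stream
    out, fmt = fake_process(img_io)
    if out:
        processed.append((out, fmt))

# Package results the same way the page does, using the reported format as the extension
zip_buffer = io.BytesIO()
with zipfile.ZipFile(zip_buffer, "w") as zip_file:
    for idx, (out, fmt) in enumerate(processed):
        zip_file.writestr(f"imagen_procesada_{idx}.{fmt}", out)

print(zipfile.ZipFile(zip_buffer).namelist())  # ['imagen_procesada_0.jpg', 'imagen_procesada_1.jpg']
```

Using the reported format for the extension keeps the downloaded file names consistent with what was actually produced.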
pages/2_Cloudinary_Crop.py
CHANGED
@@ -42,13 +42,21 @@ def cleanup_cloudinary():


def process_image(image, width, height, gravity_option):
+    """
+    Procesa la imagen usando Cloudinary y retorna la imagen procesada.
+    Se reinicia el puntero del stream y se utiliza el atributo 'name'
+    para determinar si se debe preservar la transparencia en PNG.
+    """
    if not st.session_state.get('cloudinary_initialized', False):
        st.error("Cloudinary no está inicializado correctamente")
        return None

    try:
+        # Reinicia el puntero para leer la imagen completa
+        image.seek(0)
+        image_name = getattr(image, 'name', '')
        if not check_file_size(image, 10):
-            st.error(f"
+            st.error(f"{image_name} excede el límite de 10MB")
            return None

        image_content = image.read()
@@ -62,13 +70,14 @@ def process_image(image, width, height, gravity_option):
                "gravity": gravity_option,
                "quality": 100,
                "dpr": 3,
-                "flags": "preserve_transparency" if
+                "flags": "preserve_transparency" if image_name.lower().endswith('.png') else None
            }]
        )

        processed_url = response['secure_url']
        processed_image = requests.get(processed_url).content

+        # Limpia el recurso procesado en Cloudinary
        cloudinary.api.delete_resources([response['public_id']])
        return processed_image
    except Exception as e:
@@ -122,26 +131,26 @@ def main():
    if uploaded_files:
        st.header("Vista Previa Original")
        cols = st.columns(3)
+        # Almacena el contenido original de cada imagen en memoria
+        original_images = []
        for idx, file in enumerate(uploaded_files):
+            file_bytes = file.getvalue()
+            original_images.append((file.name, file_bytes))
            with cols[idx % 3]:
-                st.image(
+                st.image(file_bytes, caption=file.name)

        if st.button("✨ Procesar Imágenes"):
            processed_images = []
            progress_bar = st.progress(0)

-
-
-
-
-
-
-
-
-                except Exception as e:
-                    st.error(f"Error con {file.name}: {str(e)}")
-
-                status.update(label="Proceso completado!", state="complete")
+            # Procesa cada imagen utilizando un nuevo objeto BytesIO
+            for idx, (name, img_bytes) in enumerate(original_images):
+                st.write(f"Procesando: {name}")
+                img_io = io.BytesIO(img_bytes)
+                processed = process_image(img_io, width, height, gravity_option)
+                if processed:
+                    processed_images.append((name, processed))
+                progress_bar.progress((idx + 1) / len(original_images))

            if processed_images:
                st.header("Resultados Finales")
@@ -153,7 +162,7 @@ def main():
                zip_buffer = io.BytesIO()
                with zipfile.ZipFile(zip_buffer, 'w') as zip_file:
                    for name, img_bytes in processed_images:
-
+                        # Se utiliza el nombre original para mantener la extensión
                        zip_file.writestr(f"procesada_{name}", img_bytes)

                st.download_button(
@@ -166,4 +175,4 @@ def main():


if __name__ == "__main__":
-    main()
+    main()
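The crop page now reads the upload's name defensively with `getattr(image, 'name', '')` and only asks Cloudinary to preserve transparency for PNG uploads. A small sketch of that decision in isolation; `transparency_flag` is our illustrative helper, not a function in the page:

```python
import io

def transparency_flag(image):
    """Mirror the diff's condition: preserve transparency only when the upload is a PNG."""
    image_name = getattr(image, "name", "")  # a bare BytesIO has no .name attribute
    return "preserve_transparency" if image_name.lower().endswith(".png") else None

anonymous = io.BytesIO(b"fake-bytes")        # no name: the flag stays None
named = io.BytesIO(b"fake-bytes")
named.name = "logo.PNG"                      # hypothetical upload name

print(transparency_flag(anonymous))          # None
print(transparency_flag(named))              # preserve_transparency
```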
pages/3_Image_Compression_Tool.py
CHANGED
@@ -53,12 +53,10 @@ def optimize_image(image_file):
        return None, None


-def process_filename(original_name):
-    """Genera nombre de archivo optimizado usando pathlib"""
+def process_filename(original_name, optimized_format):
+    """Genera nombre de archivo optimizado usando pathlib y el formato optimizado"""
    path = Path(original_name)
-    new_suffix =
-    if path.suffix.lower() in ['.png', '.PNG'] and Image.open(original_name).mode in ('RGBA', 'LA'):
-        new_suffix = '.png'  # Mantener PNG si tiene transparencia
+    new_suffix = f".{optimized_format.lower()}" if optimized_format else '.jpg'
    return f"{path.stem}_optimizado{new_suffix}"


@@ -89,32 +87,35 @@ def main():
            progress_bar = st.progress(0)
            total_files = len(uploaded_files)

-
-            for idx, file in enumerate(uploaded_files):
-                try:
-                    if file.size > 50 * 1024 * 1024:
-                        st.error(f"Archivo {file.name} excede 50MB")
-                        continue
+            st.info("Optimizando imágenes...")

-
-
+            for idx, file in enumerate(uploaded_files):
+                try:
+                    if file.size > 50 * 1024 * 1024:
+                        st.error(f"Archivo {file.name} excede 50MB")
+                        continue

-
-
-                    reduction = original_size - new_size
-                    total_reduction += reduction
+                    original_size = file.size
+                    optimized_data, format_used = optimize_image(file)

-
-
+                    if optimized_data and format_used:
+                        new_size = len(optimized_data)
+                        reduction = original_size - new_size
+                        total_reduction += reduction

-
-
+                        new_name = process_filename(file.name, format_used)
+                        processed_images.append((new_name, optimized_data, original_size, new_size))

-
-
+                        st.write(f"✅ {file.name} optimizado ({reduction / 1024:.1f} KB ahorrados)")
+                    else:
+                        st.write(f"⚠️ {file.name} no se optimizó porque no se logró reducir el tamaño.")

-
-
+                    progress_bar.progress((idx + 1) / total_files)
+
+                except Exception as e:
+                    st.error(f"Error procesando {file.name}: {str(e)}")
+
+            st.success(f"¡Optimización completada! (Ahorro total: {total_reduction / 1024:.1f} KB)")

        if processed_images:
            # Mostrar resultados
@@ -139,10 +140,9 @@ def main():
                label="📥 Descargar Todas las Imágenes",
                data=zip_buffer.getvalue(),
                file_name=f"imagenes_optimizadas_{datetime.now().strftime('%Y%m%d_%H%M')}.zip",
-                mime="application/zip"
-                type="primary"
+                mime="application/zip"
            )


if __name__ == "__main__":
-    main()
+    main()
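The compression tool's `process_filename` now takes the format reported by the optimizer and builds the suffix from it, instead of re-opening the file by name to inspect its mode. A quick usage check of the new signature exactly as it appears in the diff (the example filenames are invented):

```python
from pathlib import Path

def process_filename(original_name, optimized_format):
    """Genera nombre de archivo optimizado usando pathlib y el formato optimizado"""
    path = Path(original_name)
    new_suffix = f".{optimized_format.lower()}" if optimized_format else '.jpg'
    return f"{path.stem}_optimizado{new_suffix}"

print(process_filename("vacaciones.png", "WEBP"))   # vacaciones_optimizado.webp
print(process_filename("retrato.jpeg", None))       # retrato_optimizado.jpg
```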