Alessio Grancini
committed on
Update app.py
app.py
CHANGED
@@ -7,6 +7,9 @@ import os
 import torch
 import utils
 import plotly.graph_objects as go
+from io import BytesIO
+from PIL import Image
+import base64
 
 from image_segmenter import ImageSegmenter
 from monocular_depth_estimator import MonocularDepthEstimator
@@ -151,6 +154,7 @@ def get_detection_data(image):
     def decode_base64_image(base64_string):
         """Decodes Base64 string into a NumPy image."""
         try:
+            print(f"🔍 Received Base64 String (Truncated): {base64_string[:50]}...") # Debugging
             img_data = base64.b64decode(base64_string)
             img = Image.open(BytesIO(img_data))
             img = np.array(img)
@@ -161,23 +165,27 @@ def get_detection_data(image):
 
     def encode_base64_image(image):
         """Encodes a NumPy image into a Base64 string."""
-
-
-
-
+        try:
+            _, buffer = cv2.imencode('.png', image)
+            return base64.b64encode(buffer).decode("utf-8")
+        except Exception as e:
+            print(f"🚨 Error encoding image to Base64: {e}")
+            return None
 
     try:
-        if isinstance(image, str):
-
-
-
+        if not isinstance(image, str):
+            print("🚨 Error: Expected Base64 string but received:", type(image))
+            return {"error": "Invalid input format. Expected Base64-encoded image."}
+
+        image = decode_base64_image(image)
+        if image is None:
+            return {"error": "Base64 decoding failed. Ensure correct encoding."}
 
         # Resize image
         image = utils.resize(image)
 
         # Extract dimensions
-
-        height, width = image.shape[:2]
+        height, width = image.shape[:2]
 
         # Get detections and depth
         image_segmentation, objects_data = img_seg.predict(image)
@@ -187,55 +195,13 @@ def get_detection_data(image):
         segmentation_b64 = encode_base64_image(image_segmentation)
         depth_b64 = encode_base64_image(depth_colormap)
 
-
-
-        for data in objects_data:
-            cls_id, cls_name, cls_center, cls_mask, cls_clr = data
-            masked_depth, mean_depth = utils.get_masked_depth(depthmap, cls_mask)
-
-            y_indices, x_indices = np.where(cls_mask > 0)
-            if len(x_indices) > 0 and len(y_indices) > 0:
-                x1, x2 = np.min(x_indices), np.max(x_indices)
-                y1, y2 = np.min(y_indices), np.max(y_indices)
-            else:
-                continue
-
-            # Normalize coordinates
-            bbox_normalized = [
-                float(x1 / width),
-                float(y1 / height),
-                float(x2 / width),
-                float(y2 / height),
-            ]
-
-            detection = {
-                "id": int(cls_id),
-                "category": cls_name,
-                "center": [
-                    float(cls_center[0] / width),
-                    float(cls_center[1] / height),
-                ],
-                "bbox": bbox_normalized,
-                "depth": float(mean_depth * 10),  # Convert to meters
-                "color": [float(c / 255) for c in cls_clr],
-                "mask": cls_mask.tolist(),
-                "confidence": 1.0,  # Placeholder confidence
-            }
-            detections.append(detection)
-
-        # Camera parameters
-        camera_params = {
-            "fx": getattr(depth_estimator, "fx_depth", 0),
-            "fy": getattr(depth_estimator, "fy_depth", 0),
-            "cx": getattr(depth_estimator, "cx_depth", width // 2),
-            "cy": getattr(depth_estimator, "cy_depth", height // 2),
-        }
+        if segmentation_b64 is None or depth_b64 is None:
+            return {"error": "Failed to encode output images."}
 
         return {
-            "detections": detections,
-            "depth_map": depth_b64,
-            "segmentation": segmentation_b64,
-            "camera_params": camera_params,
+            "detections": objects_data, # Keeping as original
+            "depth_map": depth_b64,
+            "segmentation": segmentation_b64,
             "image_size": {"width": width, "height": height},
         }
 
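For context on the change: the new decode_base64_image / encode_base64_image helpers are a Base64 PNG round trip built on PIL and cv2. The snippet below is not part of app.py; it is a minimal standalone sketch of that round trip, assuming only numpy, opencv-python, and Pillow are installed, with a synthetic test image standing in for the client-supplied data.

# Standalone sketch (not from the repo): round-trip check of the Base64 helpers
# introduced in this commit. Assumes numpy, opencv-python and Pillow are available.
import base64
from io import BytesIO

import cv2
import numpy as np
from PIL import Image


def encode_base64_image(image):
    """Encodes a NumPy image into a Base64 PNG string (mirrors the helper in the diff)."""
    _, buffer = cv2.imencode(".png", image)
    return base64.b64encode(buffer).decode("utf-8")


def decode_base64_image(base64_string):
    """Decodes a Base64 string back into a NumPy image (mirrors the helper in the diff)."""
    img_data = base64.b64decode(base64_string)
    return np.array(Image.open(BytesIO(img_data)))


if __name__ == "__main__":
    # Synthetic stand-in for the image a client would send to get_detection_data.
    original = np.zeros((64, 64, 3), dtype=np.uint8)
    original[16:48, 16:48] = (255, 0, 0)  # blue square in OpenCV's BGR convention

    payload = encode_base64_image(original)
    restored = decode_base64_image(payload)

    # cv2.imencode treats the array as BGR while PIL decodes the PNG as RGB,
    # so the round trip comes back with the channel order reversed.
    assert restored.shape == original.shape
    assert np.array_equal(restored[..., ::-1], original)
    print("round trip OK, payload length:", len(payload))

The same decode step applies on the client side to the "depth_map" and "segmentation" fields of the dictionary returned by get_detection_data, since both are produced by encode_base64_image.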