lorenzoinnocenti committed
Commit 637a738 · Parent: 75a73c6

updated model

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,67 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # pyenv
+ .python-version
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # vscode
+ .vscode/
+
+ # tmp folder
+ .tmp
README.md CHANGED
@@ -1,12 +1,13 @@
  ---
  title: Infrastructure Damage Assessment
- emoji: 🌍
- colorFrom: red
- colorTo: indigo
+ emoji: 🏠
+ color: green
  sdk: gradio
- sdk_version: 5.24.0
+ sdk_version: 5.21.0
  app_file: app.py
  pinned: false
+ license: mit
+ short_description: Road and buildings damage assessment
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,445 @@
+ import gradio as gr
+ import numpy as np
+ import os
+ from PIL import Image
+ from math import ceil, floor
+ from numpy import ndarray
+ from typing import Callable, List
+ import scipy.signal
+ import onnxruntime as ort
+ from tqdm import tqdm
+
+ # needed to run locally
+ os.environ["GRADIO_TEMP_DIR"] = ".tmp"
+
+ WINDOW_CACHE = dict()
+
+
+ def _spline_window(window_size: int, power: int = 2) -> np.ndarray:
+     """Generates a 1-dimensional spline of order 'power' (typically 2) over the
+     designated window.
+     Args:
+         window_size (int): size of the window of interest
+         power (int, optional): order of the spline. Defaults to 2.
+     Returns:
+         np.ndarray: 1D spline
+     """
+     intersection = int(window_size / 4)
+     wind_outer = (
+         abs(2 * (scipy.signal.windows.triang(window_size))) ** power) / 2
+     wind_outer[intersection:-intersection] = 0
+     wind_inner = (
+         1 - (abs(2 * (scipy.signal.windows.triang(window_size) - 1)) ** power) / 2
+     )
+     wind_inner[:intersection] = 0
+     wind_inner[-intersection:] = 0
+     wind = wind_inner + wind_outer
+     wind = wind / np.average(wind)
+     return wind
+
+
+ def _spline_2d(window_size: int, power: int = 2) -> ndarray:
+     """Makes a 1D window spline function, then combines it to return a 2D window
+     function. The 2D window is useful to smoothly interpolate between patches.
+     Args:
+         window_size (int): size of the window (patch)
+         power (int, optional): order of the spline. Defaults to 2.
+     Returns:
+         np.ndarray: numpy array containing a 2D spline function
+     """
+     wind = _spline_window(window_size, power)
+     # combine two 1D splines into a 2D window via an outer product
+     wind2 = wind[:, None] * wind[None, :]
+     wind2 = wind2 / np.max(wind2)
+     return wind2
+
+
+ def _spline_4d(
+     window_size: int,
+     power: int = 2,
+     batch_size: int = 1,
+     channels: int = 1
+ ) -> ndarray:
+     """Makes a 4D window spline function.
+     Same as the 2D version, but repeated across all channels and the batch."""
+     global WINDOW_CACHE
+     # memoization: the same window is needed for every batch, so build it once
+     key = f"{window_size}_{power}_{batch_size}_{channels}"
+     if key in WINDOW_CACHE:
+         wind4 = WINDOW_CACHE[key]
+     else:
+         wind2 = _spline_2d(window_size, power)
+         wind4 = wind2[None, None, :, :] * np.ones((batch_size, channels, 1, 1))
+         WINDOW_CACHE[key] = wind4
+     return wind4
+
+
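+ # Note: with the tiling step used below (window_size // subdivisions), the
+ # spline weights of overlapping tiles sum to a roughly constant value, so
+ # accumulating weighted predictions produces a seamless blend.
+
+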
+ def pad_image(image: ndarray, tile_size: int, subdivisions: int):
+     """Adds reflected borders to the given image for a "valid" tiling pattern
+     according to tile_size and subdivisions.
+     Image is expected as a numpy array with shape (channels, height, width).
+     Args:
+         image (np.ndarray): input image, 3D channels-first array
+         tile_size (int): size of a single patch, useful to compute padding
+         subdivisions (int): amount of overlap, useful for padding
+     Returns:
+         Tuple[np.ndarray, List[int]]: the padded image and the per-side padding
+         amounts (top, bottom, left, right)
+     """
+     step = tile_size // subdivisions
+     _, in_h, in_w = image.shape
+     # pad each spatial dimension up to a multiple of the step
+     pad_h = step - (in_h % step)
+     pad_w = step - (in_w % step)
+     pad_h_l = pad_h // 2
+     pad_h_r = (pad_h // 2) + (pad_h % 2)
+     pad_w_l = pad_w // 2
+     pad_w_r = (pad_w // 2) + (pad_w % 2)
+     # extra border so that every pixel is covered by a full tile
+     pad = int(round(tile_size * (1 - 1.0 / subdivisions)))
+     image = np.pad(
+         image,
+         ((0, 0), (pad + pad_h_l, pad + pad_h_r), (pad + pad_w_l, pad + pad_w_r)),
+         mode="reflect",
+     )
+     return image, [pad + pad_h_l, pad + pad_h_r, pad + pad_w_l, pad + pad_w_r]
+
+
+ def unpad_image(padded_image: ndarray, pads) -> ndarray:
+     """Reverts the changes made by 'pad_image' by cropping the same padding away.
+
+     Args:
+         padded_image (np.ndarray): image with padding still applied
+         pads: per-side padding amounts (top, bottom, left, right), as returned
+             by 'pad_image'
+
+     Returns:
+         np.ndarray: image without padding
+     """
+     pad_top, pad_bottom, pad_left, pad_right = pads
+     # crop the image on every side, handling both 2D and 3D inputs
+     n_dims = len(padded_image.shape)
+     if n_dims == 2:
+         result = padded_image[pad_top:-pad_bottom, pad_left:-pad_right]
+     elif n_dims == 3:
+         result = padded_image[:, pad_top:-pad_bottom, pad_left:-pad_right]
+     else:
+         raise ValueError(
+             f"padded_image has {n_dims} dimensions, expected 2 or 3.")
+     return result
+
+
+ def windowed_generator(
+     padded_image: ndarray, window_size: int, subdivisions: int, batch_size: int = None
+ ):
+     """Generator that yields tiles grouped by batch size.
+     Args:
+         padded_image (np.ndarray): input image to be processed (already padded), channels-first
+         window_size (int): size of a single patch
+         subdivisions (int): subdivision count on each patch to compute the step
+         batch_size (int, optional): amount of patches in each batch. Defaults to None.
+
+     Yields:
+         Tuple[List[tuple], np.ndarray]: list of coordinates and respective patches as a single batch array
+     """
+     step = window_size // subdivisions
+     channels, height, width = padded_image.shape
+     batch_size = batch_size or 1
+     batch = []
+     coords = []
+     for x in range(0, height - window_size + 1, step):
+         for y in range(0, width - window_size + 1, step):
+             coords.append((x, y))
+             # extract the tile, channels-first
+             tile = padded_image[:, x: x + window_size, y: y + window_size]
+             batch.append(tile)
+             # yield the batch once full and reset the lists right after
+             if len(batch) == batch_size:
+                 yield coords, np.stack(batch)
+                 coords.clear()
+                 batch.clear()
+     # handle the last (possibly incomplete) batch
+     if len(batch) > 0:
+         yield coords, np.stack(batch)
+
+
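+ # With step = window_size // subdivisions, the generator produces
+ # (padded_size - window_size) // step + 1 tiles along each spatial axis.
+
+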
+ def reconstruct(
+     canvas: ndarray, tile_size: int, coords: List[tuple], predictions: ndarray
+ ) -> ndarray:
+     """Helper function that adds each batch of predictions onto the given canvas,
+     reconstructing the final result batch after batch.
+     Args:
+         canvas (np.ndarray): container for the final image
+         tile_size (int): size of a single patch
+         coords (List[tuple]): list of pixel coordinates corresponding to the batch items
+         predictions (np.ndarray): array containing patch predictions, shape
+             (batch, num_classes, tile_size, tile_size)
+
+     Returns:
+         np.ndarray: the updated canvas, shape (num_classes, padded_h, padded_w)
+     """
+     for (x, y), patch in zip(coords, predictions):
+         n_dims = len(canvas.shape)
+         # accumulate the weighted patch, handling both 2D and 3D canvases
+         if n_dims == 2:
+             canvas[x: x + tile_size, y: y + tile_size] += patch
+         elif n_dims == 3:
+             canvas[:, x: x + tile_size, y: y + tile_size] += patch
+         else:
+             raise ValueError(
+                 f"Canvas has {n_dims} dimensions, expected 2 or 3.")
+     return canvas
+
+
+ def predict_smooth_windowing(
+     image: ndarray,
+     tile_size: int,
+     subdivisions: int,
+     prediction_fn: Callable,
+     batch_size: int = 1,
+     out_dim: int = 1,
+ ) -> np.ndarray:
+     """Predicts a large image in one go, dividing it into square, fixed-size tiles
+     and smoothly interpolating between them to produce a single, coherent output
+     with the same spatial dimensions.
+     Args:
+         image (np.ndarray): input image, expected as a 3D channels-first array
+         tile_size (int): size of each square tile
+         subdivisions (int): number of subdivisions over the single tile for overlaps
+         prediction_fn (Callable): callback that takes the input batch and returns an output tensor
+         batch_size (int, optional): size of each batch. Defaults to 1.
+         out_dim (int, optional): number of output channels. Defaults to 1.
+
+     Returns:
+         np.ndarray: array with dimensions (out_dim, h, w), containing smooth predictions
+     """
+     img, pads = pad_image(image=image, tile_size=tile_size,
+                           subdivisions=subdivisions)
+     spline = _spline_4d(window_size=tile_size, power=2)
+     canvas = np.zeros((out_dim, img.shape[1], img.shape[2]))
+     loop = tqdm(windowed_generator(
+         padded_image=img,
+         window_size=tile_size,
+         subdivisions=subdivisions,
+         batch_size=batch_size,
+     ))
+     for coords, batch in loop:
+         pred_batch = prediction_fn(batch)
+         # weight each prediction by the spline window before accumulating
+         pred_batch = pred_batch * spline
+         canvas = reconstruct(
+             canvas, tile_size=tile_size, coords=coords, predictions=pred_batch
+         )
+     prediction = unpad_image(canvas, pads=pads)
+     return prediction
+
+
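+ # Worked example (hypothetical sizes): for a 3x1000x1200 input with
+ # tile_size=256 and subdivisions=2, the step is 128, so the image is
+ # reflect-padded to 3x1280x1536 and processed as 9 x 11 = 99 overlapping
+ # tiles, each weighted by the spline window and summed onto the canvas.
+
+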
+ def center_pad(x, padding, div_factor=32, mode="reflect"):
+     """Center-pads a (batch, channels, height, width) array, with possibly
+     different padding per input (e.g. per city). The output size is at least
+     size + 2 * padding on each spatial dimension and divisible by div_factor.
+     (Currently unused in this app.)"""
+     size_x = x.shape[3]
+     size_y = x.shape[2]
+     # minimum acceptable size once the requested padding is applied
+     min_size_x = size_x + 2 * padding
+     min_size_y = size_y + 2 * padding
+     # round up to the next multiple of div_factor
+     new_size_x = int(ceil(min_size_x / div_factor) * div_factor)
+     new_size_y = int(ceil(min_size_y / div_factor) * div_factor)
+     # split the padding evenly between the two sides of each dimension
+     pad_x = new_size_x - size_x
+     pad_y = new_size_y - size_y
+     pad_left = int(floor(pad_x / 2))
+     pad_right = int(ceil(pad_x / 2))
+     pad_top = int(floor(pad_y / 2))
+     pad_bottom = int(ceil(pad_y / 2))
+     # numpy's "reflect" mode supports pad widths larger than the input size
+     # (by repeated reflection), so a single pad call is sufficient
+     padded = np.pad(
+         x,
+         (
+             (0, 0),
+             (0, 0),
+             (pad_top, pad_bottom),
+             (pad_left, pad_right),
+         ),
+         mode=mode,
+     )
+     paddings = (pad_top, pad_bottom, pad_left, pad_right)
+     return padded, paddings
+
+
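+ # Example (hypothetical numbers): a 500-pixel dimension with padding=32 and
+ # div_factor=32 needs at least 564 pixels, which rounds up to 576, so 76
+ # pixels of padding are split 38/38 between the two sides.
+
+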
+ class ChangeDetectionModel:
+     def __init__(self):
+         path = "assets/models/change_detection.onnx"
+         self.model = ort.InferenceSession(path)
+         self.size = 256
+         self.subdivisions = 2
+         self.batch_size = 2
+         self.out_dim = 1
+
+     def forward(self, x):
+         assert x.ndim == 3, "Expected 3D tensor"
+         # scale pixel values to [0, 1]
+         x = x / 255
+         # cast to fp32
+         x = x.astype(np.float32)
+         pred = predict_smooth_windowing(
+             image=x,
+             tile_size=self.size,
+             subdivisions=self.subdivisions,
+             prediction_fn=self.callback,
+             batch_size=self.batch_size,
+             out_dim=self.out_dim
+         )
+         # apply sigmoid
+         pred = 1 / (1 + np.exp(-pred))
+         # quantize the [0, 1] sigmoid output to the four damage levels 0-3
+         # (e.g. a sigmoid output of 0.4 becomes 0.4 * 3 = 1.2, rounded to 1)
+         pred = pred * 3
+         pred = np.round(pred)
+         # return the single-channel damage map
+         return pred[0]
+
+     def callback(self, x: ndarray) -> ndarray:
+         # run onnx inference
+         out = self.model.run(None, {"input": x})[0]
+         return out
+
+
+ class LocalizationModel:
+     def __init__(self):
+         path = "assets/models/localization.onnx"
+         self.model = ort.InferenceSession(path)
+         self.size = 384
+         self.subdivisions = 2
+         self.batch_size = 2
+         self.out_dim = 3
+
+     def forward(self, x):
+         assert x.ndim == 3, "Expected 3D tensor"
+         # scale pixel values to [0, 1]
+         x = x / 255
+         # cast to fp32
+         x = x.astype(np.float32)
+         pred = predict_smooth_windowing(
+             image=x,
+             tile_size=self.size,
+             subdivisions=self.subdivisions,
+             prediction_fn=self.callback,
+             batch_size=self.batch_size,
+             out_dim=self.out_dim
+         )
+         # pick the most likely class per pixel
+         pred = np.argmax(pred, axis=0)
+         return pred
+
+     def callback(self, x: ndarray) -> ndarray:
+         # run onnx inference
+         out = self.model.run(None, {"input": x})[0]
+         return out
+
+
+ def infer(image1, image2):
+     localization_model = LocalizationModel()
+     change_detection_model = ChangeDetectionModel()
+     # resize image1 to match image2
+     image1 = image1.resize(image2.size)
+     # halve the resolution for faster inference
+     image1 = image1.resize((image1.width // 2, image1.height // 2))
+     image2 = image2.resize((image2.width // 2, image2.height // 2))
+     # convert images to numpy arrays
+     image1 = np.array(image1)
+     image2 = np.array(image2)
+     # from HWC to CHW
+     image1_array = np.transpose(image1, (2, 0, 1))
+     image2_array = np.transpose(image2, (2, 0, 1))
+     output_image1 = localization_model.forward(image1_array)
+     # concatenate pre and post images along the channel axis
+     cat_image_array = np.concatenate([image1_array, image2_array], axis=0)
+     output_image2 = change_detection_model.forward(cat_image_array)
+     output_image1_color = np.zeros(
+         (output_image1.shape[0], output_image1.shape[1], 3))
+     # colorize the localization classes
+     output_image1_color[output_image1 == 0] = [0, 0, 0]  # Class 0: bg
+     output_image1_color[output_image1 == 1] = [150, 150, 150]  # Class 1: road
+     output_image1_color[output_image1 == 2] = [200, 0, 0]  # Class 2: house
+     # blend the class colors with the pre-disaster image
+     output_image1_color = (output_image1_color * 0.5 + image1 * 0.5)
+     output_image1 = Image.fromarray(output_image1_color.astype(np.uint8))
+     output_image2_color = np.zeros(
+         (output_image2.shape[0], output_image2.shape[1], 3))
+     # colorize the change-detection classes
+     output_image2_color[output_image2 == 0] = [0, 0, 0]  # Class 0: no change
+     output_image2_color[output_image2 == 1] = [0, 255, 0]  # Class 1: minor change
+     output_image2_color[output_image2 == 2] = [255, 255, 0]  # Class 2: major change
+     output_image2_color[output_image2 == 3] = [255, 0, 0]  # Class 3: destroyed
+     # blend the class colors with the post-disaster image
+     output_image2_color = output_image2_color * 0.5 + image2 * 0.5
+     output_image2 = Image.fromarray(output_image2_color.astype(np.uint8))
+     return output_image1, output_image2
+
+
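+ # Note on input shapes: the localization model consumes a single 3-channel
+ # image, while the change-detection model consumes the 6-channel
+ # concatenation of the pre- and post-disaster images.
+
+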
+ # Define sample image pairs
+ sample_images = [
+     ["assets/data/bata_1_pre.png", "assets/data/bata_1_post.png"],
+     ["assets/data/bata_2_pre.png", "assets/data/bata_2_post.png"],
+     ["assets/data/beirut_1_pre.png", "assets/data/beirut_1_post.png"]
+ ]
+
+ with gr.Blocks() as demo:
+     gr.Markdown("## Infrastructure Damage Assessment")
+     # description
+     gr.Markdown(
+         "This is a demo for infrastructure damage assessment using satellite images. "
+         "It contains two models: one for localization and the other for change detection. "
+         "The localization model segments the image into three classes: background (in black), road (in grey), and houses (in red). "
+         "The change detection model detects changes between the two images. "
+         "Its output is colored as follows: no change (in black), minor change (in green), major change (in yellow), and destroyed (in red). "
+         "The output of the localization model (on the left) is blended with the pre-disaster image to highlight the areas of interest. "
+         "The output of the change detection model (on the right) is blended with the post-disaster image to highlight the changes. "
+         "You can upload your own images or use the sample images provided below."
+     )
+     gr.Markdown(
+         "Note: the models run at half resolution for faster inference, "
+         "so the outputs will be less accurate than those of the full-resolution models. "
+         "Inference still takes a few minutes, so please be patient."
+     )
+     with gr.Row():  # input images side by side
+         with gr.Column(scale=1):
+             input_image1 = gr.Image(label="Pre-disaster Image", type="pil")
+         with gr.Column(scale=1):
+             input_image2 = gr.Image(label="Post-disaster Image", type="pil")
+     with gr.Row():  # output images side by side
+         output_image1 = gr.Image(label="Roads and buildings localization", type="pil")
+         output_image2 = gr.Image(label="Change detection", type="pil")
+     submit_button = gr.Button("Run Inference")
+     examples = gr.Examples(examples=sample_images, inputs=[input_image1, input_image2])
+     submit_button.click(fn=infer, inputs=[input_image1, input_image2],
+                         outputs=[output_image1, output_image2])
+
+ demo.launch()
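
For reference, a minimal sketch of how the pipeline above could be driven headlessly, outside the Gradio UI. It assumes the model code has been split into a hypothetical `inference.py` module (importing `app.py` directly would call `demo.launch()`), and that it runs from the repository root so the relative asset and model paths resolve:

```python
# Headless usage sketch; inference.py is a hypothetical module holding infer().
from PIL import Image

from inference import infer

# one of the sample pairs shipped with this commit
pre = Image.open("assets/data/bata_1_pre.png").convert("RGB")
post = Image.open("assets/data/bata_1_post.png").convert("RGB")

localization_overlay, change_overlay = infer(pre, post)
localization_overlay.save("localization_overlay.png")  # illustrative name
change_overlay.save("change_overlay.png")              # illustrative name
```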
assets/data/bata_1_post.png ADDED

Git LFS Details

  • SHA256: 54c7ef13d115ad325cf70335fed04e5aefe1dbb4f45bc07a0065e337b4b6a29e
  • Pointer size: 132 Bytes
  • Size of remote file: 5.49 MB
assets/data/bata_1_pre.png ADDED

Git LFS Details

  • SHA256: 7f2e9a21f5f0597fddea814ec8afd39c7ec29c6df455403810bdd4f95ec1e964
  • Pointer size: 132 Bytes
  • Size of remote file: 6.47 MB
assets/data/bata_2_post.png ADDED

Git LFS Details

  • SHA256: 2dc33d37211ededc57cbbdcaae6a5290d54f662730e72f380b5a5c22af07c427
  • Pointer size: 132 Bytes
  • Size of remote file: 3.5 MB
assets/data/bata_2_pre.png ADDED

Git LFS Details

  • SHA256: 0cbfeadee6054cdc195e50887327477d1e0c72f28c11a75e1db949e3b6885c6b
  • Pointer size: 132 Bytes
  • Size of remote file: 4.41 MB
assets/data/beirut_1_post.png ADDED

Git LFS Details

  • SHA256: d6acd830409439cf8f48cf9115ca153d1d5ba118aa915e271cc9a7f9d32f9483
  • Pointer size: 132 Bytes
  • Size of remote file: 8.33 MB
assets/data/beirut_1_pre.png ADDED

Git LFS Details

  • SHA256: 2bd1c1a6780c38ad69a64ff5ba0e9ec8d50aa5292dc7823ae92aa94647c6c0f7
  • Pointer size: 132 Bytes
  • Size of remote file: 9.51 MB
assets/data/beirut_2_post.png ADDED

Git LFS Details

  • SHA256: 839b644cea98694c7822c5b46ae0f92d75c5d7834369f492ac855cc1346a2ffb
  • Pointer size: 132 Bytes
  • Size of remote file: 5.06 MB
assets/data/beirut_2_pre.png ADDED

Git LFS Details

  • SHA256: 65d856a3f035d3203ca0c0536f51a59c8678cc45e7176dbf847bf1732c45fb00
  • Pointer size: 132 Bytes
  • Size of remote file: 5.42 MB
assets/data/croppe.png ADDED

Git LFS Details

  • SHA256: 3996e5c4eb884a0216a6bb2a34236d78e68ed2ab98a0603fb14e45ef09116f90
  • Pointer size: 132 Bytes
  • Size of remote file: 2.8 MB
assets/models/change_detection.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02b62f43110535754fea7a6aedc9a2c1719b935830b220853d17afe8565297dd
+ size 230737192
assets/models/localization.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:545413e05e89b0b3382b4aade9e8f2a5f8c825d5c4e91b77b6c4f57401ff3086
+ size 295
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ gradio==5.21.0
+ numpy==2.2.4
+ onnxruntime==1.21.0
+ pandas==2.2.3
+ pillow==11.1.0
+ scipy==1.15.2
+ tqdm==4.67.1