Commit a9f36c4 · Parent: d366051
Removed redundant packages, changed output image formatting
app.py CHANGED
@@ -1,24 +1,17 @@
 import gradio as gr
 import numpy as np
 import os
-import six
-import six.moves.urllib as urllib
-import sys
-import tarfile
 import tensorflow as tf
-import zipfile
 import pathlib
 import json
 import matplotlib.pyplot as plt
 from datetime import datetime
-from collections import defaultdict
-from io import StringIO
 from matplotlib import pyplot as plt
 from PIL import Image
-from IPython.display import display
 import io
 import pathlib
 
+Image.MAX_IMAGE_PIXELS = None
 
 RoboflowAPI = os.environ['RoboflowAPI']
 RoboflowCocoAPI = os.environ['RoboflowCocoAPI']
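The only addition in this hunk, Image.MAX_IMAGE_PIXELS = None, disables Pillow's decompression-bomb guard so oversized camera-trap uploads open without a DecompressionBombError. A sketch of a more conservative alternative (my suggestion, not what the commit does):

from PIL import Image

# Pillow warns once an image exceeds MAX_IMAGE_PIXELS and raises an error
# above twice that value. Raising the ceiling instead of removing it keeps
# some protection against maliciously crafted inputs.
Image.MAX_IMAGE_PIXELS = 4 * 89478485  # 4x the default of ~89.5 megapixels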
@@ -45,14 +38,15 @@ def load_model(model_dir):
   model = tf.saved_model.load(str(model_dir))
   model = model.signatures['serving_default']
   return model
-
-os.
-os.
-os.
-os.
-os.
-os.
-os.
+if "Tortoise" not in os.listdir():
+  os.system('mkdir "Tortoise"')
+  os.chdir('Tortoise/')
+  os.system(f'curl -L "https://app.roboflow.com/ds/{RoboflowAPI}" > roboflow.zip; unzip roboflow.zip; rm roboflow.zip')
+  os.chdir('..')
+  os.system('mkdir "COCO"')
+  os.chdir('COCO/')
+  os.system(f'curl -L "https://app.roboflow.com/ds/{RoboflowCocoAPI}" > roboflow.zip; unzip roboflow.zip; rm roboflow.zip')
+  os.chdir('..')
 
 PATH_TO_TEST_IMAGES_DIR = pathlib.Path("COCO" + '/test/')
 TEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob("*.jpg")))
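The deleted lines, truncated to bare os. in the page view and left as-is above, give way to a guarded download: each dataset is fetched from Roboflow with curl and unzipped only when its directory is missing. A stdlib-only sketch of the same step, using a hypothetical helper name fetch_roboflow:

import io, os, urllib.request, zipfile

def fetch_roboflow(ds_key, dest):
    # Skip the download when the dataset directory already exists,
    # mirroring the commit's '"Tortoise" not in os.listdir()' guard.
    if os.path.isdir(dest):
        return
    os.makedirs(dest)
    url = f"https://app.roboflow.com/ds/{ds_key}"
    with urllib.request.urlopen(url) as resp:  # follows redirects like curl -L
        data = resp.read()
    zipfile.ZipFile(io.BytesIO(data)).extractall(dest)

fetch_roboflow(os.environ['RoboflowAPI'], 'Tortoise')
fetch_roboflow(os.environ['RoboflowCocoAPI'], 'COCO')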
@@ -75,8 +69,6 @@ image_path_to_id = {im['file_name']: im['id']
                     for im in test_metadata['images']}
 
 faster_rcnn_model = load_model('../../Faster RCNN/saved_model')
-print(faster_rcnn_model.inputs)
-print(faster_rcnn_model.outputs)
 
 def run_inference_for_single_image(model, image):
   '''Run single image through tensorflow object detection saved_model.
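This hunk only drops two debug prints; load_model already returns the model's serving_default signature. A minimal sketch of how such a signature is typically invoked; the dummy input shape and the output keys are assumptions based on TF Object Detection API conventions, not read from this repo:

import numpy as np
import tensorflow as tf

detect = load_model('../../Faster RCNN/saved_model')
image = np.zeros((1, 640, 640, 3), dtype=np.uint8)  # batch of one dummy image
outputs = detect(tf.convert_to_tensor(image))       # dict of named output tensors
boxes, scores = outputs['detection_boxes'], outputs['detection_scores']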
@@ -256,8 +248,6 @@ for image_path in TEST_IMAGE_PATHS:
 context_features_matrix = np.concatenate(context_features, axis=0)
 context_rcnn_model = load_model('../../Context RCNN/saved_model')
 context_padding_size = 2000
-print(context_rcnn_model.inputs)
-print(context_rcnn_model.outputs)
 
 def run_context_rcnn_inference_for_single_image(
     model, image, context_features, context_padding_size):
@@ -393,6 +383,7 @@ def show_context_rcnn_inference(
 
 
 def segment(image):
+
   plt.rcParams['axes.grid'] = False
   plt.rcParams['xtick.labelsize'] = False
   plt.rcParams['ytick.labelsize'] = False
@@ -408,14 +399,14 @@ def segment(image):
   context_rcnn_image, faster_rcnn_image = show_context_rcnn_inference(
       context_rcnn_model, image.name, context_features_matrix,
       faster_rcnn_output_dict, context_padding_size)
-  plt.subplot(1,2,1)
+  plt.subplot(1,2,1, frameon=False)
   plt.imshow(faster_rcnn_image)
   plt.title('Faster R-CNN')
-  plt.subplot(1,2,2)
+  plt.subplot(1,2,2, frameon=False)
   plt.imshow(context_rcnn_image)
   plt.title('Context R-CNN')
   buf = io.BytesIO()
-  plt.savefig(buf,
+  plt.savefig(buf, dpi=1600, bbox_inches='tight')
   buf.seek(0)
   img = Image.open(buf)
   return img
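Formatting changes only: both subplots are now frameless, and savefig (the deleted line is truncated in the page view) gains dpi=1600 and bbox_inches='tight' before the PNG buffer is reopened as a PIL image. The buffer roundtrip in isolation, as a sketch; note that dpi=1600 produces a very large bitmap, so a smaller value may render just as well:

import io
import matplotlib.pyplot as plt
from PIL import Image

def figure_to_pil(fig, dpi=200):
    # Serialize the figure to an in-memory PNG, then reopen it with Pillow.
    buf = io.BytesIO()
    fig.savefig(buf, format='png', dpi=dpi, bbox_inches='tight')
    buf.seek(0)
    return Image.open(buf)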
@@ -423,4 +414,4 @@ examples = os.listdir('../../Examples')
 examples = ['../../Examples/' + item for item in examples]
 title="Context R-CNN"
 description=f'<p class="has-line-data" data-line-start="0" data-line-end="1">Gradio demo for <strong>Context R-CNN</strong>: <a href="https://arxiv.org/abs/1912.03538">[Paper]</a>.</p><p class="has-line-data" data-line-start="2" data-line-end="3">Context R-CNN is an object detection algorithm that uses contextual features to improve object detection. It is based on Faster R-CNN, but it adds a module that can incorporate contextual features from surrounding frames. This allows Context R-CNN to better identify objects that are partially obscured or that are moving quickly.</p><p class="has-line-data" data-line-start="4" data-line-end="5">The contextual features are stored in a memory bank, which is built up over time as the camera captures images. The memory bank is indexed using an attention mechanism, which allows Context R-CNN to focus on the most relevant contextual features for each object.</p><p class="has-line-data" data-line-start="6" data-line-end="7">Context R-CNN has been shown to improve object detection performance on a variety of datasets, including camera trap data and traffic camera data. It is a promising approach for improving object detection in static monitoring cameras, where the sampling rate is low and the objects may exhibit long-term behavior.</p><p class="has-line-data" data-line-start="8" data-line-end="9">This application of Context R-CNN demonstrates its potential for use in camera trap images of Gopher Tortoises in the wild. It also shows how Context R-CNN can improve object detection performance over existing Faster R-CNN implementations. Both models were trained on exactly the same datasets for a fair comparison. Context R-CNN improves upon Faster R-CNN by building a contextual memory bank; such contextual information can include the position of other objects in the scene, the motion of the objects, and the time of day. The contextual feature matrix used by the Context R-CNN model was built using the Faster R-CNN model.</p><p class="has-line-data" data-line-start="11" data-line-end="12"><strong>The example images provided in this demo were not used to train or test the models.</strong></p><p class="has-line-data" data-line-start="13" data-line-end="14">Note: The model requires the date-taken attribute to be present in the metadata of uploaded images in order to process them correctly.</p><br>Training instructions for Context R-CNN can be found on <a href="https://github.com/prakrutpatel/Context-RCNN-Tortoises">GitHub</a>'
-gr.Interface(fn=segment, inputs = "file",outputs = "
+gr.Interface(fn=segment, inputs = "file",outputs = gr.Image(type="pil", width=50, height=20) ,title=title, description=description ,examples=examples,cache_examples=True).launch()
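The closing line, truncated in the old pane, is restored above from the new pane. Spread out for readability (equivalent wiring, not a change): with inputs="file" the handler receives a file object whose .name attribute is a filesystem path, matching the image.name lookup inside segment, and the width/height on gr.Image appear to affect display size only:

demo = gr.Interface(
    fn=segment,                    # receives a file object; segment uses image.name
    inputs="file",
    outputs=gr.Image(type="pil", width=50, height=20),
    title=title,
    description=description,
    examples=examples,
    cache_examples=True,           # precompute outputs for the example images
)
demo.launch()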
|