Commit: add a slider for threshold
app.py
CHANGED
@@ -16,7 +16,7 @@ output_layer_ir = compiled_model.output("boxes")
 #####
 #Inference
 #####
-def predict(img: np.ndarray) -> str:
+def predict(img: np.ndarray, threshold) -> str:
     # input: numpy array of image in RGB (see defaults for https://www.gradio.app/docs/#image)
 
     # Text detection models expect an image in BGR format.
@@ -34,7 +34,7 @@ def predict(img: np.ndarray) -> str:
     # Remove zero only boxes.
     boxes = boxes[~np.all(boxes == 0, axis=1)]
 
-    result = convert_result_to_image(image, resized_image, boxes, conf_labels=False)
+    result = convert_result_to_image(image, resized_image, boxes, threshold=threshold, conf_labels=False)
 
     #plt.figure(figsize=(10, 6))
     #plt.axis("off")
@@ -106,7 +106,10 @@ enable_queue=True
 
 gr.Interface(
     fn=predict,
-    inputs=
+    inputs=[
+        gr.inputs.Image(),
+        gr.Slider(minimum=.1, maximum=1, value=.3)
+    ],
     outputs=gr.outputs.Image(type='filepath'),
     title=title,
     description=description,
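For context, a minimal self-contained sketch of how the new slider value reaches predict(): Gradio passes component values to the function positionally, so the Image component feeds img and the Slider feeds threshold. This is only an illustration under assumptions, not the Space's code: the OpenVINO inference and the convert_result_to_image call are replaced with a pass-through body, the title string is a placeholder, and the current gr.Image()/gr.Slider() classes stand in for the older gr.inputs/gr.outputs namespace used in the diff.

import numpy as np
import gradio as gr

def predict(img: np.ndarray, threshold: float) -> np.ndarray:
    # In the real app this would run text detection and pass `threshold` to
    # convert_result_to_image (presumably to drop low-confidence boxes).
    # Here we simply echo the input image back.
    return img

gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(),                                      # -> img (RGB numpy array)
        gr.Slider(minimum=0.1, maximum=1.0, value=0.3),  # -> threshold
    ],
    outputs=gr.Image(),
    title="Text detection with a confidence threshold",  # placeholder title
).launch()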