Upload folder using huggingface_hub
- .envrc +0 -0
- README.md +3 -9
- detect.tflite +3 -0
- flagged/Input Image/7a7aebc69b43ac36aac5/file_count259.jpg +0 -0
- flagged/Input Image/f6b627eb8f74bd6e4332/Thc.jpg +0 -0
- flagged/Output Image/085b79dad6d37cf75318/image.webp +0 -0
- flagged/log.csv +3 -0
- gradio_image.py +162 -0
- labelmap.txt +5 -0
- requirements.txt +3 -0
.envrc
ADDED
File added with no content.
README.md
CHANGED
@@ -1,12 +1,6 @@
 ---
-title:
-
-colorFrom: red
-colorTo: purple
+title: test
+app_file: gradio_image.py
 sdk: gradio
-sdk_version: 4.
-app_file: app.py
-pinned: false
+sdk_version: 4.28.3
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
detect.tflite
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14264c619e14c3ff3f28009cde01d264b2349201395ab9023e06380c78d18760
+size 5837076
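These three lines are a Git LFS pointer: the 5.8 MB model itself is stored out of band and identified by its SHA-256. A minimal sketch, assuming detect.tflite has actually been pulled to the working directory, to check that the local file matches the oid recorded in the pointer:

import hashlib

# Hash the local model file and compare it to the oid from the LFS pointer above.
with open("detect.tflite", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

expected = "14264c619e14c3ff3f28009cde01d264b2349201395ab9023e06380c78d18760"
print("match" if digest == expected else "mismatch")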
flagged/Input Image/7a7aebc69b43ac36aac5/file_count259.jpg
ADDED
flagged/Input Image/f6b627eb8f74bd6e4332/Thc.jpg
ADDED
flagged/Output Image/085b79dad6d37cf75318/image.webp
ADDED
flagged/log.csv
ADDED
@@ -0,0 +1,3 @@
+Input Image,Output Image,flag,username,timestamp
+flagged/Input Image/f6b627eb8f74bd6e4332/Thc.jpg,,,,2024-05-02 01:58:17.051988
+flagged/Input Image/7a7aebc69b43ac36aac5/file_count259.jpg,flagged/Output Image/085b79dad6d37cf75318/image.webp,,,2024-05-02 03:14:56.188933
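Each row records one use of Gradio's flagging feature: the saved input path, the saved output path (empty in the first row, where no output was stored), unused flag/username columns, and a timestamp. A minimal sketch for reading the log back:

import csv

# Print each flagged input and whether an output image was saved alongside it.
with open("flagged/log.csv", newline="") as f:
    for row in csv.DictReader(f):
        print(row["Input Image"], "->", row["Output Image"] or "(no output saved)")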
gradio_image.py
ADDED
@@ -0,0 +1,162 @@
+import gradio as gr
+import numpy as np
+import cv2
+from tensorflow.lite.python.interpreter import Interpreter
+
+
+def tflite_detect_images(
+    modelpath,
+    lblpath,
+    image_path,
+    min_conf=0.1,
+):
+    # Load the label map into memory
+    with open(lblpath, "r") as f:
+        labels = [line.strip() for line in f.readlines()]
+
+    # Load the TensorFlow Lite model into memory
+    interpreter = Interpreter(model_path=modelpath)
+    interpreter.allocate_tensors()
+
+    # Get model details
+    input_details = interpreter.get_input_details()
+    output_details = interpreter.get_output_details()
+    height = input_details[0]["shape"][1]
+    width = input_details[0]["shape"][2]
+
+    float_input = input_details[0]["dtype"] == np.float32
+
+    input_mean = 127.5
+    input_std = 127.5
+
+    # Load image and resize to expected shape [1xHxWx3]
+    image = cv2.imread(image_path)
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    imH, imW, _ = image.shape
+    image_resized = cv2.resize(image, (width, height))
+    input_data = np.expand_dims(image_resized, axis=0)
+
+    # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
+    if float_input:
+        input_data = (np.float32(input_data) - input_mean) / input_std
+
+    # Perform the actual detection by running the model with the image as input
+    interpreter.set_tensor(input_details[0]["index"], input_data)
+    interpreter.invoke()
+
+    # Retrieve detection results
+    boxes = interpreter.get_tensor(output_details[1]["index"])[0]    # Bounding box coordinates of detected objects
+    classes = interpreter.get_tensor(output_details[3]["index"])[0]  # Class index of detected objects
+    scores = interpreter.get_tensor(output_details[0]["index"])[0]   # Confidence of detected objects
+
+    # Loop over all detections and draw a box when confidence is above the minimum threshold
+    for i in range(len(scores)):
+        if (scores[i] > min_conf) and (scores[i] <= 1.0):
+            # The interpreter can return coordinates outside the image
+            # dimensions, so clamp them with max() and min().
+            ymin = int(max(1, (boxes[i][0] * imH)))
+            xmin = int(max(1, (boxes[i][1] * imW)))
+            ymax = int(min(imH, (boxes[i][2] * imH)))
+            xmax = int(min(imW, (boxes[i][3] * imW)))
+
+            cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (10, 255, 0), 2)
+
+            # (The committed file also contains a large commented-out block
+            # that looked up labels[int(classes[i])] and drew the class name
+            # next to each box, rotated 90 degrees via cv2.getRotationMatrix2D
+            # and cv2.warpAffine; it is disabled.)
+
+    return image
+
+
+def show_image(img):
+    PATH_TO_MODEL = "detect.tflite"  # Path to .tflite model file
+    PATH_TO_LABELS = "labelmap.txt"  # Path to labelmap.txt file
+    min_conf_threshold = 0.3  # Confidence threshold (try 0.01 if you don't see any detections)
+
+    # Run inference
+    cv_image = tflite_detect_images(
+        PATH_TO_MODEL, PATH_TO_LABELS, img, min_conf_threshold
+    )
+
+    return cv_image
+
+
+app = gr.Interface(
+    fn=show_image,
+    inputs=gr.Image(label="Input Image", type="filepath"),
+    outputs=gr.Image(label="Output Image", type="filepath"),
+)
+
+app.launch(share=True)
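Note that the script hard-codes the SSD output layout: output_details[1] for boxes, [3] for classes, [0] for scores. TFLite detection exports differ in this ordering, so it is worth confirming it against the actual model. A minimal sketch, assuming detect.tflite sits in the working directory:

from tensorflow.lite.python.interpreter import Interpreter

# List the model's output tensors so the indices used in
# tflite_detect_images can be verified for this particular export.
interpreter = Interpreter(model_path="detect.tflite")
interpreter.allocate_tensors()
for i, detail in enumerate(interpreter.get_output_details()):
    print(i, detail["name"], detail["shape"], detail["dtype"])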
labelmap.txt
ADDED
@@ -0,0 +1,5 @@
+neokit
+negative
+positive
+invalid
+invalidline
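One label per line, so a detector class index doubles as a line index into this file; this is the lookup the commented-out labeling code in gradio_image.py performs. A minimal sketch, assuming class indices are zero-based into this file:

# Map a detector class index to its name; index 0 is "neokit".
with open("labelmap.txt") as f:
    labels = [line.strip() for line in f]

print(labels[2])  # -> "positive"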
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+gradio==4.28.3
+opencv-python==4.9.0.80
+tensorflow==2.16.1
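A minimal sketch to confirm, before launching the app locally, that the installed packages match these pins (importlib.metadata reports installed distribution versions):

from importlib.metadata import version

# Compare installed versions against the requirements.txt pins.
pins = [("gradio", "4.28.3"), ("opencv-python", "4.9.0.80"), ("tensorflow", "2.16.1")]
for pkg, pinned in pins:
    installed = version(pkg)
    status = "ok" if installed == pinned else f"pin is {pinned}"
    print(pkg, installed, f"({status})")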