feat: Add ArUco Marker Detector functionality and UI integration
- Introduced the ArUco Marker Detector to the application, allowing users to detect ArUco markers in the video feed.
- Added a selection interface for various ArUco dictionaries, enhancing flexibility in marker detection.
- Updated the documentation to include detailed descriptions and usage examples for the ArUco Marker Detector.
- Implemented the detection logic in the OpenCVUtils class, ensuring efficient processing of detected markers.
- app.py +55 -0
- src/opencv_utils.py +114 -27
- src/tkinter_app.py +72 -0
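
The core of the change is the detection flow added to OpenCVUtils. As a standalone illustration (not code from this commit), the sketch below reproduces the same grayscale -> dictionary lookup -> detect -> draw pipeline, written against the cv2.aruco.ArucoDetector API introduced in OpenCV 4.7; the commit itself calls the older module-level cv2.aruco.detectMarkers, as the diff of src/opencv_utils.py below shows.

    # Illustrative sketch of the detection pipeline (assumes opencv-python >= 4.7).
    import cv2
    import numpy as np

    def detect_aruco(frame: np.ndarray, dict_type: str = "DICT_6X6_250") -> np.ndarray:
        output = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Resolve the dictionary name to one of cv2.aruco's predefined tables.
        aruco_dict = cv2.aruco.getPredefinedDictionary(getattr(cv2.aruco, dict_type))
        detector = cv2.aruco.ArucoDetector(aruco_dict, cv2.aruco.DetectorParameters())
        corners, ids, _rejected = detector.detectMarkers(gray)
        if ids is not None:
            # Outline each detected marker and label it with its ID.
            cv2.aruco.drawDetectedMarkers(output, corners, ids)
        return output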
app.py
CHANGED
@@ -201,6 +201,7 @@ with main_tabs[0]: # Camera Feed Tab
        "Optical Flow",
        "Hand Tracker",
        "Face Tracker",
+        "ArUco Marker Detector",
    ]

    # Use multiselect to both select and order filters
@@ -273,6 +274,35 @@ with main_tabs[0]: # Camera Feed Tab
    else:
        morph_op, morph_ks = "erode", 5

+    if "ArUco Marker Detector" in selected_functions:
+        with st.expander("🔍 ArUco Marker Parameters", expanded=True):
+            aruco_dict = st.selectbox(
+                "ArUco Dictionary",
+                options=[
+                    "DICT_4X4_50",
+                    "DICT_4X4_100",
+                    "DICT_4X4_250",
+                    "DICT_4X4_1000",
+                    "DICT_5X5_50",
+                    "DICT_5X5_100",
+                    "DICT_5X5_250",
+                    "DICT_5X5_1000",
+                    "DICT_6X6_50",
+                    "DICT_6X6_100",
+                    "DICT_6X6_250",
+                    "DICT_6X6_1000",
+                    "DICT_7X7_50",
+                    "DICT_7X7_100",
+                    "DICT_7X7_250",
+                    "DICT_7X7_1000",
+                    "DICT_ARUCO_ORIGINAL",
+                ],
+                index=10,  # Default to DICT_6X6_250
+                help="Select the ArUco marker dictionary. Different dictionaries support different marker patterns and IDs.",
+            )
+    else:
+        aruco_dict = "DICT_6X6_250"
+
    with video_col:
        st.markdown("## 📹 Live Camera Feed")
        # WebRTC settings for real-time video
@@ -317,6 +347,8 @@ with main_tabs[0]: # Camera Feed Tab
                img = app.detect_hands(img)
            elif fn == "Face Tracker":
                img = app.detect_faces(img)
+            elif fn == "ArUco Marker Detector":
+                img = app.detect_aruco_markers(img, dict_type=aruco_dict)

            prev_gray = curr_gray
            return av.VideoFrame.from_ndarray(img, format="bgr24")
@@ -600,6 +632,29 @@ with main_tabs[2]: # Documentation Tab
            **Docs**: [MediaPipe Face Detector](https://developers.google.com/mediapipe/solutions/vision/face_detector)
            """
        )
+    elif filter_name == "ArUco Marker Detector":
+        st.markdown(
+            """
+            Detects ArUco markers in the video feed. ArUco markers are square fiducial markers that can be used for camera pose estimation, calibration, and object tracking.
+
+            **Parameters:**
+            - **ArUco Dictionary**: Select the dictionary type for the markers you want to detect. Different dictionaries support different marker patterns and ID ranges.
+
+            **Usage**:
+            - Augmented reality
+            - Camera calibration
+            - Object tracking
+            - Robotics navigation
+            - Positional reference
+
+            **How it works**:
+            1. Converts the image to grayscale
+            2. Detects markers using the selected dictionary
+            3. Draws detected markers with their IDs
+
+            **Docs**: [OpenCV ArUco Marker Detection](https://docs.opencv.org/4.x/d5/dae/tutorial_aruco_detection.html)
+            """
+        )
    else:
        # Fallback for any filters missed
        st.markdown(
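
To try the new filter in the camera feed, you need a marker to show the camera. A minimal helper for generating one (not part of this commit; generateImageMarker assumes OpenCV >= 4.7, where it replaced the older drawMarker):

    import cv2

    # Write a printable 400 px marker, ID 42, from the default DICT_6X6_250.
    dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
    cv2.imwrite("marker_42.png", cv2.aruco.generateImageMarker(dictionary, 42, 400))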
src/opencv_utils.py
CHANGED
@@ -21,6 +21,27 @@ class OpenCVUtils:
            min_tracking_confidence=0.7,
        )

+        # Initialize ArUco dictionaries
+        self.aruco_dicts = {
+            "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
+            "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
+            "DICT_4X4_250": cv2.aruco.DICT_4X4_250,
+            "DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
+            "DICT_5X5_50": cv2.aruco.DICT_5X5_50,
+            "DICT_5X5_100": cv2.aruco.DICT_5X5_100,
+            "DICT_5X5_250": cv2.aruco.DICT_5X5_250,
+            "DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
+            "DICT_6X6_50": cv2.aruco.DICT_6X6_50,
+            "DICT_6X6_100": cv2.aruco.DICT_6X6_100,
+            "DICT_6X6_250": cv2.aruco.DICT_6X6_250,
+            "DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
+            "DICT_7X7_50": cv2.aruco.DICT_7X7_50,
+            "DICT_7X7_100": cv2.aruco.DICT_7X7_100,
+            "DICT_7X7_250": cv2.aruco.DICT_7X7_250,
+            "DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
+            "DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL,
+        }
+
    def detect_faces(self, frame: np.ndarray, draw: bool = True) -> np.ndarray:
        """
        Detect a face in the frame with the face mesh tracker of mediapipe
@@ -40,6 +61,59 @@ class OpenCVUtils:
        result = self.hand_tracker.detect(frame, draw=draw)
        return result

+    def detect_aruco_markers(
+        self, frame: np.ndarray, dict_type: str = "DICT_6X6_250", draw: bool = True
+    ) -> np.ndarray:
+        """
+        Detect ArUco markers in the frame
+
+        :param frame: The frame in which to detect ArUco markers
+        :param dict_type: The ArUco dictionary type to use for detection
+        :param draw: Whether the detected markers should be drawn on the frame
+        :return: The frame with detected ArUco markers drawn (if draw=True)
+        """
+        # Create a copy of the frame to avoid modifying the original
+        output = frame.copy()
+
+        # Convert the image to grayscale
+        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+        # Get the ArUco dictionary
+        aruco_dict = cv2.aruco.getPredefinedDictionary(getattr(cv2.aruco, dict_type))
+
+        # Set the detection parameters (using default values)
+        parameters = cv2.aruco.DetectorParameters()
+
+        # Detect ArUco markers
+        corners, ids, rejected = cv2.aruco.detectMarkers(
+            gray, aruco_dict, parameters=parameters
+        )
+
+        # If markers are detected and draw is True
+        if draw and ids is not None:
+            # Draw the detected markers
+            cv2.aruco.drawDetectedMarkers(output, corners, ids)
+
+            # For each marker, draw additional information
+            for i, corner in enumerate(corners):
+                # Get the center of the marker
+                c = corner[0]
+                center = (int(c[:, 0].mean()), int(c[:, 1].mean()))
+
+                # Draw the marker ID
+                cv2.putText(
+                    output,
+                    f"ID: {ids[i][0]}",
+                    (center[0], center[1] - 15),
+                    cv2.FONT_HERSHEY_SIMPLEX,
+                    0.5,
+                    (0, 255, 0),
+                    2,
+                )
+
+        return output
+
    def apply_color_filter(
        self, frame: np.ndarray, lower_bound: list, upper_bound: list
    ) -> np.ndarray:
@@ -191,56 +265,69 @@ class OpenCVUtils:
    def adaptive_threshold(self, image: np.ndarray) -> np.ndarray:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return cv2.cvtColor(
-            cv2.adaptiveThreshold(
-                gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2),
-            cv2.COLOR_GRAY2BGR)
+            cv2.adaptiveThreshold(
+                gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2
+            ),
+            cv2.COLOR_GRAY2BGR,
+        )

-    def morphology(self, image: np.ndarray, op: str = 'erode', ksize: int = 5) -> np.ndarray:
+    def morphology(
+        self, image: np.ndarray, op: str = "erode", ksize: int = 5
+    ) -> np.ndarray:
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (ksize, ksize))
        ops = {
-            'erode': cv2.erode,
-            'dilate': cv2.dilate,
-            'open': cv2.morphologyEx,
-            'close': cv2.morphologyEx,
+            "erode": cv2.erode,
+            "dilate": cv2.dilate,
+            "open": cv2.morphologyEx,
+            "close": cv2.morphologyEx,
        }
-        if op in ['open', 'close']:
-            flag = cv2.MORPH_OPEN if op == 'open' else cv2.MORPH_CLOSE
+        if op in ["open", "close"]:
+            flag = cv2.MORPH_OPEN if op == "open" else cv2.MORPH_CLOSE
            return ops[op](image, flag, kernel)
        return ops[op](image, kernel)

    def sharpen(self, image: np.ndarray) -> np.ndarray:
-        kernel = np.array([[0, -1, 0],
-                           [-1, 5, -1],
-                           [0, -1, 0]])
+        kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
        return cv2.filter2D(image, -1, kernel)

    def hough_lines(self, image: np.ndarray) -> np.ndarray:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 50, 150)
-        lines = cv2.HoughLinesP(
-            edges, 1, np.pi/180, threshold=50, minLineLength=50, maxLineGap=10)
+        lines = cv2.HoughLinesP(
+            edges, 1, np.pi / 180, threshold=50, minLineLength=50, maxLineGap=10
+        )
        if lines is not None:
-            for x1, y1, x2, y2 in lines[:,0]:
+            for x1, y1, x2, y2 in lines[:, 0]:
                cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
        return image

    def hough_circles(self, image: np.ndarray) -> np.ndarray:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-        circles = cv2.HoughCircles(
-            gray, cv2.HOUGH_GRADIENT, dp=1.2, minDist=50, param1=50,
-            param2=30, minRadius=5, maxRadius=100)
+        circles = cv2.HoughCircles(
+            gray,
+            cv2.HOUGH_GRADIENT,
+            dp=1.2,
+            minDist=50,
+            param1=50,
+            param2=30,
+            minRadius=5,
+            maxRadius=100,
+        )
        if circles is not None:
            circles = np.uint16(np.around(circles))
            for x, y, r in circles[0, :]:
                cv2.circle(image, (x, y), r, (0, 255, 0), 2)
        return image

-    def optical_flow(self, prev_gray: np.ndarray, curr_gray: np.ndarray, image: np.ndarray) -> np.ndarray:
-        flow = cv2.calcOpticalFlowFarneback(prev_gray, curr_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
-        mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
+    def optical_flow(
+        self, prev_gray: np.ndarray, curr_gray: np.ndarray, image: np.ndarray
+    ) -> np.ndarray:
+        flow = cv2.calcOpticalFlowFarneback(
+            prev_gray, curr_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0
+        )
+        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv = np.zeros_like(image)
-        hsv[...,1] = 255
-        hsv[...,0] = ang * 180 / np.pi / 2
-        hsv[...,2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
-        return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
+        hsv[..., 1] = 255
+        hsv[..., 0] = ang * 180 / np.pi / 2
+        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
+        return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
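
One portability note on the method above: the module-level cv2.aruco.detectMarkers call and the cv2.aruco.DetectorParameters() constructor belong to different generations of OpenCV's ArUco API (the free function was removed in OpenCV 4.7 in favor of the ArucoDetector class). A version-agnostic guard, offered as an assumption rather than as part of the commit, could look like:

    import cv2

    def detect_markers_compat(gray, aruco_dict):
        """Run ArUco detection on either side of the OpenCV 4.7 API split."""
        if hasattr(cv2.aruco, "ArucoDetector"):
            # OpenCV >= 4.7: detection lives on the ArucoDetector class.
            detector = cv2.aruco.ArucoDetector(aruco_dict, cv2.aruco.DetectorParameters())
            return detector.detectMarkers(gray)
        # Older OpenCV: module-level function with _create-style parameters.
        parameters = cv2.aruco.DetectorParameters_create()
        return cv2.aruco.detectMarkers(gray, aruco_dict, parameters=parameters)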
src/tkinter_app.py
CHANGED
@@ -579,6 +579,67 @@ class MainWindow:
            selectcolor=self.colors["black"],
        ).pack()

+        ttk.Separator(scrollable_frame, orient=HORIZONTAL).pack(fill=X, padx=3, pady=3)
+
+        # Add ArUco Marker Detector
+        self.aruco_marker_var = IntVar()
+        self.aruco_marker_var.trace_add(
+            "write",
+            lambda *args: self.add_function(
+                self.detect_aruco_markers, self.aruco_marker_var
+            ),
+        )
+        Checkbutton(
+            scrollable_frame,
+            text="ArUco Marker Detector",
+            variable=self.aruco_marker_var,
+            font=self.font,
+            bg=self.colors["black"],
+            fg=self.colors["white"],
+            highlightbackground=self.colors["black"],
+            selectcolor=self.colors["black"],
+        ).pack()
+
+        # ArUco dictionary selector
+        Label(
+            scrollable_frame,
+            text="ArUco Dictionary",
+            bg=self.colors["black"],
+            fg=self.colors["white"],
+        ).pack()
+
+        self.aruco_dict_var = StringVar(value="DICT_6X6_250")
+        aruco_dicts = [
+            "DICT_4X4_50",
+            "DICT_4X4_100",
+            "DICT_4X4_250",
+            "DICT_4X4_1000",
+            "DICT_5X5_50",
+            "DICT_5X5_100",
+            "DICT_5X5_250",
+            "DICT_5X5_1000",
+            "DICT_6X6_50",
+            "DICT_6X6_100",
+            "DICT_6X6_250",
+            "DICT_6X6_1000",
+            "DICT_7X7_50",
+            "DICT_7X7_100",
+            "DICT_7X7_250",
+            "DICT_7X7_1000",
+            "DICT_ARUCO_ORIGINAL",
+        ]
+
+        # Create a combobox for selecting dictionary
+        aruco_dict_combo = ttk.Combobox(
+            scrollable_frame,
+            textvariable=self.aruco_dict_var,
+            values=aruco_dicts,
+            state="readonly",
+            width=20,
+        )
+        aruco_dict_combo.pack(pady=5)
+        aruco_dict_combo.current(10)  # Default to DICT_6X6_250
+
        # Create the label to display the image
        self.image_label = Label(self.paned_window, bg=self.colors["black"])
        self.paned_window.add(self.image_label)
@@ -595,6 +656,17 @@ class MainWindow:
        else:
            self.functions.remove(function)

+    def detect_aruco_markers(self, frame: np.ndarray) -> np.ndarray:
+        """
+        Wrapper for ArUco marker detection that passes the selected dictionary type
+
+        :param frame: The frame in which to detect ArUco markers
+        :return: The frame with detected ArUco markers
+        """
+        return self.aplication.detect_aruco_markers(
+            frame, dict_type=self.aruco_dict_var.get()
+        )
+
    def process_optical_flow(self, frame: np.ndarray) -> np.ndarray:
        """
        Special handler for optical flow which needs to track previous frames
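
The trace_add wiring follows the pattern the other filters already use: the IntVar trace fires on every write, and the handler adds or removes the processing function according to the checkbox state. A stripped-down sketch of that toggle pattern (illustrative names; `pipeline` stands in for MainWindow.functions):

    from tkinter import Tk, IntVar, Checkbutton

    root = Tk()
    pipeline = []

    def toggle(fn, var):
        # Add the filter when the box is checked, remove it when unchecked.
        if var.get() and fn not in pipeline:
            pipeline.append(fn)
        elif not var.get() and fn in pipeline:
            pipeline.remove(fn)

    aruco_var = IntVar()
    aruco_var.trace_add("write", lambda *args: toggle("aruco", aruco_var))
    Checkbutton(root, text="ArUco Marker Detector", variable=aruco_var).pack()
    root.mainloop()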
|