samuellimabraz committed
Commit 9201cee · unverified · Parent(s): d1b79b8

feat: Streamline filter selection and enhance documentation in app.py


- Replaced categorized filter selection with a unified multiselect interface, allowing users to select and order filters more intuitively (see the sketch after this list).
- Updated filter parameters for "Resize" to support higher resolutions.
- Enhanced the documentation section to provide detailed descriptions for each filter, improving user understanding and accessibility.
- Removed outdated category-based filter organization for a cleaner UI experience.
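A minimal sketch of the unified selection flow described above, assuming a Streamlit app with one callable per filter (the `FILTERS` registry and `process_frame` helper are illustrative names, not the actual helpers in app.py): `st.multiselect` returns the chosen options in selection order, so the returned list doubles as the processing pipeline.

```python
import streamlit as st

# Hypothetical registry of filter callables; app.py defines its own filter functions.
FILTERS = {
    "Resize": lambda img: img,  # placeholder no-op implementations
    "Blur": lambda img: img,
    "Canny": lambda img: img,
}

# The multiselect both selects and orders: its return value preserves selection order.
selected_functions = st.multiselect(
    "Select and order filters to apply:",
    options=list(FILTERS),
    default=[],
    help="Filters are applied in the order they appear here.",
)

def process_frame(img):
    # Apply the chosen filters sequentially, in the order the user selected them.
    for name in selected_functions:
        img = FILTERS[name](img)
    return img
```

In the committed code the same list feeds the WebRTC frame callback, which iterates over `selected_functions` on every frame.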

Files changed (1): app.py (+242, -284)
app.py CHANGED
@@ -176,68 +176,42 @@ with col2:
         unsafe_allow_html=True,
     )
 
-# Create main layout
 main_tabs = st.tabs(["📹 Camera Feed", "ℹ️ About", "📋 Documentation"])
-
 with main_tabs[0]: # Camera Feed Tab
     # Create columns for camera and controls
     video_col, control_col = st.columns([3, 1])
-
     with control_col:
         st.markdown("## 🎛️ Controls")
 
-        # Organize filters into categories
-        filter_categories = {
-            "Basic Transformations": ["Resize", "Rotation", "Blur", "Sharpen"],
-            "Edge & Contour Detection": ["Canny", "Contour", "Hough Lines"],
-            "Color Operations": [
-                "Color Filter",
-                "Histogram Equalization",
-                "Color Quantization",
-            ],
-            "Artistic Effects": ["Pencil Sketch", "Morphology", "Adaptive Threshold"],
-            "Advanced Features": ["Optical Flow", "Hand Tracker", "Face Tracker"],
-        }
-
-        # Use a dictionary to store the expanded state of each category
-        if "expanded" not in st.session_state:
-            st.session_state.expanded = {cat: False for cat in filter_categories}
-            st.session_state.expanded["Basic Transformations"] = (
-                True # Expand the first one by default
-            )
+        # List all available filters
+        all_filters = [
+            "Resize",
+            "Rotation",
+            "Blur",
+            "Sharpen",
+            "Canny",
+            "Contour",
+            "Hough Lines",
+            "Color Filter",
+            "Histogram Equalization",
+            "Color Quantization",
+            "Pencil Sketch",
+            "Morphology",
+            "Adaptive Threshold",
+            "Optical Flow",
+            "Hand Tracker",
+            "Face Tracker",
+        ]
+
+        # Use multiselect to both select and order filters
+        selected_functions = st.multiselect(
+            "Select and order filters to apply:",
+            options=all_filters,
+            default=[],
+            help="Filters will be applied in the order they appear here. Drag to reorder.",
+        )
 
-        # Create filter selection UI with categories
-        selected_functions = []
-        for category, filters in filter_categories.items():
-            with st.expander(
-                f"**{category}**", expanded=st.session_state.expanded[category]
-            ):
-                # Show checkboxes for each filter in this category
-                selected_in_category = []
-                for filter_name in filters:
-                    if st.checkbox(filter_name, key=f"check_{filter_name}"):
-                        selected_in_category.append(filter_name)
-
-                # If any filters selected in this category, add a reorder section
-                if selected_in_category:
-                    st.markdown("**Order within category:**")
-                    for i, filter_name in enumerate(selected_in_category):
-                        col1, col2 = st.columns([4, 1])
-                        with col1:
-                            st.text(f"{i+1}. {filter_name}")
-                        with col2:
-                            if i > 0 and st.button("↑", key=f"up_{filter_name}"):
-                                # Move filter up in the list
-                                selected_in_category[i], selected_in_category[i - 1] = (
-                                    selected_in_category[i - 1],
-                                    selected_in_category[i],
-                                )
-                                st.rerun()
-
-                # Add selected filters to the main list
-                selected_functions.extend(selected_in_category)
-
-        # Show the currently applied filters
+        # Show the currently applied filters with their order
         if selected_functions:
             st.markdown("### 📌 Applied Filters")
             for i, fn in enumerate(selected_functions):
@@ -246,13 +220,13 @@ with main_tabs[0]: # Camera Feed Tab
             st.info("Select filters to apply to the camera feed")
 
         # Filter parameters - using expanders for cleaner UI
-        if any(f in selected_functions for f in ["Resize"]):
+        if "Resize" in selected_functions:
             with st.expander("📐 Resize Parameters", expanded=True):
-                w = st.slider("Width", 320, 1280, 640)
-                h = st.slider("Height", 240, 720, 480)
+                w = st.slider("Width", 320, 1920, 1280)
+                h = st.slider("Height", 240, 1080, 720)
         else:
             # Default values if not displayed
-            w, h = 640, 480
+            w, h = 1280, 720
 
         if "Rotation" in selected_functions:
             with st.expander("🔄 Rotation Parameters", expanded=True):
@@ -280,20 +254,6 @@ with main_tabs[0]: # Camera Feed Tab
                 us = st.slider("Sat (U)", 0, 255, 255)
                 uv = st.slider("Val (U)", 0, 255, 255)
 
-                # Color preview - Make it dynamic again
-                # Use the lower bound HSV values to generate an HSL color for CSS
-                preview_color_hsl = f"hsl({lh * 2}, {ls / 2.55}%, {lv / 2.55}%)"
-                st.markdown(
-                    f"""
-                    <div style="background-color: {preview_color_hsl}; width: 100%; height: 30px;
-                    border: 1px solid #555555; border-radius: 5px; margin-top: 10px;">
-                    <p class='color-preview-text' style='text-align: center; line-height: 30px; font-size: 12px; font-weight: bold;'>
-                    Preview (Lower Bound)
-                    </p>
-                    </div>
-                    """,
-                    unsafe_allow_html=True,
-                )
         else:
             lh, ls, lv, uh, us, uv = 0, 0, 0, 180, 255, 255
 
@@ -315,7 +275,6 @@ with main_tabs[0]: # Camera Feed Tab
 
     with video_col:
         st.markdown("## 📹 Live Camera Feed")
-
         # WebRTC settings for real-time video
         prev_gray = None
 
@@ -324,6 +283,7 @@ with main_tabs[0]: # Camera Feed Tab
             img = frame.to_ndarray(format="bgr24")
             curr_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
 
+            # Apply filters in the order they were selected
            for fn in selected_functions:
                 if fn == "Color Filter":
                     img = app.apply_color_filter(img, (lh, ls, lv), (uh, us, uv))
@@ -377,7 +337,7 @@ with main_tabs[0]: # Camera Feed Tab
                     "border-radius": "8px",
                     "margin": "0 auto",
                     "display": "block",
-                    "border": "2px solid #AAAAAA", # Changed border to lighter grey
+                    "border": "2px solid #AAAAAA",
                 },
             ),
         )
@@ -434,221 +394,219 @@ with main_tabs[2]: # Documentation Tab
     )
 
     # Create documentation for each filter category
-    for category, filters in filter_categories.items():
-        with st.expander(f"**{category}**", expanded=False):
-            for filter_name in filters:
-                st.markdown(f"#### {filter_name}")
-
-                # Add detailed description and links for each filter
-                if filter_name == "Resize":
-                    st.markdown(
-                        """
-                        Changes the dimensions (width and height) of the video frame. Useful for adjusting the output size or preparing the frame for other operations that require a specific input size.
-
-                        **Parameters:**
-                        - **Width**: Target width in pixels.
-                        - **Height**: Target height in pixels.
-
-                        **Usage**: Scaling for performance, UI fitting, preprocessing for models.
-
-                        **Docs**: [OpenCV Geometric Transformations](https://docs.opencv.org/4.x/da/d6e/tutorial_py_geometric_transformations.html) (See `cv2.resize`)
-                        """
-                    )
-                elif filter_name == "Rotation":
-                    st.markdown(
-                        """
-                        Rotates the video frame around its center by a specified angle.
-
-                        **Parameters:**
-                        - **Angle**: Rotation angle in degrees (0-360).
-
-                        **Usage**: Image orientation correction, creative effects.
-
-                        **Docs**: [OpenCV Geometric Transformations](https://docs.opencv.org/4.x/da/d6e/tutorial_py_geometric_transformations.html) (See `cv2.getRotationMatrix2D` and `cv2.warpAffine`)
-                        """
-                    )
-                elif filter_name == "Blur":
-                    st.markdown(
-                        """
-                        Applies Gaussian blur to smooth the image, reducing noise and detail. The kernel size determines the extent of blurring.
-
-                        **Parameters:**
-                        - **Kernel Size**: Size of the blurring matrix (must be an odd number). Higher values create more blur.
-
-                        **Usage**: Noise reduction, detail smoothing, pre-processing for edge detection or other algorithms.
-
-                        **Docs**: [OpenCV Smoothing Images](https://docs.opencv.org/4.x/d4/d13/tutorial_py_filtering.html) (See `cv2.GaussianBlur`)
-                        """
-                    )
-                elif filter_name == "Sharpen":
-                    st.markdown(
-                        """
-                        Enhances the edges and details in the image using a sharpening kernel. This is achieved by subtracting a blurred version of the image from the original.
-
-                        **Parameters:** None (uses a fixed kernel).
-
-                        **Usage**: Enhancing image clarity, highlighting details.
-
-                        **Docs**: [OpenCV Image Filtering Concepts](https://docs.opencv.org/4.x/d4/d13/tutorial_py_filtering.html) (Concept explanation, the implementation uses a custom kernel)
-                        """
-                    )
-                elif filter_name == "Canny":
-                    st.markdown(
-                        """
-                        Detects edges in the image using the Canny edge detection algorithm, a multi-stage process to find sharp changes in intensity.

-                        **Parameters:**
-                        - **Lower Threshold**: Minimum intensity gradient to be considered a potential edge.
-                        - **Upper Threshold**: Maximum intensity gradient. Edges above this are definite edges. Pixels between the thresholds are included if connected to definite edges.
-
-                        **Usage**: Edge detection, feature extraction, object boundary identification.
-
-                        **Docs**: [OpenCV Canny Edge Detection](https://docs.opencv.org/4.x/da/d22/tutorial_py_canny.html)
-                        """
-                    )
-                elif filter_name == "Contour":
-                    st.markdown(
-                        """
-                        Finds and draws contours (continuous curves joining points along a boundary with the same intensity) in the image. Usually applied after thresholding or edge detection.
-
-                        **Parameters:** None (finds contours on the processed image and draws them).
-
-                        **Usage**: Object detection, shape analysis, feature extraction.
-
-                        **Docs**: [OpenCV Contours](https://docs.opencv.org/4.x/d4/d73/tutorial_py_contours_begin.html) (See `cv2.findContours`, `cv2.drawContours`)
-                        """
-                    )
-                elif filter_name == "Hough Lines":
-                    st.markdown(
-                        """
-                        Detects straight lines in the image using the Hough Line Transform (Probabilistic variant). Works best on edge-detected images.
-
-                        **Parameters:** None (uses preset parameters for `cv2.HoughLinesP`).
-
-                        **Usage**: Line detection in images, structure identification.
-
-                        **Docs**: [OpenCV Hough Line Transform](https://docs.opencv.org/4.x/d6/d10/tutorial_py_houghlines.html) (See `cv2.HoughLinesP`)
-                        """
-                    )
-                elif filter_name == "Color Filter":
-                    st.markdown(
-                        """
-                        Isolates specific colors by converting the image to HSV (Hue, Saturation, Value) color space and applying a threshold based on the selected ranges.
-
-                        **Parameters:**
-                        - **Lower Bounds (Hue, Sat, Val)**: Minimum HSV values for the color range.
-                        - **Upper Bounds (Hue, Sat, Val)**: Maximum HSV values for the color range.
-
-                        **Usage**: Object detection based on color, color segmentation, special effects.
-
-                        **Docs**: [OpenCV Changing Colorspaces](https://docs.opencv.org/4.x/df/d9d/tutorial_py_colorspaces.html) (See `cv2.cvtColor` and `cv2.inRange`)
-                        """
-                    )
-                elif filter_name == "Histogram Equalization":
-                    st.markdown(
-                        """
-                        Improves contrast in grayscale images by redistributing pixel intensities more evenly across the histogram. Applied to the Value channel if the input is color.
-
-                        **Parameters:** None.
-
-                        **Usage**: Enhancing contrast in low-contrast images, improving visibility of details.
-
-                        **Docs**: [OpenCV Histogram Equalization](https://docs.opencv.org/4.x/d5/daf/tutorial_py_histogram_equalization.html) (See `cv2.equalizeHist`)
-                        """
-                    )
-                elif filter_name == "Color Quantization":
-                    st.markdown(
-                        """
-                        Reduces the number of distinct colors in an image using K-Means clustering in the color space. Groups similar colors together.
-
-                        **Parameters:** None (uses a fixed number of clusters, K=8).
-
-                        **Usage**: Image compression, posterization effect, simplifying color palettes.
-
-                        **Docs**: [OpenCV K-Means Clustering](https://docs.opencv.org/4.x/d1/d5c/tutorial_py_kmeans_opencv.html) (Underlying algorithm)
-                        """
-                    )
-                elif filter_name == "Pencil Sketch":
-                    st.markdown(
-                        """
-                        Creates a pencil sketch effect by converting the image to grayscale, inverting it, blurring the inverted image, and blending it with the original grayscale image using color dodge.
-
-                        **Parameters:** None.
-
-                        **Usage**: Artistic image transformation, creating sketch-like visuals.
-
-                        **Docs**: Involves multiple OpenCV steps (Grayscale, Blur, Blending). See [Color Dodge Blending](https://en.wikipedia.org/wiki/Blend_modes#Dodge_and_burn).
-                        """
-                    )
-                elif filter_name == "Morphology":
-                    st.markdown(
-                        """
-                        Applies morphological operations (Erode, Dilate, Open, Close) to modify the shape of features in the image, typically on binary images.
-
-                        **Parameters:**
-                        - **Operation**: Type of morphological operation (`erode`, `dilate`, `open`, `close`).
-                        - **Kernel Size**: Size of the structuring element used (odd number).
-
-                        **Usage**: Noise removal, joining broken parts, thinning/thickening features.
-
-                        **Docs**: [OpenCV Morphological Transformations](https://docs.opencv.org/4.x/d9/d61/tutorial_py_morphological_ops.html) (See `cv2.erode`, `cv2.dilate`, `cv2.morphologyEx`)
-                        """
-                    )
-                elif filter_name == "Adaptive Threshold":
-                    st.markdown(
-                        """
-                        Applies adaptive thresholding, where the threshold value is calculated locally for different regions of the image. Useful for images with varying illumination.
-
-                        **Parameters:** None (uses `cv2.ADAPTIVE_THRESH_GAUSSIAN_C`).
-
-                        **Usage**: Image segmentation in non-uniform lighting conditions.
-
-                        **Docs**: [OpenCV Image Thresholding](https://docs.opencv.org/4.x/d7/d4d/tutorial_py_thresholding.html) (See `cv2.adaptiveThreshold`)
-                        """
-                    )
-                elif filter_name == "Optical Flow":
-                    st.markdown(
-                        """
-                        Calculates and visualizes the apparent motion of objects between consecutive frames using the Farneback algorithm. Shows motion vectors as lines on the image.
-
-                        **Parameters:** None (Requires previous frame data internally).
-
-                        **Usage**: Motion tracking, video stabilization analysis, action recognition.
-
-                        **Docs**: [OpenCV Optical Flow](https://docs.opencv.org/4.x/d4/dee/tutorial_optical_flow.html) (See `cv2.calcOpticalFlowFarneback`)
-                        """
-                    )
-                elif filter_name == "Hand Tracker":
-                    st.markdown(
-                        """
-                        Detects and tracks hand positions and landmarks (joints) in real-time using the MediaPipe Hands solution. Draws landmarks and connections on the detected hands.
-
-                        **Parameters:** None (uses pre-trained MediaPipe models).
-
-                        **Usage**: Gesture recognition, sign language interpretation, virtual object interaction, hand pose estimation.
-
-                        **Docs**: [MediaPipe Hand Landmarker](https://developers.google.com/mediapipe/solutions/vision/hand_landmarker)
-                        """
-                    )
-                elif filter_name == "Face Tracker":
-                    st.markdown(
-                        """
-                        Detects faces in the video feed using the MediaPipe Face Detection solution and draws bounding boxes around them.
-
-                        **Parameters:** None (uses pre-trained MediaPipe models).
-
-                        **Usage**: Face detection, counting people, basic facial analysis applications, input for face recognition or landmark detection.
-
-                        **Docs**: [MediaPipe Face Detector](https://developers.google.com/mediapipe/solutions/vision/face_detector)
-                        """
-                    )
-                else:
-                    # Fallback for any filters missed
-                    st.markdown(
-                        f"Detailed documentation for the **{filter_name}** filter is pending."
-                    )
+    for filter_name in all_filters:
+        st.markdown(f"#### {filter_name}")
+
+        # Add detailed description and links for each filter
+        if filter_name == "Resize":
+            st.markdown(
+                """
+                Changes the dimensions (width and height) of the video frame. Useful for adjusting the output size or preparing the frame for other operations that require a specific input size.
+
+                **Parameters:**
+                - **Width**: Target width in pixels.
+                - **Height**: Target height in pixels.
+
+                **Usage**: Scaling for performance, UI fitting, preprocessing for models.
+
+                **Docs**: [OpenCV Geometric Transformations](https://docs.opencv.org/4.x/da/d6e/tutorial_py_geometric_transformations.html) (See `cv2.resize`)
+                """
+            )
+        elif filter_name == "Rotation":
+            st.markdown(
+                """
+                Rotates the video frame around its center by a specified angle.
+
+                **Parameters:**
+                - **Angle**: Rotation angle in degrees (0-360).
+
+                **Usage**: Image orientation correction, creative effects.
+
+                **Docs**: [OpenCV Geometric Transformations](https://docs.opencv.org/4.x/da/d6e/tutorial_py_geometric_transformations.html) (See `cv2.getRotationMatrix2D` and `cv2.warpAffine`)
+                """
+            )
+        elif filter_name == "Blur":
+            st.markdown(
+                """
+                Applies Gaussian blur to smooth the image, reducing noise and detail. The kernel size determines the extent of blurring.
+
+                **Parameters:**
+                - **Kernel Size**: Size of the blurring matrix (must be an odd number). Higher values create more blur.
+
+                **Usage**: Noise reduction, detail smoothing, pre-processing for edge detection or other algorithms.
+
+                **Docs**: [OpenCV Smoothing Images](https://docs.opencv.org/4.x/d4/d13/tutorial_py_filtering.html) (See `cv2.GaussianBlur`)
+                """
+            )
+        elif filter_name == "Sharpen":
+            st.markdown(
+                """
+                Enhances the edges and details in the image using a sharpening kernel. This is achieved by subtracting a blurred version of the image from the original.
+
+                **Parameters:** None (uses a fixed kernel).
+
+                **Usage**: Enhancing image clarity, highlighting details.
+
+                **Docs**: [OpenCV Image Filtering Concepts](https://docs.opencv.org/4.x/d4/d13/tutorial_py_filtering.html) (Concept explanation, the implementation uses a custom kernel)
+                """
+            )
+        elif filter_name == "Canny":
+            st.markdown(
+                """
+                Detects edges in the image using the Canny edge detection algorithm, a multi-stage process to find sharp changes in intensity.
+
+                **Parameters:**
+                - **Lower Threshold**: Minimum intensity gradient to be considered a potential edge.
+                - **Upper Threshold**: Maximum intensity gradient. Edges above this are definite edges. Pixels between the thresholds are included if connected to definite edges.
+
+                **Usage**: Edge detection, feature extraction, object boundary identification.
+
+                **Docs**: [OpenCV Canny Edge Detection](https://docs.opencv.org/4.x/da/d22/tutorial_py_canny.html)
+                """
+            )
+        elif filter_name == "Contour":
+            st.markdown(
+                """
+                Finds and draws contours (continuous curves joining points along a boundary with the same intensity) in the image. Usually applied after thresholding or edge detection.
+
+                **Parameters:** None (finds contours on the processed image and draws them).
+
+                **Usage**: Object detection, shape analysis, feature extraction.
+
+                **Docs**: [OpenCV Contours](https://docs.opencv.org/4.x/d4/d73/tutorial_py_contours_begin.html) (See `cv2.findContours`, `cv2.drawContours`)
+                """
+            )
+        elif filter_name == "Hough Lines":
+            st.markdown(
+                """
+                Detects straight lines in the image using the Hough Line Transform (Probabilistic variant). Works best on edge-detected images.
+
+                **Parameters:** None (uses preset parameters for `cv2.HoughLinesP`).
+
+                **Usage**: Line detection in images, structure identification.
+
+                **Docs**: [OpenCV Hough Line Transform](https://docs.opencv.org/4.x/d6/d10/tutorial_py_houghlines.html) (See `cv2.HoughLinesP`)
+                """
+            )
+        elif filter_name == "Color Filter":
+            st.markdown(
+                """
+                Isolates specific colors by converting the image to HSV (Hue, Saturation, Value) color space and applying a threshold based on the selected ranges.
+
+                **Parameters:**
+                - **Lower Bounds (Hue, Sat, Val)**: Minimum HSV values for the color range.
+                - **Upper Bounds (Hue, Sat, Val)**: Maximum HSV values for the color range.
+
+                **Usage**: Object detection based on color, color segmentation, special effects.
+
+                **Docs**: [OpenCV Changing Colorspaces](https://docs.opencv.org/4.x/df/d9d/tutorial_py_colorspaces.html) (See `cv2.cvtColor` and `cv2.inRange`)
+                """
+            )
+        elif filter_name == "Histogram Equalization":
+            st.markdown(
+                """
+                Improves contrast in grayscale images by redistributing pixel intensities more evenly across the histogram. Applied to the Value channel if the input is color.
+
+                **Parameters:** None.
+
+                **Usage**: Enhancing contrast in low-contrast images, improving visibility of details.
+
+                **Docs**: [OpenCV Histogram Equalization](https://docs.opencv.org/4.x/d5/daf/tutorial_py_histogram_equalization.html) (See `cv2.equalizeHist`)
+                """
+            )
+        elif filter_name == "Color Quantization":
+            st.markdown(
+                """
+                Reduces the number of distinct colors in an image using K-Means clustering in the color space. Groups similar colors together.
+
+                **Parameters:** None (uses a fixed number of clusters, K=8).
+
+                **Usage**: Image compression, posterization effect, simplifying color palettes.
+
+                **Docs**: [OpenCV K-Means Clustering](https://docs.opencv.org/4.x/d1/d5c/tutorial_py_kmeans_opencv.html) (Underlying algorithm)
+                """
+            )
+        elif filter_name == "Pencil Sketch":
+            st.markdown(
+                """
+                Creates a pencil sketch effect by converting the image to grayscale, inverting it, blurring the inverted image, and blending it with the original grayscale image using color dodge.
+
+                **Parameters:** None.
+
+                **Usage**: Artistic image transformation, creating sketch-like visuals.
+
+                **Docs**: Involves multiple OpenCV steps (Grayscale, Blur, Blending). See [Color Dodge Blending](https://en.wikipedia.org/wiki/Blend_modes#Dodge_and_burn).
+                """
+            )
+        elif filter_name == "Morphology":
+            st.markdown(
+                """
+                Applies morphological operations (Erode, Dilate, Open, Close) to modify the shape of features in the image, typically on binary images.
+
+                **Parameters:**
+                - **Operation**: Type of morphological operation (`erode`, `dilate`, `open`, `close`).
+                - **Kernel Size**: Size of the structuring element used (odd number).
+
+                **Usage**: Noise removal, joining broken parts, thinning/thickening features.
+
+                **Docs**: [OpenCV Morphological Transformations](https://docs.opencv.org/4.x/d9/d61/tutorial_py_morphological_ops.html) (See `cv2.erode`, `cv2.dilate`, `cv2.morphologyEx`)
+                """
+            )
+        elif filter_name == "Adaptive Threshold":
+            st.markdown(
+                """
+                Applies adaptive thresholding, where the threshold value is calculated locally for different regions of the image. Useful for images with varying illumination.
+
+                **Parameters:** None (uses `cv2.ADAPTIVE_THRESH_GAUSSIAN_C`).
+
+                **Usage**: Image segmentation in non-uniform lighting conditions.
+
+                **Docs**: [OpenCV Image Thresholding](https://docs.opencv.org/4.x/d7/d4d/tutorial_py_thresholding.html) (See `cv2.adaptiveThreshold`)
+                """
+            )
+        elif filter_name == "Optical Flow":
+            st.markdown(
+                """
+                Calculates and visualizes the apparent motion of objects between consecutive frames using the Farneback algorithm. Shows motion vectors as lines on the image.
+
+                **Parameters:** None (Requires previous frame data internally).
+
+                **Usage**: Motion tracking, video stabilization analysis, action recognition.
+
+                **Docs**: [OpenCV Optical Flow](https://docs.opencv.org/4.x/d4/dee/tutorial_optical_flow.html) (See `cv2.calcOpticalFlowFarneback`)
+                """
+            )
+        elif filter_name == "Hand Tracker":
+            st.markdown(
+                """
+                Detects and tracks hand positions and landmarks (joints) in real-time using the MediaPipe Hands solution. Draws landmarks and connections on the detected hands.
+
+                **Parameters:** None (uses pre-trained MediaPipe models).
+
+                **Usage**: Gesture recognition, sign language interpretation, virtual object interaction, hand pose estimation.
+
+                **Docs**: [MediaPipe Hand Landmarker](https://developers.google.com/mediapipe/solutions/vision/hand_landmarker)
+                """
+            )
+        elif filter_name == "Face Tracker":
+            st.markdown(
+                """
+                Detects faces in the video feed using the MediaPipe Face Detection solution and draws bounding boxes around them.
+
+                **Parameters:** None (uses pre-trained MediaPipe models).
+
+                **Usage**: Face detection, counting people, basic facial analysis applications, input for face recognition or landmark detection.
+
+                **Docs**: [MediaPipe Face Detector](https://developers.google.com/mediapipe/solutions/vision/face_detector)
+                """
+            )
+        else:
+            # Fallback for any filters missed
+            st.markdown(
+                f"Detailed documentation for the **{filter_name}** filter is pending."
+            )
 
-                st.divider() # Add a separator between filter descriptions
+        st.divider() # Add a separator between filter descriptions
 
     st.markdown(
         """