import av
import cv2
import numpy as np
import streamlit as st
import os
import time
from streamlit_webrtc import webrtc_streamer, WebRtcMode, VideoHTMLAttributes

from src.opencv_utils import OpenCVUtils
from twilio.rest import Client

# Custom theme settings
st.set_page_config(
    page_title="OpenCV Explorer",
    page_icon="⚫",  # Changed icon for minimalism
    layout="wide",
    initial_sidebar_state="expanded",
)


# Create a custom theme
def create_custom_theme():
    # Create a .streamlit directory if it doesn't exist
    os.makedirs(".streamlit", exist_ok=True)
    # Create a config.toml file with custom theme settings
    with open(".streamlit/config.toml", "w") as f:
        f.write(
            """
[theme]
base = "dark"  # Use Streamlit's dark theme as a base
primaryColor = "#CCCCCC"  # Light Grey accent
backgroundColor = "#0E1117"  # Default Streamlit dark bg
secondaryBackgroundColor = "#262730"  # Slightly lighter dark grey
textColor = "#FAFAFA"  # Light text
font = "sans serif"
"""
        )


# Apply custom theme
create_custom_theme()


def get_ice_servers():
    """
    Get ICE servers configuration.

    For Streamlit Cloud deployment, a TURN server is required in addition to STUN.
    This function will try to use Twilio's TURN server service if credentials are
    available, otherwise it falls back to a free STUN server from Google.
    """
    try:
        # Try to get Twilio credentials from environment variables
        account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
        auth_token = os.environ.get("TWILIO_AUTH_TOKEN")

        if account_sid and auth_token:
            client = Client(account_sid, auth_token)
            token = client.tokens.create()
            return token.ice_servers
        else:
            st.warning(
                "Twilio credentials not found. Using free STUN server only, which may not work reliably."  # Removed Streamlit Cloud mention for generality
            )
    except Exception as e:
        st.error(f"Error setting up Twilio TURN servers: {e}")

    # Fallback to Google's free STUN server
    return [{"urls": ["stun:stun.l.google.com:19302"]}]


@st.cache_resource
def get_app():
    return OpenCVUtils()


app = get_app()

# --- HIDE STREAMLIT STYLE ---
hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
# ---------------------------

# Top header with logo and title
col1, col2 = st.columns([1, 5])
with col1:
    st.image(
        "https://opencv.org/wp-content/uploads/2020/07/OpenCV_logo_black-2.png",
        width=100,
    )
with col2:
    st.title("OpenCV Explorer")
    st.markdown(
        """

Explore computer vision filters and transformations in real-time using your webcam.

""", unsafe_allow_html=True, ) # Create main layout main_tabs = st.tabs(["📹 Camera Feed", "â„šī¸ About", "📋 Documentation"]) with main_tabs[0]: # Camera Feed Tab # Create columns for camera and controls video_col, control_col = st.columns([3, 1]) with control_col: st.markdown("## đŸŽ›ī¸ Controls") # Organize filters into categories filter_categories = { "Basic Transformations": ["Resize", "Rotation", "Blur", "Sharpen"], "Edge & Contour Detection": ["Canny", "Contour", "Hough Lines"], "Color Operations": [ "Color Filter", "Histogram Equalization", "Color Quantization", ], "Artistic Effects": ["Pencil Sketch", "Morphology", "Adaptive Threshold"], "Advanced Features": ["Optical Flow", "Hand Tracker", "Face Tracker"], } # Use a dictionary to store the expanded state of each category if "expanded" not in st.session_state: st.session_state.expanded = {cat: False for cat in filter_categories} st.session_state.expanded["Basic Transformations"] = ( True # Expand the first one by default ) # Create filter selection UI with categories selected_functions = [] for category, filters in filter_categories.items(): with st.expander( f"**{category}**", expanded=st.session_state.expanded[category] ): # Show checkboxes for each filter in this category selected_in_category = [] for filter_name in filters: if st.checkbox(filter_name, key=f"check_{filter_name}"): selected_in_category.append(filter_name) # If any filters selected in this category, add a reorder section if selected_in_category: st.markdown("**Order within category:**") for i, filter_name in enumerate(selected_in_category): col1, col2 = st.columns([4, 1]) with col1: st.text(f"{i+1}. {filter_name}") with col2: if i > 0 and st.button("↑", key=f"up_{filter_name}"): # Move filter up in the list selected_in_category[i], selected_in_category[i - 1] = ( selected_in_category[i - 1], selected_in_category[i], ) st.rerun() # Add selected filters to the main list selected_functions.extend(selected_in_category) # Show the currently applied filters if selected_functions: st.markdown("### 📌 Applied Filters") for i, fn in enumerate(selected_functions): st.markdown(f"**{i+1}.** {fn}") else: st.info("Select filters to apply to the camera feed") # Filter parameters - using expanders for cleaner UI if any(f in selected_functions for f in ["Resize"]): with st.expander("📐 Resize Parameters", expanded=True): w = st.slider("Width", 320, 1280, 640) h = st.slider("Height", 240, 720, 480) else: # Default values if not displayed w, h = 640, 480 if "Rotation" in selected_functions: with st.expander("🔄 Rotation Parameters", expanded=True): ang = st.slider("Angle", 0, 360, 0) else: ang = 0 if "Blur" in selected_functions: with st.expander("đŸŒĢī¸ Blur Parameters", expanded=True): bk = st.slider("Kernel Size (odd)", 1, 15, 5, step=2) else: bk = 5 if "Color Filter" in selected_functions: with st.expander("🎨 Color Filter Parameters", expanded=True): col1, col2 = st.columns(2) with col1: st.markdown("**Lower Bounds**") lh = st.slider("Hue (L)", 0, 180, 0) ls = st.slider("Sat (L)", 0, 255, 0) lv = st.slider("Val (L)", 0, 255, 0) with col2: st.markdown("**Upper Bounds**") uh = st.slider("Hue (U)", 0, 180, 180) us = st.slider("Sat (U)", 0, 255, 255) uv = st.slider("Val (U)", 0, 255, 255) # Color preview - Make it dynamic again # Use the lower bound HSV values to generate an HSL color for CSS preview_color_hsl = f"hsl({lh * 2}, {ls / 2.55}%, {lv / 2.55}%)" st.markdown( f"""
<div style="width: 100%; height: 36px; border-radius: 5px; background-color: {preview_color_hsl};"></div>
Preview (Lower Bound)

""", unsafe_allow_html=True, ) else: lh, ls, lv, uh, us, uv = 0, 0, 0, 180, 255, 255 if "Canny" in selected_functions: with st.expander("📊 Canny Edge Parameters", expanded=True): lc = st.slider("Lower Threshold", 0, 255, 100) uc = st.slider("Upper Threshold", 0, 255, 200) else: lc, uc = 100, 200 if "Morphology" in selected_functions: with st.expander("🧩 Morphology Parameters", expanded=True): morph_op = st.selectbox( "Operation", ["erode", "dilate", "open", "close"] ) morph_ks = st.slider("Kernel Size", 1, 31, 5, step=2) else: morph_op, morph_ks = "erode", 5 with video_col: st.markdown("## 📹 Live Camera Feed") # WebRTC settings for real-time video prev_gray = None def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame: global prev_gray img = frame.to_ndarray(format="bgr24") curr_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for fn in selected_functions: if fn == "Color Filter": img = app.apply_color_filter(img, (lh, ls, lv), (uh, us, uv)) elif fn == "Canny": img = app.apply_edge_detection(img, lc, uc) elif fn == "Blur": img = app.blur_image(img, bk) elif fn == "Rotation": img = app.rotate_image(img, ang) elif fn == "Resize": img = app.resize_image(img, w, h) elif fn == "Contour": img = app.apply_contour_detection(img) elif fn == "Histogram Equalization": img = app.equalize_histogram(img) elif fn == "Adaptive Threshold": img = app.adaptive_threshold(img) elif fn == "Morphology": img = app.morphology(img, morph_op, morph_ks) elif fn == "Sharpen": img = app.sharpen(img) elif fn == "Hough Lines": img = app.hough_lines(img) elif fn == "Optical Flow" and prev_gray is not None: img = app.optical_flow(prev_gray, curr_gray, img) elif fn == "Pencil Sketch": img = app.pencil_sketch(img) elif fn == "Color Quantization": img = app.color_quantization(img) elif fn == "Hand Tracker": img = app.detect_hands(img) elif fn == "Face Tracker": img = app.detect_faces(img) prev_gray = curr_gray return av.VideoFrame.from_ndarray(img, format="bgr24") webrtc_streamer( key="opencv-explorer", mode=WebRtcMode.SENDRECV, rtc_configuration={"iceServers": get_ice_servers()}, video_frame_callback=video_frame_callback, media_stream_constraints={"video": True, "audio": False}, async_processing=True, video_html_attrs=VideoHTMLAttributes( autoPlay=True, controls=False, style={ "width": f"{w}px", "height": f"{h}px", "border-radius": "8px", "margin": "0 auto", "display": "block", "border": "2px solid #AAAAAA", # Changed border to lighter grey }, ), ) # Performance metrics with st.expander("📊 Performance Metrics", expanded=False): col1, col2, col3 = st.columns(3) col1.metric("Resolution", f"{w}x{h} px") col2.metric("Filters Applied", len(selected_functions)) col3.metric("Frame Processing", f"{time.time():.2f} ms", delta=None) with main_tabs[1]: # About Tab st.markdown( """ ## About OpenCV Explorer OpenCV Explorer is an interactive web application that allows you to experiment with various computer vision techniques in real-time using your webcam. This application is built with: - **OpenCV**: Open Source Computer Vision Library - **Streamlit**: An open-source app framework for Machine Learning and Data Science - **WebRTC**: Web Real-Time Communication for live video streaming ### Features - Apply multiple filters and transformations to your webcam feed - Adjust parameters in real-time - Experiment with advanced computer vision techniques - Learn about image processing concepts ### How to Use 1. Select one or more filters from the categories in the control panel 2. Adjust the parameters for each selected filter 3. 
See the results in real-time through your webcam 4. Reorder filters to create different effects ### Privacy Note All processing is done in your browser. No video data is sent to any server except for the WebRTC connection. """ ) with main_tabs[2]: # Documentation Tab st.markdown( """ ## Documentation ### Available Filters """ ) # Create documentation for each filter category for category, filters in filter_categories.items(): with st.expander(f"**{category}**", expanded=False): for filter_name in filters: st.markdown(f"#### {filter_name}") # Add description for each filter if filter_name == "Color Filter": st.markdown( """ Isolates specific colors in the HSV (Hue, Saturation, Value) color space. **Parameters:** - **Hue**: Color type (0-180) - **Saturation**: Color intensity (0-255) - **Value**: Brightness (0-255) **Usage**: Object detection based on color, creative effects, background removal. """ ) elif filter_name == "Canny": st.markdown( """ Detects edges in the image using the Canny edge detection algorithm. **Parameters:** - **Lower Threshold**: Minimum gradient value to consider as an edge - **Upper Threshold**: Maximum gradient value to consider as an edge **Usage**: Edge detection, feature extraction, line detection. """ ) elif filter_name == "Blur": st.markdown( """ Applies Gaussian blur to smooth the image. **Parameters:** - **Kernel Size**: Size of the blurring matrix (higher values create more blur) **Usage**: Noise reduction, detail smoothing, pre-processing for other algorithms. """ ) elif filter_name == "Rotation": st.markdown( """ Rotates the image by a specified angle. **Parameters:** - **Angle**: Rotation angle in degrees (0-360) **Usage**: Image orientation correction, creative effects. """ ) elif filter_name == "Resize": st.markdown( """ Changes the dimensions of the image. **Parameters:** - **Width**: Output width in pixels - **Height**: Output height in pixels **Usage**: Scaling for performance, UI fitting, preprocessing. """ ) elif filter_name == "Hand Tracker": st.markdown( """ Detects and tracks hand positions and landmarks using MediaPipe. **Parameters:** None (uses pre-trained models) **Usage**: Gesture recognition, hand pose estimation, interactive applications. """ ) elif filter_name == "Face Tracker": st.markdown( """ Detects and tracks facial landmarks using MediaPipe. **Parameters:** None (uses pre-trained models) **Usage**: Face detection, facial expression analysis, AR effects. """ ) else: st.markdown(f"Documentation for {filter_name} filter.") st.markdown( """ ### Technical Details For more information about the algorithms and techniques used in this application, refer to: - [OpenCV Documentation](https://docs.opencv.org/4.x/d6/d00/tutorial_py_root.html) - [MediaPipe Documentation](https://ai.google.dev/edge/mediapipe/solutions/guide?hl=pt-br) - [Streamlit Documentation](https://docs.streamlit.io/) """ ) st.markdown( """

OpenCV Explorer | Built with Streamlit | © 2024

""", unsafe_allow_html=True, )