feat: Implement custom theme and enhanced UI for OpenCV Explorer
- Added a custom theme with a minimalist design, including a new dark grey icon and updated color scheme.
- Introduced a structured layout with tabs for camera feed, about section, and documentation.
- Organized filter controls into categories with expandable sections for improved user experience.
- Updated the video frame callback and WebRTC settings for better performance and visual appeal (a minimal sketch of the callback pattern follows this list).
- Enhanced documentation for available filters and added a privacy note regarding data handling.
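
For quick orientation, the heart of this change is the streamlit-webrtc callback pattern: each camera frame arrives as an `av.VideoFrame`, is converted to a BGR NumPy array, run through the selected OpenCV filters, and handed back to the player. Below is a minimal sketch of that pattern, assuming a fixed Gaussian blur as a stand-in for the app's user-selected filter chain; the `key` value is illustrative rather than the one used in `app.py`, and the full version appears in the diff that follows.

```python
import av
import cv2
from streamlit_webrtc import WebRtcMode, webrtc_streamer


def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    # streamlit-webrtc delivers each camera frame here; decode it to a BGR array.
    img = frame.to_ndarray(format="bgr24")
    # Stand-in for the user-selected filter chain: a fixed Gaussian blur.
    img = cv2.GaussianBlur(img, (5, 5), 0)
    # Return the processed frame so it is rendered in the browser.
    return av.VideoFrame.from_ndarray(img, format="bgr24")


webrtc_streamer(
    key="filter-demo",  # illustrative key, not the one used in app.py
    mode=WebRtcMode.SENDRECV,
    video_frame_callback=video_frame_callback,
    media_stream_constraints={"video": True, "audio": False},
    async_processing=True,
)
```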
app.py
CHANGED
@@ -3,11 +3,40 @@ import cv2
 import numpy as np
 import streamlit as st
 import os
+import time
 from streamlit_webrtc import webrtc_streamer, WebRtcMode, VideoHTMLAttributes
 from src.opencv_utils import OpenCVUtils
 from twilio.rest import Client
 
-
+# Custom theme settings
+st.set_page_config(
+    page_title="OpenCV Explorer",
+    page_icon="⚫",  # Changed icon for minimalism
+    layout="wide",
+    initial_sidebar_state="expanded",
+)
+
+
+# Create a custom theme
+def create_custom_theme():
+    # Create a .streamlit directory if it doesn't exist
+    os.makedirs(".streamlit", exist_ok=True)
+    # Create a config.toml file with custom theme settings
+    with open(".streamlit/config.toml", "w") as f:
+        f.write(
+            """
+[theme]
+primaryColor = "#444444" # Dark Grey
+backgroundColor = "#FFFFFF" # White
+secondaryBackgroundColor = "#F0F2F6" # Light Grey
+textColor = "#000000" # Black
+font = "sans serif"
+"""
+        )
+
+
+# Apply custom theme
+create_custom_theme()
 
 
 def get_ice_servers():
@@ -28,7 +57,7 @@ def get_ice_servers():
             return token.ice_servers
         else:
             st.warning(
-                "Twilio credentials not found. Using free STUN server only, which may not work reliably
+                "Twilio credentials not found. Using free STUN server only, which may not work reliably."  # Removed Streamlit Cloud mention for generality
             )
     except Exception as e:
         st.error(f"Error setting up Twilio TURN servers: {e}")
@@ -50,115 +79,448 @@ hide_st_style = """
 #MainMenu {visibility: hidden;}
 footer {visibility: hidden;}
 header {visibility: hidden;}
+/* Body background */
+.stApp {
+    background-color: #FFFFFF; /* Set solid white background */
+}
+/* Tab styling */
+.stTabs [data-baseweb="tab-list"] {
+    gap: 8px; /* Slightly reduced gap */
+    border-bottom: 1px solid #CCCCCC; /* Add a subtle border */
+}
+.stTabs [data-baseweb="tab"] {
+    background-color: transparent; /* Make tabs transparent */
+    border-radius: 0; /* Remove border radius */
+    padding: 10px 15px;
+    color: #555555; /* Grey text */
+    border-bottom: 2px solid transparent; /* Prepare for selected indicator */
+    transition: all 0.3s ease;
+}
+.stTabs [data-baseweb="tab"]:hover {
+    background-color: #F0F2F6; /* Light grey hover */
+    color: #000000; /* Black text on hover */
+}
+.stTabs [aria-selected="true"] {
+    background-color: transparent !important;
+    color: #000000 !important; /* Black text for selected */
+    border-bottom: 2px solid #444444 !important; /* Dark grey underline for selected */
+    font-weight: 600; /* Make selected tab bold */
+}
+/* Sidebar styling */
+.css-1d391kg { /* Target sidebar specifically */
+    background-color: #F0F2F6 !important; /* Light grey sidebar */
+}
+/* Ensure sidebar text is readable */
+.css-1d391kg .stMarkdown, .css-1d391kg .stCheckbox, .css-1d391kg .stExpander, .css-1d391kg .stText, .css-1d391kg .stButton > button {
+    color: #000000 !important;
+}
+/* Button styling */
+.stButton>button {
+    background-color: #FFFFFF !important;
+    color: #000000 !important;
+    border: 1px solid #CCCCCC !important;
+    transition: all 0.3s ease !important;
+    box-shadow: none !important; /* Remove default shadow */
+}
+.stButton>button:hover {
+    background-color: #F0F2F6 !important; /* Light grey on hover */
+    border-color: #AAAAAA !important;
+    transform: none !important; /* Remove hover transform */
+    box-shadow: none !important;
+}
+.stButton>button:active {
+    background-color: #E0E0E0 !important; /* Slightly darker grey on click */
+}
+/* Expander header styling */
+.stExpander > div:first-child {
+    background-color: #F0F2F6; /* Light grey background for expander header */
+    border-radius: 4px;
+}
+.stExpander header { /* Target expander header specifically */
+    color: #000000 !important; /* Black text for expander header */
+    font-weight: 600;
+}
+/* General adjustments for minimalist feel */
+h1, h2, h3, h4, h5, h6 {
+    color: #000000; /* Ensure headers are black */
+}
+.stMarkdown p {
+    color: #333333; /* Slightly lighter black for paragraph text */
+}
 </style>
 """
 st.markdown(hide_st_style, unsafe_allow_html=True)
 # ---------------------------
 
- … (107 removed lines, old 58-164, not legible in this view)
+# Top header with logo and title
+col1, col2 = st.columns([1, 5])
+with col1:
+    st.image(
+        "https://opencv.org/wp-content/uploads/2020/07/OpenCV_logo_black-2.png",
+        width=100,
+    )
+with col2:
+    st.title("OpenCV Explorer")
+    st.markdown(
+        """
+<p style='font-size: 18px; margin-top: -10px;'>
+Explore computer vision filters and transformations in real-time using your webcam.
+</p>
+""",
+        unsafe_allow_html=True,
+    )
+
+# Create main layout
+main_tabs = st.tabs(["📹 Camera Feed", "ℹ️ About", "📋 Documentation"])
+
+with main_tabs[0]:  # Camera Feed Tab
+    # Create columns for camera and controls
+    video_col, control_col = st.columns([3, 1])
+
+    with control_col:
+        st.markdown("## 🎛️ Controls")
+
+        # Organize filters into categories
+        filter_categories = {
+            "Basic Transformations": ["Resize", "Rotation", "Blur", "Sharpen"],
+            "Edge & Contour Detection": ["Canny", "Contour", "Hough Lines"],
+            "Color Operations": [
+                "Color Filter",
+                "Histogram Equalization",
+                "Color Quantization",
+            ],
+            "Artistic Effects": ["Pencil Sketch", "Morphology", "Adaptive Threshold"],
+            "Advanced Features": ["Optical Flow", "Hand Tracker", "Face Tracker"],
+        }
+
+        # Use a dictionary to store the expanded state of each category
+        if "expanded" not in st.session_state:
+            st.session_state.expanded = {cat: False for cat in filter_categories}
+            st.session_state.expanded["Basic Transformations"] = (
+                True  # Expand the first one by default
+            )
+
+        # Create filter selection UI with categories
+        selected_functions = []
+        for category, filters in filter_categories.items():
+            with st.expander(
+                f"**{category}**", expanded=st.session_state.expanded[category]
+            ):
+                # Show checkboxes for each filter in this category
+                selected_in_category = []
+                for filter_name in filters:
+                    if st.checkbox(filter_name, key=f"check_{filter_name}"):
+                        selected_in_category.append(filter_name)
+
+                # If any filters selected in this category, add a reorder section
+                if selected_in_category:
+                    st.markdown("**Order within category:**")
+                    for i, filter_name in enumerate(selected_in_category):
+                        col1, col2 = st.columns([4, 1])
+                        with col1:
+                            st.text(f"{i+1}. {filter_name}")
+                        with col2:
+                            if i > 0 and st.button("↑", key=f"up_{filter_name}"):
+                                # Move filter up in the list
+                                selected_in_category[i], selected_in_category[i - 1] = (
+                                    selected_in_category[i - 1],
+                                    selected_in_category[i],
+                                )
+                                st.rerun()
+
+                # Add selected filters to the main list
+                selected_functions.extend(selected_in_category)
+
+        # Show the currently applied filters
+        if selected_functions:
+            st.markdown("### 📌 Applied Filters")
+            for i, fn in enumerate(selected_functions):
+                st.markdown(f"**{i+1}.** {fn}")
+        else:
+            st.info("Select filters to apply to the camera feed")
+
+        # Filter parameters - using expanders for cleaner UI
+        if any(f in selected_functions for f in ["Resize"]):
+            with st.expander("📐 Resize Parameters", expanded=True):
+                w = st.slider("Width", 320, 1280, 640)
+                h = st.slider("Height", 240, 720, 480)
+        else:
+            # Default values if not displayed
+            w, h = 640, 480
+
+        if "Rotation" in selected_functions:
+            with st.expander("🔄 Rotation Parameters", expanded=True):
+                ang = st.slider("Angle", 0, 360, 0)
+        else:
+            ang = 0
+
+        if "Blur" in selected_functions:
+            with st.expander("🌫️ Blur Parameters", expanded=True):
+                bk = st.slider("Kernel Size (odd)", 1, 15, 5, step=2)
+        else:
+            bk = 5
+
+        if "Color Filter" in selected_functions:
+            with st.expander("🎨 Color Filter Parameters", expanded=True):
+                col1, col2 = st.columns(2)
+                with col1:
+                    st.markdown("**Lower Bounds**")
+                    lh = st.slider("Hue (L)", 0, 180, 0)
+                    ls = st.slider("Sat (L)", 0, 255, 0)
+                    lv = st.slider("Val (L)", 0, 255, 0)
+                with col2:
+                    st.markdown("**Upper Bounds**")
+                    uh = st.slider("Hue (U)", 0, 180, 180)
+                    us = st.slider("Sat (U)", 0, 255, 255)
+                    uv = st.slider("Val (U)", 0, 255, 255)
+
+                # Color preview - Make it dynamic again
+                # Use the lower bound HSV values to generate an HSL color for CSS
+                preview_color_hsl = f"hsl({lh * 2}, {ls / 2.55}%, {lv / 2.55}%)"
+                st.markdown(
+                    f"""
+<div style="background-color: {preview_color_hsl}; width: 100%; height: 30px;
+border: 1px solid #CCCCCC; border-radius: 5px; margin-top: 10px;">
+<p style='text-align: center; color: #333; line-height: 30px; font-size: 12px; font-weight: bold;'>
+Preview (Lower Bound)
+</p>
+</div>
+""",
+                    unsafe_allow_html=True,
+                )
+        else:
+            lh, ls, lv, uh, us, uv = 0, 0, 0, 180, 255, 255
+
+        if "Canny" in selected_functions:
+            with st.expander("📊 Canny Edge Parameters", expanded=True):
+                lc = st.slider("Lower Threshold", 0, 255, 100)
+                uc = st.slider("Upper Threshold", 0, 255, 200)
+        else:
+            lc, uc = 100, 200
+
+        if "Morphology" in selected_functions:
+            with st.expander("🧩 Morphology Parameters", expanded=True):
+                morph_op = st.selectbox(
+                    "Operation", ["erode", "dilate", "open", "close"]
+                )
+                morph_ks = st.slider("Kernel Size", 1, 31, 5, step=2)
+        else:
+            morph_op, morph_ks = "erode", 5
+
+    with video_col:
+        st.markdown("## 📹 Live Camera Feed")
+
+        # WebRTC settings for real-time video
+        prev_gray = None
+
+        def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
+            global prev_gray
+            img = frame.to_ndarray(format="bgr24")
+            curr_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+            for fn in selected_functions:
+                if fn == "Color Filter":
+                    img = app.apply_color_filter(img, (lh, ls, lv), (uh, us, uv))
+                elif fn == "Canny":
+                    img = app.apply_edge_detection(img, lc, uc)
+                elif fn == "Blur":
+                    img = app.blur_image(img, bk)
+                elif fn == "Rotation":
+                    img = app.rotate_image(img, ang)
+                elif fn == "Resize":
+                    img = app.resize_image(img, w, h)
+                elif fn == "Contour":
+                    img = app.apply_contour_detection(img)
+                elif fn == "Histogram Equalization":
+                    img = app.equalize_histogram(img)
+                elif fn == "Adaptive Threshold":
+                    img = app.adaptive_threshold(img)
+                elif fn == "Morphology":
+                    img = app.morphology(img, morph_op, morph_ks)
+                elif fn == "Sharpen":
+                    img = app.sharpen(img)
+                elif fn == "Hough Lines":
+                    img = app.hough_lines(img)
+                elif fn == "Optical Flow" and prev_gray is not None:
+                    img = app.optical_flow(prev_gray, curr_gray, img)
+                elif fn == "Pencil Sketch":
+                    img = app.pencil_sketch(img)
+                elif fn == "Color Quantization":
+                    img = app.color_quantization(img)
+                elif fn == "Hand Tracker":
+                    img = app.detect_hands(img)
+                elif fn == "Face Tracker":
+                    img = app.detect_faces(img)
+
+            prev_gray = curr_gray
+            return av.VideoFrame.from_ndarray(img, format="bgr24")
+
+        webrtc_streamer(
+            key="opencv-explorer",
+            mode=WebRtcMode.SENDRECV,
+            rtc_configuration={"iceServers": get_ice_servers()},
+            video_frame_callback=video_frame_callback,
+            media_stream_constraints={"video": True, "audio": False},
+            async_processing=True,
+            video_html_attrs=VideoHTMLAttributes(
+                autoPlay=True,
+                controls=False,
+                style={
+                    "width": f"{w}px",
+                    "height": f"{h}px",
+                    "border-radius": "8px",
+                    "margin": "0 auto",
+                    "display": "block",
+                    "border": "2px solid #444444",  # Changed border to dark grey
+                },
+            ),
+        )
+
+        # Performance metrics
+        with st.expander("📊 Performance Metrics", expanded=False):
+            col1, col2, col3 = st.columns(3)
+            col1.metric("Resolution", f"{w}x{h} px")
+            col2.metric("Filters Applied", len(selected_functions))
+            col3.metric("Frame Processing", f"{time.time():.2f} ms", delta=None)
+
+with main_tabs[1]:  # About Tab
+    st.markdown(
+        """
+## About OpenCV Explorer
+
+OpenCV Explorer is an interactive web application that allows you to experiment with various computer vision techniques in real-time using your webcam. This application is built with:
+
+- **OpenCV**: Open Source Computer Vision Library
+- **Streamlit**: An open-source app framework for Machine Learning and Data Science
+- **WebRTC**: Web Real-Time Communication for live video streaming
+
+### Features
+
+- Apply multiple filters and transformations to your webcam feed
+- Adjust parameters in real-time
+- Experiment with advanced computer vision techniques
+- Learn about image processing concepts
+
+### How to Use
+
+1. Select one or more filters from the categories in the control panel
+2. Adjust the parameters for each selected filter
+3. See the results in real-time through your webcam
+4. Reorder filters to create different effects
+
+### Privacy Note
+
+All processing is done in your browser. No video data is sent to any server except for the WebRTC connection.
+"""
+    )
+
+with main_tabs[2]:  # Documentation Tab
+    st.markdown(
+        """
+## Documentation
+
+### Available Filters
+"""
+    )
+
+    # Create documentation for each filter category
+    for category, filters in filter_categories.items():
+        with st.expander(f"**{category}**", expanded=False):
+            for filter_name in filters:
+                st.markdown(f"#### {filter_name}")
+
+                # Add description for each filter
+                if filter_name == "Color Filter":
+                    st.markdown(
+                        """
+Isolates specific colors in the HSV (Hue, Saturation, Value) color space.
+
+**Parameters:**
+- **Hue**: Color type (0-180)
+- **Saturation**: Color intensity (0-255)
+- **Value**: Brightness (0-255)
+
+**Usage**: Object detection based on color, creative effects, background removal.
+"""
+                    )
+                elif filter_name == "Canny":
+                    st.markdown(
+                        """
+Detects edges in the image using the Canny edge detection algorithm.
+
+**Parameters:**
+- **Lower Threshold**: Minimum gradient value to consider as an edge
+- **Upper Threshold**: Maximum gradient value to consider as an edge
+
+**Usage**: Edge detection, feature extraction, line detection.
+"""
+                    )
+                elif filter_name == "Blur":
+                    st.markdown(
+                        """
+Applies Gaussian blur to smooth the image.
+
+**Parameters:**
+- **Kernel Size**: Size of the blurring matrix (higher values create more blur)
+
+**Usage**: Noise reduction, detail smoothing, pre-processing for other algorithms.
+"""
+                    )
+                elif filter_name == "Rotation":
+                    st.markdown(
+                        """
+Rotates the image by a specified angle.
+
+**Parameters:**
+- **Angle**: Rotation angle in degrees (0-360)
+
+**Usage**: Image orientation correction, creative effects.
+"""
+                    )
+                elif filter_name == "Resize":
+                    st.markdown(
+                        """
+Changes the dimensions of the image.
+
+**Parameters:**
+- **Width**: Output width in pixels
+- **Height**: Output height in pixels
+
+**Usage**: Scaling for performance, UI fitting, preprocessing.
+"""
+                    )
+                elif filter_name == "Hand Tracker":
+                    st.markdown(
+                        """
+Detects and tracks hand positions and landmarks using MediaPipe.
+
+**Parameters:** None (uses pre-trained models)
+
+**Usage**: Gesture recognition, hand pose estimation, interactive applications.
+"""
+                    )
+                elif filter_name == "Face Tracker":
+                    st.markdown(
+                        """
+Detects and tracks facial landmarks using MediaPipe.
+
+**Parameters:** None (uses pre-trained models)
+
+**Usage**: Face detection, facial expression analysis, AR effects.
+"""
+                    )
+                else:
+                    st.markdown(f"Documentation for {filter_name} filter.")
+
+    st.markdown(
+        """
+### Technical Details
+
+For more information about the algorithms and techniques used in this application, refer to:
+
+- [OpenCV Documentation](https://docs.opencv.org/4.x/d6/d00/tutorial_py_root.html)
+- [MediaPipe Documentation](https://ai.google.dev/edge/mediapipe/solutions/guide?hl=pt-br)
+- [Streamlit Documentation](https://docs.streamlit.io/)
+"""
+    )
+
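
The second hunk shows only the tail of `get_ice_servers()`. For context, below is a sketch of the usual pattern such a function follows with streamlit-webrtc, consistent with the fragments visible in the diff: request short-lived TURN credentials from Twilio's Network Traversal Service when credentials are configured, and fall back to a public STUN server otherwise. The environment-variable names and the fallback STUN URL are conventional assumptions, not something this diff shows.

```python
import os

import streamlit as st
from twilio.rest import Client


def get_ice_servers():
    """Return ICE servers for WebRTC: Twilio TURN if configured, else public STUN."""
    # Assumed variable names; the diff does not show how credentials are read.
    account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
    auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
    try:
        if account_sid and auth_token:
            client = Client(account_sid, auth_token)
            token = client.tokens.create()  # Twilio Network Traversal Service token
            return token.ice_servers
        else:
            st.warning(
                "Twilio credentials not found. Using free STUN server only, "
                "which may not work reliably."
            )
    except Exception as e:
        st.error(f"Error setting up Twilio TURN servers: {e}")
    # Conventional public STUN fallback (assumption, not shown in the diff).
    return [{"urls": ["stun:stun.l.google.com:19302"]}]
```

The returned list plugs directly into `rtc_configuration={"iceServers": get_ice_servers()}`, as in the `webrtc_streamer(...)` call in the hunk above.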