princeta3011 committed · Commit a69193b · verified · 1 Parent(s): 0b5230d

Create main.py

Files changed (1): main.py (+609, -0)
main.py ADDED
import streamlit as st
import cv2
import numpy as np
import os
import tempfile
import time
from PIL import Image
import matplotlib.pyplot as plt
import io
import base64

# Set page configuration
st.set_page_config(
    page_title="Motion Detection and Object Tracking",
    page_icon="🎥",
    layout="wide"
)

# Custom CSS for styling
st.markdown("""
<style>
.main-header {
    font-size: 2.5rem;
    font-weight: bold;
    margin-bottom: 1rem;
    color: #2c3e50;
}
.sub-header {
    font-size: 1.5rem;
    margin-bottom: 1rem;
    color: #34495e;
}
.stAlert {
    background-color: #f8f9fa;
    border: 1px solid #e9ecef;
    border-radius: 0.25rem;
}
.thumbnail-container {
    display: flex;
    overflow-x: auto;
    padding: 10px 0;
}
.thumbnail {
    width: 120px;
    height: 80px;
    margin-right: 10px;
    border: 2px solid transparent;
}
</style>
""", unsafe_allow_html=True)

# Utility functions from backend
def f_keepLargeComponents(I, th):
    """Keep only connected components larger than th pixels in a label image."""
    R = np.zeros(I.shape, dtype=bool)
    unique_labels = np.unique(I.flatten())
    for label in unique_labels:
        if label == 0:  # 0 is the background label
            continue
        I2 = I == label
        if np.sum(I2) > th:
            R = R | I2
    return np.float32(255 * R)

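# Illustrative sanity check for f_keepLargeComponents (comment only, not part
# of the app; the toy label image below is an assumption for illustration):
#   labels = np.array([[1, 1, 0],
#                      [1, 1, 0],
#                      [0, 0, 2]])
#   f_keepLargeComponents(labels, th=3)
#   -> component 1 (4 px > 3) is kept, component 2 (1 px) is dropped; the
#      result is a float32 mask with 255 on kept components, 0 elsewhere.
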
def convert_to_image(cv2_img):
    """Convert an OpenCV BGR image to a PIL Image for Streamlit display."""
    img_rgb = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
    return Image.fromarray(img_rgb)

def get_image_base64(img):
    """Convert a PIL image to a base64 string for HTML display."""
    buffered = io.BytesIO()
    img.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode()
    return img_str

def process_frame(frame, bg_subtractor, min_area):
    """Process a frame with background subtraction and component filtering"""
    # Resize for consistent processing
    frame = cv2.resize(frame, dsize=(600, 400))

    # Apply background subtraction
    fgmask = bg_subtractor.apply(frame)

    # Apply morphological operations
    K_r = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    fgmask = cv2.morphologyEx(np.float32(fgmask), cv2.MORPH_OPEN, K_r)

    # Connected components analysis
    num_labels, labels_im = cv2.connectedComponents(np.array(fgmask > 0, np.uint8))

    # Keep only large components
    fgmask = f_keepLargeComponents(labels_im, min_area)

    # Create visualization mask
    F = np.zeros(frame.shape, np.uint8)
    F[:, :, 0], F[:, :, 1], F[:, :, 2] = fgmask, fgmask, fgmask

    # Combine original and mask
    combined = np.hstack((frame, F))

    has_motion = np.sum(fgmask) > 0

    return frame, fgmask, combined, has_motion

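# Note on shadows: with detectShadows=True, MOG2 marks shadow pixels as 127 in
# the raw mask (foreground is 255), so the `fgmask > 0` test above treats
# shadows as foreground until the component-size filter discards small ones.
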
# App title and description
st.markdown("<div class='main-header'>Motion Detection and Object Tracking</div>", unsafe_allow_html=True)
st.markdown("Track objects and detect motion in videos or image sequences. Upload your own video, an image sequence, or try the sample video.")

# Sidebar for settings
st.sidebar.markdown("<div class='sub-header'>Settings</div>", unsafe_allow_html=True)

# Input source selection
input_source = st.sidebar.radio("Select Input Source", ["Upload Video", "Upload Image Sequence", "Sample Video"])

# Motion detection parameters
min_area = st.sidebar.slider("Minimum Component Area", 100, 5000, 1000, 100)
history = st.sidebar.slider("Background History", 100, 1000, 500, 50)
var_threshold = st.sidebar.slider("Variance Threshold", 5, 100, 16, 1)
detect_shadows = st.sidebar.checkbox("Detect Shadows", value=True)

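# The positional slider arguments above follow st.slider's signature
# (label, min_value, max_value, value, step) — e.g. "Minimum Component Area"
# ranges 100–5000 with a default of 1000 and a step of 100.
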
# Output settings
save_output = st.sidebar.checkbox("Save Processed Frames", value=False)
min_sequence_frames = st.sidebar.number_input("Minimum Frames in Sequence", 1, 20, 5)

# Object detection option
use_object_detection = st.sidebar.checkbox("Enable Object Detection", value=False)
if use_object_detection:
    st.sidebar.warning("Note: Object detection requires the cvlib package, which must be installed on your Streamlit Cloud deployment.")

# Create tabs for different functionalities
tab1, tab2, tab3 = st.tabs(["Process Video", "Results", "About"])

with tab1:
    # Process input based on selection
    if input_source == "Upload Video":
        uploaded_file = st.file_uploader("Upload a video file", type=["mp4", "avi", "mov", "mkv"])

        if uploaded_file is not None:
            # Save uploaded file to a temporary file
            temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
            temp_file.write(uploaded_file.read())
            temp_file.close()  # close so the file can be reopened by name (required on Windows)

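            # NamedTemporaryFile(delete=False) keeps the file on disk after the
            # handle is closed; it is removed explicitly with os.unlink() once
            # processing finishes below.
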
            # Create background subtractor
            fgModel = cv2.createBackgroundSubtractorMOG2(history=history, varThreshold=var_threshold, detectShadows=detect_shadows)

            # Process video
            if st.button("Process Video"):
                # Create output directory if saving results
                if save_output:
                    output_dir = tempfile.mkdtemp()
                    st.session_state['output_dir'] = output_dir
                    st.session_state['saved_frames'] = []

                # Open video capture
                cap = cv2.VideoCapture(temp_file.name)

                # Process frames
                frame_idx = 0
                motion_sequence = []
                sequence_counter = 0

                progress_bar = st.progress(0)
                frame_display = st.empty()
                status_text = st.empty()

                # Get total frames for progress calculation
                total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

                while cap.isOpened():
                    ret, frame = cap.read()
                    if not ret:
                        break

                    frame_idx += 1

                    # Process frame
                    original, mask, combined, has_motion = process_frame(frame, fgModel, min_area)

                    # Update motion sequence
                    if has_motion:
                        motion_sequence.append(original)
                    elif len(motion_sequence) > 0:
                        # Save sequence if it meets minimum length
                        if len(motion_sequence) >= min_sequence_frames and save_output:
                            sequence_counter += 1
                            for i, seq_frame in enumerate(motion_sequence):
                                frame_name = f"{sequence_counter}_{i+1}.jpg"
                                frame_path = os.path.join(output_dir, frame_name)

                                # Apply object detection if enabled
                                if use_object_detection:
                                    try:
                                        import cvlib as cv
                                        from cvlib.object_detection import draw_bbox
                                        bbox, labels, conf = cv.detect_common_objects(seq_frame)
                                        seq_frame = draw_bbox(seq_frame, bbox, labels, conf)
                                    except ImportError:
                                        st.warning("cvlib not available. Skipping object detection.")

                                cv2.imwrite(frame_path, seq_frame)
                                st.session_state['saved_frames'].append(frame_path)

                        # Reset sequence
                        motion_sequence = []

                    # Display current frame
                    frame_display.image(convert_to_image(combined), caption="Processing: Original | Foreground Mask", use_column_width=True)

                    # Update progress
                    progress = min(frame_idx / total_frames, 1.0)
                    progress_bar.progress(progress)

                    # Display status
                    status_text.text(f"Processing frame {frame_idx}/{total_frames} | Motion sequences: {sequence_counter}")

                    # Slow down processing slightly for display
                    time.sleep(0.01)

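                # Note: CAP_PROP_FRAME_COUNT is only an estimate for some codecs
                # and containers, which is why the progress value above is
                # clamped to 1.0 before being passed to st.progress().
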
                # Save final sequence if any
                if len(motion_sequence) >= min_sequence_frames and save_output:
                    sequence_counter += 1
                    for i, seq_frame in enumerate(motion_sequence):
                        frame_name = f"{sequence_counter}_{i+1}.jpg"
                        frame_path = os.path.join(output_dir, frame_name)

                        # Apply object detection if enabled
                        if use_object_detection:
                            try:
                                import cvlib as cv
                                from cvlib.object_detection import draw_bbox
                                bbox, labels, conf = cv.detect_common_objects(seq_frame)
                                seq_frame = draw_bbox(seq_frame, bbox, labels, conf)
                            except ImportError:
                                pass

                        cv2.imwrite(frame_path, seq_frame)
                        st.session_state['saved_frames'].append(frame_path)

                cap.release()

                # Complete
                progress_bar.progress(1.0)
                status_text.success(f"Processing complete! {sequence_counter} motion sequences detected.")

                # Clean up
                os.unlink(temp_file.name)

                # Set results flag
                if save_output and sequence_counter > 0:
                    st.session_state['has_results'] = True
                    st.info("Results are available in the Results tab.")

    elif input_source == "Upload Image Sequence":
        uploaded_files = st.file_uploader("Upload image sequence", type=["jpg", "jpeg", "png"], accept_multiple_files=True)

        if uploaded_files:
            # Create background subtractor
            fgModel = cv2.createBackgroundSubtractorMOG2(history=history, varThreshold=var_threshold, detectShadows=detect_shadows)

            # Process images
            if st.button("Process Images"):
                # Create output directory if saving results
                if save_output:
                    output_dir = tempfile.mkdtemp()
                    st.session_state['output_dir'] = output_dir
                    st.session_state['saved_frames'] = []

                # Process frames
                frame_idx = 0
                motion_sequence = []
                sequence_counter = 0

                progress_bar = st.progress(0)
                frame_display = st.empty()
                status_text = st.empty()

                total_frames = len(uploaded_files)

                for file in uploaded_files:
                    frame_idx += 1

                    # Read image
                    file_bytes = np.asarray(bytearray(file.read()), dtype=np.uint8)
                    frame = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)

                    # Reset file pointer
                    file.seek(0)

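                    # cv2.imdecode returns None if the bytes are not a decodable
                    # image; uploads matching the allowed extensions are assumed
                    # to be valid here.
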
                    # Process frame
                    original, mask, combined, has_motion = process_frame(frame, fgModel, min_area)

                    # Update motion sequence
                    if has_motion:
                        motion_sequence.append(original)
                    elif len(motion_sequence) > 0:
                        # Save sequence if it meets minimum length
                        if len(motion_sequence) >= min_sequence_frames and save_output:
                            sequence_counter += 1
                            for i, seq_frame in enumerate(motion_sequence):
                                frame_name = f"{sequence_counter}_{i+1}.jpg"
                                frame_path = os.path.join(output_dir, frame_name)

                                # Apply object detection if enabled
                                if use_object_detection:
                                    try:
                                        import cvlib as cv
                                        from cvlib.object_detection import draw_bbox
                                        bbox, labels, conf = cv.detect_common_objects(seq_frame)
                                        seq_frame = draw_bbox(seq_frame, bbox, labels, conf)
                                    except ImportError:
                                        st.warning("cvlib not available. Skipping object detection.")

                                cv2.imwrite(frame_path, seq_frame)
                                st.session_state['saved_frames'].append(frame_path)

                        # Reset sequence
                        motion_sequence = []

                    # Display current frame
                    frame_display.image(convert_to_image(combined), caption="Processing: Original | Foreground Mask", use_column_width=True)

                    # Update progress
                    progress = min(frame_idx / total_frames, 1.0)
                    progress_bar.progress(progress)

                    # Display status
                    status_text.text(f"Processing frame {frame_idx}/{total_frames} | Motion sequences: {sequence_counter}")

                    # Slow down processing slightly for display
                    time.sleep(0.01)

                # Save final sequence if any
                if len(motion_sequence) >= min_sequence_frames and save_output:
                    sequence_counter += 1
                    for i, seq_frame in enumerate(motion_sequence):
                        frame_name = f"{sequence_counter}_{i+1}.jpg"
                        frame_path = os.path.join(output_dir, frame_name)

                        # Apply object detection if enabled
                        if use_object_detection:
                            try:
                                import cvlib as cv
                                from cvlib.object_detection import draw_bbox
                                bbox, labels, conf = cv.detect_common_objects(seq_frame)
                                seq_frame = draw_bbox(seq_frame, bbox, labels, conf)
                            except ImportError:
                                pass

                        cv2.imwrite(frame_path, seq_frame)
                        st.session_state['saved_frames'].append(frame_path)

                # Complete
                progress_bar.progress(1.0)
                status_text.success(f"Processing complete! {sequence_counter} motion sequences detected.")

                # Set results flag
                if save_output and sequence_counter > 0:
                    st.session_state['has_results'] = True
                    st.info("Results are available in the Results tab.")

    else:  # Sample video
        st.info("Using a sample video for demonstration")

        # Create sample video data - in a real app, you'd use a sample video file
        sample_video = st.selectbox("Select sample video", ["Campus", "Shopping Mall", "Office Room"])

        # Process sample video
        if st.button("Process Sample Video"):
            # Create background subtractor
            fgModel = cv2.createBackgroundSubtractorMOG2(history=history, varThreshold=var_threshold, detectShadows=detect_shadows)

            # Create output directory if saving results
            if save_output:
                output_dir = tempfile.mkdtemp()
                st.session_state['output_dir'] = output_dir
                st.session_state['saved_frames'] = []

            # Generate some dummy frames for demonstration
            total_frames = 100
            progress_bar = st.progress(0)
            frame_display = st.empty()
            status_text = st.empty()

            sequence_counter = 0
            motion_sequence = []

            # Simulate processing frames
            for i in range(total_frames):
                # Create a dummy frame with some motion
                frame = np.zeros((400, 600, 3), dtype=np.uint8)

                # Add some moving objects
                if i % 10 < 5:  # Motion in the first 5 frames of every 10
                    cv2.circle(frame, (300 + i % 100, 200), 50, (0, 0, 255), -1)
                    has_motion = True
                else:
                    has_motion = False

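                # The circle's x-position, 300 + i % 100, drifts one pixel per
                # frame and wraps every 100 frames, so MOG2 sees it as a moving
                # foreground object against the static black background.
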
                # Apply background subtraction (simplified for demo)
                if i == 0:
                    fgmask = np.zeros((400, 600), dtype=np.float32)
                else:
                    # Process frame - simplified for demo
                    fgmask = fgModel.apply(frame)
                    fgmask = np.float32(fgmask)

                # Create visualization mask
                F = np.zeros(frame.shape, np.uint8)
                F[:, :, 0], F[:, :, 1], F[:, :, 2] = fgmask, fgmask, fgmask

                # Combine original and mask
                combined = np.hstack((frame, F))

                # Update motion sequence
                if has_motion:
                    motion_sequence.append(frame)
                elif len(motion_sequence) > 0:
                    # Save sequence if it meets minimum length
                    if len(motion_sequence) >= min_sequence_frames and save_output:
                        sequence_counter += 1
                        for j, seq_frame in enumerate(motion_sequence):
                            frame_name = f"{sequence_counter}_{j+1}.jpg"
                            frame_path = os.path.join(output_dir, frame_name)
                            cv2.imwrite(frame_path, seq_frame)
                            st.session_state['saved_frames'].append(frame_path)

                    # Reset sequence
                    motion_sequence = []

                # Display current frame
                frame_display.image(convert_to_image(combined), caption="Processing: Original | Foreground Mask", use_column_width=True)

                # Update progress
                progress = (i + 1) / total_frames
                progress_bar.progress(progress)

                # Display status
                status_text.text(f"Processing frame {i+1}/{total_frames} | Motion sequences: {sequence_counter}")

                # Slow down processing slightly for display
                time.sleep(0.05)

            # Save final sequence if any
            if len(motion_sequence) >= min_sequence_frames and save_output:
                sequence_counter += 1
                for j, seq_frame in enumerate(motion_sequence):
                    frame_name = f"{sequence_counter}_{j+1}.jpg"
                    frame_path = os.path.join(output_dir, frame_name)
                    cv2.imwrite(frame_path, seq_frame)
                    st.session_state['saved_frames'].append(frame_path)

            # Complete
            progress_bar.progress(1.0)
            status_text.success(f"Processing complete! {sequence_counter} motion sequences detected.")

            # Set results flag
            if save_output and sequence_counter > 0:
                st.session_state['has_results'] = True
                st.info("Results are available in the Results tab.")

with tab2:
    st.markdown("<div class='sub-header'>Detected Motion Sequences</div>", unsafe_allow_html=True)

    if 'has_results' in st.session_state and st.session_state['has_results']:
        if 'saved_frames' in st.session_state and st.session_state['saved_frames']:
            # Group frames by sequence
            sequences = {}
            for frame_path in st.session_state['saved_frames']:
                frame_name = os.path.basename(frame_path)
                seq_id = frame_name.split('_')[0]

                if seq_id not in sequences:
                    sequences[seq_id] = []

                sequences[seq_id].append(frame_path)

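            # Saved frames are named "{sequence}_{frame}.jpg", so the prefix
            # before the first underscore identifies the sequence each frame
            # belongs to.
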
            # Display sequences
            selected_sequence = st.selectbox("Select motion sequence", list(sequences.keys()))

            if selected_sequence:
                st.write(f"Sequence {selected_sequence} - {len(sequences[selected_sequence])} frames")

                # Display thumbnails
                cols = st.columns(min(5, len(sequences[selected_sequence])))
                for i, (col, frame_path) in enumerate(zip(cols, sequences[selected_sequence])):
                    img = Image.open(frame_path)
                    col.image(img, caption=f"Frame {i+1}", use_column_width=True)

                # Display full sequence
                st.write("Full sequence:")
                sequence_frames = []
                for frame_path in sequences[selected_sequence]:
                    img = Image.open(frame_path)
                    sequence_frames.append(np.array(img))

                # Create animated GIF option
                if st.button("Create GIF from Sequence"):
                    with st.spinner("Creating GIF..."):
                        # Create a temporary file for the GIF
                        with tempfile.NamedTemporaryFile(suffix='.gif', delete=False) as temp_gif:
                            gif_path = temp_gif.name

                        # Convert frames to GIF
                        images = [Image.fromarray(frame) for frame in sequence_frames]
                        images[0].save(
                            gif_path,
                            save_all=True,
                            append_images=images[1:],
                            duration=200,
                            loop=0
                        )

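                        # duration=200 displays each frame for 200 ms (about
                        # 5 fps); loop=0 makes the GIF repeat indefinitely.
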
                        # Display the GIF
                        with open(gif_path, 'rb') as gif_file:
                            gif_data = gif_file.read()

                        st.image(gif_data, caption="Motion Sequence GIF")

                        # Provide download link
                        st.download_button(
                            label="Download GIF",
                            data=gif_data,
                            file_name=f"sequence_{selected_sequence}.gif",
                            mime="image/gif"
                        )

                        # Clean up
                        os.unlink(gif_path)

                # Display stacked view
                st.write("All frames in sequence:")
                for i, frame in enumerate(sequence_frames):
                    st.image(frame, caption=f"Frame {i+1}", use_column_width=True)
    else:
        st.info("No results available yet. Process a video or image sequence with 'Save Processed Frames' enabled.")

with tab3:
    st.markdown("<div class='sub-header'>About this application</div>", unsafe_allow_html=True)

    st.markdown("""
This application implements motion detection and object tracking using background subtraction and connected component analysis.

### Features:
- Process videos or image sequences to detect motion
- Background subtraction using the MOG2 algorithm
- Connected component filtering to remove noise
- Optional object detection (requires cvlib)
- Save and display motion sequences
- Export sequences as GIFs

### How to use:
1. Select an input source (upload a video, upload an image sequence, or use the sample)
2. Adjust parameters in the sidebar
3. Process the input
4. View results in the Results tab

### Requirements for deployment:
```
streamlit
opencv-python-headless
numpy
pillow
matplotlib
```

For object detection functionality, also install:
```
cvlib
tensorflow
```

### How it works:
The application uses background subtraction to identify moving objects in a video sequence. It then applies connected component analysis to filter out small noise components and track the larger moving objects. Optionally, it can run object detection to identify the types of objects found.
    """)

    # Show parameters explanation
    st.markdown("### Parameter Explanation:")

    st.markdown("""
- **Minimum Component Area**: The minimum size (in pixels) of connected components to keep. Smaller components are filtered out as noise.
- **Background History**: Number of frames used to build the background model in MOG2.
- **Variance Threshold**: Threshold on the squared Mahalanobis distance used to decide whether a pixel is foreground or background.
- **Detect Shadows**: Enable shadow detection in the MOG2 algorithm.
- **Minimum Frames in Sequence**: The minimum number of consecutive frames with motion required to save a sequence.
    """)

    # Add deployment instructions
    st.markdown("### Deployment to Streamlit Cloud:")

    st.markdown("""
To deploy this application to Streamlit Cloud:

1. Create a GitHub repository with this code
2. Include a `requirements.txt` file with the necessary dependencies
3. Connect your GitHub repository to Streamlit Cloud
4. Deploy the application

Example `requirements.txt`:
```
streamlit==1.24.0
opencv-python-headless==4.7.0.72
numpy==1.24.3
pillow==9.5.0
matplotlib==3.7.1
```
    """)