Spaces:
Sleeping
Sleeping
Upload 16 files
Browse files- .gitattributes +5 -0
- README.md +14 -0
- app.py +140 -0
- data/alert_response.mp4 +3 -0
- data/drone_day.mp4 +3 -0
- data/night_intrusion.mp4 +3 -0
- data/shadow_dust_issue.mp4 +3 -0
- data/thermal_hotspot.mp4 +3 -0
- gitattributes +45 -0
- requirements.txt +8 -0
- services/detection_service.py +37 -0
- services/metrics_service.py +6 -0
- services/overlay_service.py +7 -0
- services/salesforce_dispatcher.py +24 -0
- services/shadow_detection.py +5 -0
- services/thermal_service.py +23 -0
- services/video_service.py +33 -0
.gitattributes
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
data/alert_response.mp4 filter=lfs diff=lfs merge=lfs -text
|
2 |
+
data/drone_day.mp4 filter=lfs diff=lfs merge=lfs -text
|
3 |
+
data/night_intrusion.mp4 filter=lfs diff=lfs merge=lfs -text
|
4 |
+
data/shadow_dust_issue.mp4 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
data/thermal_hotspot.mp4 filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Solar Surveillance Poc
|
3 |
+
emoji: π
|
4 |
+
colorFrom: red
|
5 |
+
colorTo: yellow
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 5.26.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
license: apache-2.0
|
11 |
+
short_description: Solar Panels Surveillance using Tech & AI
|
12 |
+
---
|
13 |
+
|
14 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import cv2
|
3 |
+
import time
|
4 |
+
import os
|
5 |
+
import random
|
6 |
+
import matplotlib.pyplot as plt
|
7 |
+
import numpy as np
|
8 |
+
from datetime import datetime
|
9 |
+
from services.video_service import get_next_video_frame, reset_video_index
|
10 |
+
from services.thermal_service import detect_thermal_anomalies
|
11 |
+
from services.overlay_service import overlay_boxes
|
12 |
+
from services.metrics_service import update_metrics
|
13 |
+
|
14 |
+
# Globals
# Mutable module-level state shared between the Gradio callbacks and the
# streaming loop. Single-process demo app, so plain globals are used.
paused = False               # True while the user has pressed Pause
frame_rate = 1               # seconds to sleep between streamed frames
frame_count = 0              # total frames processed since startup
log_entries = []             # rolling textual log (capped at 100 entries)
anomaly_counts = []          # per-frame anomaly counts for the trend chart
last_frame = None            # most recently processed frame (OpenCV ndarray)
last_metrics = {}            # metrics dict from the most recent detection pass
last_timestamp = ""          # wall-clock time string of the most recent frame
last_detected_images = []    # paths of up to 5 most recent capture images

# Constants
TEMP_IMAGE_PATH = "temp.jpg"              # scratch JPEG rewritten every frame
CAPTURED_FRAMES_DIR = "captured_frames"   # archive dir for anomaly frames
os.makedirs(CAPTURED_FRAMES_DIR, exist_ok=True)
30 |
+
# Core monitor function
def monitor_feed():
    """Produce one dashboard update.

    Returns a 5-tuple consumed by the streaming loop:
    (RGB frame, metrics text, recent log text, chart image path, gallery paths).
    """
    global paused, frame_count, last_frame, last_metrics, last_timestamp

    if paused and last_frame is not None:
        # Re-serve the last processed frame while paused.
        frame = last_frame.copy()
        metrics = last_metrics.copy()
        # BUG FIX: detected_boxes was previously unbound on this branch, so the
        # capture step below raised NameError whenever the feed was paused.
        detected_boxes = []
    else:
        frame = get_next_video_frame()
        detected_boxes = detect_thermal_anomalies(frame)
        frame = overlay_boxes(frame, detected_boxes)
        cv2.imwrite(TEMP_IMAGE_PATH, frame, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
        metrics = update_metrics(detected_boxes)

    frame_count += 1
    last_timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Archive frames containing detections and keep only the 5 newest.
    if detected_boxes:
        captured_frame_path = os.path.join(CAPTURED_FRAMES_DIR, f"frame_{frame_count}.jpg")
        cv2.imwrite(captured_frame_path, frame)
        last_detected_images.append(captured_frame_path)
        if len(last_detected_images) > 5:
            last_detected_images.pop(0)

    last_frame = frame.copy()
    last_metrics = metrics.copy()

    # Annotate a display copy with the frame counter and timestamp.
    frame = cv2.resize(last_frame, (640, 480))
    cv2.putText(frame, f"Frame: {frame_count}", (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    cv2.putText(frame, f"{last_timestamp}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    anomaly_detected = len(last_metrics.get('anomalies', []))
    log_entries.append(f"{last_timestamp} - Frame {frame_count} - Anomalies: {anomaly_detected}")
    anomaly_counts.append(anomaly_detected)

    # Cap both histories; anomaly_counts previously grew without bound (the
    # trim was commented out) even though the chart only reads the last 50.
    if len(log_entries) > 100:
        log_entries.pop(0)
    if len(anomaly_counts) > 100:
        anomaly_counts.pop(0)

    metrics_str = "\n".join(f"{k}: {v}" for k, v in last_metrics.items())

    # frame[:, :, ::-1] converts OpenCV's BGR ordering to RGB for Gradio.
    return frame[:, :, ::-1], metrics_str, "\n".join(log_entries[-10:]), generate_chart(), last_detected_images
|
73 |
+
|
74 |
+
# Chart generator
def generate_chart():
    """Render the recent anomaly-count history to a PNG and return its path."""
    recent = anomaly_counts[-50:]
    figure, axis = plt.subplots(figsize=(4, 2))
    axis.plot(recent, marker='o')
    axis.set_title("Anomalies Over Time")
    axis.set_xlabel("Frame")
    axis.set_ylabel("Count")
    figure.tight_layout()
    out_path = "chart_temp.png"
    figure.savefig(out_path)
    plt.close(figure)
    return out_path
|
86 |
+
|
87 |
+
# Gradio UI
with gr.Blocks(theme=gr.themes.Soft()) as app:
    gr.Markdown("# π Thermal Anomaly Monitoring Dashboard", elem_id="main-title")

    status_text = gr.Markdown("**Status:** π’ Running", elem_id="status-banner")

    with gr.Row():
        with gr.Column(scale=3):
            video_output = gr.Image(label="Live Video Feed", elem_id="video-feed", width=640, height=480)
        with gr.Column(scale=1):
            metrics_output = gr.Textbox(label="Live Metrics", lines=5)

    with gr.Row():
        with gr.Column():
            logs_output = gr.Textbox(label="Live Logs", lines=10)
        with gr.Column():
            chart_output = gr.Image(label="Detection Trends")

    with gr.Row():
        captured_images = gr.Gallery(label="Last 5 Captured Events", columns=1, height="auto")

    with gr.Row():
        pause_btn = gr.Button("βΈοΈ Pause")
        resume_btn = gr.Button("βΆοΈ Resume")
        # FIX: the default (value=1, matching the frame_rate global) lay outside
        # the slider's previous (0.0005, 0.5) range; the maximum is raised to
        # 1.0 so the declared default is actually representable.
        frame_slider = gr.Slider(0.0005, 1.0, value=1, label="Frame Interval (seconds)")

    def toggle_pause():
        """Freeze the feed on the last processed frame."""
        global paused
        paused = True
        return "**Status:** βΈοΈ Paused"

    def toggle_resume():
        """Resume live processing."""
        global paused
        paused = False
        return "**Status:** π’ Running"

    def set_frame_rate(val):
        """Store the per-frame sleep interval chosen on the slider."""
        global frame_rate
        frame_rate = val

    pause_btn.click(toggle_pause, outputs=status_text)
    resume_btn.click(toggle_resume, outputs=status_text)
    frame_slider.change(set_frame_rate, inputs=[frame_slider])

    def streaming_loop():
        """Generator driving the dashboard: one monitor_feed() update per tick."""
        while True:
            frame, metrics, logs, chart, captured = monitor_feed()
            yield frame, metrics, logs, chart, captured
            time.sleep(frame_rate)

    app.load(streaming_loop, outputs=[video_output, metrics_output, logs_output, chart_output, captured_images])

if __name__ == "__main__":
    app.launch(share=True)
|
data/alert_response.mp4
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:90be2a22be37eec0ff56fc17b15c3472c91581b08276bd9c986143ce65199ecb
|
3 |
+
size 2874687
|
data/drone_day.mp4
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:02277520b32f06d332647c11687e8642efb95b7808a37c5e8d62fb289e128b0e
|
3 |
+
size 97974586
|
data/night_intrusion.mp4
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c6bc3952af22c9688512f650e668996e0e328a04c3f407b73860f1181da610f8
|
3 |
+
size 2974359
|
data/shadow_dust_issue.mp4
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:acd094f281fa7fb35ff8425afb7f820a30be44cc9450709d854a0271ea35038b
|
3 |
+
size 231805841
|
data/thermal_hotspot.mp4
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9e2e2fcc90f9a894ed226192936b92170f9f77b635c2cf257c61f16f51fe0b18
|
3 |
+
size 3349808
|
gitattributes
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
services/alert_response.mp4 filter=lfs diff=lfs merge=lfs -text
|
37 |
+
services/drone_day.mp4 filter=lfs diff=lfs merge=lfs -text
|
38 |
+
services/night_intrusion.mp4 filter=lfs diff=lfs merge=lfs -text
|
39 |
+
services/shadow_dust_issue.mp4 filter=lfs diff=lfs merge=lfs -text
|
40 |
+
services/thermal_hotspot.mp4 filter=lfs diff=lfs merge=lfs -text
|
41 |
+
data/alert_response.mp4 filter=lfs diff=lfs merge=lfs -text
|
42 |
+
data/drone_day.mp4 filter=lfs diff=lfs merge=lfs -text
|
43 |
+
data/night_intrusion.mp4 filter=lfs diff=lfs merge=lfs -text
|
44 |
+
data/shadow_dust_issue.mp4 filter=lfs diff=lfs merge=lfs -text
|
45 |
+
data/thermal_hotspot.mp4 filter=lfs diff=lfs merge=lfs -text
|
requirements.txt
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
gradio==5.26.0
transformers==4.39.3
torch>=2.1.0
opencv-python-headless
requests
ultralytics==8.0.176
timm>=0.9.2
matplotlib
numpy
|
8 |
+
|
services/detection_service.py
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Object detection built on the Hugging Face DETR pipeline.

An earlier implementation fed raw cv2 (BGR ndarray) images straight into the
pipeline; it was replaced by the PIL-based version below, which opens the
image file directly and converts it to RGB.
"""

# services/detection_service.py

from transformers import pipeline
from PIL import Image

# Load the Hugging Face DETR object-detection pipeline once at import time.
object_detector = pipeline("object-detection", model="facebook/detr-resnet-50")

def detect_objects(image_path):
    """
    Detect objects using Hugging Face DETR pipeline.

    - Accepts a file path to a local image.
    - Converts the image to RGB PIL format.
    - Feeds it into the Hugging Face detector.
    - Returns the list of detections with score > 0.7.
    """
    image = Image.open(image_path).convert("RGB")
    results = object_detector(image)
    return [r for r in results if r['score'] > 0.7]
|
services/metrics_service.py
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
def update_metrics(detected_boxes):
    """Summarise one detection pass as a metrics dict.

    A falsy input (None or an empty sequence) yields an empty anomaly list;
    returns {"anomalies": <boxes>, "total_detected": <count>}.
    """
    anomalies = detected_boxes or []
    return {"anomalies": anomalies, "total_detected": len(anomalies)}
|
services/overlay_service.py
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
|
3 |
+
def overlay_boxes(frame, boxes):
    """Draw each box as a 2-px red (BGR 0,0,255) rectangle on frame; return frame."""
    red = (0, 0, 255)
    for box in boxes:
        x0, y0, x1, y1 = (int(coord) for coord in box)
        cv2.rectangle(frame, (x0, y0), (x1, y1), red, 2)
    return frame
|
services/salesforce_dispatcher.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# services/salesforce_dispatcher.py

import requests
import json

# Placeholder Web-to-Case endpoint. It is never contacted in this demo build:
# the POST in send_to_salesforce below is commented out.
SALESFORCE_WEBHOOK_URL = "https://your-salesforce-instance/services/web-to-case"
|
7 |
+
|
8 |
+
def send_to_salesforce(payload):
    """Simulated Salesforce dispatch; the real webhook call is disabled.

    For production, point SALESFORCE_WEBHOOK_URL at a real endpoint and
    re-enable the POST kept below for reference.
    """
    # Log what would have been sent instead of performing the real request.
    print("π [Salesforce Dispatch Simulated] Would have sent:", payload)
    # Disabled production path (kept to document the intended payload shape):
    #
    # alert_type = "Intrusion" if any(d["label"] == "person" for d in payload["detections"]) else "Anomaly"
    # summary = {
    #     "Alert_Type__c": alert_type,
    #     "ThermalFlag__c": payload["thermal"],
    #     "ShadowFlag__c": payload["shadow_issue"],
    #     "Confidence_Score__c": max([d["score"] for d in payload["detections"]], default=0)
    # }
    # headers = {"Content-Type": "application/json"}
    # requests.post(SALESFORCE_WEBHOOK_URL, json=summary, headers=headers)
|
services/shadow_detection.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import random
|
2 |
+
|
3 |
+
def detect_shadow_coverage(image_path):
    """Mock shadow check: True when simulated coverage exceeds 30%.

    The image at `image_path` is not actually inspected; coverage is drawn
    uniformly from 25-40% as a stand-in for a real analysis.
    """
    return random.randint(25, 40) > 30
|
services/thermal_service.py
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import DetrImageProcessor, DetrForObjectDetection
import torch
from PIL import Image
import cv2

# Load model
# DETR (ResNet-50 backbone) is loaded once at import time. It is a generic
# COCO object detector, used here as a stand-in for true thermal-anomaly
# detection on the demo footage.
processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
9 |
+
|
10 |
+
def detect_thermal_anomalies(frame):
    """Run DETR on an OpenCV (BGR) frame and return high-confidence boxes.

    Parameters
    ----------
    frame : numpy.ndarray
        BGR image as produced by cv2.

    Returns
    -------
    list[list[float]]
        [x_min, y_min, x_max, y_max] boxes with score above 0.9.
    """
    image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    inputs = processor(images=image, return_tensors="pt")

    # Inference only: no_grad avoids building the autograd graph, cutting
    # memory use and latency for every streamed frame.
    with torch.no_grad():
        outputs = model(**inputs)

    # PIL's (width, height) reversed to (height, width) for the post-processor.
    target_sizes = torch.tensor([image.size[::-1]])
    results = processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]

    # The 0.9 threshold was already applied during post-processing, so every
    # remaining box is returned (the original re-checked the score redundantly).
    return [box.tolist() for box in results["boxes"]]
|
services/video_service.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
import os

# Global state
# Playlist of demo clips, cycled through by get_next_video_frame().
VIDEO_DIR = "data"
# Sorted so playback order is deterministic across runs.
video_files = [os.path.join(VIDEO_DIR, file) for file in sorted(os.listdir(VIDEO_DIR)) if file.endswith((".mp4", ".avi"))]
video_index = 0   # index into video_files of the clip currently playing
cap = None        # lazily-created cv2.VideoCapture for the current clip
|
9 |
+
|
10 |
+
def get_next_video_frame():
    """Return the next frame, advancing to the following clip at end-of-file.

    Raises RuntimeError when no clips exist or the next clip cannot be read.
    """
    global cap, video_index

    if not video_files:
        raise RuntimeError("No video files found in the 'data' directory.")

    # Lazily (re)open the current clip on first use or after a release.
    if cap is None or not cap.isOpened():
        cap = cv2.VideoCapture(video_files[video_index])

    ok, frame = cap.read()
    if ok:
        return frame

    # Current clip exhausted: wrap around to the next one in the playlist.
    cap.release()
    video_index = (video_index + 1) % len(video_files)
    cap = cv2.VideoCapture(video_files[video_index])
    ok, frame = cap.read()
    if not ok:
        raise RuntimeError(f"Cannot read video {video_files[video_index]}")
    return frame
|
30 |
+
|
31 |
+
def reset_video_index():
    """Rewind playback to the first clip in video_files (does not touch cap)."""
    global video_index
    video_index = 0
|