Update video_detection.py
video_detection.py  +47 -38
@@ -53,49 +53,58 @@ def video_detection(cap):
         frame = cv2.resize( frame, (0,0), fx=0.5, fy=0.5)
         #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         #if n_frames % SUBSAMPLE == 0:
-        batch.append(frame)
+        #batch.append(frame)
         #if len(batch) == 2 * desired_fps:
-        if len(batch) == 4:
-        [old lines 59-91 removed; their content is not rendered in the diff view]
+        #if len(batch) == 4:
+        #inputs = image_processor(images=batch, return_tensors="pt").to("cuda")
+
+        print(f"starting batch of size {len(batch)}")
+        start = time.time()
+        #with torch.no_grad():
+        # outputs = model(**inputs)
+        results = model(frame, stream=True)
+        end = time.time()
+        print("time taken for inference", end - start)
+
+        start = time.time()
+        #boxes = image_processor.post_process_object_detection(
+        # outputs,
+        # target_sizes=torch.tensor([(height, width)] * len(batch)),
+        # threshold=conf_threshold)
+        """
+        for i, (array, box) in enumerate(zip(batch, boxes)):
+            pil_image = draw_bounding_boxes(Image.fromarray(array), box, model, conf_threshold)
+            frame = np.array(pil_image)
+            # Convert RGB to BGR
+            frame = frame[:, :, ::-1].copy()
+            segment_file.write(frame)
+        """
+        for i, r in enumerate(results):
+            # Plot results image
+            im_bgr = r.plot()  # BGR-order numpy array
+            im_rgb = Image.fromarray(im_bgr[..., ::-1])  # RGB-order PIL image
+            frame = np.array(im_rgb)
+            # Convert RGB to BGR
+            frame = frame[:, :, ::-1].copy()
+            segment_file.write(frame)
+
+        if n_frames == 3 * fps:
+            n_chunks += 1
             segment_file.release()
-        [old lines 93-95 removed; their content is not rendered in the diff view]
+            n_frames = 0
+            yield frame, name
+            #name = f"output_{n_chunks}{'.mp4' if stream_as_mp4 else '.ts'}"
             name = f"output_{uuid.uuid4()}.mp4"
             segment_file = cv2.VideoWriter(name, video_codec, fps, (width, height)) # type: ignore
 
+        #batch = []
+        #segment_file.release()
+        #yield None, name
+        #end = time.time()
+        #print("time taken for processing boxes", end - start)
+        #name = f"output_{uuid.uuid4()}.mp4"
+        #segment_file = cv2.VideoWriter(name, video_codec, fps, (width, height)) # type: ignore
+
         iterating, frame = cap.read()
         n_frames += 1
         """
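
For context, here is a minimal, self-contained sketch of how the updated loop could sit inside a complete video_detection generator. Everything outside the hunk is an assumption: the Ultralytics YOLO("yolov8n.pt") weights, the mp4v codec, and the capture-property reads are placeholders for whatever the rest of the file actually defines. The PIL round-trip from the diff is also dropped, since r.plot() already returns a BGR array that cv2.VideoWriter.write accepts.

import uuid

import cv2
from ultralytics import YOLO  # assumption: the Space runs an Ultralytics model

model = YOLO("yolov8n.pt")  # hypothetical checkpoint; the real weights are defined elsewhere in the Space


def video_detection(cap):
    """Yield (last_annotated_frame, segment_filename) while writing ~3-second MP4 chunks."""
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30         # assumption: fall back to 30 fps if unknown
    video_codec = cv2.VideoWriter_fourcc(*"mp4v")      # assumption: mp4v fourcc for .mp4 segments

    iterating, frame = cap.read()
    n_frames = 0
    segment_file = None
    name = None

    while iterating:
        frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)   # same 0.5x downscale as the diff

        # Open a new segment lazily so the writer matches the resized frame size.
        if segment_file is None:
            height, width = frame.shape[:2]
            name = f"output_{uuid.uuid4()}.mp4"
            segment_file = cv2.VideoWriter(name, video_codec, fps, (width, height))

        # Per-frame inference; stream=True returns a lazy generator of Results objects.
        for r in model(frame, stream=True):
            frame = r.plot()                   # BGR numpy array with boxes drawn
            segment_file.write(frame)

        n_frames += 1
        if n_frames == 3 * fps:                # close a chunk every ~3 seconds of video
            segment_file.release()
            yield frame, name                  # hand the finished segment to the caller
            n_frames = 0
            segment_file = None                # next iteration opens a fresh segment

        iterating, frame = cap.read()

    if segment_file is not None:
        segment_file.release()                 # flush the final partial chunk

A caller (for example a streaming handler in the Space's UI code, if that is how it is wired up) would simply iterate the generator and push each finished segment name to the client as it is yielded.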