aidas-79 committed
Commit eae416f · verified · 1 Parent(s): cde077a

Update video_detection.py

Files changed (1)
  1. video_detection.py +49 -7
video_detection.py CHANGED
@@ -6,6 +6,7 @@ import numpy as np
 import uuid
 
 model = YOLO("model/yolo11n_6-2-25.pt")
+SUBSAMPLE = 2
 
 def draw_boxes(frame, results):
     for r in results:
@@ -31,12 +32,6 @@ def draw_boxes(frame, results):
     return frame
 
 def video_detection(cap):
-    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-    fps = int(cap.get(cv2.CAP_PROP_FPS))
-
-    cap = cv2.VideoCapture(video)
-
     video_codec = cv2.VideoWriter_fourcc(*"mp4v")  # type: ignore
     fps = int(cap.get(cv2.CAP_PROP_FPS))
 
@@ -51,7 +46,54 @@ def video_detection(cap):
     name = f"output_{uuid.uuid4()}.mp4"
     segment_file = cv2.VideoWriter(name, video_codec, desired_fps, (width, height))  # type: ignore
     batch = []
-
+    while iterating:
+        frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
+        #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        if n_frames % SUBSAMPLE == 0:
+            batch.append(frame)
+        if len(batch) == 2 * desired_fps:
+            #inputs = image_processor(images=batch, return_tensors="pt").to("cuda")
+
+            print(f"starting batch of size {len(batch)}")
+            start = time.time()
+            #with torch.no_grad():
+            #    outputs = model(**inputs)
+            results = model(source=batch, stream=True)
+            end = time.time()
+            print("time taken for inference", end - start)
+
+            start = time.time()
+            #boxes = image_processor.post_process_object_detection(
+            #    outputs,
+            #    target_sizes=torch.tensor([(height, width)] * len(batch)),
+            #    threshold=conf_threshold)
+            """
+            for i, (array, box) in enumerate(zip(batch, boxes)):
+                pil_image = draw_bounding_boxes(Image.fromarray(array), box, model, conf_threshold)
+                frame = np.array(pil_image)
+                # Convert RGB to BGR
+                frame = frame[:, :, ::-1].copy()
+                segment_file.write(frame)
+            """
+            for i, r in enumerate(results):
+                # Plot results image
+                im_bgr = r.plot()  # BGR-order numpy array
+                im_rgb = Image.fromarray(im_bgr[..., ::-1])  # RGB-order PIL image
+                frame = np.array(im_rgb)
+                # Convert RGB to BGR
+                frame = frame[:, :, ::-1].copy()
+                segment_file.write(frame)
+
+            batch = []
+            segment_file.release()
+            yield name
+            end = time.time()
+            print("time taken for processing boxes", end - start)
+            name = f"output_{uuid.uuid4()}.mp4"
+            segment_file = cv2.VideoWriter(name, video_codec, desired_fps, (width, height))  # type: ignore
+
+        iterating, frame = cap.read()
+        n_frames += 1
     """
     #@spaces.GPU
     def video_detection(cap):
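
For reference, a minimal sketch of how the updated generator might be driven; the input path, the import layout, and the consuming loop below are assumptions for illustration, not part of this commit:

# Hypothetical usage sketch -- not part of the commit above.
# Assumes video_detection() is the generator from video_detection.py,
# which yields the path of each finished annotated .mp4 segment.
import cv2

from video_detection import video_detection  # assumed module/function layout

cap = cv2.VideoCapture("input.mp4")  # assumed input file
for segment_path in video_detection(cap):
    # Each yielded path points to a closed VideoWriter segment,
    # ready to be streamed or displayed (e.g. by a Gradio/Spaces app).
    print("segment ready:", segment_path)
cap.release()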