aidas-79 committed on
Commit
3d4164f
·
verified ·
1 Parent(s): 44aa803

update file

Browse files
Files changed (6) hide show
  1. .gitattributes +3 -0
  2. car.mp4 +3 -0
  3. main.py +53 -0
  4. openh264-1.8.0-win64.dll +3 -0
  5. output_video.mp4 +3 -0
  6. video_detection.py +58 -0
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ car.mp4 filter=lfs diff=lfs merge=lfs -text
37
+ openh264-1.8.0-win64.dll filter=lfs diff=lfs merge=lfs -text
38
+ output_video.mp4 filter=lfs diff=lfs merge=lfs -text
car.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7f7287faa231cf287c433ef8b2f38949e214cac9c473aea286d75f2bdea2330
3
+ size 4708643
main.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import gradio as gr
3
+ import validators
4
+ from cap_from_youtube import cap_from_youtube
5
+
6
+ import video_detection
7
+ from video_detection import video_detection
8
+
9
+
10
def preprocess_input(input):
    """Route the user-supplied source (file path or URL) to the detector.

    Args:
        input: Either a local video file path (from the file tab) or a
            URL string (from the URL tab). Gradio passes it positionally.

    Yields:
        (frame, video_path) tuples produced by video_detection():
        annotated preview frames while processing, then (None, path)
        with the finished output file.
    """
    # Guard: gradio delivers None/"" when the user submits an empty form;
    # validators.url() raises on non-string input, so bail out early.
    if not input:
        print("No input provided")
        return
    if validators.url(input):
        # Only YouTube links are supported; cap_from_youtube opens them
        # as a cv2-compatible capture without downloading the whole file.
        if 'youtu' in input:
            cap = cap_from_youtube(input, resolution='720p')
            yield from video_detection(cap)
        else:
            print("Invalid URL")
    else:
        # Anything that is not a URL is treated as a local file path.
        cap = cv2.VideoCapture(input)
        yield from video_detection(cap)
23
+
24
# --- Gradio UI ----------------------------------------------------------
# Two tabs share the same preprocess_input handler: one accepts an
# uploaded video file, the other a YouTube URL. Each tab streams annotated
# preview frames into an Image component and finally delivers the rendered
# output video file.
input_video = gr.Video(label="Input Video")
input_url = gr.Textbox(label="Input URL", placeholder="Enter URL")
output_frames_1 = gr.Image(label="Output Frames")
output_video_file_1 = gr.Video(label="Output video")
output_frames_2 = gr.Image(label="Output Frames")
output_video_file_2 = gr.Video(label="Output video")

file_tab = gr.Interface(
    fn=preprocess_input,
    inputs=[input_video],
    outputs=[output_frames_1, output_video_file_1],
    # "Upload a file for detection" (Ukrainian); plain string — the
    # original f-prefix had no placeholders.
    title="Завантажте файл для розпізнавання",
    allow_flagging="never",
    examples=[["car.mp4"]],
)

url_tab = gr.Interface(
    fn=preprocess_input,
    inputs=[input_url],
    outputs=[output_frames_2, output_video_file_2],
    # "Enter a YouTube video URL for detection" (Ukrainian).
    title="Введіть URL Youtube відео для розпізнавання",
    allow_flagging="never",
    examples=[["car.mp4"]],
)

app = gr.TabbedInterface([file_tab, url_tab], ["Завантажити файл", "Ввести URL"])

app.launch()
openh264-1.8.0-win64.dll ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3a5cf05b673a17ebfe95ac6b479607cf4df2289fe4a6d5f3d3ff09aa8f56192
3
+ size 825160
output_video.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d28471513da5bc1cba0b90a35965c665a52804d456185201c2c36e4ef2022dc
3
+ size 29715980
video_detection.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ultralytics import YOLO
2
+ import cv2
3
+
4
# Load the YOLOv8-medium weights once at import time so every call to
# video_detection() reuses the same in-memory model.
model = YOLO("models/yolov8m.pt")
5
+
6
def draw_boxes(frame, results):
    """Draw detection bounding boxes and class labels onto a frame.

    Args:
        frame: BGR image (numpy array) annotated in place.
        results: Iterable of ultralytics result objects, each exposing
            .boxes (with .xyxy and .cls) and .names (class-id -> name).

    Returns:
        The same frame object, annotated.
    """
    # Text/box style is loop-invariant — set it up once instead of
    # rebuilding it for every detected box.
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 1
    text_color = (255, 0, 0)
    text_thickness = 2
    box_color = (255, 0, 255)

    for r in results:
        for box in r.boxes:
            # box.xyxy[0] holds the four corner coordinates.
            x1, y1, x2, y2 = (int(v) for v in box.xyxy[0])

            cv2.rectangle(frame, (x1, y1), (x2, y2), box_color, 3)

            # Map the numeric class id to its human-readable name.
            label = r.names[box.cls[0].item()]

            # OpenCV documents the text origin as a point tuple; the
            # baseline sits at the box's top-left corner, so the label
            # renders just below the top edge.
            cv2.putText(frame, label, (x1, y1), font, font_scale,
                        text_color, text_thickness)

    return frame
28
+
29
+
30
def video_detection(cap):
    """Run YOLO detection over every frame of an open capture and record it.

    Args:
        cap: An opened cv2.VideoCapture (local file, webcam, or YouTube
            stream from cap_from_youtube).

    Yields:
        (frame, None) for every 10th annotated frame as a live preview,
        then (None, 'output_video.mp4') once the whole video is written.
    """
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Webcams/streams can report 0 FPS; fall back to a sane default so the
    # VideoWriter still produces a playable file.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    out = cv2.VideoWriter('output_video.mp4',
                          cv2.VideoWriter_fourcc(*'h264'),
                          fps, (frame_width, frame_height))

    # Use the GPU only when one is actually available; the previous
    # hard-coded 'cuda' crashed on CPU-only hosts.
    import torch
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    try:
        count = 0
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
                break

            # stream=True makes the model return a lazy per-frame
            # results generator instead of a list.
            results = model(frame, stream=True, device=device, verbose=False)
            frame = draw_boxes(frame, results)
            out.write(frame)

            # Throttle the live preview to every 10th frame.
            if not count % 10:
                yield frame, None
            count += 1
    finally:
        # Release capture and writer even if the consumer abandons the
        # generator mid-stream (GeneratorExit) or an error occurs.
        cap.release()
        out.release()

    yield None, 'output_video.mp4'