File size: 4,356 Bytes
668d89d
 
 
 
1851cdf
668d89d
 
d65319e
668d89d
645663e
7ed4f93
645663e
5b85a92
16b8b27
668d89d
5b85a92
9e299ab
 
 
 
 
16b8b27
 
 
 
 
 
ad8d6d5
39f55d8
30fec0d
39f55d8
 
 
 
e15c356
30fec0d
 
 
39f55d8
c4eb26f
ad8d6d5
9e299ab
 
 
 
 
16b8b27
 
 
 
 
 
9e299ab
30fec0d
9e299ab
 
 
 
 
 
 
 
 
 
 
 
 
 
9a56c59
9e299ab
 
 
 
 
 
39f55d8
ad8d6d5
9e299ab
16b8b27
 
9e299ab
ad8d6d5
 
c4eb26f
d3923d5
acba7af
85f5145
acba7af
c4eb26f
85f5145
 
c4eb26f
 
d3923d5
06f9709
acba7af
85f5145
 
 
 
 
 
 
acba7af
85f5145
 
c4eb26f
2b3685e
 
a28d86d
 
 
 
 
 
2b3685e
c4eb26f
eceaf18
c4eb26f
 
acba7af
b3818eb
06f9709
9e299ab
30fec0d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
import gradio as gr
import torch
import cv2
import os
import shutil

# Load the custom-trained YOLOv5 model via torch.hub (clones/uses the
# ultralytics/yolov5 hub repo and wraps the weights in an AutoShape model).
# NOTE(review): 'best.pt' is resolved relative to the current working
# directory — presumably the fine-tuned weights ship next to this script;
# confirm before deploying.
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')

def detect_image(image):
    """Run YOLOv5 detection on one image and return the annotated image.

    Parameters
    ----------
    image : numpy.ndarray
        Input image as delivered by the Gradio Image component.

    Returns
    -------
    numpy.ndarray
        The same image with detection boxes drawn onto it.
    """
    detections = model(image)
    # render() draws the boxes in place and returns the list of annotated
    # images; a single input yields a single-element list.
    annotated_images = detections.render()
    return annotated_images[0]


def detect_video(video_path):
    """Run YOLOv5 detection on every 2nd frame of a video and return an
    annotated output video.

    Frames are sampled to disk, run through the model, and the annotated
    results are stitched into 'output_video.mp4' at half the source frame
    rate (matching the 1-in-2 sampling).

    Parameters
    ----------
    video_path : str
        Path to the input video file (as supplied by the Gradio Video input).

    Returns
    -------
    str
        Path to the rendered output video ('output_video.mp4').

    Raises
    ------
    ValueError
        If the video cannot be opened or yields no readable frames.
    """
    video = cv2.VideoCapture(video_path)
    if not video.isOpened():
        raise ValueError(f"Could not open video: {video_path}")
    frame_rate = video.get(cv2.CAP_PROP_FPS)

    # Working directories for sampled frames and annotated results.
    # Recreate each one empty so stale files from a previous run cannot
    # leak into this run's output.
    frames_dir = 'frames'
    results_dir = 'results'
    for directory in (frames_dir, results_dir):
        shutil.rmtree(directory, ignore_errors=True)
        os.makedirs(directory, exist_ok=True)

    try:
        # --- Sample every 2nd frame to disk ------------------------------
        frame_count = 0
        process_frame_count = 0
        while True:
            success, frame = video.read()
            if not success:
                break
            if frame_count % 2 == 0:  # process every 2nd frame (adjust as needed)
                frame_output_path = os.path.join(
                    frames_dir, f'frame_{process_frame_count:04d}.jpg')
                cv2.imwrite(frame_output_path, frame)
                process_frame_count += 1
            frame_count += 1
        video.release()

        # Guard: an unreadable/empty video would previously crash later
        # with an IndexError on frame_files[0].
        if process_frame_count == 0:
            raise ValueError(f"No readable frames in video: {video_path}")

        # --- Run detection on each sampled frame -------------------------
        for i in range(process_frame_count):
            frame_path = os.path.join(frames_dir, f'frame_{i:04d}.jpg')
            frame = cv2.imread(frame_path)
            # cv2 decodes to BGR but the YOLOv5 hub model expects RGB input,
            # so convert before inference and convert the rendered (RGB)
            # result back to BGR for cv2.imwrite.
            results = model(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            rendered = results.render()[0]
            results_output_path = os.path.join(results_dir, f'results_{i:04d}.jpg')
            cv2.imwrite(results_output_path, cv2.cvtColor(rendered, cv2.COLOR_RGB2BGR))

        # --- Stitch the annotated frames into the output video -----------
        frame_files = sorted(os.listdir(results_dir))
        first_frame = cv2.imread(os.path.join(results_dir, frame_files[0]))
        height, width, _ = first_frame.shape

        video_output_path = 'output_video.mp4'  # Replace with your desired output video path
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # You can change the codec as needed
        # Halve the frame rate because only every 2nd frame was kept.
        video_writer = cv2.VideoWriter(
            video_output_path, fourcc, frame_rate / 2, (width, height))
        try:
            for frame_file in frame_files:
                video_writer.write(cv2.imread(os.path.join(results_dir, frame_file)))
        finally:
            video_writer.release()
    finally:
        # Always clean up the temporary directories, even on failure.
        shutil.rmtree(frames_dir, ignore_errors=True)
        shutil.rmtree(results_dir, ignore_errors=True)

    return video_output_path

# Per-mode Gradio interfaces; each becomes one tab of the final app.
# NOTE(review): gr.inputs.* is the legacy (pre-3.x) Gradio namespace and
# source="upload" was removed in later releases — presumably this targets
# an older Gradio; confirm against the installed version.
img_interface = gr.Interface(
    fn=detect_image,
    examples=[os.path.join(os.path.abspath(''), "plastic_bottles1.jpg")],
    inputs=gr.inputs.Image(source="upload"),
    outputs="image",
    title="Image",
    cache_examples=True  # pre-compute example outputs at startup
)

# Video interface; examples are preview clips expected next to this script.
vid_interface = gr.Interface(
    fn=detect_video,
    inputs=gr.inputs.Video(source="upload"),
    examples=[
        os.path.join(os.path.abspath(''), "a-plastic-bag-is-floating-in-a-sea-ocean-plastic-pollution-big-environmental-p-SBV-347235576-preview.mp4"),
        os.path.join(os.path.abspath(''), "hand-woman-in-yellow-gloves-picking-up-empty-plastic-bottles-cleaning-on-the-b-SBV-346452144-preview.mp4"),
        os.path.join(os.path.abspath(''), "plastic-bottle-being-dumped-in-to-the-sea-ocean-pollution-in-ocean-is-a-big-en-SBV-347235586-preview.mp4"),
        os.path.join(os.path.abspath(''), "pollution-garbages-plastic-and-wastes-on-the-beach-after-winter-storms-SBV-331355306-preview.mp4"),
        os.path.join(os.path.abspath(''), "volunteer-woman-picking-plastic-bottle-into-trash-plastic-bag-black-for-cleani-SBV-346871657-preview.mp4")
    ],
    outputs="video",
    title="Video",
    cache_examples=True  # pre-compute example outputs at startup
)

# Combine the per-mode interfaces into a single tabbed app.
interfaces = [img_interface, vid_interface]

# One tab per interface, labelled to match each interface's title.
tabbed_interface = gr.TabbedInterface(interfaces, ["Image", "Video"])

# Launch the app; debug=True surfaces tracebacks in the console.
tabbed_interface.launch(debug=True)