# EcoSmart / app.py
import gradio as gr
import torch
import cv2
import os
import shutil
# Load the model
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
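# Note: yolov5s is the generic COCO-pretrained checkpoint. As a sketch, a custom-trained
# weights file could be loaded instead via the hub's 'custom' entry point
# (the path below is hypothetical):
#     model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')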

def detect_image(image):
    # Run inference and return the annotated image (results.render() draws the detections)
    results = model(image)
    return results.render()[0]

def detect_video(video_path):
    video = cv2.VideoCapture(video_path)
    # Fall back to 30 fps if the container does not report a frame rate
    frame_rate = video.get(cv2.CAP_PROP_FPS) or 30

    # Create a directory to store the extracted frames
    frames_dir = 'frames'
    os.makedirs(frames_dir, exist_ok=True)

    # Remove existing contents of the frames directory
    for file_name in os.listdir(frames_dir):
        file_path = os.path.join(frames_dir, file_name)
        if os.path.isfile(file_path):
            os.remove(file_path)

    frame_count = 0
    process_frame_count = 0
    while True:
        success, frame = video.read()
        if not success:
            break
        if frame_count % 2 == 0:  # Process every 2nd frame (adjust as needed)
            frame_output_path = os.path.join(frames_dir, f'frame_{process_frame_count:04d}.jpg')
            cv2.imwrite(frame_output_path, frame)
            process_frame_count += 1
        frame_count += 1
    video.release()

    # Process the frames with object detection and save the results
    results_dir = 'results'
    os.makedirs(results_dir, exist_ok=True)

    # Remove existing contents of the results directory
    for file_name in os.listdir(results_dir):
        file_path = os.path.join(results_dir, file_name)
        if os.path.isfile(file_path):
            os.remove(file_path)

    for i in range(process_frame_count):
        frame_path = os.path.join(frames_dir, f'frame_{i:04d}.jpg')
        frame = cv2.imread(frame_path)
        # OpenCV loads frames as BGR; YOLOv5 expects RGB, so convert before inference
        results = model(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        rendered = results.render()[0]
        results_output_path = os.path.join(results_dir, f'results_{i:04d}.jpg')
        # Convert the annotated RGB frame back to BGR for cv2.imwrite
        cv2.imwrite(results_output_path, cv2.cvtColor(rendered, cv2.COLOR_RGB2BGR))

    # Create the output video from the processed frames
    frame_files = sorted(os.listdir(results_dir))
    frame_path = os.path.join(results_dir, frame_files[0])
    frame = cv2.imread(frame_path)
    height, width, _ = frame.shape

    video_output_path = 'output_video.mp4'  # Replace with your desired output video path
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Change the codec as needed
    # Halve the frame rate since only every 2nd frame was kept
    video_writer = cv2.VideoWriter(video_output_path, fourcc, frame_rate / 2, (width, height))
    for frame_file in frame_files:
        frame_path = os.path.join(results_dir, frame_file)
        frame = cv2.imread(frame_path)
        video_writer.write(frame)
    video_writer.release()

    # Clean up the temporary directories
    shutil.rmtree(frames_dir)
    shutil.rmtree(results_dir)
    return video_output_path

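# Note: a sketch of a more robust variant would create the scratch directories with
# tempfile.mkdtemp() instead of the fixed 'frames'/'results' names, so concurrent
# requests cannot overwrite each other's files, e.g.:
#     frames_dir = tempfile.mkdtemp(prefix='frames_')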
# Create Gradio interfaces for the different modes
img_interface = gr.Interface(
    fn=detect_image,
    inputs=gr.Image(),
    outputs="image",
    title="Image"
)

vid_interface = gr.Interface(
    fn=detect_video,
    inputs=gr.Video(),
    outputs="video",
    title="Video"
)

# Add examples
# with gr.Blocks() as demo:
#     gr.Examples(
#         examples=[os.path.join(os.path.abspath(''), "plastic_bottles1.jpg")],
#         inputs="image",
#         outputs="image",
#         fn=detect_image,
#         cache_examples=True,
#     )
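# A simpler way to wire up examples (a sketch; it assumes "plastic_bottles1.jpg"
# sits in the repo root) is to pass an `examples` list straight to the image
# interface instead of using a Blocks wrapper:
#     img_interface = gr.Interface(fn=detect_image, inputs=gr.Image(),
#                                  outputs="image", title="Image",
#                                  examples=["plastic_bottles1.jpg"])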
# Create a list of interfaces
interfaces = [img_interface, vid_interface]
# Create the tabbed interface
tabbed_interface = gr.TabbedInterface(interfaces, ["Image", "Video"])
# Launch the tabbed interface
tabbed_interface.launch(debug=True)