import cv2
import torch
import gradio as gr
from ultralytics import YOLO
import numpy as np
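# Assumed dependencies for this Space (e.g. listed in requirements.txt):
# ultralytics, gradio, opencv-python, and torch (installed as a dependency of ultralytics).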
# Load the YOLOv5 model (assuming the weights are already downloaded)
model = YOLO("yolov5s.pt")  # Switch to "yolov5m.pt" or "yolov5l.pt" for better accuracy
# Image detection function
def detect_objects_image(image):
    # Gradio supplies RGB arrays while OpenCV/Ultralytics work in BGR, so convert both ways
    results = model(cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
    result_img = results[0].plot()  # Draw bounding boxes (returns a BGR array)
    return cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB)
# Video detection function: a generator that streams annotated webcam frames
def detect_objects_video():
    cap = cv2.VideoCapture(0)  # Capture from the default webcam
    cap.set(cv2.CAP_PROP_FPS, 30)  # Request 30 FPS
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        results = model(frame)  # Webcam frames are already BGR
        result_img = results[0].plot()
        # Yield RGB frames; Gradio streams each yielded frame to the output component
        yield cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB)
    cap.release()
# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## Live Object Detection with YOLOv5")

    with gr.Row():
        img_input = gr.Image(type="numpy")
        img_output = gr.Image()
        img_button = gr.Button("Detect Objects in Image")
    img_button.click(detect_objects_image, inputs=img_input, outputs=img_output)
    with gr.Row():
        video_button = gr.Button("Start Live Video Detection")
        # Use an Image component for the live feed: the generator above yields one
        # annotated frame at a time, and Gradio streams each yield to this output
        video_output = gr.Image()
    video_button.click(detect_objects_video, outputs=video_output)

demo.launch()
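# --- Alternative streaming approach (sketch, not part of the original app) ---
# cv2.VideoCapture(0) reads the *server's* webcam, which is typically unavailable on a
# hosted Space. With recent Gradio releases (assuming the 4.x API), a common pattern is
# to stream the visitor's browser webcam frame by frame instead, roughly:
#
#     def detect_frame(frame):
#         results = model(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
#         return cv2.cvtColor(results[0].plot(), cv2.COLOR_BGR2RGB)
#
#     with gr.Blocks() as webcam_demo:
#         cam = gr.Image(sources=["webcam"], streaming=True)
#         out = gr.Image()
#         cam.stream(detect_frame, inputs=cam, outputs=out)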