Update app.py
app.py CHANGED
@@ -1,71 +1,53 @@
import cv2
import torch
-import numpy as np
import gradio as gr
from ultralytics import YOLO
-import threading
-import time
-
-# Load YOLOv5 model
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
-model = YOLO("yolov5s.pt").to(device)
+import numpy as np

-#
-
-frame = np.zeros((480, 640, 3), dtype=np.uint8)  # Default blank frame
-lock = threading.Lock()
+# Load YOLOv5 model (assuming weights are already downloaded)
+model = YOLO("yolov5s.pt")  # You can change to 'yolov5m.pt' or 'yolov5l.pt' for better accuracy

-def
-
-
-
-    image = results[0].plot()  # Plot detections directly on image
-    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # Convert back to RGB for Gradio
+def detect_objects_image(image):
+    results = model(image)
+    result_img = results[0].plot()  # Render image with bounding boxes
+    return result_img

-
-
-
+# Video detection function
+def detect_objects_video():
+    cap = cv2.VideoCapture(0)  # Capture from default webcam
+    cap.set(cv2.CAP_PROP_FPS, 30)  # Set FPS
+
    while cap.isOpened():
-        ret,
+        ret, frame = cap.read()
        if not ret:
-
-
-        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # Convert to RGB
-        results = model.predict(img, conf=0.4)  # Explicitly call predict
-        img = results[0].plot()  # Directly draw detections on the frame
+            break

-
-
-
-
-
+        results = model(frame)
+        result_img = results[0].plot()
+
+        _, buffer = cv2.imencode(".jpg", result_img)
+        yield buffer.tobytes()
+
+    cap.release()

-def
-
-    with lock:
-        return frame
+def start_video():
+    return gr.Video(update=detect_objects_video, streaming=True)

# Gradio UI
with gr.Blocks() as demo:
-    gr.Markdown("
-
-    with gr.
-
-
-
-
-
-
-
-
-
-
-
-    with gr.Tab("Upload Image"):
-        image_input = gr.Image(type="numpy", label="Upload Image")
-        image_output = gr.Image(label="Detected Objects")
-        image_button = gr.Button("Detect Objects")
-        image_button.click(detect_objects, inputs=image_input, outputs=image_output)
+    gr.Markdown("## Live Object Detection with YOLOv5")
+
+    with gr.Row():
+        img_input = gr.Image(type="numpy")
+        img_output = gr.Image()
+        img_button = gr.Button("Detect Objects in Image")
+
+    img_button.click(detect_objects_image, inputs=img_input, outputs=img_output)
+
+    with gr.Row():
+        video_button = gr.Button("Start Live Video Detection")
+        video_output = gr.Video()
+
+    video_button.click(start_video, outputs=video_output)

demo.launch()
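In the updated file, start_video returns a gr.Video component from the click handler and detect_objects_video yields raw JPEG bytes; depending on the Gradio version, gr.Video may not accept an update= argument or consume a byte stream this way, so the live view may not actually render. The following is a minimal alternative sketch, assuming a Gradio release that supports generator functions as event handlers: the generator yields annotated RGB frames directly to a gr.Image output. The names stream_webcam and live_view are illustrative and are not part of this commit.

import cv2
import gradio as gr
from ultralytics import YOLO

model = YOLO("yolov5s.pt")

def stream_webcam():
    # Illustrative generator: each yield pushes one annotated frame to the output.
    cap = cv2.VideoCapture(0)  # default webcam
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            annotated = model(frame)[0].plot()  # BGR image with boxes drawn
            yield cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)  # gr.Image expects RGB
    finally:
        cap.release()

with gr.Blocks() as demo:
    live_view = gr.Image(label="Live detection")  # hypothetical output component
    start_btn = gr.Button("Start Live Video Detection")
    start_btn.click(stream_webcam, outputs=live_view)  # successive yields stream frames

demo.launch()

Note that a hosted Space has no server-side camera, so cv2.VideoCapture(0) generally only works when running the app locally; for a deployed Space, a webcam-sourced gr.Image input with streaming enabled is the more common pattern, with the exact argument names depending on the Gradio version.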