import torch
import numpy as np
import gradio as gr
import cv2
import time
import os
from pathlib import Path
# Create cache directory for models
os.makedirs("models", exist_ok=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# Use YOLOv5 Nano for better speed; cache the weights locally so restarts
# skip the download. Note: torch.hub's "custom" entry point needs a full
# checkpoint file, not a bare state_dict, so we fetch the release asset
# directly rather than re-saving the loaded model.
model_path = Path("models/yolov5n.pt")
if not model_path.exists():
    print("Downloading YOLOv5n weights...")
    torch.hub.download_url_to_file(
        "https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt",
        str(model_path),
    )
print(f"Loading model from: {model_path}")
model = torch.hub.load("ultralytics/yolov5", "custom", path=str(model_path)).to(device)
# Optimize model for speed
model.conf = 0.3 # Lower confidence threshold
model.iou = 0.3 # Non-Maximum Suppression IoU threshold
model.classes = None # Detect all classes
if device.type == "cuda":
    model.half()  # Use FP16 for faster GPU inference
else:
    torch.set_num_threads(os.cpu_count())  # Use all CPU threads on CPU-only hosts
model.eval()
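# Optional warm-up (a sketch, not part of the original app): the first CUDA
# inference pays one-off kernel and allocator costs that would skew the first
# FPS reading, so run one dummy 640x640 frame through the model up front.
if device.type == "cuda":
    with torch.inference_mode():
        model(np.zeros((640, 640, 3), dtype=np.uint8), size=640)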
# Pre-generate colors for bounding boxes
np.random.seed(42)
colors = np.random.uniform(0, 255, size=(len(model.names), 3))
# Track FPS
total_inference_time = 0
inference_count = 0
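# Caveat: these module-level counters are shared state, so under concurrent
# Gradio requests the running average can race. If that matters, a
# threading.Lock around the updates in detect_objects is a minimal fix
# (hypothetical hardening, not in the original app):
#   import threading
#   stats_lock = threading.Lock()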
def preprocess_image(image):
    """Resize the input to the model's 640x640 input size.

    Gradio delivers RGB arrays and YOLOv5's AutoShape also expects RGB,
    so no BGR conversion is needed here; converting would both hurt
    detection quality and swap colors in the displayed result.
    """
    input_size = 640
    return cv2.resize(image, (input_size, input_size))
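# Note: a plain square resize distorts the aspect ratio. AutoShape letterboxes
# internally, so passing the original frame straight through is a valid
# alternative; the fixed resize is kept only so the drawn overlay and FPS
# banner land on a predictable canvas size.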
def detect_objects(image):
    global total_inference_time, inference_count
    if image is None:
        return None
    start_time = time.time()
    # Preprocess image
    image = preprocess_image(image)
    with torch.inference_mode():  # Faster than torch.no_grad()
        results = model(image, size=640)
    inference_time = time.time() - start_time
    total_inference_time += inference_time
    inference_count += 1
    avg_inference_time = total_inference_time / inference_count
    detections = results.pred[0].cpu().numpy()
    output_image = image.copy()
    for *xyxy, conf, cls in detections:
        x1, y1, x2, y2 = map(int, xyxy)
        class_id = int(cls)
        color = colors[class_id].tolist()
        # Draw bounding box
        cv2.rectangle(output_image, (x1, y1), (x2, y2), color, 3, lineType=cv2.LINE_AA)
        label = f"{model.names[class_id]} {conf:.2f}"
        font_scale, font_thickness = 0.9, 2
        (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)
        # Label background
        cv2.rectangle(output_image, (x1, y1 - h - 10), (x1 + w + 10, y1), color, -1)
        cv2.putText(output_image, label, (x1 + 5, y1 - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255), font_thickness,
                    lineType=cv2.LINE_AA)
    fps = 1 / inference_time
    # Display FPS on a semi-transparent banner
    overlay = output_image.copy()
    cv2.rectangle(overlay, (10, 10), (300, 80), (0, 0, 0), -1)
    output_image = cv2.addWeighted(overlay, 0.6, output_image, 0.4, 0)
    cv2.putText(output_image, f"FPS: {fps:.2f}", (20, 40),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, lineType=cv2.LINE_AA)
    cv2.putText(output_image, f"Avg FPS: {1/avg_inference_time:.2f}", (20, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, lineType=cv2.LINE_AA)
    return output_image
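# Quick standalone sanity check (hypothetical file names, adjust as needed):
#   img = cv2.cvtColor(cv2.imread("test.jpg"), cv2.COLOR_BGR2RGB)
#   out = detect_objects(img)
#   cv2.imwrite("out.jpg", cv2.cvtColor(out, cv2.COLOR_RGB2BGR))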
# Gradio UI -- only offer example images that actually ship with the app,
# so cached example generation cannot fail on a missing file
example_images = [p for p in ("spring_street_after.jpg", "pexels-hikaique-109919.jpg")
                  if os.path.exists(p)]
with gr.Blocks(title="Optimized YOLOv5 Object Detection") as demo:
    gr.Markdown("""
# Optimized YOLOv5 Object Detection
Detects objects using YOLOv5 with enhanced visualization and FPS tracking.
""")
    with gr.Row():
        with gr.Column(scale=1):
            input_image = gr.Image(label="Input Image", type="numpy")
            submit_button = gr.Button("Submit", variant="primary")
            clear_button = gr.Button("Clear")
        with gr.Column(scale=1):
            output_image = gr.Image(label="Detected Objects", type="numpy")
    if example_images:  # skip the gallery entirely if no examples are bundled
        gr.Examples(
            examples=example_images,
            inputs=input_image,
            outputs=output_image,
            fn=detect_objects,
            cache_examples=True,
        )
    submit_button.click(fn=detect_objects, inputs=input_image, outputs=output_image)
    clear_button.click(lambda: (None, None), None, [input_image, output_image])

demo.launch()
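# On Hugging Face Spaces the bare launch() above is sufficient; for hosting
# elsewhere, demo.launch(server_name="0.0.0.0") would expose the app on all
# network interfaces (an optional tweak, not required by the original app).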