import numpy as np
import torch
import cv2
import gradio as gr
from torchvision.transforms import functional as F
from yolov5.utils.general import non_max_suppression

# Load a pretrained YOLOv5s model from the Ultralytics hub and move it to the GPU if available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True).to(device)
model.eval()

def preprocess_image(image):
    # Convert the PIL image to a normalized CHW tensor and add a batch dimension.
    image_tensor = F.to_tensor(image)
    return image_tensor.unsqueeze(0).to(device)

def draw_boxes(image, outputs, threshold=0.3):
    # Draw each detection (x1, y1, x2, y2, confidence, class) returned by non_max_suppression.
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    for box in outputs:
        score, label = box[4].item(), int(box[5].item())
        x1, y1, x2, y2 = box[0].item(), box[1].item(), box[2].item(), box[3].item()
        if score > threshold:
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
            text = f"{model.names[label]:s}: {score:.2f}"
            cv2.putText(image, text, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

def detect_objects(image):
    # YOLOv5 expects input dimensions that are multiples of the model stride (32),
    # so resize the upload to 640x640 before running inference.
    image = image.resize((640, 640))
    image_tensor = preprocess_image(image)
    with torch.no_grad():
        outputs = model(image_tensor)
    outputs = non_max_suppression(outputs)[0]
    return draw_boxes(image, outputs)

# Build the Gradio UI: a single image input, with the annotated image as output.
iface = gr.Interface(
    fn=detect_objects,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    live=True
)
iface.launch()
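
# Sketch of a local test without the web UI (assumes a file named example.jpg exists
# next to this script; the path is only illustrative):
#   from PIL import Image
#   annotated = detect_objects(Image.open('example.jpg'))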