import os
import subprocess
# Clone the yolov5 repository and install its requirements
if not os.path.exists('yolov5'):
    subprocess.run(['git', 'clone', 'https://github.com/ultralytics/yolov5'], check=True)
    subprocess.run(['pip', 'install', '-r', 'yolov5/requirements.txt'], check=True)
import torch
import torchvision
from torchvision.transforms import functional as F
from PIL import Image
import cv2
import numpy as np
import gradio as gr
from yolov5.models.yolo import Model
from yolov5.utils.general import non_max_suppression
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True).to(device)
model.eval()
def preprocess_image(image):
    """Convert a PIL image to a batched CHW tensor on the target device."""
    try:
        image_tensor = F.to_tensor(image)
        return image_tensor.unsqueeze(0).to(device)
    except Exception as e:
        print(f"Error in preprocessing image: {e}")
        return None

def draw_boxes(image, outputs, threshold=0.3):
    """Draw bounding boxes and class labels for detections above the score threshold."""
    try:
        image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
        for box in outputs:
            # Each detection row from non_max_suppression is [x1, y1, x2, y2, confidence, class].
            x1, y1, x2, y2 = box[0].item(), box[1].item(), box[2].item(), box[3].item()
            score, label = box[4].item(), int(box[5].item())
            if score > threshold:
                x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
                cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
                text = f"{model.names[label]}: {score:.2f}"
                cv2.putText(image, text, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    except Exception as e:
        print(f"Error in drawing boxes: {e}")
        return image

def detect_objects(image):
    """Run YOLOv5 on a PIL image and return the image annotated with detections."""
    image_tensor = preprocess_image(image)
    if image_tensor is None:
        return image
    try:
        outputs = model(image_tensor)
        outputs = non_max_suppression(outputs)[0]
        return draw_boxes(image, outputs)
    except Exception as e:
        print(f"Error in detecting objects: {e}")
        return image

iface = gr.Interface(
    fn=detect_objects,
    inputs=gr.Image(source="webcam", type="pil"),
    outputs=gr.Image(type="pil"),
    live=True,
)
iface.launch()
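# Optional local smoke test (a minimal sketch, not part of the Space itself): the detector can be
# exercised without the webcam UI by calling detect_objects on a saved image. "sample.jpg" below is
# only an illustrative placeholder path.
#
#   from PIL import Image
#   annotated = detect_objects(Image.open("sample.jpg").convert("RGB"))
#   # On success `annotated` is an RGB numpy array; on failure the original image is returned.
#   Image.fromarray(annotated).save("sample_annotated.jpg")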