import gradio as gr
import cv2
import numpy as np
import torch

# Load the YOLOv7 model from torch.hub
# (force_reload=True re-downloads the repository on every start; drop it to reuse the cached copy)
model = torch.hub.load('WongKinYiu/yolov7', 'yolov7', force_reload=True)
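# The autoShape wrapper returned by torch.hub is assumed to expose NMS settings;
# uncomment and adjust if needed (values shown are the usual defaults):
# model.conf = 0.25  # confidence threshold for detections
# model.iou = 0.45   # IoU threshold for non-maximum suppression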

def detect_objects(image):
    # Gradio supplies an RGB numpy array, which is the format the hub model's
    # autoShape wrapper expects, so the image can be passed in directly.
    results = model(image)  # Perform inference

    # Process results: each row of the xyxy tensor is [x1, y1, x2, y2, confidence, class]
    detections = results.xyxy[0].cpu().numpy()
    annotated_image = image.copy()

    for *box, conf, cls in detections:
        x1, y1, x2, y2 = map(int, box)
        cv2.rectangle(annotated_image, (x1, y1), (x2, y2), (255, 0, 0), 2)
        label = f'{model.names[int(cls)]}: {conf:.2f}'
        cv2.putText(annotated_image, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

    return annotated_image

# Create Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# YOLOv7 Object Detection App")
    
    image_input = gr.Image(label="Upload Image", type="numpy")
    output_image = gr.Image(label="Detected Objects", type="numpy")
    
    detect_button = gr.Button("Detect Objects")

    detect_button.click(fn=detect_objects, inputs=image_input, outputs=output_image)

# Launch the interface
if __name__ == "__main__":
    app.launch()
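
# To run locally: `python app.py`. Gradio serves the UI at http://127.0.0.1:7860 by default;
# pass share=True to app.launch() if a temporary public link is needed.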