import gradio as gr
import cv2
import numpy as np
import torch

# Load the YOLOv7 model from Torch Hub.
# Note (assumption): if the 'yolov7' entry point is not exposed by the repo's
# hubconf.py, loading through the 'custom' entry point with a local yolov7.pt
# checkpoint is a common alternative.
model = torch.hub.load('WongKinYiu/yolov7', 'yolov7', force_reload=True)

def detect_objects(image):
    # Convert the RGB array supplied by Gradio to BGR (OpenCV channel order)
    img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    results = model(img)  # Perform inference

    # Process results: detections in xyxy format, one row per box
    # [x1, y1, x2, y2, confidence, class]. Move to CPU before converting to numpy.
    detections = results.xyxy[0].cpu().numpy()
    annotated_image = image.copy()
    for *box, conf, cls in detections:
        x1, y1, x2, y2 = map(int, box)
        cv2.rectangle(annotated_image, (x1, y1), (x2, y2), (255, 0, 0), 2)
        label = f'{model.names[int(cls)]}: {conf:.2f}'
        cv2.putText(annotated_image, label, (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
    return annotated_image

# Create Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# YOLOv7 Object Detection App")
    image_input = gr.Image(label="Upload Image", type="numpy")
    output_image = gr.Image(label="Detected Objects", type="numpy")
    classify_button = gr.Button("Detect Objects")
    classify_button.click(fn=detect_objects, inputs=image_input, outputs=output_image)

# Launch the interface
if __name__ == "__main__":
    app.launch()
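
# Usage sketch (assumptions: the script is saved as a standalone file, e.g. a
# hypothetical app.py, and gradio, torch, opencv-python, plus the YOLOv7 repo's
# own dependencies are installed in the environment):
#
#   pip install gradio torch opencv-python
#   python app.py
#
# Gradio then serves the interface locally, by default at http://127.0.0.1:7860.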