"""
Gradio app to compare object‑detection models:
  • Ultralytics YOLOv12 (n, s, m, l, x)
  • Ultralytics YOLOv11 (n, s, m, l, x)
  • Roboflow RF‑DETR (Base, Large)
  • Custom fine‑tuned checkpoints for either framework
Requires Python ≥3.9 plus:
  pip install gradio ultralytics rfdetr supervision pillow numpy torch torchvision
If you need ONNX export for RF‑DETR, also: pip install rfdetr[onnxexport]
"""

from __future__ import annotations

import time
from pathlib import Path
from typing import List, Tuple

import numpy as np
from PIL import Image
import gradio as gr
import supervision as sv
from ultralytics import YOLO
from rfdetr import RFDETRBase, RFDETRLarge
from rfdetr.util.coco_classes import COCO_CLASSES

# -----------------------------------------------------------------------------
# Model registry & lazy loader
# -----------------------------------------------------------------------------

YOLO_MODEL_MAP = {
    # YOLOv12 sizes (Ultralytics resolves these aliases and downloads weights on first use)
    "YOLOv12-n": "yolo12n.pt",
    "YOLOv12-s": "yolo12s.pt",
    "YOLOv12-m": "yolo12m.pt",
    "YOLOv12-l": "yolo12l.pt",
    "YOLOv12-x": "yolo12x.pt",
    # YOLOv11 sizes (note: Ultralytics names these "yolo11*", not "yolov11*")
    "YOLOv11-n": "yolo11n.pt",
    "YOLOv11-s": "yolo11s.pt",
    "YOLOv11-m": "yolo11m.pt",
    "YOLOv11-l": "yolo11l.pt",
    "YOLOv11-x": "yolo11x.pt",
}

RFDETR_MODEL_MAP = {
    "RF-DETR-Base (29M)": "base",  # handled explicitly in load_model()
    "RF-DETR-Large (128M)": "large",
}

ALL_MODELS = list(YOLO_MODEL_MAP.keys()) + list(RFDETR_MODEL_MAP.keys()) + [
    "Custom YOLO (.pt/.pth)",
    "Custom RF-DETR (.pth)",
]

# Cache keyed by (model choice, custom checkpoint path) so switching the
# custom path loads the new checkpoint instead of reusing a stale model.
_loaded = {}

def load_model(choice: str, custom_path: str | None = None):
    """Lazily load and cache models to avoid re-downloading between inferences."""
    key = (choice, custom_path)
    if key in _loaded:
        return _loaded[key]

    if choice in YOLO_MODEL_MAP:
        mdl = YOLO(YOLO_MODEL_MAP[choice])
    elif choice in RFDETR_MODEL_MAP:
        mdl = RFDETRBase() if RFDETR_MODEL_MAP[choice] == "base" else RFDETRLarge()
    elif choice.startswith("Custom YOLO"):
        if not custom_path:
            raise ValueError("Please provide a path to your YOLO checkpoint.")
        mdl = YOLO(custom_path)
    elif choice.startswith("Custom RF-DETR"):
        if not custom_path:
            raise ValueError("Please provide a path to your RF-DETR checkpoint.")
        mdl = RFDETRBase(pretrain_weights=custom_path)
    else:
        raise ValueError(f"Unsupported model choice: {choice}")

    _loaded[key] = mdl
    return mdl
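
# Optional: pre-warm the cache at startup so the first click doesn't trigger a
# download (names must match keys in YOLO_MODEL_MAP / RFDETR_MODEL_MAP), e.g.:
#   for name in ("YOLOv11-n", "RF-DETR-Base (29M)"):
#       load_model(name)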

# -----------------------------------------------------------------------------
# Inference helpers
# -----------------------------------------------------------------------------

box_annotator = sv.BoxAnnotator()
label_annotator = sv.LabelAnnotator()
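# Both annotators only hold drawing style (colors, thickness); annotate() does
# not mutate them, so single shared instances are safe to reuse across calls.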

def run_single_inference(model, image: Image.Image, threshold: float) -> Tuple[Image.Image, float]:
    """Run one model on one image; return the annotated image and runtime in seconds."""
    start = time.perf_counter()

    # RF-DETR's predict() already returns sv.Detections filtered by threshold
    if isinstance(model, (RFDETRBase, RFDETRLarge)):
        detections = model.predict(image, threshold=threshold)
        label_source = COCO_CLASSES
    else:
        # Ultralytics YOLO returns a list of Results; it filters at predict
        # time, so the confidence threshold is passed as `conf` here
        result = model.predict(image, conf=threshold, verbose=False)[0]
        detections = sv.Detections.from_ultralytics(result)
        label_source = model.names  # dict mapping class id -> class name
    runtime = time.perf_counter() - start

    labels = [
        f"{label_source[cid]} {conf:.2f}"
        for cid, conf in zip(detections.class_id, detections.confidence)
    ]
    annotated = box_annotator.annotate(image.copy(), detections)
    annotated = label_annotator.annotate(annotated, detections, labels)
    return annotated, runtime
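
# Quick smoke test without the UI (the image path below is illustrative):
#   img = Image.open("sample.jpg")
#   annotated, secs = run_single_inference(load_model("YOLOv11-n"), img, 0.5)
#   print(f"{secs * 1000:.1f} ms"); annotated.save("annotated.jpg")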

# -----------------------------------------------------------------------------
# Gradio UI logic
# -----------------------------------------------------------------------------

def compare_models(models: List[str], img: Image.Image, threshold: float, custom_path: str | None):
    if img is None:
        raise gr.Error("Please upload an image first.")
    if img.mode != "RGB":
        img = img.convert("RGB")
    results = []
    legends = []
    for m in models:
        model_obj = load_model(m, custom_path)
        annotated, t = run_single_inference(model_obj, img, threshold)
        results.append(annotated)
        legends.append(f"{m}: {t * 1000:.1f} ms")
    return results, legends
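
# Note: gr.Gallery also accepts (image, caption) tuples, so the legends could
# be rendered inline under each image instead of in the separate JSON panel.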

# -----------------------------------------------------------------------------
# Launch Gradio Interface
# -----------------------------------------------------------------------------

def build_demo():
    with gr.Blocks(title="CV Model Comparison") as demo:
        gr.Markdown("""# 🔍 Compare Object‑Detection Models\nUpload an image and select one or more models to see their predictions side‑by‑side.""")

        with gr.Row():
            model_select = gr.CheckboxGroup(choices=ALL_MODELS, value=["YOLOv12-n"], label="Select models")
            threshold_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.05, label="Confidence threshold")
        custom_weight_path = gr.Textbox(label="Path to custom checkpoint (if selected)")
        image_in = gr.Image(type="pil", label="Upload image")

        with gr.Row():
            gallery = gr.Gallery(label="Annotated results", columns=2, height="auto")

        legends_out = gr.JSON(label="Runtime (ms)")

        run_btn = gr.Button("Run Inference")
        run_btn.click(compare_models, inputs=[model_select, image_in, threshold_slider, custom_weight_path], outputs=[gallery, legends_out])

    return demo

# Execute when running directly
if __name__ == "__main__":
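    # launch() also accepts share=True (temporary public URL) and
    # server_name="0.0.0.0" (serve on the local network).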
    build_demo().launch()