"""
Gradio app to compare object‑detection models:
• Ultralytics YOLOv12 (n, s, m, l, x)
• Ultralytics YOLOv11 (n, s, m, l, x)
• Roboflow RF‑DETR (Base, Large)
• Custom fine‑tuned checkpoints (.pt/.pth upload)
Revision 2025‑04‑19‑e:
• Gallery items now carry captions so you can see which model produced which image (and latency).
• Captions display as "Model (xx ms)" or error status.
• No other behaviour changed: pre‑loading, progress bar, thin semi‑transparent boxes, concise error labels.
"""
from __future__ import annotations
import time
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import cv2
import numpy as np
from PIL import Image
import gradio as gr
import supervision as sv
from ultralytics import YOLO
from rfdetr import RFDETRBase, RFDETRLarge
from rfdetr.util.coco_classes import COCO_CLASSES
###############################################################################
# Model registry & cache
###############################################################################
YOLO_MODEL_MAP: Dict[str, str] = {
# Ultralytics filenames omit the "v"
"YOLOv12‑n": "yolo12n.pt",
"YOLOv12‑s": "yolo12s.pt",
"YOLOv12‑m": "yolo12m.pt",
"YOLOv12‑l": "yolo12l.pt",
"YOLOv12‑x": "yolo12x.pt",
"YOLOv11‑n": "yolo11n.pt",
"YOLOv11‑s": "yolo11s.pt",
"YOLOv11‑m": "yolo11m.pt",
"YOLOv11‑l": "yolo11l.pt",
"YOLOv11‑x": "yolo11x.pt",
}
RFDETR_MODEL_MAP = {
"RF‑DETR‑Base (29M)": "base",
"RF‑DETR‑Large (128M)": "large",
}
ALL_MODELS = list(YOLO_MODEL_MAP.keys()) + list(RFDETR_MODEL_MAP.keys()) + [
"Custom YOLO (.pt/.pth)",
"Custom RF‑DETR (.pth)",
]
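# Instantiated detectors, cached so repeated comparisons skip re-loading weights.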
_loaded: Dict[str, object] = {}
def load_model(choice: str, custom_file: Optional[Path] = None):
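    """Return the detector for *choice*, loading it once and caching it.

    *custom_file* is only consulted for the two "Custom ..." entries and must
    point at an uploaded checkpoint.
    """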
    # Custom checkpoints share one menu entry, so key the cache on the file too;
    # otherwise a newly uploaded checkpoint would silently reuse the old model.
    cache_key = f"{choice}:{custom_file}" if choice.startswith("Custom") else choice
    if cache_key in _loaded:
        return _loaded[cache_key]
if choice in YOLO_MODEL_MAP:
model = YOLO(YOLO_MODEL_MAP[choice])
elif choice in RFDETR_MODEL_MAP:
model = RFDETRBase() if RFDETR_MODEL_MAP[choice] == "base" else RFDETRLarge()
elif choice.startswith("Custom YOLO"):
if custom_file is None:
raise RuntimeError("Upload a YOLO .pt/.pth checkpoint first.")
model = YOLO(str(custom_file))
elif choice.startswith("Custom RF‑DETR"):
if custom_file is None:
raise RuntimeError("Upload an RF‑DETR .pth checkpoint first.")
model = RFDETRBase(pretrain_weights=str(custom_file))
else:
raise RuntimeError(f"Unsupported model choice: {choice}")
    _loaded[cache_key] = model
return model
###############################################################################
# Inference helpers
###############################################################################
BOX_THICKNESS = 2
BOX_ALPHA = 0.6
box_annotator = sv.BoxAnnotator(thickness=BOX_THICKNESS)
label_annotator = sv.LabelAnnotator()
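# Annotations are drawn on a copy of the frame and alpha-blended back onto the
# original, which produces the thin semi-transparent boxes mentioned above.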
def _blend(base: np.ndarray, overlay: np.ndarray, alpha: float = BOX_ALPHA) -> np.ndarray:
return cv2.addWeighted(overlay, alpha, base, 1 - alpha, 0)
def run_single_inference(model, image: Image.Image, threshold: float) -> Tuple[Image.Image, float]:
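    """Run one detector on *image*; return the annotated image and runtime in seconds."""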
start = time.perf_counter()
if isinstance(model, (RFDETRBase, RFDETRLarge)):
detections = model.predict(image, threshold=threshold)
label_src = COCO_CLASSES
else:
        ul_res = model.predict(image, conf=threshold, verbose=False)[0]
detections = sv.Detections.from_ultralytics(ul_res)
label_src = model.names # type: ignore
runtime = time.perf_counter() - start
img_bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
overlay = img_bgr.copy()
overlay = box_annotator.annotate(overlay, detections)
overlay = label_annotator.annotate(
overlay,
detections,
[f"{label_src[c]} {p:.2f}" for c, p in zip(detections.class_id, detections.confidence)],
)
blended = _blend(img_bgr, overlay)
return Image.fromarray(cv2.cvtColor(blended, cv2.COLOR_BGR2RGB)), runtime
###############################################################################
# Callback with progress & captions
###############################################################################
def compare_models(
models: List[str],
img: Image.Image,
threshold: float,
    custom_file: Optional[Path],
    progress: gr.Progress = gr.Progress(),  # injected by Gradio when declared as a default arg
):
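    """Run every selected detector on *img* and yield (gallery items, legend).

    A model that fails to load or crashes during inference contributes a grey
    placeholder with a concise error caption instead of aborting the comparison.
    """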
if img is None:
raise gr.Error("Please upload an image first.")
if img.mode != "RGB":
img = img.convert("RGB")
    total_steps = len(models) * 2  # one loading step plus one inference step per model
detectors: Dict[str, object] = {}
for i, name in enumerate(models, 1):
try:
detectors[name] = load_model(name, custom_file)
except Exception as exc:
detectors[name] = exc
        progress((i, total_steps), desc=f"Loading {name}")
results: List[Tuple[Image.Image, str]] = []
legends: Dict[str, str] = {}
for j, name in enumerate(models, 1):
item = detectors[name]
step = len(models) + j
if isinstance(item, Exception):
placeholder = Image.new("RGB", img.size, (40, 40, 40))
emsg = str(item)
caption = f"{name} – Unavailable" if "No such file" in emsg or "not found" in emsg else f"{name} – ERROR"
results.append((placeholder, caption))
legends[name] = caption
progress(step, total=total_steps, desc=f"Skipped {name}")
continue
try:
annotated, latency = run_single_inference(item, img, threshold)
caption = f"{name} ({latency*1000:.1f} ms)"
results.append((annotated, caption))
legends[name] = f"{latency*1000:.1f} ms"
except Exception as exc:
placeholder = Image.new("RGB", img.size, (40, 40, 40))
caption = f"{name} – ERROR"
results.append((placeholder, caption))
legends[name] = f"ERROR: {str(exc).splitlines()[0][:120]}"
progress(step, total=total_steps, desc=f"Inference {name}")
yield results, legends
###############################################################################
# UI
###############################################################################
def build_demo():
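    """Assemble the Gradio Blocks UI and wire the Run button to compare_models."""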
with gr.Blocks(title="CV Model Comparison") as demo:
gr.Markdown(
"""# 🔍 Compare Object‑Detection Models\nUpload an image, select detectors, and click **Run Inference**.\nCaptions beneath each result show which model (and latency) generated it."""
)
with gr.Row():
sel_models = gr.CheckboxGroup(ALL_MODELS, value=["YOLOv12‑n"], label="Models")
            conf_slider = gr.Slider(0.0, 1.0, value=0.5, step=0.05, label="Confidence")
ckpt_file = gr.File(label="Custom checkpoint (.pt/.pth)", file_types=[".pt", ".pth"], interactive=True)
img_in = gr.Image(type="pil", label="Image", sources=["upload", "webcam"])
with gr.Row():
gallery = gr.Gallery(label="Results", columns=2, height="auto")
legend_out = gr.JSON(label="Latency / status by model")
gr.Button("Run Inference", variant="primary").click(
compare_models, [sel_models, img_in, conf_slider, ckpt_file], [gallery, legend_out]
)
return demo
if __name__ == "__main__":
build_demo().launch()