import spaces
import os
import gradio as gr
import shutil
import sys
import subprocess
import shlex
import torch
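
# Install detectron2 and the local prebuilt detrex wheel, and clone the vCLR repo,
# dropping its pinned requirements and copying the bundled config into it.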
os.system("pip install git+https://github.com/facebookresearch/detectron2.git")
os.system("git clone https://github.com/Visual-AI/vCLR.git && cd vCLR && rm -f requirements.txt && cd .. && cp deformable_train_voc_eval_nonvoc.py vCLR/projects/vCLR_deformable_mask/configs/dino-resnet/")
subprocess.run(
    shlex.split(
        "pip install detrex-0.3.0-cp310-cp310-linux_x86_64.whl"
    )
)
sys.path.append("vCLR/")
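
# These imports depend on detectron2/detrex being installed and on vCLR being on sys.path.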
from demo.predictors import VisualizationDemo
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
import numpy as np
from PIL import Image

if __name__ == "__main__":
    gr.close_all()
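
    # Load the vCLR LazyConfig and point both the model and train entries at the GPU.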
    cfg = LazyConfig.load("vCLR/projects/vCLR_deformable_mask/configs/dino-resnet/deformable_train_voc_eval_nonvoc.py")
    cfg["model"].device = "cuda"
    cfg["train"].device = "cuda"

    # @spaces.GPU(duration=40, progress=gr.Progress(track_tqdm=True))
    # def
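
    # Instantiate the detector and load the COCO-trained vCLR weights from the Hugging Face Hub.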
    model = instantiate(cfg.model)
    checkpointer = DetectionCheckpointer(model)
    # checkpointer.load("https://huggingface.co/allencbzhang/vCLR/resolve/main/vCLR_deformable_train_on_voc.pth")
    checkpointer.load("https://huggingface.co/allencbzhang/vCLR/resolve/main/vCLR_deformable_train_on_coco.pth")
    model.eval()
    model.cuda()
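
    # Visualization helper that runs the model and draws predictions, using coco_2017_val metadata.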
    vis_demo = VisualizationDemo(
        model=model,
        min_size_test=800,
        max_size_test=1333,
        img_format="RGB",
        metadata_dataset="coco_2017_val",
    )
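
    # Run the detector on a single image at the given confidence threshold and return the rendered prediction.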
    def inference(img, confidence):
        img = np.array(img)
        _, results = vis_demo.run_on_image(img, confidence)
        results = Image.fromarray(results.get_image()[:, :, ::-1])
        return results
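
    # Gradio UI: an image input and a confidence slider, with the visualized detection as output.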
    demo = gr.Interface(
        fn=inference,
        inputs=[
            gr.Image(type="pil", image_mode="RGB"),
            # gr.Number(precision=2, minimum=0.0, maximum=1.0, value=0.5)
            gr.Slider(minimum=0.0, maximum=1.0, value=0.5, step=0.05, label="Confidence"),
        ],
        outputs="image",
        examples=[],
        title="[CVPR 2025 highlight] v-CLR: View-Consistent Learning for Open-World Instance Segmentation",
        description='''
[arXiv](https://arxiv.org/abs/2412.10028)
[Papers with Code](https://paperswithcode.com/sota/object-detection-on-coco-2017-val?p=mr-detr-instructive-multi-route-training-for)
''',
    )
    demo.launch()