Update app.py
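Startup rework for ZeroGPU: `mmcv` is pinned to 2.0.1 via `mim` and `PYTORCH_JIT` is set to `"0"` before the first `torch` import, and the mmengine/mmdet/mmyolo imports move up accordingly, alongside an explicit `from mmengine.runner import Runner`. Model compilation is disabled with `cfg.compile = False`, the two CUDA-touching entry points (`generate_image_embeddings` and `run_image`) are wrapped in `@spaces.GPU`, and the `device` string is fixed to `'cuda'`.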
app.py CHANGED
```diff
@@ -1,11 +1,24 @@
 # Copyright (c) Tencent Inc. All rights reserved.
+import time
 import os
+os.environ['PYTORCH_JIT'] = "0"
+os.system('mim install mmcv==2.0.1')
+# import spaces
 import sys
 import argparse
 import os.path as osp
 from io import BytesIO
 from functools import partial
 
+import spaces
+
+from mmengine.runner import Runner
+from mmengine.dataset import Compose
+from mmengine.runner.amp import autocast
+from mmengine.config import Config, DictAction, ConfigDict
+from mmdet.datasets import CocoDataset
+from mmyolo.registry import RUNNERS
+
 import cv2
 # import onnx
 import torch
@@ -15,12 +28,7 @@ import gradio as gr
 from PIL import Image
 import supervision as sv
 from torchvision.ops import nms
-
-from mmengine.dataset import Compose
-from mmengine.runner.amp import autocast
-from mmengine.config import Config, DictAction, ConfigDict
-from mmdet.datasets import CocoDataset
-from mmyolo.registry import RUNNERS
+
 
 from transformers import (AutoTokenizer, CLIPTextModelWithProjection)
 from transformers import (AutoProcessor, CLIPVisionModelWithProjection)
@@ -45,7 +53,7 @@ class LabelAnnotator(sv.LabelAnnotator):
 LABEL_ANNOTATOR = LabelAnnotator(text_padding=4,
                                  text_scale=0.5,
                                  text_thickness=1)
-
+@spaces.GPU
 def generate_image_embeddings(prompt_image,
                               vision_encoder,
                               vision_processor,
@@ -63,7 +71,7 @@ def generate_image_embeddings(prompt_image,
     img_feats = projector(img_feats)
     return img_feats
 
-
+@spaces.GPU
 def run_image(runner,
               vision_encoder,
               vision_processor,
@@ -254,6 +262,7 @@ if __name__ == '__main__':
     checkpoint = "weights/yolo_world_v2_l_image_prompt_adapter-719a7afb.pth"
     # load config
     cfg = Config.fromfile(config)
+    cfg.compile = False
    if cfg.get('work_dir', None) is None:
         cfg.work_dir = osp.join('./work_dirs',
                                 osp.splitext(osp.basename(config))[0])
@@ -264,7 +273,7 @@ if __name__ == '__main__':
         runner = Runner.from_cfg(cfg)
     else:
         runner = RUNNERS.build(cfg)
-
+    # runner.test()
     runner.call_hook('before_run')
     runner.load_or_resume()
     pipeline = cfg.test_dataloader.dataset.pipeline
@@ -276,7 +285,7 @@ if __name__ == '__main__':
     clip_model = "openai/clip-vit-base-patch32"
     vision_model = CLIPVisionModelWithProjection.from_pretrained(clip_model)
     processor = AutoProcessor.from_pretrained(clip_model)
-    device = 'cuda
+    device = 'cuda'
     vision_model.to(device)
 
     texts = [' ']
```
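A note on the first hunk: the two `os` calls only work because of where they sit. PyTorch reads `PYTORCH_JIT` once, at import time, to decide whether to disable the TorchScript JIT, and the `mim install` has to finish before anything imports `mmcv` (mmdet and mmyolo pull it in transitively). A minimal sketch of that ordering constraint, assuming `openmim` is already installed as on this Space:

```python
import os

# Must run before the first `import torch` anywhere in the process:
# PyTorch checks PYTORCH_JIT during import to turn the TorchScript JIT off.
os.environ['PYTORCH_JIT'] = "0"

# Must run before anything imports mmcv (mmdet/mmyolo do so transitively).
os.system('mim install mmcv==2.0.1')

import torch                          # JIT already disabled here
from mmyolo.registry import RUNNERS   # imports mmcv, now pinned to 2.0.1
```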
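The `@spaces.GPU` decorators are the ZeroGPU contract: the Space holds no GPU at import time, and one is attached only for the duration of a decorated call, so every function that touches CUDA must be wrapped. A minimal standalone sketch of the pattern; `describe_gpu` and the Gradio wiring are illustrative stand-ins, not this app's functions:

```python
import gradio as gr
import spaces   # provided on Hugging Face ZeroGPU Spaces
import torch

@spaces.GPU   # a GPU is attached only while this call runs
def describe_gpu(_prompt: str) -> str:
    # CUDA work belongs inside the decorated function, never at module level
    if torch.cuda.is_available():
        return torch.cuda.get_device_name(0)
    return "no GPU attached"

demo = gr.Interface(fn=describe_gpu, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()
```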
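`cfg.compile = False` tells the mmengine `Runner` not to wrap the model in `torch.compile`, which lines up with the `PYTORCH_JIT` switch above. A condensed sketch of the bootstrap the `__main__` block performs; the config path is a hypothetical placeholder, and wiring the checkpoint in through `cfg.load_from` is an assumption about how the app connects the two:

```python
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile("configs/yolo_world_l_image_prompt.py")  # hypothetical path
cfg.compile = False   # skip torch.compile when mmengine builds the model
cfg.load_from = "weights/yolo_world_v2_l_image_prompt_adapter-719a7afb.pth"

runner = Runner.from_cfg(cfg)
runner.call_hook('before_run')   # run setup hooks without starting a test loop
runner.load_or_resume()          # load the checkpoint named in cfg.load_from
```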
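For reference, the image-prompt path that `generate_image_embeddings` guards boils down to projecting an image through the CLIP vision tower loaded near the bottom of the file. A self-contained sketch against the same `openai/clip-vit-base-patch32` checkpoint; the input file name and the L2-normalization step are illustrative assumptions, not taken from the app:

```python
import torch
from PIL import Image
from transformers import AutoProcessor, CLIPVisionModelWithProjection

clip_model = "openai/clip-vit-base-patch32"
vision_model = CLIPVisionModelWithProjection.from_pretrained(clip_model)
processor = AutoProcessor.from_pretrained(clip_model)

device = 'cuda' if torch.cuda.is_available() else 'cpu'
vision_model.to(device)

prompt_image = Image.open("prompt.jpg")   # hypothetical prompt image
inputs = processor(images=prompt_image, return_tensors="pt").to(device)
with torch.no_grad():
    img_feats = vision_model(**inputs).image_embeds   # shape (1, 512)
# L2-normalize before handing off to a downstream projector (illustrative)
img_feats = img_feats / img_feats.norm(p=2, dim=-1, keepdim=True)
```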