from detrex.config import get_config
from ..models.dino_r50 import model

from omegaconf import OmegaConf

from detectron2.config import LazyCall as L
from detectron2.data import (
    build_detection_test_loader,
    build_detection_train_loader,
    get_detection_dataset_dicts,
)
from detectron2.data.datasets import register_coco_instances

from projects.vCLR_deformable_mask.modeling import OursDatasetMapper

dataloader = OmegaConf.create()
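
# NOTE: the original file creates `dataloader` but never populates it, even though
# the loader builders and `OursDatasetMapper` are imported above. The block below is
# a minimal sketch of how a detrex-style train/test loader could be wired up here.
# The dataset names and paths ("openworld_train"/"openworld_val", datasets/openworld/...)
# are hypothetical placeholders, and `OursDatasetMapper(is_train=...)` assumes the
# project mapper follows detectron2's DatasetMapper convention.
register_coco_instances(
    "openworld_train", {},  # placeholder dataset name and (empty) metadata
    "datasets/openworld/annotations/train.json", "datasets/openworld/train",
)
register_coco_instances(
    "openworld_val", {},
    "datasets/openworld/annotations/val.json", "datasets/openworld/val",
)

dataloader.train = L(build_detection_train_loader)(
    dataset=L(get_detection_dataset_dicts)(names="openworld_train"),
    mapper=L(OursDatasetMapper)(is_train=True),  # assumed constructor signature
    total_batch_size=16,
    num_workers=4,
)

dataloader.test = L(build_detection_test_loader)(
    dataset=L(get_detection_dataset_dicts)(names="openworld_val", filter_empty=False),
    mapper=L(OursDatasetMapper)(is_train=False),  # assumed constructor signature
    num_workers=4,
)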

# get default config
optimizer = get_config("common/optim.py").AdamW
lr_multiplier = get_config("common/coco_schedule.py").lr_multiplier_12ep
train = get_config("common/train.py").train

# modify training config
train.init_checkpoint = "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
train.output_dir = "./output/dino_openworld"

# max training iterations
train.max_iter = 60000
train.eval_period = 5000
train.log_period = 200
train.checkpointer.period = 5000

# gradient clipping for training
train.clip_grad.enabled = True
train.clip_grad.params.max_norm = 0.1
train.clip_grad.params.norm_type = 2

# set training devices
train.device = "cuda"
model.device = train.device

# modify optimizer config
optimizer.lr = 1e-4  # unchanged from the default 1e-4
optimizer.betas = (0.9, 0.999)
optimizer.weight_decay = 1e-4
# train the backbone with a 10x smaller learning rate than the rest of the model
optimizer.params.lr_factor_func = lambda module_name: 0.1 if "backbone" in module_name else 1

# modify model config
model.dn_number = 100  # number of denoising queries
model.num_classes = 1  # class-agnostic: a single foreground class
model.num_queries = 2000
model.select_box_nums_for_evaluation = 900

# activation checkpointing for the transformer (disabled here)
model.transformer.encoder.use_checkpoint = False
model.transformer.decoder.use_checkpoint = False

# model EMA: keep an exponential moving average of the weights,
# and use the EMA weights for evaluation only
train.model_ema.enabled = True
train.model_ema.decay = 0.999
train.model_ema.use_ema_weights_for_eval_only = True
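
# Usage sketch (assumption: detrex's standard LazyConfig launcher; the exact path of
# this config file inside projects/vCLR_deformable_mask is a placeholder):
#   python tools/train_net.py \
#       --config-file projects/vCLR_deformable_mask/configs/<this_config>.py \
#       --num-gpus 8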