# yolov3_cl/config.py
import albumentations as A
import cv2
import torch
from albumentations.pytorch import ToTensorV2
# from utils import seed_everything
DATASET = 'PASCAL_VOC'
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# seed_everything() # If you want deterministic behavior
NUM_WORKERS = 8
BATCH_SIZE = 32
IMAGE_SIZE = 416
NUM_CLASSES = 20
LEARNING_RATE = 3e-5
WEIGHT_DECAY = 3e-5
NUM_EPOCHS = 150
CONF_THRESHOLD = 0.5
MAP_IOU_THRESH = 0.5
NMS_IOU_THRESH = 0.45
S = [IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8]
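# With IMAGE_SIZE = 416 this gives the three YOLOv3 grid sizes [13, 26, 52],
# from the coarsest prediction scale (stride 32) to the finest (stride 8).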
PIN_MEMORY = True
LOAD_MODEL = False
SAVE_MODEL = True
IMG_DIR = DATASET + "/images/"
LABEL_DIR = DATASET + "/labels/"
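# Class-incremental settings: the 20 VOC classes are split into BASE_CLASS old
# classes and NEW_CLASS newly added ones (a 19 + 1 split here); the DISTILL_*
# flags enable feature- and logit-level distillation from the base model.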
DISTILL = True
DISTILL_FEATURES = True
DISTILL_LOGITS = True
BASE_CLASS = 19
NEW_CLASS = 1
BASE = False
# CHECKPOINT_FILE = f'my_checkpoint_{BASE_CLASS}.pth.tar'
CHECKPOINT_FILE = "/kaggle/input/yolov3-weight/2007_task2_19_1_mAP_19_1.pth.tar"
# FINETUNE
FINETUNE = True
BATCH_SIZE_FINETUNE = 4
FINETUNE_NUM_IMAGE_PER_STORE = -1
# WARP
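# Gradient-warp settings for the incremental stage. From the names: WARP_LAYERS
# lists the prediction-head conv weights to warp, TRAIN_WARP_AT_ITR_NO is the
# interval (in iterations) of the warp step, and the feature/image stores below
# hold replay exemplars per class.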
WARP = True
TRAIN_WARP = False
TRAIN_WARP_AT_ITR_NO = 3
# WARP_LAYERS = ('layers.15.pred.1.conv.weight', 'layers.22.pred.1.conv.weight', 'layers.29.pred.1.conv.weight')
WARP_LAYERS = ('layers.22.pred.1.conv.weight',)  # trailing comma: a 1-tuple, not a plain string
# WARP_LAYERS = ()
# WARP_LAYERS = ('layers.15.pred.1.conv.weight', 'layers.29.pred.1.conv.weight')
NUM_FEATURES_PER_CLASS = 3
NUM_IMAGES_PER_CLASS = 10
BATCH_SIZE_WARP = 8
USE_FEATURE_STORE = False
IMAGE_STORE_LOC = '/kaggle/input/yolov3-weight/'
MOSAIC = True
ADD_IMAGE_STORE = True
BASE_CHECK_POINT = "/kaggle/input/yolov3-weight/2007_base_19_1_mAP_19_1.pth.tar"
ANCHORS = [
[(0.28, 0.22), (0.38, 0.48), (0.9, 0.78)],
[(0.07, 0.15), (0.15, 0.11), (0.14, 0.29)],
[(0.02, 0.03), (0.04, 0.07), (0.08, 0.06)],
] # Note these have been rescaled to be between [0, 1]
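# Example (a sketch, not part of the original training code): anchors normalized
# to [0, 1] are typically rescaled to grid-cell units before target assignment.
# EXAMPLE_SCALED_ANCHORS is a hypothetical name introduced here for illustration.
EXAMPLE_SCALED_ANCHORS = torch.tensor(ANCHORS) * torch.tensor(S).view(3, 1, 1)
# -> shape (3, 3, 2): 3 scales x 3 anchors x (w, h), measured in grid cells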
scale = 1.1
def train_preprocess(height=IMAGE_SIZE, width=IMAGE_SIZE):
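    """Heavy augmentation pipeline: resize with ~10% head-room (`scale`), pad,
    random-crop back to (height, width), then color/affine/photometric jitter.
    Normalization and ToTensorV2 live in train_transforms below, presumably so
    raw uint8 crops can still be composed for mosaic (MOSAIC = True)."""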
max_size = max(height, width)
return A.Compose(
[
A.LongestMaxSize(max_size=int(max_size * scale)),
A.PadIfNeeded(
min_height=int(height * scale),
min_width=int(width * scale),
border_mode=cv2.BORDER_CONSTANT,
),
A.RandomCrop(width=width, height=height),
A.ColorJitter(brightness=0.6, contrast=0.6, saturation=0.6, hue=0.6, p=0.4),
A.OneOf(
[
A.ShiftScaleRotate(
rotate_limit=10, p=0.4, border_mode=cv2.BORDER_CONSTANT
),
                    # A.IAAAffine was removed in albumentations 1.0; A.Affine is
                    # the maintained equivalent for the shear transform.
                    A.Affine(shear=10, p=0.4, mode=cv2.BORDER_CONSTANT),
],
p=1.0,
),
A.HorizontalFlip(p=0.5),
A.Blur(p=0.1),
A.CLAHE(p=0.1),
A.Posterize(p=0.1),
A.ToGray(p=0.1),
A.ChannelShuffle(p=0.05),
# A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
# ToTensorV2(),
],
bbox_params=A.BboxParams(format="yolo", min_visibility=0.4, label_fields=[],),
)
train_transforms = A.Compose(
[
A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
ToTensorV2(),
],
bbox_params=A.BboxParams(format="yolo", min_visibility=0.4, label_fields=[],),
)
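# Usage sketch (hypothetical, not from the original repo): run the heavy
# pipeline first, then normalize and convert to a tensor.
#
#   aug = train_preprocess()(image=image, bboxes=bboxes)  # HxWx3 uint8 RGB image;
#   out = train_transforms(image=aug["image"],            # bboxes as (x, y, w, h, cls)
#                          bboxes=aug["bboxes"])          # in normalized YOLO format
#   tensor, boxes = out["image"], out["bboxes"]           # tensor: float32, 3x416x416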
def weak_preprocessing(height=IMAGE_SIZE, width=IMAGE_SIZE):
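    """Mild variant of train_preprocess: same resize/pad/random-crop geometry,
    but only horizontal flips and occasional blur (no color or affine jitter)."""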
max_size = max(height, width)
return A.Compose(
[
A.LongestMaxSize(max_size=int(max_size * scale)),
A.PadIfNeeded(
min_height=int(height * scale),
min_width=int(width * scale),
border_mode=cv2.BORDER_CONSTANT,
),
A.RandomCrop(width=width, height=height),
A.HorizontalFlip(p=0.5),
A.Blur(p=0.1),
],
bbox_params=A.BboxParams(format="yolo", min_visibility=0.4, label_fields=[],),
)
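# Deterministic eval pipeline: letterbox (LongestMaxSize + constant pad) to
# IMAGE_SIZE, then normalize and convert to tensor; bbox_params is kept so
# ground-truth boxes are rescaled consistently with the image.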
test_transforms = A.Compose(
[
A.LongestMaxSize(max_size=IMAGE_SIZE),
A.PadIfNeeded(
min_height=IMAGE_SIZE, min_width=IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
),
A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
ToTensorV2(),
],
bbox_params=A.BboxParams(format="yolo", min_visibility=0.4, label_fields=[]),
)
infer_transforms = A.Compose(
[
A.LongestMaxSize(max_size=IMAGE_SIZE),
A.PadIfNeeded(
min_height=IMAGE_SIZE, min_width=IMAGE_SIZE, border_mode=cv2.BORDER_CONSTANT
),
A.Normalize(mean=[0, 0, 0], std=[1, 1, 1], max_pixel_value=255,),
ToTensorV2(),
]
)
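# Inference sketch (hypothetical): infer_transforms has no bbox_params, so it
# is applied to a bare image, e.g.:
#
#   img = cv2.cvtColor(cv2.imread("dog.jpg"), cv2.COLOR_BGR2RGB)
#   x = infer_transforms(image=img)["image"].unsqueeze(0).to(DEVICE)  # 1x3x416x416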
PASCAL_CLASSES = [
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor"
]
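# Sanity check: the VOC class list must match NUM_CLASSES above.
assert len(PASCAL_CLASSES) == NUM_CLASSES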
COCO_LABELS = ['person',
'bicycle',
'car',
'motorcycle',
'airplane',
'bus',
'train',
'truck',
'boat',
'traffic light',
'fire hydrant',
'stop sign',
'parking meter',
'bench',
'bird',
'cat',
'dog',
'horse',
'sheep',
'cow',
'elephant',
'bear',
'zebra',
'giraffe',
'backpack',
'umbrella',
'handbag',
'tie',
'suitcase',
'frisbee',
'skis',
'snowboard',
'sports ball',
'kite',
'baseball bat',
'baseball glove',
'skateboard',
'surfboard',
'tennis racket',
'bottle',
'wine glass',
'cup',
'fork',
'knife',
'spoon',
'bowl',
'banana',
'apple',
'sandwich',
'orange',
'broccoli',
'carrot',
'hot dog',
'pizza',
'donut',
'cake',
'chair',
'couch',
'potted plant',
'bed',
'dining table',
'toilet',
'tv',
'laptop',
'mouse',
'remote',
'keyboard',
'cell phone',
'microwave',
'oven',
'toaster',
'sink',
'refrigerator',
'book',
'clock',
'vase',
'scissors',
'teddy bear',
'hair drier',
'toothbrush'
]
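# Sanity check: the standard 80-class COCO label set.
assert len(COCO_LABELS) == 80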