# configs/backup/crowddet/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman.py
_base_ = ['../_base_/default_runtime.py']
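
# CrowdDet ("Detection in Crowded Scenes: One Proposal, Multiple Predictions",
# CVPR 2020) lets each proposal predict a set of instances and matches that
# set to the ground truth with an EMD-style loss, which suits heavily
# overlapping pedestrians.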
model = dict(
type='CrowdDet',
data_preprocessor=dict(
type='DetDataPreprocessor',
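        # ImageNet mean/std in BGR channel order; with bgr_to_rgb=False the
        # input images stay in BGR, matching these statistics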
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False,
pad_size_divisor=64,
        # This option follows https://github.com/Purkialo/CrowdDet/
        # blob/master/lib/data/CrowdHuman.py: all images in a batch are
        # resized together.
batch_augments=[
dict(type='BatchResize', scale=(1400, 800), pad_size_divisor=64)
]),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
upsample_cfg=dict(mode='bilinear', align_corners=False)),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
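            # ratios are h / w, so values > 1 produce the tall anchors
            # suited to pedestrians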
ratios=[1.0, 2.0, 3.0],
strides=[4, 8, 16, 32, 64],
centers=[(8, 8), (8, 8), (8, 8), (8, 8), (8, 8)]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0],
clip_border=False),
loss_cls=dict(type='CrossEntropyLoss', loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='MultiInstanceRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign',
output_size=7,
sampling_ratio=-1,
aligned=True,
use_torchvision=True),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='MultiInstanceBBoxHead',
with_refine=False,
num_shared_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
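            # reduction='none' keeps per-prediction losses so the head can
            # minimize over the possible prediction-to-GT assignments
            # (CrowdDet's EMD loss)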
loss_cls=dict(
type='CrossEntropyLoss',
loss_weight=1.0,
use_sigmoid=False,
reduction='none'),
loss_bbox=dict(
type='SmoothL1Loss', loss_weight=1.0, reduction='none'))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=(0.3, 0.7),
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2400,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=2),
rcnn=dict(
assigner=dict(
type='MultiInstanceAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.3,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='MultiInsRandomSampler',
num=512,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1200,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=2),
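        # crowded scenes keep far more detections per image than the usual
        # COCO default of 100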
rcnn=dict(
nms=dict(type='nms', iou_threshold=0.5),
score_thr=0.01,
max_per_img=500)))
dataset_type = 'CrowdHumanDataset'
data_root = 'data/CrowdHuman/'
# Example: using a different file client.
# Method 1: simply set the data root and let the file I/O module infer the
# backend from the path prefix (LMDB and Memcached are not supported yet)
# data_root = 's3://openmmlab/datasets/tracking/CrowdHuman/'
# Method 2: use `backend_args` (named `file_client_args` before 3.0.0rc6)
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/tracking/',
# 'data/': 's3://openmmlab/datasets/tracking/'
# }))
backend_args = None
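
# No per-image 'Resize' in the training pipeline: the model-level
# 'BatchResize' augment in data_preprocessor resizes each batch as a whole.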
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
'flip_direction'))
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1400, 800), keep_ratio=True),
    # 'LoadAnnotations' comes after 'Resize' so the gt boxes are not rescaled
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=4,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
    # a 'batch_sampler' (aspect-ratio grouping by default) may decrease
    # the precision, so it is disabled here
    batch_sampler=None,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotation_train.odgt',
data_prefix=dict(img='Images/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotation_val.odgt',
data_prefix=dict(img='Images/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
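
# CrowdHuman reports AP (higher is better), MR (log-average miss rate,
# lower is better) and JI (Jaccard index, higher is better).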
val_evaluator = dict(
type='CrowdHumanMetric',
ann_file=data_root + 'annotation_val.odgt',
metric=['AP', 'MR', 'JI'],
backend_args=backend_args)
test_evaluator = val_evaluator
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=30, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
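
# Linear warm-up over the first 800 iterations, then the LR drops by 10x
# after epochs 24 and 27.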
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=800),
dict(
type='MultiStepLR',
begin=0,
end=30,
by_epoch=True,
milestones=[24, 27],
gamma=0.1)
]
# optimizer
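# base_batch_size = (8 GPUs) x (2 samples per GPU), matching the '8xb2' in the
# file name; pass --auto-scale-lr to tools/train.py to rescale lr=0.002
# automatically when the actual total batch size differs.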
auto_scale_lr = dict(base_batch_size=16)
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001))
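
# Usage sketch (standard MMDetection 3.x entry points; the config path
# assumes the default repo layout):
#   single GPU:
#     python tools/train.py configs/crowddet/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman.py
#   distributed, matching the 8xb2 schedule above:
#     bash tools/dist_train.sh configs/crowddet/crowddet-rcnn_r50_fpn_8xb2-30e_crowdhuman.py 8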