Upload args.py
Browse files
args.py
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import argparse


def _str2bool(value):
    """Parse a boolean-like command-line string into a real bool.

    Plain ``type=bool`` is an argparse pitfall: any non-empty string is
    truthy, so ``--flag False`` would silently yield ``True``. This
    converter accepts the usual spellings and raises a clear error for
    anything else.

    Raises:
        argparse.ArgumentTypeError: if *value* is not a recognized boolean.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError(f'Boolean value expected, got {value!r}')


def get_args_parser():
    """Build the argument parser for the PDFNet_swinB training script.

    Returns:
        argparse.ArgumentParser: parser holding model, optimizer,
        LR-schedule, finetuning, dataset and runtime options.
        ``add_help=False`` so it can be composed as a parent parser.
    """
    parser = argparse.ArgumentParser('PDFNet_swinB training script', add_help=False)
    # Boolean options use _str2bool so '--COPY False' etc. actually work;
    # 'type=bool' would treat every non-empty string as True.
    parser.add_argument('--COPY', default=True, type=_str2bool)
    parser.add_argument('--batch_size', default=1, type=int)
    parser.add_argument('--epochs', default=100, type=int)
    parser.add_argument('--update_freq', default=1, type=int,
                        help='Number of steps to accumulate gradients when updating parameters, set to 1 to disable this feature')
    parser.add_argument('--update_half', default=False, type=_str2bool,
                        help='update_half')

    # Model parameters
    parser.add_argument('--model', default='PDFNet_swinB', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--back_bone', default='PDFNet_swinB', type=str,
                        help='back_bone (default: swinB)')
    parser.add_argument('--back_bone_channels_stage1', default=128, type=int)
    parser.add_argument('--back_bone_channels_stage2', default=256, type=int)
    parser.add_argument('--back_bone_channels_stage3', default=512, type=int)
    parser.add_argument('--back_bone_channels_stage4', default=1024, type=int)
    parser.add_argument('--emb', default=128, type=int)
    parser.add_argument('--input_size', default=1024, type=int, help='images input size')
    parser.add_argument('--Crop_size', default=1024, type=int, help='images input size')
    parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')

    # Optimizer parameters
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
    parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight_decay', type=float, default=0.0001,
                        help='weight decay (default: 0.0001)')

    # Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine")')
    parser.add_argument('--lr', type=float, default=1e-5, metavar='LR',
                        help='learning rate (default: 1e-5)')
    parser.add_argument('--warmup_lr', type=float, default=1e-5, metavar='LR',
                        help='warmup learning rate (default: 1e-5)')
    parser.add_argument('--min_lr', type=float, default=1e-5, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--decay_epochs', type=float, default=300, metavar='N',
                        help='epoch interval to decay LR')
    parser.add_argument('--cooldown_epochs', type=int, default=0, metavar='N',
                        help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience_epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10)')
    parser.add_argument('--decay_rate', '--dr', type=float, default=0.1, metavar='RATE',
                        help='LR decay rate (default: 0.1)')

    # * Finetuning params
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')
    parser.add_argument('--finetune_epoch', default=0, type=int)

    # Dataset parameters
    parser.add_argument('--data_path', default='DATA/DIS-DATA/', type=str,
                        help='dataset path')
    # NOTE(review): '--chached' looks like a typo for '--cached', but the
    # option name is kept as-is so existing callers reading args.chached
    # (and existing shell scripts) keep working.
    parser.add_argument('--chached', default=False, type=_str2bool,
                        help='dataset chached')

    parser.add_argument('--checkpoints_save_path', default='checkpoints/PDFNet', type=str,
                        help='path where to save')
    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--num_workers', default=8, type=int)
    parser.add_argument('--eval', default=True, type=_str2bool,
                        help='Do evaluation epoch once after training')

    parser.add_argument('--eval_metric', default='F1', type=str, help='F1 or MAE')

    parser.add_argument('--DEBUG', default=False, type=_str2bool,
                        help='DEBUG MODE')

    return parser