File size: 4,843 Bytes
9206874
 
b2cfa3d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e9a4aaa
9206874
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b2cfa3d
9206874
 
 
 
 
 
 
 
 
e9a4aaa
b2cfa3d
9206874
 
59d116a
9206874
59d116a
b2cfa3d
 
9206874
b2cfa3d
9206874
 
 
 
b2cfa3d
 
 
 
 
 
 
 
9206874
59d116a
 
 
 
 
 
 
 
 
9206874
 
 
59d116a
9206874
59d116a
 
 
 
b2cfa3d
59d116a
 
b2cfa3d
 
 
 
 
 
 
 
 
59d116a
 
 
b2cfa3d
59d116a
 
 
9206874
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
#!/bin/bash
#
# Configure and launch Detectron2 Mask R-CNN training on a COCO-format
# dataset, optionally tuning batch size and learning rate to the local
# GPU's memory.

set -euo pipefail

# Parameters
USE_GPU_TUNING=true   # Set this to true to enable GPU tuning
RESUME_TRAINING=false # Set this to true to resume training from last checkpoint
MAX_ITER=100000        # Default maximum iterations
CHECKPOINT_PERIOD=$((MAX_ITER / 10))  # Set to 10% of MAX_ITER

# Default values (used when GPU tuning is disabled or no GPU is available)
IMS_PER_BATCH=2
BATCH_SIZE_PER_IMAGE=512
BASE_LR=0.00025

# If USE_GPU_TUNING is true, adjust parameters based on GPU memory.
# Guard on nvidia-smi being present so the script still runs on CPU-only
# machines (previously an empty GPU_MEMORY caused '[ -ge ]' errors).
if [ "$USE_GPU_TUNING" = true ] && command -v nvidia-smi >/dev/null 2>&1; then
    # Total memory of the first GPU, in MB
    GPU_MEMORY=$(nvidia-smi --query-gpu=memory.total --format=csv,noheader,nounits | head -n 1)

    # Set the batch size based on GPU memory
    if [ "$GPU_MEMORY" -ge 24576 ]; then
        IMS_PER_BATCH=8   # For 24GB+ GPUs
        BATCH_SIZE_PER_IMAGE=512
    elif [ "$GPU_MEMORY" -ge 12288 ]; then
        IMS_PER_BATCH=4   # For 12GB+ GPUs
        BATCH_SIZE_PER_IMAGE=256
    else
        IMS_PER_BATCH=2   # For smaller GPUs
        BATCH_SIZE_PER_IMAGE=128
    fi

    # Linearly scale the learning rate with the batch size (baseline: 2
    # images per batch). awk is used instead of bc because bc is not
    # installed on many minimal systems, while awk is POSIX-mandated.
    BASE_LR=$(awk -v n="$IMS_PER_BATCH" 'BEGIN { printf "%.8g", 0.00025 * n / 2 }')
fi

# Resolve the directory this script lives in. Quoted so paths containing
# spaces work, and '&&' (not ';') so a failed cd cannot silently fall
# through and report the caller's cwd instead.
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)

# Set the paths for the dataset and configuration files
TRAIN_ANNOTATION="$SCRIPT_DIR/../export_coco/annotations/train.json"
TRAIN_IMAGE_DIR="$SCRIPT_DIR/../export_coco/train"
VAL_ANNOTATION="$SCRIPT_DIR/../export_coco/annotations/val.json"
VAL_IMAGE_DIR="$SCRIPT_DIR/../export_coco/val"
COCO_CONFIG_FILE="$SCRIPT_DIR/../export_coco/config/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
BASE_CONFIG_FILE="$SCRIPT_DIR/../export_coco/config/Base-RCNN-FPN.yaml"  # Base config file path
CONFIG_URL="https://raw.githubusercontent.com/facebookresearch/detectron2/main/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
BASE_CONFIG_URL="https://raw.githubusercontent.com/facebookresearch/detectron2/main/configs/Base-RCNN-FPN.yaml"  # Base config URL
TRAIN_NET_DIR="$SCRIPT_DIR/../export_coco/detectron2"
TRAIN_NET_FILE="$TRAIN_NET_DIR/train_net.py"
TRAIN_NET_URL="https://raw.githubusercontent.com/facebookresearch/detectron2/main/tools/train_net.py"
OUTPUT_DIR="$SCRIPT_DIR/../export_coco/output"

# Create necessary directories
mkdir -p "$(dirname "$COCO_CONFIG_FILE")"
mkdir -p "$TRAIN_NET_DIR"
mkdir -p "$OUTPUT_DIR"

#######################################
# Download a file unless it already exists.
# Arguments: $1 - source URL
#            $2 - destination path
#            $3 - human-readable label for log messages
# Outputs:   progress messages to stdout, errors to stderr
# Exits:     non-zero on download failure
#######################################
download_if_missing() {
    local url=$1 dest=$2 label=$3
    if [ ! -f "$dest" ]; then
        echo "Downloading ${label}..."
        # wget -O creates the destination file even when the download
        # fails; remove the partial file so the next run retries instead
        # of silently training with an empty/corrupt config.
        if ! wget "$url" -O "$dest"; then
            rm -f -- "$dest"
            echo "Error: failed to download ${label} from ${url}" >&2
            exit 1
        fi
    fi
}

download_if_missing "$CONFIG_URL" "$COCO_CONFIG_FILE" "Mask R-CNN configuration file"
download_if_missing "$BASE_CONFIG_URL" "$BASE_CONFIG_FILE" "Base-RCNN-FPN.yaml configuration file"
download_if_missing "$TRAIN_NET_URL" "$TRAIN_NET_FILE" "train_net.py file"

# Python script to configure and run the training.
# NOTE: the here-doc delimiter (END) is unquoted, so the shell expands
# every "dollar" variable below before Python ever sees the text; the
# Python source received by python3 contains only literal values.
python3 - <<END
import os
import json
from detectron2.data.datasets import register_coco_instances
from detectron2.data import DatasetCatalog
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg

# Paths from Bash script
train_annotation = "$TRAIN_ANNOTATION"
train_image_dir = "$TRAIN_IMAGE_DIR"
val_annotation = "$VAL_ANNOTATION"
val_image_dir = "$VAL_IMAGE_DIR"
resume_training = $([ "$RESUME_TRAINING" = true ] && echo True || echo False)  # Convert bash boolean to Python boolean
max_iter = $MAX_ITER
checkpoint_period = $CHECKPOINT_PERIOD
output_dir = "$OUTPUT_DIR"
coco_config_file = "$COCO_CONFIG_FILE"
ims_per_batch = $IMS_PER_BATCH
batch_size_per_image = $BATCH_SIZE_PER_IMAGE
base_lr = $BASE_LR

# Load the COCO annotation file to detect number of classes
with open(train_annotation, 'r') as f:
    coco_data = json.load(f)

# Extract number of unique categories
# (assumes the COCO 'categories' list has one entry per class and no
#  duplicates -- TODO confirm against the exporter that produced train.json)
num_classes = len(coco_data['categories'])
print(f"Detected {num_classes} classes from the dataset.")

# Register the datasets with Detectron2's catalog so the config can refer
# to them by name; must happen before the trainer is constructed.
register_coco_instances("coco_roboone_train", {}, train_annotation, train_image_dir)
register_coco_instances("coco_roboone_val", {}, val_annotation, val_image_dir)

# Confirm the datasets are registered
print("Datasets registered successfully.")
print("Available datasets:", DatasetCatalog.list())

# Set up configuration: start from the downloaded Mask R-CNN yaml, then
# override dataset names, class count, and output location.
cfg = get_cfg()
cfg.merge_from_file(coco_config_file)
cfg.DATASETS.TRAIN = ("coco_roboone_train",)
cfg.DATASETS.TEST = ("coco_roboone_val",)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes
cfg.OUTPUT_DIR = output_dir

# Set solver parameters (computed by the surrounding Bash script from
# GPU memory; see the GPU-tuning section above)
cfg.SOLVER.MAX_ITER = max_iter
cfg.SOLVER.CHECKPOINT_PERIOD = checkpoint_period
cfg.SOLVER.IMS_PER_BATCH = ims_per_batch
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = batch_size_per_image
cfg.SOLVER.BASE_LR = base_lr

# Train the model; resume=True restarts from the last checkpoint in
# OUTPUT_DIR, resume=False starts from the config's pretrained weights.
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=resume_training)
trainer.train()

print("Mask R-CNN training completed.")
END