Kin-Yiu, Wong committed
Commit 30d2275 · unverified · 1 Parent(s): 576457c

Create layers.py

Files changed (1)
  1. yolov9/models/layers.py +267 -0
yolov9/models/layers.py ADDED
@@ -0,0 +1,267 @@
+ import torch
+ import torch.nn as nn
+
+ # basic
+
+ class Conv(nn.Module):
+     # basic convolution
+     def __init__(self, in_channels, out_channels, kernel_size,
+                  stride=1, padding=0, dilation=1, groups=1, act=nn.ReLU(),
+                  bias=False, auto_padding=True, padding_mode='zeros'):
+
+         super().__init__()
+
+         # compute 'same' padding automatically; does not yet handle the case when dilation is a tuple
+         if auto_padding:
+             if isinstance(kernel_size, int):
+                 padding = (dilation * (kernel_size - 1) + 1) // 2
+             else:
+                 padding = [(dilation * (k - 1) + 1) // 2 for k in kernel_size]
+
+         self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, groups=groups, dilation=dilation, bias=bias, padding_mode=padding_mode)
+         self.bn = nn.BatchNorm2d(out_channels)
+         self.act = act if isinstance(act, nn.Module) else nn.Identity()
+
+     def forward(self, x):
+         return self.act(self.bn(self.conv(x)))
+
+     def forward_fuse(self, x):
+         return self.act(self.conv(x))  # used once the batch norm has been folded into the convolution
+
+     # to be implemented
+     # def fuse_conv_bn(self):
+
+
+ # RepVGG
+
+ class RepConv(nn.Module):
+     # https://github.com/DingXiaoH/RepVGG
+     def __init__(self, in_channels, out_channels, kernel_size=3,
+                  stride=1, groups=1, act=nn.ReLU()):
+
+         super().__init__()
+
+         self.conv1 = Conv(in_channels, out_channels, kernel_size, stride, groups=groups, act=False)
+         self.conv2 = Conv(in_channels, out_channels, 1, stride, groups=groups, act=False)
+         self.act = act if isinstance(act, nn.Module) else nn.Identity()
+
+     def forward(self, x):
+         return self.act(self.conv1(x) + self.conv2(x))
+
+     def forward_fuse(self, x):
+         return self.act(self.conv(x))  # self.conv is the single convolution produced by fuse_convs
+
+     # to be implemented
+     # def fuse_convs(self):
+
+
+ # ResNet
+
+ class Res(nn.Module):
+     # ResNet bottleneck
+     def __init__(self, in_channels, out_channels,
+                  groups=1, act=nn.ReLU(), ratio=0.25):
+
+         super().__init__()
+
+         h_channels = int(in_channels * ratio)
+         self.cv1 = Conv(in_channels, h_channels, 1, 1, act=act)
+         self.cv2 = Conv(h_channels, h_channels, 3, 1, groups=groups, act=act)
+         self.cv3 = Conv(h_channels, out_channels, 1, 1, act=act)
+
+     def forward(self, x):
+         return x + self.cv3(self.cv2(self.cv1(x)))
+
+
+ class RepRes(nn.Module):
+     # RepResNet bottleneck
+     def __init__(self, in_channels, out_channels,
+                  groups=1, act=nn.ReLU(), ratio=0.25):
+
+         super().__init__()
+
+         h_channels = int(in_channels * ratio)
+         self.cv1 = Conv(in_channels, h_channels, 1, 1, act=act)
+         self.cv2 = RepConv(h_channels, h_channels, 3, 1, groups=groups, act=act)
+         self.cv3 = Conv(h_channels, out_channels, 1, 1, act=act)
+
+     def forward(self, x):
+         return x + self.cv3(self.cv2(self.cv1(x)))
+
+
+ class ConvBlock(nn.Module):
+     # ConvBlock
+     def __init__(self, in_channels,
+                  repeat=1, act=nn.ReLU(), ratio=1.0):
+
+         super().__init__()
+
+         h_channels = int(in_channels * ratio)
+         self.cv1 = Conv(in_channels, in_channels, 3, 1, act=act) if repeat == 1 else Conv(in_channels, h_channels, 3, 1, act=act)
+         self.cb = nn.Sequential(*(Conv(in_channels, in_channels, 3, 1, act=act) for _ in range(repeat - 2))) if repeat > 2 else nn.Identity()
+         self.cv2 = nn.Identity() if repeat == 1 else Conv(h_channels, in_channels, 3, 1, act=act)
+
+     def forward(self, x):
+         return self.cv2(self.cb(self.cv1(x)))
+
+
+ class RepConvBlock(nn.Module):
+     # RepConvBlock
+     def __init__(self, in_channels,
+                  repeat=1, act=nn.ReLU(), ratio=1.0):
+
+         super().__init__()
+
+         h_channels = int(in_channels * ratio)
+         self.cv1 = Conv(in_channels, in_channels, 3, 1, act=act) if repeat == 1 else RepConv(in_channels, h_channels, 3, 1, act=act)
+         self.cb = nn.Sequential(*(RepConv(in_channels, in_channels, 3, 1, act=act) for _ in range(repeat - 2))) if repeat > 2 else nn.Identity()
+         self.cv2 = nn.Identity() if repeat == 1 else Conv(h_channels, in_channels, 3, 1, act=act)
+
+     def forward(self, x):
+         return self.cv2(self.cb(self.cv1(x)))
+
+
+ class ResConvBlock(nn.Module):
+     # ResConvBlock
+     def __init__(self, in_channels,
+                  repeat=1, act=nn.ReLU(), ratio=1.0):
+
+         super().__init__()
+
+         h_channels = int(in_channels * ratio)
+         self.cv1 = Conv(in_channels, in_channels, 3, 1, act=act) if repeat == 1 else Conv(in_channels, h_channels, 3, 1, act=act)
+         self.cb = nn.Sequential(*(Conv(in_channels, in_channels, 3, 1, act=act) for _ in range(repeat - 2))) if repeat > 2 else nn.Identity()
+         self.cv2 = nn.Identity() if repeat == 1 else Conv(h_channels, in_channels, 3, 1, act=act)
+
+     def forward(self, x):
+         return x + self.cv2(self.cb(self.cv1(x)))
+
+
+ class ResRepConvBlock(nn.Module):
+     # ResRepConvBlock
+     def __init__(self, in_channels,
+                  repeat=1, act=nn.ReLU(), ratio=1.0):
+
+         super().__init__()
+
+         h_channels = int(in_channels * ratio)
+         self.cv1 = Conv(in_channels, in_channels, 3, 1, act=act) if repeat == 1 else RepConv(in_channels, h_channels, 3, 1, act=act)
+         self.cb = nn.Sequential(*(RepConv(in_channels, in_channels, 3, 1, act=act) for _ in range(repeat - 2))) if repeat > 2 else nn.Identity()
+         self.cv2 = nn.Identity() if repeat == 1 else Conv(h_channels, in_channels, 3, 1, act=act)
+
+     def forward(self, x):
+         return x + self.cv2(self.cb(self.cv1(x)))
+
+
+ # Darknet
+
+ class Dark(nn.Module):
+     # DarkNet bottleneck
+     def __init__(self, in_channels, out_channels,
+                  groups=1, act=nn.ReLU(), ratio=0.5):
+
+         super().__init__()
+
+         h_channels = int(in_channels * ratio)
+         self.cv1 = Conv(in_channels, h_channels, 1, 1, act=act)
+         self.cv2 = Conv(h_channels, out_channels, 3, 1, groups=groups, act=act)
+
+     def forward(self, x):
+         return x + self.cv2(self.cv1(x))
+
+
+ class RepDark(nn.Module):
+     # RepDarkNet bottleneck
+     def __init__(self, in_channels, out_channels,
+                  groups=1, act=nn.ReLU(), ratio=0.5):
+
+         super().__init__()
+
+         h_channels = int(in_channels * ratio)
+         self.cv1 = RepConv(in_channels, h_channels, 3, 1, groups=groups, act=act)
+         self.cv2 = Conv(h_channels, out_channels, 1, 1, act=act)
+
+     def forward(self, x):
+         return x + self.cv2(self.cv1(x))
+
+
+ # CSPNet
+
+ class CSP(nn.Module):
+     # CSPNet
+     def __init__(self, in_channels, out_channels,
+                  repeat=1, cb_repeat=2, act=nn.ReLU(), ratio=1.0):
+
+         super().__init__()
+
+         h_channels = in_channels // 2
+         self.cv1 = Conv(in_channels, in_channels, 1, 1, act=act)
+         self.cb = nn.Sequential(*(ResConvBlock(h_channels, act=act, repeat=cb_repeat) for _ in range(repeat)))
+         self.cv2 = Conv(2 * h_channels, out_channels, 1, 1, act=act)
+
+     def forward(self, x):
+
+         y = list(self.cv1(x).chunk(2, 1))
+
+         return self.cv2(torch.cat((self.cb(y[0]), y[1]), 1))
+
+
+ class CSPDark(nn.Module):
+     # CSPDarkNet
+     def __init__(self, in_channels, out_channels,
+                  repeat=1, groups=1, act=nn.ReLU(), ratio=1.0):
+
+         super().__init__()
+
+         h_channels = in_channels // 2
+         self.cv1 = Conv(in_channels, in_channels, 1, 1, act=act)
+         self.cb = nn.Sequential(*(Dark(h_channels, h_channels, groups=groups, act=act, ratio=ratio) for _ in range(repeat)))
+         self.cv2 = Conv(2 * h_channels, out_channels, 1, 1, act=act)
+
+     def forward(self, x):
+
+         y = list(self.cv1(x).chunk(2, 1))
+
+         return self.cv2(torch.cat((self.cb(y[0]), y[1]), 1))
+
+
+ # ELAN
+
+ class ELAN(nn.Module):
+     # ELAN
+     def __init__(self, in_channels, out_channels, med_channels,
+                  elan_repeat=2, cb_repeat=2, ratio=1.0):
+
+         super().__init__()
+
+         h_channels = med_channels // 2
+         self.cv1 = Conv(in_channels, med_channels, 1, 1)
+         self.cb = nn.ModuleList(ConvBlock(h_channels, repeat=cb_repeat, ratio=ratio) for _ in range(elan_repeat))
+         self.cv2 = Conv((2 + elan_repeat) * h_channels, out_channels, 1, 1)
+
+     def forward(self, x):
+
+         y = list(self.cv1(x).chunk(2, 1))
+         y.extend(m(y[-1]) for m in self.cb)
+
+         return self.cv2(torch.cat(y, 1))
+
+
+ class CSPELAN(nn.Module):
+     # CSP-ELAN
+     def __init__(self, in_channels, out_channels, med_channels,
+                  elan_repeat=2, cb_repeat=2, ratio=1.0):
+
+         super().__init__()
+
+         h_channels = med_channels // 2
+         self.cv1 = Conv(in_channels, med_channels, 1, 1)
+         self.cb = nn.ModuleList(CSP(h_channels, h_channels, repeat=cb_repeat, ratio=ratio) for _ in range(elan_repeat))
+         self.cv2 = Conv((2 + elan_repeat) * h_channels, out_channels, 1, 1)
+
+     def forward(self, x):
+
+         y = list(self.cv1(x).chunk(2, 1))
+         y.extend(m(y[-1]) for m in self.cb)
+
+         return self.cv2(torch.cat(y, 1))
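
For reference, a minimal usage sketch of the added blocks follows. It is not part of the commit: the import path is assumed from the file's location (yolov9/models/layers.py), and the tensor sizes are illustrative.

    import torch
    from yolov9.models.layers import Conv, RepConv, ELAN, CSPELAN  # import path assumed from the file location

    x = torch.randn(1, 64, 56, 56)        # NCHW feature map

    conv = Conv(64, 128, 3, stride=2)     # auto_padding computes 'same' padding; stride 2 halves the resolution
    print(conv(x).shape)                  # torch.Size([1, 128, 28, 28])

    rep = RepConv(64, 64)                 # training-time 3x3 + 1x1 branches summed before the activation
    print(rep(x).shape)                   # torch.Size([1, 64, 56, 56])

    elan = ELAN(64, 128, 64)              # split med_channels in half, stack ConvBlocks, concatenate all branches
    print(elan(x).shape)                  # torch.Size([1, 128, 56, 56])

    csp_elan = CSPELAN(64, 128, 64)       # same layout with CSP blocks on the stacked branch
    print(csp_elan(x).shape)              # torch.Size([1, 128, 56, 56])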