# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.utils import OptMultiConfig
from mmengine.model import BaseModule

from mmyolo.registry import MODELS


class ChannelAttention(BaseModule):
"""ChannelAttention.
Args:
channels (int): The input (and output) channels of the
ChannelAttention.
reduce_ratio (int): Squeeze ratio in ChannelAttention, the intermediate
channel will be ``int(channels/ratio)``. Defaults to 16.
act_cfg (dict): Config dict for activation layer
Defaults to dict(type='ReLU').
"""
def __init__(self,
channels: int,
reduce_ratio: int = 16,
act_cfg: dict = dict(type='ReLU')):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        # Shared MLP implemented as two 1x1 convolutions: squeeze to
        # channels / reduce_ratio, then expand back to channels.
        self.fc = nn.Sequential(
            ConvModule(
                in_channels=channels,
                out_channels=int(channels / reduce_ratio),
                kernel_size=1,
                stride=1,
                conv_cfg=None,
                act_cfg=act_cfg),
            ConvModule(
                in_channels=int(channels / reduce_ratio),
                out_channels=channels,
                kernel_size=1,
                stride=1,
                conv_cfg=None,
                act_cfg=None))
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward function."""
        avgpool_out = self.fc(self.avg_pool(x))
        maxpool_out = self.fc(self.max_pool(x))
        out = self.sigmoid(avgpool_out + maxpool_out)
        return out
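
# A minimal usage sketch (illustrative; not part of the upstream file): for an
# input of shape (N, C, H, W), ChannelAttention yields a (N, C, 1, 1) map that
# callers broadcast-multiply with the input, e.g.
#   attn = ChannelAttention(channels=64)
#   weights = attn(torch.rand(2, 64, 32, 32))  # shape: (2, 64, 1, 1)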


class SpatialAttention(BaseModule):
    """SpatialAttention.

    Args:
        kernel_size (int): The size of the convolution kernel in
            SpatialAttention. Defaults to 7.
    """

    def __init__(self, kernel_size: int = 7):
        super().__init__()
        self.conv = ConvModule(
            in_channels=2,
            out_channels=1,
            kernel_size=kernel_size,
            stride=1,
            padding=kernel_size // 2,
            conv_cfg=None,
            act_cfg=dict(type='Sigmoid'))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward function."""
        # Pool channel statistics (mean and max) into two single-channel
        # maps, then fuse them with one conv followed by a sigmoid.
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        out = torch.cat([avg_out, max_out], dim=1)
        out = self.conv(out)
        return out
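
# A minimal usage sketch (illustrative; not part of the upstream file): for an
# input of shape (N, C, H, W), SpatialAttention yields a (N, 1, H, W) map that
# callers broadcast-multiply with the input, e.g.
#   attn = SpatialAttention(kernel_size=7)
#   weights = attn(torch.rand(2, 64, 32, 32))  # shape: (2, 1, 32, 32)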


@MODELS.register_module()
class CBAM(BaseModule):
    """Convolutional Block Attention Module.

    arXiv link: https://arxiv.org/abs/1807.06521v2.

    Args:
        in_channels (int): The input (and output) channels of the CBAM.
        reduce_ratio (int): Squeeze ratio in ChannelAttention, the
            intermediate channels will be ``int(in_channels / reduce_ratio)``.
            Defaults to 16.
        kernel_size (int): The size of the convolution kernel in
            SpatialAttention. Defaults to 7.
        act_cfg (dict): Config dict for the activation layer in
            ChannelAttention. Defaults to dict(type='ReLU').
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self,
                 in_channels: int,
                 reduce_ratio: int = 16,
                 kernel_size: int = 7,
                 act_cfg: dict = dict(type='ReLU'),
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg)
        self.channel_attention = ChannelAttention(
            channels=in_channels, reduce_ratio=reduce_ratio, act_cfg=act_cfg)
        self.spatial_attention = SpatialAttention(kernel_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward function."""
        # Apply channel attention first, then spatial attention,
        # re-weighting the features at each step.
        out = self.channel_attention(x) * x
        out = self.spatial_attention(out) * out
        return out
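

# A minimal smoke-test sketch (illustrative; not part of the upstream file).
# It assumes a working mmcv/mmdet/mmyolo installation and checks only that
# CBAM re-weights features without changing their shape.
if __name__ == '__main__':
    x = torch.rand(2, 16, 32, 32)
    cbam = CBAM(in_channels=16)
    assert cbam(x).shape == x.shape

    # Equivalent construction through the registry, as used from configs:
    #   cbam = MODELS.build(dict(type='CBAM', in_channels=16))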