VarGFaceNet

Posted by AI浩 on 2021/12/23 00:00:44

[Abstract] A PyTorch implementation of VarGFaceNet, a lightweight face-recognition network built from variable group convolutions. The code below defines the building blocks (SE block, variable group convolutions, normal and downsampling VarG blocks, head and embedding settings), assembles the full network, and runs a forward pass on a 112x112 input.
import torch
import torch.nn as nn


class SqueezeAndExcite(nn.Module):
    """Squeeze-and-Excitation block: global pooling followed by a two-layer
    bottleneck MLP that produces per-channel attention weights."""
    def __init__(self, in_channels, out_channels, divide=4):
        super(SqueezeAndExcite, self).__init__()
        mid_channels = in_channels // divide
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.SEblock = nn.Sequential(
            nn.Linear(in_features=in_channels, out_features=mid_channels),
            nn.ReLU6(inplace=True),
            nn.Linear(in_features=mid_channels, out_features=out_channels),
            nn.ReLU6(inplace=True),  # gate; note the canonical SE block uses a sigmoid here
        )

    def forward(self, x):
        b, c, h, w = x.size()
        out = self.pool(x)          # squeeze: (b, c, h, w) -> (b, c, 1, 1)
        out = out.view(b, -1)       # flatten to (b, c) for the MLP
        out = self.SEblock(out)     # excite: per-channel weights
        out = out.view(b, c, 1, 1)  # reshape for broadcasting
        return out * x              # rescale the input feature map
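
A quick shape check for the SE block (illustrative only; any channel count divisible by divide works):

se = SqueezeAndExcite(64, 64)
x = torch.randn(2, 64, 14, 14)
print(se(x).shape)  # torch.Size([2, 64, 14, 14]) -- same shape, channels rescaled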


def Conv1x1BNReLU(in_channels, out_channels):
    """1x1 convolution + BatchNorm + ReLU6."""
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU6(inplace=True)
    )


def Conv3x3BNReLU(in_channels, out_channels, stride):
    """3x3 convolution + BatchNorm + ReLU6."""
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU6(inplace=True)
    )


def VarGConv(in_channels, out_channels, kernel_size, stride, S):
    """Variable group convolution: S fixes the number of input channels per
    group, so groups = in_channels // S grows with the layer width."""
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, stride,
                  padding=kernel_size // 2, groups=in_channels // S, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.PReLU(),
    )


def VarGPointConv(in_channels, out_channels, stride, S, isRelu):
    """Grouped pointwise (1x1) convolution; the PReLU activation is optional."""
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, 1, stride, padding=0,
                  groups=in_channels // S, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.PReLU() if isRelu else nn.Identity(),
    )
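
The "variable group" idea is that S fixes the channels per group, so the group count (rather than the group size) scales with layer width. A minimal sanity check, with values chosen purely for illustration:

conv = VarGConv(40, 80, kernel_size=3, stride=1, S=8)  # groups = 40 // 8 = 5
print(conv(torch.randn(1, 40, 56, 56)).shape)          # torch.Size([1, 80, 56, 56])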

class VarGBlock_S1(nn.Module):
    """Normal (stride-1) VarG block: two VarGConv + pointwise pairs, an SE
    block, and an identity shortcut."""
    def __init__(self, in_planes, kernel_size, stride=1, S=8):
        super(VarGBlock_S1, self).__init__()
        planes = 2 * in_planes
        self.varGConv1 = VarGConv(in_planes, planes, kernel_size, stride, S)
        self.varGPointConv1 = VarGPointConv(planes, in_planes, stride, S, isRelu=True)
        self.varGConv2 = VarGConv(in_planes, planes, kernel_size, stride, S)
        self.varGPointConv2 = VarGPointConv(planes, in_planes, stride, S, isRelu=False)
        self.se = SqueezeAndExcite(in_planes, in_planes)
        self.prelu = nn.PReLU()

    def forward(self, x):
        residual = x
        x = self.varGPointConv1(self.varGConv1(x))
        x = self.varGPointConv2(self.varGConv2(x))
        x = self.se(x)
        # out-of-place addition keeps the input tensor intact for autograd
        return self.prelu(residual + x)

class VarGBlock_S2(nn.Module):
    """Downsampling (stride-2) VarG block: two parallel stride-2 branches are
    summed, expanded once more, and added to a stride-2 shortcut."""
    def __init__(self, in_planes, kernel_size, stride=2, S=8):
        super(VarGBlock_S2, self).__init__()
        planes = 2 * in_planes

        self.varGConvBlock_branch1 = nn.Sequential(
            VarGConv(in_planes, planes, kernel_size, stride, S),
            VarGPointConv(planes, planes, 1, S, isRelu=True),
        )
        self.varGConvBlock_branch2 = nn.Sequential(
            VarGConv(in_planes, planes, kernel_size, stride, S),
            VarGPointConv(planes, planes, 1, S, isRelu=True),
        )
        self.varGConvBlock_3 = nn.Sequential(
            VarGConv(planes, planes * 2, kernel_size, 1, S),
            VarGPointConv(planes * 2, planes, 1, S, isRelu=False),
        )
        self.shortcut = nn.Sequential(
            VarGConv(in_planes, planes, kernel_size, stride, S),
            VarGPointConv(planes, planes, 1, S, isRelu=False),
        )
        self.prelu = nn.PReLU()

    def forward(self, x):
        out = self.shortcut(x)
        x1 = self.varGConvBlock_branch1(x)
        x2 = self.varGConvBlock_branch2(x)
        x_new = self.varGConvBlock_3(x1 + x2)
        return self.prelu(out + x_new)
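
The stride-2 block halves the spatial size and doubles the channel count. A quick check with the stage-2 configuration used further below (illustrative):

block = VarGBlock_S2(40, 3)                     # stride defaults to 2
print(block(torch.randn(1, 40, 56, 56)).shape)  # torch.Size([1, 80, 28, 28])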


class HeadBlock(nn.Module):
    """Head setting: a stride-2 VarG path plus a stride-2 shortcut, halving
    the spatial resolution right after the stem."""
    def __init__(self, in_planes, kernel_size, S=8):
        super(HeadBlock, self).__init__()
        self.varGConvBlock = nn.Sequential(
            VarGConv(in_planes, in_planes, kernel_size, 2, S),
            VarGPointConv(in_planes, in_planes, 1, S, isRelu=True),
            VarGConv(in_planes, in_planes, kernel_size, 1, S),
            VarGPointConv(in_planes, in_planes, 1, S, isRelu=False),
        )
        self.shortcut = nn.Sequential(
            VarGConv(in_planes, in_planes, kernel_size, 2, S),
            VarGPointConv(in_planes, in_planes, 1, S, isRelu=False),
        )

    def forward(self, x):
        return self.shortcut(x) + self.varGConvBlock(x)


class TailEmbedding(nn.Module):
    """Embedding setting: expand to 1024 channels, collapse the feature map
    with a grouped 7x7 convolution, then project to the embedding size."""
    def __init__(self, in_planes, planes=512, S=8):
        super(TailEmbedding, self).__init__()
        self.embedding = nn.Sequential(
            Conv1x1BNReLU(in_planes, 1024),
            # padding-free grouped 7x7 conv: assumes a 7x7 input feature map
            nn.Conv2d(1024, 1024, 7, 1, padding=0, groups=1024 // S, bias=False),
            nn.Conv2d(1024, 512, 1, 1, padding=0, groups=512, bias=False),
        )
        self.fc = nn.Linear(in_features=512, out_features=planes)

    def forward(self, x):
        x = self.embedding(x)
        x = x.view(x.size(0), -1)  # flatten (b, 512, 1, 1) -> (b, 512)
        return self.fc(x)
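
Because of that padding-free 7x7 convolution, TailEmbedding assumes its input feature map is exactly 7x7, which is what a 112x112 image produces after the four stride-2 stages below. For example (illustrative):

tail = TailEmbedding(320)
print(tail(torch.randn(1, 320, 7, 7)).shape)  # torch.Size([1, 512])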


class VarGFaceNet(nn.Module):
    def __init__(self, num_classes=512):  # num_classes is really the embedding dimension
        super(VarGFaceNet, self).__init__()
        S = 8  # channels per group; the blocks below default to this value

        self.conv1 = Conv3x3BNReLU(3, 40, 1)  # stem: 112x112 -> 112x112x40
        self.head = HeadBlock(40, 3)          # -> 56x56x40
        self.stage2 = nn.Sequential(          # -> 28x28x80
            VarGBlock_S2(40, 3, 2),
            VarGBlock_S1(80, 3, 1),
            VarGBlock_S1(80, 3, 1),
        )
        self.stage3 = nn.Sequential(          # -> 14x14x160
            VarGBlock_S2(80, 3, 2),
            VarGBlock_S1(160, 3, 1),
            VarGBlock_S1(160, 3, 1),
            VarGBlock_S1(160, 3, 1),
            VarGBlock_S1(160, 3, 1),
            VarGBlock_S1(160, 3, 1),
            VarGBlock_S1(160, 3, 1),
        )
        self.stage4 = nn.Sequential(          # -> 7x7x320
            VarGBlock_S2(160, 3, 2),
            VarGBlock_S1(320, 3, 1),
            VarGBlock_S1(320, 3, 1),
            VarGBlock_S1(320, 3, 1),
        )

        self.tail = TailEmbedding(320, num_classes)  # -> 512-d embedding

    def init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:  # all convs here are bias-free
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.conv1(x)
        x = self.head(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        out = self.tail(x)
        return out


if __name__ == '__main__':
    model = VarGFaceNet()
    print(model)

    x = torch.randn(1, 3, 112, 112)  # one 112x112 RGB face crop
    out = model(x)
    print(out.shape)                 # torch.Size([1, 512])
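    # A rough size estimate (a quick sketch): total parameter count of this
    # particular implementation; the exact figure depends on choices such as
    # the SE blocks above.
    num_params = sum(p.numel() for p in model.parameters())
    print('parameters: %.2fM' % (num_params / 1e6))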

Source: wanghao.blog.csdn.net, by AI浩. Copyright belongs to the original author; please contact the author for reprint permission.

Original link: wanghao.blog.csdn.net/article/details/121607214
