RepVGG
【摘要】本文给出 RepVGG 网络的 PyTorch 实现，包括 RepVGGBlock 基本模块、RepVGG 主干网络，以及 A/B 系列各规格（A0–A2、B0–B3 及分组卷积变体）的构建函数。
# !/usr/bin/env python
# -- coding: utf-8 --
import numpy as np
import torch
import torch.nn as nn
...
# !/usr/bin/env python
# -- coding: utf-8 --
import numpy as np
import torch
import torch.nn as nn
def Conv1x1BN(in_channels,out_channels, stride=1, groups=1, bias=False):
    """Return a 1x1 convolution followed by batch normalization.

    Args:
        in_channels: number of input feature channels.
        out_channels: number of output feature channels.
        stride: convolution stride (default 1).
        groups: number of blocked connections for grouped convolution.
        bias: whether the convolution carries a bias term (default False,
            since the following BatchNorm makes it redundant).
    """
    pointwise = nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        groups=groups,
        bias=bias,
    )
    return nn.Sequential(pointwise, nn.BatchNorm2d(out_channels))
def Conv3x3BN(in_channels,out_channels, stride=1, groups=1, bias=False):
    """Return a 3x3 convolution (padding 1) followed by batch normalization.

    Args:
        in_channels: number of input feature channels.
        out_channels: number of output feature channels.
        stride: convolution stride (default 1).
        groups: number of blocked connections for grouped convolution.
        bias: whether the convolution carries a bias term (default False,
            since the following BatchNorm makes it redundant).
    """
    spatial = nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        groups=groups,
        bias=bias,
    )
    return nn.Sequential(spatial, nn.BatchNorm2d(out_channels))
class RepVGGBlock(nn.Module):
    """Basic RepVGG block.

    During training (``deploy=False``) the block is the sum of three parallel
    branches — a 3x3 conv+BN, a 1x1 conv+BN, and (when shapes allow) a
    BatchNorm identity branch — followed by ReLU.  In deploy mode the three
    branches are represented by a single re-parameterized 3x3 convolution
    with bias.
    """

    def __init__(self, in_channels, out_channels, stride=1, groups=1, deploy=False):
        super(RepVGGBlock, self).__init__()
        self.deploy = deploy
        if deploy:
            # Inference-time form: one fused 3x3 convolution.
            self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                                  kernel_size=3, stride=stride, padding=1,
                                  dilation=1, groups=groups, bias=True)
        else:
            # Training-time multi-branch form.
            self.conv1 = Conv3x3BN(in_channels, out_channels, stride=stride, groups=groups, bias=False)
            self.conv2 = Conv1x1BN(in_channels, out_channels, stride=stride, groups=groups, bias=False)
            # The identity branch only exists when input and output tensors
            # have identical shapes.
            has_identity = (out_channels == in_channels) and (stride == 1)
            self.identity = nn.BatchNorm2d(in_channels) if has_identity else None
        self.act = nn.ReLU(inplace=True)

    def forward(self, x):
        if self.deploy:
            return self.act(self.conv(x))
        out = self.conv1(x) + self.conv2(x)
        if self.identity is not None:
            out = out + self.identity(x)
        return self.act(out)
class RepVGG(nn.Module):
    """RepVGG classification backbone (Ding et al., CVPR 2021).

    Five stages of RepVGGBlocks (each stage downsamples by 2), followed by
    global average pooling and a linear classifier.

    Args:
        block_nums: list of 4 ints — total number of blocks in stages 1-4
            (the stride-2 block that opens each stage is included in the
            count, as in the reference implementation).
        width_multiplier: list of 4 floats scaling the base widths
            [64, 128, 256, 512] of stages 1-4.
        group: group count for grouped convolutions; applied to every
            second block (odd layer indices use groups=1).
        num_classes: size of the final classification layer.
        deploy: build blocks in fused single-conv inference form.
    """

    def __init__(self, block_nums, width_multiplier=None, group=1, num_classes=1000, deploy=False):
        super(RepVGG, self).__init__()
        self.deploy = deploy
        self.group = group
        assert len(width_multiplier) == 4
        # stage0 width is capped at 64 channels, matching the official code.
        self.stage0 = RepVGGBlock(in_channels=3, out_channels=min(64, int(64 * width_multiplier[0])), stride=2, deploy=self.deploy)
        # Running layer counter used to alternate grouped convolutions.
        self.cur_layer_idx = 1
        self.stage1 = self._make_layers(in_channels=min(64, int(64 * width_multiplier[0])), out_channels=int(64 * width_multiplier[0]), stride=2, block_num=block_nums[0])
        self.stage2 = self._make_layers(in_channels=int(64 * width_multiplier[0]), out_channels=int(128 * width_multiplier[1]), stride=2, block_num=block_nums[1])
        self.stage3 = self._make_layers(in_channels=int(128 * width_multiplier[1]), out_channels=int(256 * width_multiplier[2]), stride=2, block_num=block_nums[2])
        self.stage4 = self._make_layers(in_channels=int(256 * width_multiplier[2]), out_channels=int(512 * width_multiplier[3]), stride=2, block_num=block_nums[3])
        self.avg_pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.linear = nn.Linear(int(512 * width_multiplier[3]), num_classes)
        self._init_params()

    def _make_layers(self, in_channels, out_channels, stride, block_num):
        """Build one stage of exactly ``block_num`` RepVGG blocks.

        The first block carries the stage's stride; the remainder use
        stride 1.  Grouped convolution is enabled on even layer indices.
        """
        layers = []
        layers.append(RepVGGBlock(in_channels, out_channels, stride=stride, groups=self.group if self.cur_layer_idx % 2 == 0 else 1, deploy=self.deploy))
        self.cur_layer_idx += 1
        # BUGFIX: the stride-2 block above already counts toward block_num.
        # The original loop ran range(block_num), producing block_num + 1
        # blocks per stage — one more than the RepVGG-A/B configurations
        # specify in the paper and the official implementation.
        for _ in range(block_num - 1):
            layers.append(RepVGGBlock(out_channels, out_channels, stride=1, groups=self.group if self.cur_layer_idx % 2 == 0 else 1, deploy=self.deploy))
            self.cur_layer_idx += 1
        return nn.Sequential(*layers)

    def _init_params(self):
        """Kaiming-initialize convolutions; set BN to identity (weight=1, bias=0)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Run the backbone and classifier; returns (N, num_classes) logits."""
        x = self.stage0(x)
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)
        out = self.linear(x)
        return out
def _make_repvgg(block_nums, width_multiplier, group, deploy):
    """Shared builder: a 1000-class RepVGG with the given configuration."""
    return RepVGG(block_nums=block_nums, num_classes=1000,
                  width_multiplier=width_multiplier, group=group, deploy=deploy)

def RepVGG_A0(deploy=False):
    """RepVGG-A0: lightest A-series variant."""
    return _make_repvgg([2, 4, 14, 1], [0.75, 0.75, 0.75, 2.5], 1, deploy)

def RepVGG_A1(deploy=False):
    """RepVGG-A1."""
    return _make_repvgg([2, 4, 14, 1], [1, 1, 1, 2.5], 1, deploy)

def RepVGG_A2(deploy=False):
    """RepVGG-A2: widest A-series variant."""
    return _make_repvgg([2, 4, 14, 1], [1.5, 1.5, 1.5, 2.75], 1, deploy)

def RepVGG_B0(deploy=False):
    """RepVGG-B0: deeper B-series at A1 widths."""
    return _make_repvgg([4, 6, 16, 1], [1, 1, 1, 2.5], 1, deploy)

def RepVGG_B1(deploy=False):
    """RepVGG-B1."""
    return _make_repvgg([4, 6, 16, 1], [2, 2, 2, 4], 1, deploy)

def RepVGG_B1g2(deploy=False):
    """RepVGG-B1 with grouped (g=2) convolutions on alternating layers."""
    return _make_repvgg([4, 6, 16, 1], [2, 2, 2, 4], 2, deploy)

def RepVGG_B1g4(deploy=False):
    """RepVGG-B1 with grouped (g=4) convolutions on alternating layers."""
    return _make_repvgg([4, 6, 16, 1], [2, 2, 2, 4], 4, deploy)

def RepVGG_B2(deploy=False):
    """RepVGG-B2."""
    return _make_repvgg([4, 6, 16, 1], [2.5, 2.5, 2.5, 5], 1, deploy)

def RepVGG_B2g2(deploy=False):
    """RepVGG-B2 with grouped (g=2) convolutions on alternating layers."""
    return _make_repvgg([4, 6, 16, 1], [2.5, 2.5, 2.5, 5], 2, deploy)

def RepVGG_B2g4(deploy=False):
    """RepVGG-B2 with grouped (g=4) convolutions on alternating layers."""
    return _make_repvgg([4, 6, 16, 1], [2.5, 2.5, 2.5, 5], 4, deploy)
def RepVGG_B3(deploy=False):
    """RepVGG-B3: widest B-series variant.

    BUGFIX: block_nums was [1, 4, 6, 16, 1] (five entries); RepVGG only
    reads the first four, so B3 silently built stages of [1, 4, 6, 16]
    blocks instead of the B-series configuration [4, 6, 16, 1].
    """
    return RepVGG(block_nums=[4, 6, 16, 1], num_classes=1000,
                  width_multiplier=[3, 3, 3, 5], group=1, deploy=deploy)
def RepVGG_B3g2(deploy=False):
    """RepVGG-B3 with grouped (g=2) convolutions on alternating layers.

    BUGFIX: block_nums was [1, 4, 6, 16, 1] (five entries); RepVGG only
    reads the first four, so the model silently built stages of
    [1, 4, 6, 16] blocks instead of the B-series configuration [4, 6, 16, 1].
    """
    return RepVGG(block_nums=[4, 6, 16, 1], num_classes=1000,
                  width_multiplier=[3, 3, 3, 5], group=2, deploy=deploy)
def RepVGG_B3g4(deploy=False):
    """RepVGG-B3 with grouped (g=4) convolutions on alternating layers.

    BUGFIX: block_nums was [1, 4, 6, 16, 1] (five entries); RepVGG only
    reads the first four, so the model silently built stages of
    [1, 4, 6, 16] blocks instead of the B-series configuration [4, 6, 16, 1].
    """
    return RepVGG(block_nums=[4, 6, 16, 1], num_classes=1000,
                  width_multiplier=[3, 3, 3, 5], group=4, deploy=deploy)
if __name__ == '__main__':
    # Smoke test: build RepVGG-A1 and push one ImageNet-sized batch through.
    model = RepVGG_A1()
    print(model)
    dummy = torch.randn(1, 3, 224, 224)  # avoid shadowing the builtin `input`
    logits = model(dummy)
    print(logits.shape)
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
- 11
- 12
- 13
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 22
- 23
- 24
- 25
- 26
- 27
- 28
- 29
- 30
- 31
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 40
- 41
- 42
- 43
- 44
- 45
- 46
- 47
- 48
- 49
- 50
- 51
- 52
- 53
- 54
- 55
- 56
- 57
- 58
- 59
- 60
- 61
- 62
- 63
- 64
- 65
- 66
- 67
- 68
- 69
- 70
- 71
- 72
- 73
- 74
- 75
- 76
- 77
- 78
- 79
- 80
- 81
- 82
- 83
- 84
- 85
- 86
- 87
- 88
- 89
- 90
- 91
- 92
- 93
- 94
- 95
- 96
- 97
- 98
- 99
- 100
- 101
- 102
- 103
- 104
- 105
- 106
- 107
- 108
- 109
- 110
- 111
- 112
- 113
- 114
- 115
- 116
- 117
- 118
- 119
- 120
- 121
- 122
- 123
- 124
- 125
- 126
- 127
- 128
- 129
- 130
- 131
- 132
- 133
- 134
- 135
- 136
- 137
- 138
- 139
- 140
- 141
- 142
- 143
- 144
- 145
- 146
- 147
- 148
- 149
- 150
- 151
- 152
- 153
- 154
文章来源: wanghao.blog.csdn.net,作者:AI浩,版权归原作者所有,如需转载,请联系作者。
原文链接:wanghao.blog.csdn.net/article/details/121573980
【版权声明】本文为华为云社区用户转载文章,如果您发现本社区中有涉嫌抄袭的内容,欢迎发送邮件进行举报,并提供相关证据,一经查实,本社区将立刻删除涉嫌侵权内容,举报邮箱:
cloudbbs@huaweicloud.com
- 点赞
- 收藏
- 关注作者
评论(0)