Reproducing ResNeXt50 and ResNet50 in PyTorch

AI浩, published 2021/12/23 01:37:21
[Abstract] Network diagrams for ResNet50 and ResNeXt50, plus PyTorch reproductions of ResNet50/101/152 and of ResNeXt50.

ResNet50 and ResNeXt50 network diagrams

ResNet50/101/152 PyTorch implementation


  
import torch
import torch.nn as nn
import torchvision

print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)

__all__ = ['ResNet50', 'ResNet101', 'ResNet152']


def Conv1(in_planes, places, stride=2):
    # Stem: 7x7 conv -> BN -> ReLU -> 3x3 max pool, as in the original ResNet
    return nn.Sequential(
        nn.Conv2d(in_channels=in_planes, out_channels=places, kernel_size=7, stride=stride, padding=3, bias=False),
        nn.BatchNorm2d(places),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    )


class Bottleneck(nn.Module):
    def __init__(self, in_places, places, stride=1, downsampling=False, expansion=4):
        super(Bottleneck, self).__init__()
        self.expansion = expansion
        self.downsampling = downsampling

        # Main branch: 1x1 reduce -> 3x3 -> 1x1 expand by `expansion`
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels=in_places, out_channels=places, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=places, out_channels=places, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=places, out_channels=places * self.expansion, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(places * self.expansion),
        )

        if self.downsampling:
            # Projection shortcut: match the main branch's channels and stride
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels=in_places, out_channels=places * self.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(places * self.expansion)
            )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        residual = x
        out = self.bottleneck(x)
        if self.downsampling:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out


class ResNet(nn.Module):
    def __init__(self, blocks, num_classes=1000, expansion=4):
        super(ResNet, self).__init__()
        self.expansion = expansion

        self.conv1 = Conv1(in_planes=3, places=64)
        self.layer1 = self.make_layer(in_places=64, places=64, block=blocks[0], stride=1)
        self.layer2 = self.make_layer(in_places=256, places=128, block=blocks[1], stride=2)
        self.layer3 = self.make_layer(in_places=512, places=256, block=blocks[2], stride=2)
        self.layer4 = self.make_layer(in_places=1024, places=512, block=blocks[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(2048, num_classes)

        # Standard initialization: He init for convolutions, constants for BN
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def make_layer(self, in_places, places, block, stride):
        layers = []
        # The first block of each stage changes channels (and possibly
        # resolution), so it always uses a projection shortcut
        layers.append(Bottleneck(in_places, places, stride, downsampling=True))
        for i in range(1, block):
            layers.append(Bottleneck(places * self.expansion, places))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(self.layer4(x) if False else self.layer4(x))  # keep stages explicit below
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x


def ResNet50():
    return ResNet([3, 4, 6, 3])

def ResNet101():
    return ResNet([3, 4, 23, 3])

def ResNet152():
    return ResNet([3, 8, 36, 3])


if __name__ == '__main__':
    # model = torchvision.models.resnet50()
    model = ResNet50()
    print(model)
    input = torch.randn(1, 3, 224, 224)
    out = model(input)
    print(out.shape)  # expected: torch.Size([1, 1000])
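As a quick sanity check (my addition, not part of the original post), the hand-written ResNet50 can be compared against torchvision's reference resnet50 by counting parameters; if the reproduction is faithful, both totals should agree (roughly 25.6M for 1000 classes):

import torchvision

# Sanity-check sketch: assumes it runs in the same file as the ResNet50
# defined above. A faithful reproduction should match torchvision's
# reference resnet50 parameter for parameter.
custom = ResNet50()
reference = torchvision.models.resnet50()

num_params = lambda m: sum(p.numel() for p in m.parameters())
print("custom   :", num_params(custom))
print("reference:", num_params(reference))  # both should print the same total (~25.6M)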

ResNeXt50 PyTorch implementation


  
import torch
import torch.nn as nn


class Block(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, is_shortcut=False):
        super(Block, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.is_shortcut = is_shortcut
        # 1x1 reduce to half the output channels
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels // 2, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(out_channels // 2),
            nn.ReLU()
        )
        # 3x3 grouped convolution: 32 groups give the "32x4d" cardinality
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_channels // 2, out_channels // 2, kernel_size=3, stride=1, padding=1, groups=32, bias=False),
            nn.BatchNorm2d(out_channels // 2),
            nn.ReLU()
        )
        # 1x1 expand back to out_channels
        self.conv3 = nn.Sequential(
            nn.Conv2d(out_channels // 2, out_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )
        if is_shortcut:
            # Projection shortcut; bias=False to match the other convs
            # (the original post had bias=1, which silently enables bias)
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )

    def forward(self, x):
        x_shortcut = x
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        if self.is_shortcut:
            x_shortcut = self.shortcut(x_shortcut)
        x = x + x_shortcut
        x = self.relu(x)
        return x


class Resnext(nn.Module):
    def __init__(self, num_classes, layer=[3, 4, 6, 3]):
        super(Resnext, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        self.conv2 = self._make_layer(64, 256, 1, num=layer[0])
        self.conv3 = self._make_layer(256, 512, 2, num=layer[1])
        self.conv4 = self._make_layer(512, 1024, 2, num=layer[2])
        self.conv5 = self._make_layer(1024, 2048, 2, num=layer[3])
        self.global_average_pool = nn.AvgPool2d(kernel_size=7, stride=1)
        self.fc = nn.Linear(2048, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.global_average_pool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def _make_layer(self, in_channels, out_channels, stride, num):
        layers = []
        # First block of each stage needs a projection shortcut
        layers.append(Block(in_channels, out_channels, stride=stride, is_shortcut=True))
        for i in range(1, num):
            layers.append(Block(out_channels, out_channels, stride=1, is_shortcut=False))
        return nn.Sequential(*layers)


net = Resnext(10)
x = torch.rand((10, 3, 224, 224))
# Walk the top-level children to print each stage's output shape
for name, layer in net.named_children():
    if name != "fc":
        x = layer(x)
        print(name, 'output shape:', x.shape)
    else:
        x = x.view(x.size(0), -1)
        x = layer(x)
        print(name, 'output shape:', x.shape)
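Two points worth verifying here (my additions, not from the original post). First, the groups=32 argument is what makes this the "32x4d" variant: in the first stage the 128 mid-channels split into 32 paths of 4 channels each, which also cuts the 3x3 conv's parameter count by a factor of 32. Second, with a 1000-class head the model should line up with torchvision's resnext50_32x4d. A minimal sketch of both checks, assuming torchvision is available and Resnext is the class defined above:

import torch.nn as nn
import torchvision

# Grouped-conv arithmetic: a dense 3x3 conv 128->128 has 128*128*9 = 147,456
# weights; with groups=32 each filter sees only 128/32 = 4 input channels,
# so the count drops to 128*4*9 = 4,608.
g = nn.Conv2d(128, 128, kernel_size=3, padding=1, groups=32, bias=False)
print(g.weight.numel())  # 4608

# Parameter comparison: with num_classes=1000 the totals should agree
# if the reproduction's widths are faithful (~25.0M each).
custom = Resnext(1000)
reference = torchvision.models.resnext50_32x4d()
num_params = lambda m: sum(p.numel() for p in m.parameters())
print("custom   :", num_params(custom))
print("reference:", num_params(reference))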

Source: wanghao.blog.csdn.net. Author: AI浩. Copyright belongs to the original author; please contact the author for reprint permission.

Original link: wanghao.blog.csdn.net/article/details/115477386
