# https://github.com/uoguelph-mlrg/Cutout
import math

import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicBlock(nn.Module):
    """Pre-activation residual block: BN-ReLU-Conv applied twice."""

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super().__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        # 1x1 projection on the shortcut when the channel count or stride changes.
        self.convShortcut = (None if self.equalInOut else
                             nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                       stride=stride, padding=0, bias=False))

    def forward(self, x):
        if not self.equalInOut:
            # Pre-activate the input in place so both the residual branch
            # and the 1x1 shortcut see the same BN-ReLU output.
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)

class NetworkBlock(nn.Module):
    """A stage of nb_layers BasicBlocks; only the first block may downsample."""

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super().__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(nb_layers):
            # The first block changes width and stride; the rest keep
            # out_planes channels and stride 1.
            layers.append(block(in_planes if i == 0 else out_planes,
                                out_planes,
                                stride if i == 0 else 1,
                                dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)

class WideResNet(nn.Module):
    """Wide ResNet (Zagoruyko & Komodakis, 2016); depth must be of the form 6n + 4."""

    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super().__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert (depth - 4) % 6 == 0, "depth must be of the form 6n + 4"
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # Three stages; block2 and block3 halve the spatial resolution.
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # Final BN-ReLU before global pooling and the classifier.
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He initialization scaled by fan-out.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
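

if __name__ == "__main__":
    # Minimal smoke-test sketch, assuming CIFAR-10-style 32x32 RGB inputs;
    # the WRN-28-10 configuration and dropRate value below are illustrative,
    # not prescribed by this file.
    model = WideResNet(depth=28, num_classes=10, widen_factor=10, dropRate=0.3)
    x = torch.randn(2, 3, 32, 32)
    logits = model(x)
    print(logits.shape)  # torch.Size([2, 10])
    print(sum(p.numel() for p in model.parameters()))  # roughly 36.5M for WRN-28-10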