import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.autograd import Function

from .binarized_modules import BinarizeLinear, BinarizeConv2d


class VGG_Cifar10(nn.Module):
    """Binarized VGG-style network for CIFAR-10."""

    def __init__(self, num_classes=1000):
        super(VGG_Cifar10, self).__init__()
        # Width inflation factor: binarized conv layers use 3x the base channel count.
        self.infl_ratio = 3
        self.features = nn.Sequential(
            BinarizeConv2d(3, 128 * self.infl_ratio, kernel_size=3, stride=1, padding=1,
                           bias=True),
            nn.BatchNorm2d(128 * self.infl_ratio),
            nn.Hardtanh(inplace=True),

            BinarizeConv2d(128 * self.infl_ratio, 128 * self.infl_ratio, kernel_size=3, padding=1, bias=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.BatchNorm2d(128 * self.infl_ratio),
            nn.Hardtanh(inplace=True),

            BinarizeConv2d(128 * self.infl_ratio, 256 * self.infl_ratio, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(256 * self.infl_ratio),
            nn.Hardtanh(inplace=True),

            BinarizeConv2d(256 * self.infl_ratio, 256 * self.infl_ratio, kernel_size=3, padding=1, bias=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.BatchNorm2d(256 * self.infl_ratio),
            nn.Hardtanh(inplace=True),

            BinarizeConv2d(256 * self.infl_ratio, 512 * self.infl_ratio, kernel_size=3, padding=1, bias=True),
            nn.BatchNorm2d(512 * self.infl_ratio),
            nn.Hardtanh(inplace=True),

            BinarizeConv2d(512 * self.infl_ratio, 512, kernel_size=3, padding=1, bias=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.BatchNorm2d(512),
            nn.Hardtanh(inplace=True)
        )
        self.classifier = nn.Sequential(
            BinarizeLinear(512 * 4 * 4, 1024, bias=True),
            nn.BatchNorm1d(1024),
            nn.Hardtanh(inplace=True),
            # nn.Dropout(0.5),
            BinarizeLinear(1024, 1024, bias=True),
            nn.BatchNorm1d(1024),
            nn.Hardtanh(inplace=True),
            # nn.Dropout(0.5),
            BinarizeLinear(1024, num_classes, bias=True),
            nn.BatchNorm1d(num_classes, affine=False),
            nn.LogSoftmax(dim=1)
        )
        # Per-epoch optimizer settings and learning-rate decay schedule.
        self.regime = {
            0: {'optimizer': 'Adam', 'betas': (0.9, 0.999), 'lr': 5e-3},
            40: {'lr': 1e-3},
            80: {'lr': 5e-4},
            100: {'lr': 1e-4},
            120: {'lr': 5e-5},
            140: {'lr': 1e-5}
        }

    def forward(self, x):
        x = self.features(x)
        # Three 2x2 max-pool stages reduce 32x32 CIFAR inputs to 4x4 feature maps.
        x = x.view(-1, 512 * 4 * 4)
        x = self.classifier(x)
        return x


def vgg_cifar10_binary(**kwargs):
    num_classes = kwargs.get('num_classes', 10)
    return VGG_Cifar10(num_classes)
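

# --- Hypothetical smoke test, not part of the original file ---
# A minimal sketch of how the model might be exercised, assuming the package
# containing binarized_modules is importable (the relative import above means
# this file must be loaded as part of that package rather than run directly
# as a standalone script).
if __name__ == '__main__':
    model = vgg_cifar10_binary(num_classes=10)
    model.eval()  # use BatchNorm running statistics for a deterministic pass
    with torch.no_grad():
        # CIFAR-10 sized input: batch of 2 images, 3 channels, 32x32 pixels.
        logits = model(torch.randn(2, 3, 32, 32))
    print(logits.shape)  # expected: torch.Size([2, 10]) of log-probabilities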