CNN_Layers.py

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import copy

class Conv_elu_maxpool_drop(nn.Module):
    """Conv3d -> BatchNorm3d -> ELU -> (optional MaxPool3d) -> Dropout."""

    def __init__(self, input_size, output_size, kernel_size, prps,
                 stride=(1, 1, 1), pool=False, sep_conv=False, padding=0):
        super(Conv_elu_maxpool_drop, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.pool_status = pool
        self.sep_conv_status = sep_conv

        # LAYERS
        # TODO: check the group count here -- just 2, or groups=input_size
        # for a true depthwise convolution?
        if self.sep_conv_status:
            self.sepConvDepthwise = nn.Conv3d(input_size, output_size, kernel_size=kernel_size,
                                              stride=stride, padding=padding, dilation=prps['dilation'],
                                              groups=2, bias=prps['bias'], padding_mode=prps['padding_mode'])
        self.conv = nn.Conv3d(input_size, output_size, kernel_size=kernel_size, stride=stride,
                              padding=padding, groups=1, bias=prps['bias'], padding_mode=prps['padding_mode'])
        self.normalization = nn.BatchNorm3d(output_size)
        self.elu = nn.ELU()
        self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=0)
        self.dropout = nn.Dropout(p=prps['drop_rate'])
        # Only referenced by the commented-out return in forward().
        self.weight = nn.Parameter(torch.randn(input_size, output_size))
        self.bias = nn.Parameter(torch.randn(output_size))

    def forward(self, x):
        if self.sep_conv_status:
            x = self.sepConvDepthwise(x)
        else:
            x = self.conv(x)
        x = self.normalization(x)
        x = self.elu(x)
        if self.pool_status:
            x = self.maxpool(x)  # was computed but never assigned, so the pool had no effect
        x = self.dropout(x)
        # return torch.matmul(x, self.weight) + self.bias
        return x  # TODO: decide whether to apply the extra weight/bias (matmul above)
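
# Usage note (assumption, inferred from the keys read above): `prps` must
# provide 'dilation', 'bias', 'padding_mode', and 'drop_rate'. Input is a 5-D
# tensor (batch, input_size, D, H, W); with sep_conv=True, groups=2 requires
# both input_size and output_size to be divisible by 2.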

class Mid_flow(nn.Module):
    """Residual block: three separable conv blocks plus a skip connection."""

    def __init__(self, input_size, output_size, prps):
        super(Mid_flow, self).__init__()
        self.input_size = input_size
        self.output_size = output_size

        # LAYERS
        self.conv = Conv_elu_maxpool_drop(input_size, output_size, kernel_size=(3, 3, 3),
                                          stride=(1, 1, 1), sep_conv=True, padding='same', prps=prps)
        self.elu = nn.ELU()
        # Only referenced by the commented-out return in forward().
        self.weight = nn.Parameter(torch.randn(input_size, output_size))
        self.bias = nn.Parameter(torch.randn(output_size))

    def forward(self, x):
        residual = x.clone()
        # NOTE: the same module is applied three times, so the three
        # convolutions share weights; use three separate blocks if
        # independent weights are intended.
        x = self.conv(x)
        x = self.conv(x)
        x = self.conv(x)
        x = torch.add(x, residual)
        x = self.elu(x)
        # return torch.matmul(x, self.weight) + self.bias  # TODO: extra weight/bias?
        return x
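
# Usage note (assumption): the residual add requires output_size == input_size
# (the repeated self.conv calls would otherwise fail on the second pass), and
# padding='same' with stride 1 keeps spatial dims unchanged so the skip
# connection lines up; pooling stays off (pool defaults to False).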

class Fc_elu_drop(nn.Module):
    """Linear -> BatchNorm1d -> ELU -> Dropout -> (optional Softmax)."""

    def __init__(self, input_size, output_size, softmax, prps):
        super(Fc_elu_drop, self).__init__()
        self.input_size = input_size
        self.output_size = output_size

        # LAYERS
        self.linear = nn.Linear(input_size, output_size)
        self.normalization = nn.BatchNorm1d(output_size)
        self.elu = nn.ELU()
        self.dropout = nn.Dropout(p=prps['drop_rate'])
        self.softmax_status = softmax
        if softmax:
            self.softmax = nn.Softmax(dim=1)  # dim was unspecified, which triggers a warning
        # Only referenced by the commented-out return in forward().
        self.weight = nn.Parameter(torch.randn(input_size, output_size))
        self.bias = nn.Parameter(torch.randn(output_size))

    def forward(self, x):
        x = self.linear(x)
        x = self.normalization(x)
        x = self.elu(x)
        x = self.dropout(x)
        if self.softmax_status:
            x = self.softmax(x)
        # return torch.matmul(x, self.weight) + self.bias
        return x  # TODO: decide whether to apply the extra weight/bias (matmul above)
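
# Smoke test: a minimal sketch, not part of the training pipeline. The `prps`
# values below are assumptions chosen only to satisfy the keys the layers read
# ('dilation', 'bias', 'padding_mode', 'drop_rate'); shapes are illustrative.
if __name__ == "__main__":
    prps = {'dilation': 1, 'bias': True, 'padding_mode': 'zeros', 'drop_rate': 0.1}

    conv_block = Conv_elu_maxpool_drop(8, 16, kernel_size=(3, 3, 3), prps=prps, pool=True)
    x = torch.randn(2, 8, 32, 32, 32)   # (batch, channels, D, H, W)
    x = conv_block(x)                   # conv: 32 -> 30, maxpool: 30 -> 14
    print(x.shape)                      # torch.Size([2, 16, 14, 14, 14])

    mid = Mid_flow(16, 16, prps=prps)   # residual add needs in == out channels
    x = mid(x)
    print(x.shape)                      # unchanged: torch.Size([2, 16, 14, 14, 14])

    head = Fc_elu_drop(16 * 14 ** 3, 2, softmax=True, prps=prps)
    logits = head(x.flatten(start_dim=1))
    print(logits.shape)                 # torch.Size([2, 2])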