CNN_Layers.py

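"""Building blocks for a 3D CNN: a Conv3d + ELU + MaxPool3d + Dropout block, a
residual mid-flow block built from (grouped) separable convolutions, and a
fully connected ELU + Dropout head. Layer hyper-parameters are passed in via a
`prps` dict with keys 'dilation', 'bias', 'padding_mode', and 'drop_rate'."""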

import copy

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import add, cuda, device


class Conv_elu_maxpool_drop(nn.Module):
    """Conv3d -> BatchNorm3d -> ELU -> optional MaxPool3d -> Dropout."""

    def __init__(self, input_size, output_size, kernel_size, prps,
                 stride=(1, 1, 1), pool=False, sep_conv=False, padding=0):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.pool_status = pool
        self.sep_conv_status = sep_conv

        # LAYERS
        # groups=2 gives a grouped convolution, not a true depthwise one: a
        # depthwise conv would use groups=input_size (with output_size a
        # multiple of it), followed by a 1x1x1 pointwise conv.
        if self.sep_conv_status:
            self.sepConvDepthwise = nn.Conv3d(input_size, output_size,
                                              kernel_size=kernel_size, stride=stride,
                                              padding=padding, dilation=prps['dilation'],
                                              groups=2, bias=prps["bias"],
                                              padding_mode=prps["padding_mode"])
        else:
            self.conv = nn.Conv3d(input_size, output_size, kernel_size=kernel_size,
                                  stride=stride, padding=padding, groups=1,
                                  bias=prps["bias"], padding_mode=prps["padding_mode"])
        self.normalization = nn.BatchNorm3d(output_size)
        self.elu = nn.ELU()
        self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=0)
        self.dropout = nn.Dropout(p=prps['drop_rate'])
    def forward(self, x):
        if self.sep_conv_status:
            x = self.sepConvDepthwise(x)
        else:
            x = self.conv(x)
        x = self.normalization(x)
        x = self.elu(x)
        if self.pool_status:
            x = self.maxpool(x)
        x = self.dropout(x)
        # No extra matmul with standalone weight/bias parameters is needed:
        # the conv layer above already applies its own learned weight and bias.
        return x
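

# Usage sketch (illustrative): the `prps` values are assumptions inferred from
# the keys read in __init__ ('dilation', 'bias', 'padding_mode', 'drop_rate'),
# and `_demo_conv_block` is a hypothetical helper, not part of the model.
def _demo_conv_block():
    prps = {"dilation": 1, "bias": True, "padding_mode": "zeros", "drop_rate": 0.1}
    block = Conv_elu_maxpool_drop(4, 8, kernel_size=(3, 3, 3), prps=prps,
                                  pool=True, padding=1)
    x = torch.randn(2, 4, 16, 16, 16)  # (batch, channels, depth, height, width)
    print(block(x).shape)  # torch.Size([2, 8, 7, 7, 7]): 3x3x3 max pool, stride 2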


class Mid_flow(nn.Module):
    """Residual block: three separable conv blocks plus a skip connection."""

    def __init__(self, input_size, output_size, prps):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size

        # LAYERS
        # padding='same' keeps the spatial shape so the residual add in
        # forward() is valid; input_size should equal output_size for the
        # same reason.
        self.conv = Conv_elu_maxpool_drop(input_size, output_size, kernel_size=(3, 3, 3),
                                          stride=(1, 1, 1), sep_conv=True,
                                          padding='same', prps=prps)
        self.elu = nn.ELU()
    def forward(self, x):
        residual = x.clone()
        # The same conv block (and therefore the same weights) is applied
        # three times in sequence.
        x = self.conv(x)
        x = self.conv(x)
        x = self.conv(x)
        x = add(x, residual)
        x = self.elu(x)
        return x
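

# Usage sketch (illustrative): the residual add requires the conv block to
# preserve the input shape, hence padding='same' and input_size == output_size.
# `_demo_mid_flow` and the prps values are assumptions, as above.
def _demo_mid_flow():
    prps = {"dilation": 1, "bias": True, "padding_mode": "zeros", "drop_rate": 0.1}
    block = Mid_flow(8, 8, prps=prps)
    x = torch.randn(2, 8, 16, 16, 16)
    print(block(x).shape)  # shape preserved: torch.Size([2, 8, 16, 16, 16])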


class Fc_elu_drop(nn.Module):
    """Linear -> BatchNorm1d -> ELU -> Dropout -> optional Softmax."""

    def __init__(self, input_size, output_size, softmax, prps):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size

        # LAYERS
        self.linear = nn.Linear(input_size, output_size)
        self.normalization = nn.BatchNorm1d(output_size)
        self.elu = nn.ELU()
        self.dropout = nn.Dropout(p=prps['drop_rate'])
        self.softmax_status = softmax
        if softmax:
            self.softmax = nn.Softmax(dim=1)  # normalize over the class dimension
    def forward(self, x):
        x = self.linear(x)
        x = self.normalization(x)
        x = self.elu(x)
        x = self.dropout(x)
        if self.softmax_status:
            x = self.softmax(x)
        # No extra matmul with standalone weight/bias parameters is needed:
        # nn.Linear above already applies its own learned weight and bias.
        return x
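

# Usage sketch (illustrative): the head expects a flattened 2-D input of shape
# (batch, features); BatchNorm1d needs batch size > 1 in training mode.
# `_demo_fc_block` and the prps values are assumptions, as above.
def _demo_fc_block():
    prps = {"drop_rate": 0.1}
    head = Fc_elu_drop(128, 3, softmax=True, prps=prps)
    x = torch.randn(4, 128)
    print(head(x).shape)  # torch.Size([4, 3]); each row sums to 1 after softmax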