newCNN_Layers.py

import torch
import torch.nn as nn

class Conv_elu_maxpool_drop(nn.Module):
    """Conv3d -> BatchNorm3d -> ELU block, with optional max pooling and dropout."""

    def __init__(self, input_size, output_size, kernel_size, prps,
                 stride=(1, 1, 1), pool=False, sep_conv=False, padding=0):
        super(Conv_elu_maxpool_drop, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.pool_status = pool
        self.sep_conv_status = sep_conv

        # LAYERS
        # TODO: check the group count. groups=2 only splits the channels in two;
        # a true depthwise convolution needs groups=input_size (see the sketch
        # after this class).
        if self.sep_conv_status:
            self.sepConvDepthwise = nn.Conv3d(input_size, output_size, kernel_size=kernel_size,
                                              stride=stride, padding=padding, dilation=prps['dilation'],
                                              groups=2, bias=prps['bias'], padding_mode=prps['padding_mode'])
        else:
            self.conv = nn.Conv3d(input_size, output_size, kernel_size=kernel_size,
                                  stride=stride, padding=padding, groups=1,
                                  bias=prps['bias'], padding_mode=prps['padding_mode'])
        self.normalization = nn.BatchNorm3d(output_size)
        self.elu = nn.ELU()
        self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=0)
        self.dropout = nn.Dropout(p=prps['drop_rate'])

        # Extra affine parameters; unused unless the commented-out return below is
        # re-enabled. Note that matmul(x, self.weight) would contract over the last
        # spatial dim of the (N, C, D, H, W) output, not the channel dim.
        self.weight = nn.Parameter(torch.randn(input_size, output_size))
        self.bias = nn.Parameter(torch.randn(output_size))

    def forward(self, x):
        x = self.sepConvDepthwise(x) if self.sep_conv_status else self.conv(x)
        x = self.normalization(x)
        x = self.elu(x)
        if self.pool_status:
            x = self.maxpool(x)
        x = self.dropout(x)
        # return torch.matmul(x, self.weight) + self.bias
        return x  # TODO: decide whether the extra weight/bias belong here
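
# A minimal sketch answering the TODO above, not part of the original file: a
# textbook depthwise-separable 3-D convolution uses groups=in_channels (one
# filter per input channel) followed by a 1x1x1 pointwise convolution that
# mixes the channels. The class name is hypothetical.
class DepthwiseSeparableConv3d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=(3, 3, 3), padding='same'):
        super(DepthwiseSeparableConv3d, self).__init__()
        # Depthwise: each input channel is convolved with its own filter.
        self.depthwise = nn.Conv3d(in_channels, in_channels, kernel_size=kernel_size,
                                   padding=padding, groups=in_channels)
        # Pointwise: 1x1x1 convolution mixes information across channels.
        self.pointwise = nn.Conv3d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))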

class Mid_flow(nn.Module):
    """Three separable conv passes with a residual (skip) connection."""

    def __init__(self, input_size, output_size, prps):
        super(Mid_flow, self).__init__()
        self.input_size = input_size
        self.output_size = output_size

        # LAYERS
        # padding='same' with stride 1 preserves the spatial dims, so the residual
        # add only needs matching channel counts.
        assert input_size == output_size, "residual add requires input_size == output_size"
        self.conv = Conv_elu_maxpool_drop(input_size, output_size, kernel_size=(3, 3, 3),
                                          stride=(1, 1, 1), sep_conv=True, padding='same', prps=prps)
        self.elu = nn.ELU()

        # Extra affine parameters; unused unless the commented-out return below is re-enabled.
        self.weight = nn.Parameter(torch.randn(input_size, output_size))
        self.bias = nn.Parameter(torch.randn(output_size))

    def forward(self, x):
        residual = x
        # NOTE: the same module is applied three times, so all three passes share
        # one set of weights (see the unshared variant after this class).
        x = self.conv(x)
        x = self.conv(x)
        x = self.conv(x)
        x = x + residual
        x = self.elu(x)
        # return torch.matmul(x, self.weight) + self.bias
        return x  # TODO: decide whether the extra weight/bias belong here
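
# Hypothetical variant for comparison, not part of the original file: three
# independent conv blocks instead of one module reused three times, so each
# stage learns its own weights (as in Xception's middle flow).
class Mid_flow_unshared(nn.Module):
    def __init__(self, input_size, output_size, prps):
        super(Mid_flow_unshared, self).__init__()
        assert input_size == output_size, "residual add requires matching channels"
        self.convs = nn.Sequential(*[
            Conv_elu_maxpool_drop(input_size, output_size, kernel_size=(3, 3, 3),
                                  stride=(1, 1, 1), sep_conv=True, padding='same', prps=prps)
            for _ in range(3)])
        self.elu = nn.ELU()

    def forward(self, x):
        return self.elu(self.convs(x) + x)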

class Fc_elu_drop(nn.Module):
    """Linear -> BatchNorm1d -> ELU -> Dropout block."""

    def __init__(self, input_size, output_size, prps):
        super(Fc_elu_drop, self).__init__()
        self.input_size = input_size
        self.output_size = output_size

        # LAYERS
        self.linear = nn.Linear(input_size, output_size)
        self.normalization = nn.BatchNorm1d(output_size)
        self.elu = nn.ELU()
        self.dropout = nn.Dropout(p=prps['drop_rate'])

        # Extra affine parameters; unused unless the commented-out return below is
        # re-enabled (nn.Linear already applies its own weight and bias).
        self.weight = nn.Parameter(torch.randn(input_size, output_size))
        self.bias = nn.Parameter(torch.randn(output_size))

    def forward(self, x):
        x = self.linear(x)
        x = self.normalization(x)
        x = self.elu(x)
        x = self.dropout(x)
        # return torch.matmul(x, self.weight) + self.bias
        return x  # TODO: decide whether the extra weight/bias belong here
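
# Minimal shape sanity check, assuming a prps dict with the keys the layers
# read ('dilation', 'bias', 'padding_mode', 'drop_rate'); the real values come
# from the project's config. Run with: python newCNN_Layers.py
if __name__ == "__main__":
    prps = {'dilation': 1, 'bias': True, 'padding_mode': 'zeros', 'drop_rate': 0.1}

    conv_block = Conv_elu_maxpool_drop(4, 8, kernel_size=(3, 3, 3), prps=prps, pool=True)
    mid = Mid_flow(8, 8, prps=prps)
    fc = Fc_elu_drop(16, 2, prps=prps)

    x = torch.randn(2, 4, 16, 16, 16)   # (N, C, D, H, W)
    y = conv_block(x)
    print(y.shape)                       # torch.Size([2, 8, 6, 6, 6]) after conv + pool
    z = mid(y)
    print(z.shape)                       # unchanged: the residual block preserves shape
    out = fc(torch.randn(2, 16))
    print(out.shape)                     # torch.Size([2, 2])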