from torch import device, cuda
import torch.nn as nn
import utils.CNN_Layers as CustomLayers


class CNN_Net(nn.Module):
    def __init__(self, prps, final_layer_size=5):
        super(CNN_Net, self).__init__()
        self.final_layer_size = final_layer_size
        self.device = device('cuda:0' if cuda.is_available() else 'cpu')
        print(f"CNN_Net initialized. Using device: {self.device}")

        # LAYERS
        self.conv1 = CustomLayers.Conv_elu_maxpool_drop(1, 192, (11, 13, 11), stride=(4, 4, 4), pool=True, prps=prps)
        self.conv2 = CustomLayers.Conv_elu_maxpool_drop(192, 384, (5, 6, 5), stride=(1, 1, 1), pool=True, prps=prps)
        self.conv3_mid_flow = CustomLayers.Mid_flow(384, 384, prps=prps)
        self.conv4_sepConv = CustomLayers.Conv_elu_maxpool_drop(384, 96, (3, 4, 3), stride=(1, 1, 1), pool=True,
                                                                prps=prps, sep_conv=True)
        self.conv5_sepConv = CustomLayers.Conv_elu_maxpool_drop(96, 48, (3, 4, 3), stride=(1, 1, 1), pool=True,
                                                                prps=prps, sep_conv=True)
        self.fc1 = CustomLayers.Fc_elu_drop(113568, 20, prps=prps, softmax=False)  # TODO: concatenate clinical data after this
        self.fc2 = CustomLayers.Fc_elu_drop(20, final_layer_size, prps=prps, softmax=True)  # acts as the output layer for now, though this may change

    # FORWARD
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3_mid_flow(x)
        x = self.conv4_sepConv(x)
        x = self.conv5_sepConv(x)

        # Flatten the 3D feature maps to (batch, features) before the fully connected layers
        flatten_size = x.size(1) * x.size(2) * x.size(3) * x.size(4)
        x = x.view(-1, flatten_size)

        x = self.fc1(x)
        x = self.fc2(x)
        return x
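

# --------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): `prps`
# is assumed to be the hyperparameter dict consumed by utils.CNN_Layers; its
# keys are project-specific, so an empty placeholder is used here. The dummy
# input shape is likewise a placeholder — the real single-channel 3D scan
# dimensions must be whatever flattens to the 113568 features expected by fc1.
# --------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    prps = {}  # placeholder; replace with the project's real hyperparameters
    model = CNN_Net(prps=prps, final_layer_size=5)
    model = model.to(model.device)

    # Dummy single-channel 3D volume: (batch, channels, depth, height, width).
    dummy = torch.randn(1, 1, 182, 218, 182, device=model.device)  # placeholder size
    with torch.no_grad():
        scores = model(dummy)  # expected shape: (1, final_layer_size)
    print(scores.shape)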