@@ -3,88 +3,12 @@ from torchvision.transforms import ToTensor
import os
import pandas as pd
import numpy as np
+import utils.layers as ly
import torch
import torchvision

-class SeperableConv3d(nn.Module):
-    def __init__(
-        self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False
-    ):
-        super(SeperableConv3d, self).__init__()
-        self.depthwise = nn.Conv3d(
-            in_channels,
-            in_channels,
-            kernel_size,
-            groups=in_channels,
-            padding=padding,
-            bias=bias,
-            stride=stride,
-        )
-        self.pointwise = nn.Conv3d(
-            in_channels, out_channels, 1, padding=padding, bias=bias, stride=stride
-        )
-
-    def forward(self, x):
-        x = self.depthwise(x)
-        x = self.pointwise(x)
-        return x
-
-
-class SplitConvBlock(nn.Module):
-    def __init__(self, in_channels, mid_channels, out_channels, split_dim, drop_rate):
-        super(SplitConvBlock, self).__init__()
-
-        self.split_dim = split_dim
-
-        self.leftconv_1 = CNN_Net.SeperableConvolutionalBlock(
-            (3, 4, 3), in_channels //2, mid_channels //2, droprate=drop_rate
-        )
-        self.rightconv_1 = CNN_Net.SeperableConvolutionalBlock(
-            (4, 3, 3), in_channels //2, mid_channels //2, droprate=drop_rate
-        )
-
-        self.leftconv_2 = CNN_Net.SeperableConvolutionalBlock(
-            (3, 4, 3), mid_channels //2, out_channels //2, droprate=drop_rate
-        )
-        self.rightconv_2 = CNN_Net.SeperableConvolutionalBlock(
-            (4, 3, 3), mid_channels //2, out_channels //2, droprate=drop_rate
-        )
-
-
-
-    def forward(self, x):
-        (left, right) = torch.tensor_split(x, 2, dim=self.split_dim)
-
-        self.leftblock = nn.Sequential(self.leftconv_1, self.leftconv_2)
-        self.rightblock = nn.Sequential(self.rightconv_1, self.rightconv_2)
-
-        left = self.leftblock(left)
-        right = self.rightblock(right)
-        return torch.cat((left, right), dim=self.split_dim)
-
-
-class MidFlowBlock(nn.Module):
-    def __init__(self, channels, drop_rate):
-        super(MidFlowBlock, self).__init__()
-
-        self.conv1 = CNN_Net.SeperableConvolutionalBlock(
-            (3, 3, 3), channels, channels, droprate=drop_rate
-        )
-        self.conv2 = CNN_Net.SeperableConvolutionalBlock(
-            (3, 3, 3), channels, channels, droprate=drop_rate
-        )
-        self.conv3 = CNN_Net.SeperableConvolutionalBlock(
-            (3, 3, 3), channels, channels, droprate=drop_rate
-        )
-
-        self.block = nn.Sequential(self.conv1, self.conv2, self.conv3)
-
-    def forward(self, x):
-        return nn.ELU(self.block(x) + x)
-
-
class Parameters:
    def __init__(self, param_dict):
        self.CNN_w_regularizer = param_dict["CNN_w_regularizer"]
@@ -103,94 +27,50 @@ class Parameters:

class CNN_Net(nn.Module):
-    def ConvolutionalBlock(
-        kernel_size,
-        in_channels,
-        out_channels,
-        stride=(1, 1, 1),
-        padding="valid",
-        droprate=None,
-        pool=False,
-    ):
-        conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding)
-        norm = nn.BatchNorm3d(out_channels)
-        elu = nn.ELU()
-        dropout = nn.Dropout(droprate)
-
-        if pool:
-            maxpool = nn.MaxPool3d(3, stride=2)
-            return nn.Sequential(conv, norm, elu, maxpool, dropout)
-        else:
-            return nn.Sequential(conv, norm, elu, dropout)
-
-    def FullyConnectedBlock(in_channels, out_channels, droprate=0.0):
-        dense = nn.Linear(in_channels, out_channels)
-        norm = nn.BatchNorm1d(out_channels)
-        elu = nn.ELU()
-        dropout = nn.Dropout(droprate)
-        return nn.Sequential(dense, norm, elu, dropout)
-
-    def SeperableConvolutionalBlock(
-        kernel_size,
-        in_channels,
-        out_channels,
-        stride=(1, 1, 1),
-        padding="valid",
-        droprate=None,
-        pool=False,
-    ):
-        conv = SeperableConv3d(in_channels, out_channels, kernel_size, stride, padding)
-        norm = nn.BatchNorm3d(out_channels)
-        elu = nn.ELU()
-        dropout = nn.Dropout(droprate)
-
-        if pool:
-            maxpool = nn.MaxPool3d(3, stride=2)
-            return nn.Sequential(conv, norm, elu, maxpool, dropout)
-        else:
-            return nn.Sequential(conv, norm, elu, dropout)
-
-    def __init__(self, image_channels, clin_data_channels, droprate, final_layer_size):
+
+    def __init__(self, image_channels, clin_data_channels, droprate):
        super().__init__()

        # Initial Convolutional Blocks
-        self.conv1 = CNN_Net.ConvolutionalBlock(
-            (11, 13, 11), image_channels, 192, stride=(4, 4, 4), droprate=droprate, pool=True
+        self.conv1 = ly.ConvolutionalBlock(
+            image_channels, 192, (11, 13, 11), stride=(4, 4, 4), droprate=droprate, pool=True
        )
-        self.conv2 = CNN_Net.ConvolutionalBlock(
-            (5, 6, 5), 192, 384, droprate=droprate, pool=True
+        self.conv2 = ly.ConvolutionalBlock(
+            192, 384, (5, 6, 5), droprate=droprate, pool=True
        )

        # Midflow Block
-        self.midflow = MidFlowBlock(384, droprate)
+        self.midflow = ly.MidFlowBlock(384, droprate)

        # Combine
        self.combined = nn.Sequential(self.conv1, self.conv2, self.midflow)

        # Split Convolutional Block
-        self.splitconv = SplitConvBlock(384, 192, 96, 4, droprate)
+        self.splitconv = ly.SplitConvBlock(384, 192, 96, 4, droprate)

        #Fully Connected Block
-        self.fc1 = CNN_Net.FullyConnectedBlock(96, 20, droprate=droprate)
+        self.fc1 = ly.FullyConnectedBlock(96, 20, droprate=droprate)

        self.image_layers = nn.Sequential(self.combined, self.splitconv).double()

        #Data Layers, fully connected
-        self.fc1 = CNN_Net.FullyConnectedBlock(clin_data_channels, 64, droprate=droprate)
-        self.fc2 = CNN_Net.FullyConnectedBlock(64, 20, droprate=droprate)
+        self.fc1 = ly.FullyConnectedBlock(clin_data_channels, 64, droprate=droprate)
+        self.fc2 = ly.FullyConnectedBlock(64, 20, droprate=droprate)

-        #Conntect Data
+        #Connect Data
        self.data_layers = nn.Sequential(self.fc1, self.fc2).double()

        #Final Dense Layer
-        self.dense1 = nn.Linear(40, final_layer_size)
-        self.dense2 = nn.Linear(final_layer_size, 2)
+        self.dense1 = nn.Linear(40, 5)
+        self.dense2 = nn.Linear(5, 2)
        self.softmax = nn.Softmax()

        self.final_layers = nn.Sequential(self.dense1, self.dense2, self.softmax)

-    def forward(self, image, clin_data):
+    def forward(self, x):
+
+        image, clin_data = x

        print(image.shape)
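
Note on the new `utils.layers` import: this change deletes the layer-building helpers (`SeperableConv3d`, `SplitConvBlock`, `MidFlowBlock`, and the `ConvolutionalBlock`/`FullyConnectedBlock`/`SeperableConvolutionalBlock` factories on `CNN_Net`) from this file and rebuilds the model through `ly.*` calls instead. The diff does not include `utils/layers.py` itself, so the following is only a sketch of what that module would need to expose, inferred from the removed code and the new call sites; note the reordered argument order `(in_channels, out_channels, kernel_size)`. `MidFlowBlock`, `SplitConvBlock`, and the separable-convolution block would move over in the same way.

```python
# Hypothetical sketch of utils/layers.py -- not part of this diff; inferred from
# the removed helpers above and the ly.* call sites in CNN_Net.__init__.
import torch.nn as nn


class SeperableConv3d(nn.Module):
    """Depthwise 3D convolution followed by a 1x1x1 pointwise convolution."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False):
        super().__init__()
        self.depthwise = nn.Conv3d(
            in_channels, in_channels, kernel_size,
            stride=stride, padding=padding, groups=in_channels, bias=bias,
        )
        self.pointwise = nn.Conv3d(in_channels, out_channels, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))


def ConvolutionalBlock(in_channels, out_channels, kernel_size,
                       stride=(1, 1, 1), padding="valid", droprate=0.0, pool=False):
    """Conv3d -> BatchNorm3d -> ELU (-> MaxPool3d) -> Dropout.

    The argument order (in_channels, out_channels, kernel_size) matches the new
    call sites; droprate defaults to 0.0 so nn.Dropout always gets a valid probability.
    """
    layers = [
        nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding),
        nn.BatchNorm3d(out_channels),
        nn.ELU(),
    ]
    if pool:
        layers.append(nn.MaxPool3d(3, stride=2))
    layers.append(nn.Dropout(droprate))
    return nn.Sequential(*layers)


def FullyConnectedBlock(in_channels, out_channels, droprate=0.0):
    """Linear -> BatchNorm1d -> ELU -> Dropout."""
    return nn.Sequential(
        nn.Linear(in_channels, out_channels),
        nn.BatchNorm1d(out_channels),
        nn.ELU(),
        nn.Dropout(droprate),
    )
```

With the helpers factored out this way, `CNN_Net` only wires blocks together, and the same building blocks can be reused by other model files.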