import os

import numpy as np
import pandas as pd
import torch
import torchvision
from torch import nn
from torchvision.transforms import ToTensor
class SeperableConv3d(nn.Module):
    """Depthwise-separable 3D convolution.

    Factorizes a full 3D convolution into a depthwise convolution (one
    filter per input channel, via ``groups=in_channels``) followed by a
    1x1x1 pointwise convolution that mixes channels.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: spatial kernel of the depthwise stage.
        stride: stride of the depthwise stage (applied exactly once).
        padding: padding of the depthwise stage.
        bias: whether both convolutions carry a bias term.
    """

    def __init__(
        self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False
    ):
        super().__init__()
        # Depthwise stage: groups=in_channels yields one filter per channel.
        self.depthwise = nn.Conv3d(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            groups=in_channels,
            bias=bias,
        )
        # Pointwise 1x1x1 channel mixer. BUG FIX: the original also passed
        # `stride` and `padding` here, so any stride > 1 downsampled twice
        # and padding was applied twice; stride/padding belong to the
        # depthwise stage only.
        self.pointwise = nn.Conv3d(in_channels, out_channels, 1, bias=bias)

    def forward(self, x):
        """Apply the depthwise then the pointwise convolution."""
        return self.pointwise(self.depthwise(x))
class SplitConvBlock(nn.Module):
    """Split-pathway convolution block.

    Splits the input into two halves along ``split_dim``, pushes each half
    through its own pair of separable convolutional blocks (the branches use
    transposed asymmetric kernels), then concatenates the halves back along
    the same dimension.
    """

    def __init__(self, in_channels, mid_channels, out_channels, split_dim, drop_rate):
        """
        Args:
            in_channels: channels entering the block (shared by both halves).
            mid_channels: channels between the two conv stages.
            out_channels: channels produced by the block.
            split_dim: tensor dimension along which to split/concatenate.
            drop_rate: dropout probability for every conv block.
        """
        super().__init__()
        self.split_dim = split_dim
        # Left branch uses (3, 4, 3) kernels, right branch (4, 3, 3);
        # each branch carries half of the channel budget.
        self.leftconv_1 = CNN_Net.SeperableConvolutionalBlock(
            (3, 4, 3), in_channels // 2, mid_channels // 2, droprate=drop_rate
        )
        self.rightconv_1 = CNN_Net.SeperableConvolutionalBlock(
            (4, 3, 3), in_channels // 2, mid_channels // 2, droprate=drop_rate
        )
        self.leftconv_2 = CNN_Net.SeperableConvolutionalBlock(
            (3, 4, 3), mid_channels // 2, out_channels // 2, droprate=drop_rate
        )
        self.rightconv_2 = CNN_Net.SeperableConvolutionalBlock(
            (4, 3, 3), mid_channels // 2, out_channels // 2, droprate=drop_rate
        )
        # BUG FIX: the original rebuilt these nn.Sequential wrappers inside
        # every forward() call; build them once here. The individual
        # leftconv_*/rightconv_* attributes are kept so state_dict keys are
        # unchanged.
        self.leftblock = nn.Sequential(self.leftconv_1, self.leftconv_2)
        self.rightblock = nn.Sequential(self.rightconv_1, self.rightconv_2)

    def forward(self, x):
        """Split the input, transform each half, and re-concatenate."""
        left, right = torch.tensor_split(x, 2, dim=self.split_dim)
        return torch.cat(
            (self.leftblock(left), self.rightblock(right)), dim=self.split_dim
        )
class MidFlowBlock(nn.Module):
    """Residual mid-flow section: three separable convolutional blocks whose
    output is added back to the input, followed by an ELU activation."""

    def __init__(self, channels, drop_rate):
        """
        Args:
            channels: channel count preserved throughout the block.
            drop_rate: dropout probability for each conv block.
        """
        super().__init__()
        # "same" padding keeps the spatial size fixed so the residual
        # addition in forward() is shape-compatible. (BUG FIX: the original
        # used the default "valid" padding, which shrinks the feature map
        # and makes `block(x) + x` fail.)
        self.conv1 = CNN_Net.SeperableConvolutionalBlock(
            (3, 3, 3), channels, channels, padding="same", droprate=drop_rate
        )
        self.conv2 = CNN_Net.SeperableConvolutionalBlock(
            (3, 3, 3), channels, channels, padding="same", droprate=drop_rate
        )
        self.conv3 = CNN_Net.SeperableConvolutionalBlock(
            (3, 3, 3), channels, channels, padding="same", droprate=drop_rate
        )
        self.block = nn.Sequential(self.conv1, self.conv2, self.conv3)
        # BUG FIX: the original forward did `nn.ELU(self.block(x) + x)`,
        # which *constructs* an ELU module (passing the tensor as its
        # `alpha` argument) instead of applying the activation. ELU has no
        # parameters, so registering it here leaves the state_dict unchanged.
        self.elu = nn.ELU()

    def forward(self, x):
        """Return ELU(block(x) + x)."""
        return self.elu(self.block(x) + x)
class Parameters:
    """Configuration bag that exposes the entries of a parameter dictionary
    as instance attributes."""

    def __init__(self, param_dict):
        """Copy the expected configuration entries out of ``param_dict``.

        Raises:
            KeyError: if any expected key is missing (in declaration order).
        """
        for key in (
            "CNN_w_regularizer",
            "RNN_w_regularizer",
            "CNN_batch_size",
            "RNN_batch_size",
            "CNN_drop_rate",
            "RNN_drop_rate",
            "epochs",
            "gpu",
        ):
            setattr(self, key, param_dict[key])
        # The saved model always lives directly under the supplied directory.
        self.model_filepath = param_dict["model_filepath"] + "/net.h5"
        for key in ("num_clinical", "image_shape", "final_layer_size", "optimizer"):
            setattr(self, key, param_dict[key])
class CNN_Net(nn.Module):
    """Two-pathway classifier fusing 3D image features with clinical data.

    The imaging pathway is conv1 -> conv2 -> midflow -> splitconv followed
    by a fully connected projection to 20 features; the clinical pathway is
    two fully connected blocks ending in 20 features. The two 20-wide
    embeddings are concatenated (40 features) and classified into 2 classes
    by the final dense head.
    """

    @staticmethod
    def ConvolutionalBlock(
        kernel_size,
        in_channels,
        out_channels,
        stride=(1, 1, 1),
        padding="valid",
        droprate=0.0,
        pool=False,
    ):
        """Build Conv3d -> BatchNorm3d -> ELU [-> MaxPool3d] -> Dropout.

        BUG FIX: `droprate` used to default to None, which makes nn.Dropout
        raise when the argument is omitted; 0.0 is a no-op default.
        """
        conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding)
        norm = nn.BatchNorm3d(out_channels)
        elu = nn.ELU()
        dropout = nn.Dropout(droprate)
        if pool:
            return nn.Sequential(conv, norm, elu, nn.MaxPool3d(3, stride=2), dropout)
        return nn.Sequential(conv, norm, elu, dropout)

    @staticmethod
    def FullyConnectedBlock(in_channels, out_channels, droprate=0.0):
        """Build Linear -> BatchNorm1d -> ELU -> Dropout."""
        return nn.Sequential(
            nn.Linear(in_channels, out_channels),
            nn.BatchNorm1d(out_channels),
            nn.ELU(),
            nn.Dropout(droprate),
        )

    @staticmethod
    def SeperableConvolutionalBlock(
        kernel_size,
        in_channels,
        out_channels,
        stride=(1, 1, 1),
        padding="valid",
        droprate=0.0,
        pool=False,
    ):
        """Build SeperableConv3d -> BatchNorm3d -> ELU [-> MaxPool3d] -> Dropout.

        Same droprate-default fix as ConvolutionalBlock.
        """
        conv = SeperableConv3d(in_channels, out_channels, kernel_size, stride, padding)
        norm = nn.BatchNorm3d(out_channels)
        elu = nn.ELU()
        dropout = nn.Dropout(droprate)
        if pool:
            return nn.Sequential(conv, norm, elu, nn.MaxPool3d(3, stride=2), dropout)
        return nn.Sequential(conv, norm, elu, dropout)

    def __init__(self, image_channels, clin_data_channels, droprate, final_layer_size):
        """
        Args:
            image_channels: channels of the input 3D image.
            clin_data_channels: width of the clinical feature vector.
            droprate: dropout probability used throughout.
            final_layer_size: width of the penultimate dense layer.
        """
        super().__init__()

        # --- Imaging pathway ---
        self.conv1 = CNN_Net.ConvolutionalBlock(
            (11, 13, 11), image_channels, 192, stride=(4, 4, 4), droprate=droprate, pool=True
        )
        self.conv2 = CNN_Net.ConvolutionalBlock(
            (5, 6, 5), 192, 384, droprate=droprate, pool=True
        )
        self.midflow = MidFlowBlock(384, droprate)
        self.combined = nn.Sequential(self.conv1, self.conv2, self.midflow)
        self.splitconv = SplitConvBlock(384, 192, 96, 4, droprate)
        self.image_layers = nn.Sequential(self.combined, self.splitconv).double()
        # BUG FIX: this projection was originally assigned to self.fc1 and
        # then immediately overwritten by the clinical fc1 below, so the
        # image features were never reduced to 20 units.
        self.fc_image = CNN_Net.FullyConnectedBlock(96, 20, droprate=droprate).double()

        # --- Clinical pathway ---
        self.fc1 = CNN_Net.FullyConnectedBlock(clin_data_channels, 64, droprate=droprate)
        self.fc2 = CNN_Net.FullyConnectedBlock(64, 20, droprate=droprate)
        self.data_layers = nn.Sequential(self.fc1, self.fc2).double()

        # --- Fusion head: 20 image + 20 clinical = 40 features ---
        self.dense1 = nn.Linear(40, final_layer_size)
        self.dense2 = nn.Linear(final_layer_size, 2)
        # dim=1 is the class dimension for (batch, features) input; the
        # original omitted it (deprecation warning, same resolved dim).
        self.softmax = nn.Softmax(dim=1)
        # .double() keeps the head's dtype consistent with the two double
        # pathways feeding it (the original head stayed float32 and would
        # raise a dtype mismatch at dense1).
        self.final_layers = nn.Sequential(self.dense1, self.dense2, self.softmax).double()

    def forward(self, image, clin_data):
        """Run both pathways and classify the fused embedding.

        Args:
            image: double tensor, (batch, image_channels, D, H, W).
            clin_data: double tensor, (batch, clin_data_channels).

        Returns:
            (batch, 2) tensor of class probabilities.
        """
        image = self.image_layers(image)
        # NOTE(review): assumes the conv stack has reduced the spatial dims
        # to 1x1x1 so the flattened width equals 96 — confirm for the
        # intended image_shape.
        image = torch.flatten(image, start_dim=1)
        image = self.fc_image(image)
        # BUG FIX: the original concatenated the raw clinical vector and
        # never used self.data_layers; route it through its pathway so the
        # concatenation is 20 + 20 = 40 wide, matching dense1. (A leftover
        # debug print of image.shape was also removed.)
        clin_data = self.data_layers(clin_data)
        x = torch.cat((image, clin_data), dim=1)
        return self.final_layers(x)
-
|