|
@@ -1,222 +0,0 @@
|
|
|
-from torch import device, cuda
|
|
|
-import torch
|
|
|
-import torch.nn as nn
|
|
|
-import torch.nn.functional as F
|
|
|
-import torch.optim as optim
|
|
|
-import utils.CNN_methods as CNN
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
class CNN_Net(nn.Module):
    """3D CNN wrapper around the xalex3D feature extractor.

    Builds the underlying xalex3D model, selects a CUDA device when one is
    available, and exposes simple training / evaluation loops over PyTorch
    dataloaders.
    """

    def __init__(self, mri_volume, params):
        """Create the network.

        Args:
            mri_volume: example MRI volume forwarded to the xalex3D constructor.
            params: unused here; kept so existing callers keep working.
        """
        super().__init__()

        self.model = xalex3D(mri_volume)
        # Prefer the first CUDA device when available, otherwise run on CPU.
        self.device = device('cuda:0' if cuda.is_available() else 'cpu')

        print("CNN Initialized. Using: " + str(self.device))

    def forward(self, x):
        """Run the feature extractor and apply a final ReLU."""
        # NOTE(review): clinical inputs are not wired up yet, hence the [].
        x = F.relu(self.model.f(x, []))
        return x

    # NOTE(review): this overrides nn.Module.train(mode=True). Renaming would
    # break existing callers, so the name is kept; code that needs the standard
    # train/eval toggle must set self.training directly or call .eval().
    def train(self, trainloader, PATH):
        """Train for two epochs (Adam, cross-entropy) and save weights to PATH.

        Args:
            trainloader: iterable of (inputs, labels) mini-batches.
            PATH: filesystem path the state_dict is saved to when done.
        """
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(self.parameters(), lr=1e-5)

        for epoch in range(2):
            running_loss = 0.0
            for i, data in enumerate(trainloader, 0):
                inputs, labels = data[0].to(self.device), data[1].to(self.device)

                optimizer.zero_grad()

                outputs = self.forward(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                # Report the average loss every 2000 mini-batches.
                running_loss += loss.item()
                if i % 2000 == 1999:
                    print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}')
                    running_loss = 0.0

        print('Finished Training')

        torch.save(self.state_dict(), PATH)

    def test(self, testloader):
        """Evaluate top-1 accuracy over testloader and print the result.

        Args:
            testloader: iterable of (images, labels) mini-batches.
        """
        correct = 0
        total = 0

        with torch.no_grad():
            for data in testloader:
                # BUG FIX: the original read `self.devie` (typo), which raised
                # AttributeError on the very first evaluation batch.
                images, labels = data[0].to(self.device), data[1].to(self.device)

                outputs = self.forward(images)

                # Predicted class = index of the max logit per sample.
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        print(f'Accuracy of the network: {100 * correct // total} %')
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-'''
|
|
|
-XAlex3D model.
|
|
|
-
|
|
|
-Functions used:
|
|
|
-- conv_elu_maxpool_drop(in_channel, filters, kernel_size, stride=(1,1,1), padding=0, dilation=1,
|
|
|
- groups=1, bias=True, padding_mode='zeros', pool=False, drop_rate=0, sep_conv = False)
|
|
|
-'''
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
class xalex3D(nn.Module):
    """XAlex3D feature extractor built from the CNN_methods layer factories."""

    def __init__(self, mri_volume, drop_rate=0, final_layer_size=50):
        """Store hyper-parameters.

        Args:
            mri_volume: unused here; kept so existing callers keep working.
            drop_rate: dropout rate handed to every conv/fc helper in f().
            final_layer_size: size of the final embedding (currently unused
                by f(); see the commented-out Keras reference at file end).
        """
        # BUG FIX: super().__init__() was missing, so nn.Module state
        # (training flag, parameter/buffer registries, hooks) was never
        # initialized and standard Module operations failed.
        super().__init__()
        self.drop_rate = drop_rate
        self.final_layer_size = final_layer_size

    def f(self, mri_volume, clinical_inputs):
        """Forward pass: conv stack -> mid flow -> grouped convs -> two FC layers.

        NOTE(review): each call constructs *fresh* layers via the CNN.*
        factories, so no weights are registered on this module or persist
        between calls — presumably a work in progress; confirm before
        relying on training results. `clinical_inputs` is accepted but
        currently unused.
        """
        # Large-kernel strided conv + pool, mirroring the AlexNet-style stem.
        conv1 = CNN.conv_elu_maxpool_drop(mri_volume.size(), 192, (11, 13, 11), stride=(4, 4, 4), drop_rate=self.drop_rate, pool=True)(mri_volume)

        conv2 = CNN.conv_elu_maxpool_drop(conv1.size(), 384, (5, 6, 5), stride=(1, 1, 1), drop_rate=self.drop_rate, pool=True)(conv1)

        print(f"Residual: {conv2.shape}")
        # Residual "mid flow" stage operating at 384 channels.
        conv_mid_3 = CNN.mid_flow(conv2, self.drop_rate, filters=384)

        # Two grouped convolutions (groups=2) reduce channels 384 -> 96 -> 48.
        groupConv4 = CNN.conv_elu_maxpool_drop(conv_mid_3.size(), 96, (3, 4, 3), stride=(1, 1, 1), drop_rate=self.drop_rate,
                                               pool=True, groups=2)(conv_mid_3)
        groupConv5 = CNN.conv_elu_maxpool_drop(groupConv4.size(), 48, (3, 4, 3), stride=(1, 1, 1), drop_rate=self.drop_rate,
                                               pool=True, groups=2)(groupConv4)

        # Fully connected head producing the output features.
        fc1 = CNN.fc_elu_drop(groupConv5.size(), 20, drop_rate=self.drop_rate)(groupConv5)

        fc2 = CNN.fc_elu_drop(fc1.size(), 50, drop_rate=self.drop_rate)(fc1)

        return fc2
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-""" LAST PART:
|
|
|
-
|
|
|
-
|
|
|
- flat_conv_6 = Reshape((np.prod(K.int_shape(conv6_concat)[1:]),))(conv6_concat)
|
|
|
-
|
|
|
-
|
|
|
- vol_fc1 = _fc_bn_relu_drop(64, w_regularizer=w_regularizer,
|
|
|
- drop_rate=drop_rate)(clinical_inputs)
|
|
|
-
|
|
|
- flat_volume = _fc_bn_relu_drop(20, w_regularizer=w_regularizer,
|
|
|
- drop_rate=drop_rate)(vol_fc1)
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- fc1 = _fc_bn_relu_drop(20, w_regularizer, drop_rate=drop_rate, name='final_conv')(flat_conv_6)
|
|
|
- flat = concatenate([fc1, flat_volume])
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- fc2 = Dense(units=final_layer_size, activation='linear', kernel_regularizer=w_regularizer, name='features')(
|
|
|
- flat)
|
|
|
-
|
|
|
-
|
|
|
-''' FULL CODE:
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- conv1_left = _conv_bn_relu_pool_drop(192, 11, 13, 11, strides=(4, 4, 4), w_regularizer=w_regularizer,
|
|
|
- drop_rate=drop_rate, pool=True)(mri_volume)
|
|
|
-
|
|
|
-
|
|
|
- conv2_left = _conv_bn_relu_pool_drop(384, 5, 6, 5, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
|
|
|
- conv1_left)
|
|
|
-
|
|
|
-
|
|
|
- print('residual shape ' + str(conv2_left.shape))
|
|
|
- conv_mid_1 = mid_flow(conv2_left, drop_rate, w_regularizer,
|
|
|
- filters=384)
|
|
|
-
|
|
|
-
|
|
|
- conv_mid_1_1 = Lambda(lambda x: x[:, :, :, :, :192])(conv_mid_1)
|
|
|
- conv_mid_1_2 = Lambda(lambda x: x[:, :, :, :, 192:])(conv_mid_1)
|
|
|
-
|
|
|
- conv5_left = _conv_bn_relu_pool_drop(96, 3, 4, 3, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
|
|
|
- conv_mid_1_1)
|
|
|
-
|
|
|
- conv5_right = _conv_bn_relu_pool_drop(96, 3, 4, 3, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
|
|
|
- conv_mid_1_2)
|
|
|
-
|
|
|
- conv6_left = _conv_bn_relu_pool_drop(48, 3, 4, 3, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
|
|
|
- conv5_left)
|
|
|
-
|
|
|
- conv6_right = _conv_bn_relu_pool_drop(48, 3, 4, 3, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
|
|
|
- conv5_right)
|
|
|
-
|
|
|
- conv6_concat = concatenate([conv6_left, conv6_right], axis=-1)
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- flat_conv_6 = Reshape((np.prod(K.int_shape(conv6_concat)[1:]),))(conv6_concat)
|
|
|
-
|
|
|
-
|
|
|
- vol_fc1 = _fc_bn_relu_drop(64, w_regularizer=w_regularizer,
|
|
|
- drop_rate=drop_rate)(clinical_inputs)
|
|
|
-
|
|
|
- flat_volume = _fc_bn_relu_drop(20, w_regularizer=w_regularizer,
|
|
|
- drop_rate=drop_rate)(vol_fc1)
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- fc1 = _fc_bn_relu_drop(20, w_regularizer, drop_rate=drop_rate, name='final_conv')(flat_conv_6)
|
|
|
-
|
|
|
- flat = concatenate([fc1, flat_volume])
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- fc2 = Dense(units=final_layer_size, activation='linear', kernel_regularizer=w_regularizer, name='features')(
|
|
|
- flat)
|
|
|
- '''
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|