ソースを参照

Model clean-up

Nicholas Schense 11 ヶ月 前
コミット
22dc3a8802

+ 3 - 0
.gitignore

@@ -0,0 +1,3 @@
+.venv
+__pycache__
+utils/__pycache__

+ 8 - 0
main.py

@@ -54,6 +54,8 @@ xls_file = './Lp_ADNIMERGE.csv'
 
 
 def evaluate_model(seed):
 def evaluate_model(seed):
     training_data, val_data, test_data = prepare_datasets(mri_datapath, xls_file, val_split, seed)
     training_data, val_data, test_data = prepare_datasets(mri_datapath, xls_file, val_split, seed)
+    
+
     batch_size = 64
     batch_size = 64
 
 
     # Create data loaders
     # Create data loaders
@@ -61,6 +63,12 @@ def evaluate_model(seed):
     test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
     test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
     val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=True)
     val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=True)
 
 
+    #Print Shape of Image Data
+    print("Shape of MRI Data: ", training_data[0][0].shape)
+    print("Shape of XLS Data: ", training_data[0][1].shape)
+
+
+
     model_CNN = models.CNN_Net(1, 1, 0.5).double()
     model_CNN = models.CNN_Net(1, 1, 0.5).double()
     criterion = nn.CrossEntropyLoss()
     criterion = nn.CrossEntropyLoss()
     optimizer = optim.Adam(model_CNN.parameters(), lr=0.001)
     optimizer = optim.Adam(model_CNN.parameters(), lr=0.001)

BIN
utils/__pycache__/CNN.cpython-38.pyc


BIN
utils/__pycache__/CNN_Layers.cpython-38.pyc


BIN
utils/__pycache__/layers.cpython-38.pyc


BIN
utils/__pycache__/models.cpython-38.pyc


BIN
utils/__pycache__/preprocess.cpython-38.pyc


BIN
utils/__pycache__/show_image.cpython-38.pyc


+ 23 - 21
utils/layers.py

@@ -8,11 +8,11 @@ import torch
 import torchvision
 import torchvision
 
 
 
 
-class SeperableConv3d(nn.Module):
+class SepConv3d(nn.Module):
     def __init__(
     def __init__(
         self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False
         self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False
     ):
     ):
-        super(SeperableConv3d, self).__init__()
+        super(SepConv3d, self).__init__()
         self.depthwise = nn.Conv3d(
         self.depthwise = nn.Conv3d(
             in_channels,
             in_channels,
             out_channels,
             out_channels,
@@ -34,17 +34,17 @@ class SplitConvBlock(nn.Module):
 
 
         self.split_dim = split_dim
         self.split_dim = split_dim
 
 
-        self.leftconv_1 = SeperableConvolutionalBlock(
+        self.leftconv_1 = SepConvBlock(
             in_channels //2, mid_channels //2, (3, 4, 3),  droprate=drop_rate
             in_channels //2, mid_channels //2, (3, 4, 3),  droprate=drop_rate
         )
         )
-        self.rightconv_1 = SeperableConvolutionalBlock(
+        self.rightconv_1 = SepConvBlock(
             in_channels //2, mid_channels //2, (3, 4, 3),  droprate=drop_rate
             in_channels //2, mid_channels //2, (3, 4, 3),  droprate=drop_rate
         )
         )
 
 
-        self.leftconv_2 = SeperableConvolutionalBlock(
+        self.leftconv_2 = SepConvBlock(
             mid_channels //2, out_channels //2, (3, 4, 3),  droprate=drop_rate
             mid_channels //2, out_channels //2, (3, 4, 3),  droprate=drop_rate
         )
         )
-        self.rightconv_2 = SeperableConvolutionalBlock(
+        self.rightconv_2 = SepConvBlock(
             mid_channels //2, out_channels //2, (3, 4, 3),  droprate=drop_rate
             mid_channels //2, out_channels //2, (3, 4, 3),  droprate=drop_rate
         )
         )
 
 
@@ -53,6 +53,8 @@ class SplitConvBlock(nn.Module):
     def forward(self, x):
     def forward(self, x):
         (left, right) = torch.tensor_split(x, 2, dim=self.split_dim)
         (left, right) = torch.tensor_split(x, 2, dim=self.split_dim)
 
 
+        print(left.shape, right.shape)
+
         self.leftblock = nn.Sequential(self.leftconv_1, self.leftconv_2)
         self.leftblock = nn.Sequential(self.leftconv_1, self.leftconv_2)
         self.rightblock = nn.Sequential(self.rightconv_1, self.rightconv_2)
         self.rightblock = nn.Sequential(self.rightconv_1, self.rightconv_2)
 
 
@@ -66,25 +68,25 @@ class MidFlowBlock(nn.Module):
     def __init__(self, channels, drop_rate):
     def __init__(self, channels, drop_rate):
         super(MidFlowBlock, self).__init__()
         super(MidFlowBlock, self).__init__()
 
 
-        self.conv1 = ConvolutionalBlock(
-            channels, channels, (3, 3, 3),  droprate=drop_rate
+        self.conv1 = ConvBlock(
+            channels, channels, (3, 3, 3),  droprate=drop_rate, padding="same"
         )
         )
-        self.conv2 = ConvolutionalBlock(
-            channels, channels, (3, 3, 3), droprate=drop_rate
+        self.conv2 = ConvBlock(
+            channels, channels, (3, 3, 3), droprate=drop_rate, padding="same"
         )
         )
-        self.conv3 = ConvolutionalBlock(
-            channels, channels, (3, 3, 3),  droprate=drop_rate
+        self.conv3 = ConvBlock(
+            channels, channels, (3, 3, 3),  droprate=drop_rate, padding="same"
         )
         )
 
 
         #self.block = nn.Sequential(self.conv1, self.conv2, self.conv3)
         #self.block = nn.Sequential(self.conv1, self.conv2, self.conv3)
         self.block = self.conv1
         self.block = self.conv1
 
 
     def forward(self, x):
     def forward(self, x):
-        x = nn.ELU(self.block(x) + x)
-        return 
+        x = nn.ELU()(self.block(x) + x)
+        return x
 
 
         
         
-class ConvolutionalBlock(nn.Module):
+class ConvBlock(nn.Module):
     def __init__(
     def __init__(
             self,
             self,
             in_channels,
             in_channels,
@@ -95,7 +97,7 @@ class ConvolutionalBlock(nn.Module):
             droprate=None,
             droprate=None,
             pool=False,
             pool=False,
     ):
     ):
-        super(ConvolutionalBlock, self).__init__()
+        super(ConvBlock, self).__init__()
         self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding)
         self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding)
         self.norm = nn.BatchNorm3d(out_channels)
         self.norm = nn.BatchNorm3d(out_channels)
         self.elu = nn.ELU()
         self.elu = nn.ELU()
@@ -120,9 +122,9 @@ class ConvolutionalBlock(nn.Module):
         return x
         return x
 
 
 
 
-class FullyConnectedBlock(nn.Module):
+class FullConnBlock(nn.Module):
     def __init__(self, in_channels, out_channels, droprate=0.0):
     def __init__(self, in_channels, out_channels, droprate=0.0):
-        super(FullyConnectedBlock, self).__init__()
+        super(FullConnBlock, self).__init__()
         self.dense = nn.Linear(in_channels, out_channels)
         self.dense = nn.Linear(in_channels, out_channels)
         self.norm = nn.BatchNorm1d(out_channels)
         self.norm = nn.BatchNorm1d(out_channels)
         self.elu = nn.ELU()
         self.elu = nn.ELU()
@@ -136,7 +138,7 @@ class FullyConnectedBlock(nn.Module):
         return x
         return x
     
     
 
 
-class SeperableConvolutionalBlock(nn.Module):
+class SepConvBlock(nn.Module):
     def __init__(
     def __init__(
       self,
       self,
       in_channels,
       in_channels,
@@ -147,8 +149,8 @@ class SeperableConvolutionalBlock(nn.Module):
       droprate = None,
       droprate = None,
       pool = False,      
       pool = False,      
     ):
     ):
-        super(SeperableConvolutionalBlock, self).__init__()
-        self.conv = SeperableConv3d(in_channels, out_channels, kernel_size, stride, padding)
+        super(SepConvBlock, self).__init__()
+        self.conv = SepConv3d(in_channels, out_channels, kernel_size, stride, padding)
         self.norm = nn.BatchNorm3d(out_channels)
         self.norm = nn.BatchNorm3d(out_channels)
         self.elu = nn.ELU()
         self.elu = nn.ELU()
         self.dropout = nn.Dropout(droprate)
         self.dropout = nn.Dropout(droprate)

+ 45 - 14
utils/models.py

@@ -3,10 +3,11 @@ from torchvision.transforms import ToTensor
 import os
 import os
 import pandas as pd
 import pandas as pd
 import numpy as np
 import numpy as np
-import utils.layers as ly
+import layers as ly
 
 
 import torch
 import torch
 import torchvision
 import torchvision
+import torchsummary as ts
 
 
 
 
 class Parameters:
 class Parameters:
@@ -32,11 +33,11 @@ class CNN_Net(nn.Module):
         super().__init__()
         super().__init__()
 
 
         # Initial Convolutional Blocks
         # Initial Convolutional Blocks
-        self.conv1 = ly.ConvolutionalBlock(
-            image_channels, 192, (11, 13, 11), stride=(4, 4, 4), droprate=droprate, pool=True
+        self.conv1 = ly.ConvBlock(
+            image_channels, 192, (11, 13, 11), stride=(4, 4, 4), droprate=droprate, pool=False
         )
         )
-        self.conv2 = ly.ConvolutionalBlock(
-            192, 384, (5, 6, 5), droprate=droprate, pool=True
+        self.conv2 = ly.ConvBlock(
+            192, 384, (5, 6, 5), droprate=droprate, pool=False
         )
         )
 
 
         # Midflow Block
         # Midflow Block
@@ -48,13 +49,13 @@ class CNN_Net(nn.Module):
         self.splitconv = ly.SplitConvBlock(384, 192, 96, 4, droprate)
         self.splitconv = ly.SplitConvBlock(384, 192, 96, 4, droprate)
 
 
         #Fully Connected Block
         #Fully Connected Block
-        self.fc_image = ly.FullyConnectedBlock(96, 20, droprate=droprate)
+        self.fc_image = ly.FullConnBlock(96, 20, droprate=droprate)
 
 
 
 
 
 
         #Data Layers, fully connected
         #Data Layers, fully connected
-        self.fc_clin1 = ly.FullyConnectedBlock(clin_data_channels, 64, droprate=droprate)
-        self.fc_clin2 = ly.FullyConnectedBlock(64, 20, droprate=droprate)
+        self.fc_clin1 = ly.FullConnBlock(clin_data_channels, 64, droprate=droprate)
+        self.fc_clin2 = ly.FullConnBlock(64, 20, droprate=droprate)
         
         
 
 
         #Final Dense Layer
         #Final Dense Layer
@@ -68,18 +69,12 @@ class CNN_Net(nn.Module):
 
 
         image, clin_data = x
         image, clin_data = x
 
 
-        print("Input image shape:", image.shape)
     
     
         image = self.conv1(image)
         image = self.conv1(image)
-        print("Conv1 shape:", image.shape)
         image = self.conv2(image)
         image = self.conv2(image)
-        print("Conv2 shape:", image.shape)
         image = self.midflow(image)
         image = self.midflow(image)
-        print("Midflow shape:", image.shape)
         image = self.splitconv(image)
         image = self.splitconv(image)
-        print("Splitconv shape:", image.shape)
         image = torch.flatten(image, 1)
         image = torch.flatten(image, 1)
-        print("Flatten shape:", image.shape)
         image = self.fc_image(image)
         image = self.fc_image(image)
 
 
         clin_data = self.fc_clin1(clin_data)
         clin_data = self.fc_clin1(clin_data)
@@ -96,4 +91,40 @@ class CNN_Net(nn.Module):
 
 
 
 
 
 
+
class CNN_Image_Section(nn.Module):
    """Standalone image branch of CNN_Net for inspection/summary.

    Mirrors the image pathway of CNN_Net (conv1 -> conv2 -> midflow ->
    splitconv -> flatten -> fc_image) so the convolutional feature
    extractor can be summarized and debugged in isolation.
    """

    def __init__(self, image_channels, droprate):
        """Build the image-branch layers.

        Args:
            image_channels: number of channels in the input MRI volume.
            droprate: dropout probability passed to every sub-block.
        """
        super().__init__()

        # Initial convolutional blocks (same hyperparameters as CNN_Net)
        self.conv1 = ly.ConvBlock(
            image_channels, 192, (11, 13, 11), stride=(4, 4, 4), droprate=droprate, pool=False
        )
        self.conv2 = ly.ConvBlock(
            192, 384, (5, 6, 5), droprate=droprate, pool=False
        )

        # Midflow block (residual ELU block at 384 channels)
        self.midflow = ly.MidFlowBlock(384, droprate)

        # Split convolutional block; split_dim=1 splits along channels here
        # (CNN_Net uses split_dim=4) — NOTE(review): confirm this is intended.
        self.splitconv = ly.SplitConvBlock(384, 192, 96, 1, droprate)

        # Fully connected block; 227136 is the flattened feature count for
        # the (1, 91, 109, 91) input used below — TODO confirm for other shapes.
        self.fc_image = ly.FullConnBlock(227136, 20, droprate=droprate)

    def forward(self, x):
        """Run the image branch; returns the 20-dim image feature vector."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.midflow(x)
        x = self.splitconv(x)
        x = torch.flatten(x, 1)
        x = self.fc_image(x)
        # Bug fix: original forward fell through and returned None.
        return x


if __name__ == "__main__":
    # Guarded so importing this module has no side effects and does not
    # require a CUDA device; run the file directly to print the summary.
    print(ts.summary(CNN_Image_Section(1, 0.5).cuda(), (1, 91, 109, 91)))