Nicholas Schense 11 months ago
Parent
Commit
c00923afdb

+ 5 - 1
main.py

@@ -61,7 +61,7 @@ def evaluate_model(seed):
     test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
     val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=True)
 
-    model_CNN = models.CNN_Net(1, 1, 0.5)
+    model_CNN = models.CNN_Net(1, 1, 0.5).double()
     criterion = nn.CrossEntropyLoss()
     optimizer = optim.Adam(model_CNN.parameters(), lr=0.001)
     print("Seed: ", seed)
@@ -74,6 +74,10 @@ def evaluate_model(seed):
 
             optimizer.zero_grad()
 
+            mri = mri.double()
+            xls = xls.double()
+
+
             outputs = model_CNN((mri, xls))
             loss = criterion(outputs, label)
             loss.backward()
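
Note on the change above: converting the model with .double() makes every parameter float64, so the mri and xls tensors are cast with .double() as well to avoid a dtype mismatch at the first layer. A minimal, self-contained sketch of that pattern (nn.Linear stands in for models.CNN_Net; the shapes are illustrative assumptions, not the real data dimensions):

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2).double()        # stand-in for models.CNN_Net(1, 1, 0.5).double()
    features = torch.randn(8, 4).double()   # inputs cast to float64, like mri/xls above
    labels = torch.randint(0, 2, (8,))      # CrossEntropyLoss expects integer class labels

    criterion = nn.CrossEntropyLoss()
    loss = criterion(model(features), labels)
    loss.backward()                         # works because weights and inputs share a dtype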

BIN
utils/__pycache__/layers.cpython-38.pyc


BIN
utils/__pycache__/models.cpython-38.pyc


BIN
utils/__pycache__/preprocess.cpython-38.pyc


BIN
utils/__pycache__/show_image.cpython-38.pyc


+ 0 - 10
utils/layers.py

@@ -51,7 +51,6 @@ class SplitConvBlock(nn.Module):
         
 
     def forward(self, x):
-        print("SplitConvBlock in: ", x.shape)
         (left, right) = torch.tensor_split(x, 2, dim=self.split_dim)
 
         self.leftblock = nn.Sequential(self.leftconv_1, self.leftconv_2)
@@ -60,7 +59,6 @@ class SplitConvBlock(nn.Module):
         left = self.leftblock(left)
         right = self.rightblock(right)
         x = torch.cat((left, right), dim=self.split_dim)
-        print("SplitConvBlock out: ", x.shape)
         return x
     
 
@@ -82,9 +80,7 @@ class MidFlowBlock(nn.Module):
         self.block = self.conv1
 
     def forward(self, x):
-        print("MidFlowBlock in: ", x.shape)
         x = nn.ELU(self.block(x) + x)
-        print("MidFlowBlock out: ", x.shape)
         return 
 
         
@@ -111,7 +107,6 @@ class ConvolutionalBlock(nn.Module):
             self.maxpool = None
 
     def forward(self, x):
-        print("ConvBlock in: ", x.shape)
         x = self.conv(x)
         x = self.norm(x)
         x = self.elu(x)
@@ -121,7 +116,6 @@ class ConvolutionalBlock(nn.Module):
             x = self.maxpool(x)
 
         x = self.dropout(x)
-        print("ConvBlock out: ", x.shape)
             
         return x
 
@@ -135,12 +129,10 @@ class FullyConnectedBlock(nn.Module):
         self.dropout = nn.Dropout(droprate)
 
     def forward(self, x):
-        print("FullyConnectedBlock in: ", x.shape)
         x = self.dense(x)
         x = self.norm(x)
         x = self.elu(x)
         x = self.dropout(x)
-        print("FullyConnectedBlock out: ", x.shape)
         return x
     
 
@@ -167,7 +159,6 @@ class SeperableConvolutionalBlock(nn.Module):
             self.maxpool = None
 
     def forward(self, x):
-        print("SeperableConvBlock in: ", x.shape)
         x = self.conv(x)
         x = self.norm(x)
         x = self.elu(x)
@@ -177,6 +168,5 @@ class SeperableConvolutionalBlock(nn.Module):
             x = self.maxpool(x)
 
         x = self.dropout(x)
-        print("SeperableConvBlock out: ", x.shape)
 
         return x
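
Note on the removals above: the shape-debugging print() calls are stripped from every forward(). If the same information is needed again later, a forward hook can log shapes without editing the blocks; a hedged sketch (nn.Conv3d is a stand-in module here, hooks attach the same way to SplitConvBlock, ConvolutionalBlock, etc.):

    import torch
    import torch.nn as nn

    def shape_hook(module, inputs, output):
        # inputs is the tuple of positional arguments passed to forward()
        print(module.__class__.__name__, "in:", tuple(inputs[0].shape), "out:", tuple(output.shape))

    block = nn.Conv3d(1, 8, kernel_size=3, padding=1)   # stand-in for one of the blocks above
    handle = block.register_forward_hook(shape_hook)
    _ = block(torch.randn(2, 1, 16, 16, 16))            # hook prints the shapes on each call
    handle.remove()                                      # detach the hook when done debugging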

+ 26 - 12
utils/models.py

@@ -42,41 +42,55 @@ class CNN_Net(nn.Module):
         # Midflow Block
         self.midflow = ly.MidFlowBlock(384, droprate)
 
-        # Combine
-        self.combined = nn.Sequential(self.conv1, self.conv2, self.midflow)
+        
 
         # Split Convolutional Block
         self.splitconv = ly.SplitConvBlock(384, 192, 96, 4, droprate)
 
         #Fully Connected Block
-        self.fc1 = ly.FullyConnectedBlock(96, 20, droprate=droprate)
+        self.fc_image = ly.FullyConnectedBlock(96, 20, droprate=droprate)
 
-        self.image_layers = nn.Sequential(self.combined, self.splitconv).double()
 
 
         #Data Layers, fully connected
-        self.fc1 = ly.FullyConnectedBlock(clin_data_channels, 64, droprate=droprate)
-        self.fc2 = ly.FullyConnectedBlock(64, 20, droprate=droprate)
+        self.fc_clin1 = ly.FullyConnectedBlock(clin_data_channels, 64, droprate=droprate)
+        self.fc_clin2 = ly.FullyConnectedBlock(64, 20, droprate=droprate)
         
-        #Connect Data 
-        self.data_layers = nn.Sequential(self.fc1, self.fc2).double()
 
         #Final Dense Layer
         self.dense1 = nn.Linear(40, 5)
         self.dense2 = nn.Linear(5, 2)
         self.softmax = nn.Softmax()
 
-        self.final_layers = nn.Sequential(self.dense1, self.dense2, self.softmax)
+       
 
     def forward(self, x):
 
         image, clin_data = x
 
-        print(image.shape)
+        print("Input image shape:", image.shape)
     
-        image = self.image_layers(image)
+        image = self.conv1(image)
+        print("Conv1 shape:", image.shape)
+        image = self.conv2(image)
+        print("Conv2 shape:", image.shape)
+        image = self.midflow(image)
+        print("Midflow shape:", image.shape)
+        image = self.splitconv(image)
+        print("Splitconv shape:", image.shape)
+        image = torch.flatten(image, 1)
+        print("Flatten shape:", image.shape)
+        image = self.fc_image(image)
+
+        clin_data = self.fc_clin1(clin_data)
+        clin_data = self.fc_clin2(clin_data)
+
+
+
         x = torch.cat((image, clin_data), dim=1)
-        x = self.final_layers(x)
+        x = self.dense1(x)
+        x = self.dense2(x)
+        x = self.softmax(x)
         return x