2 Commits bc49cac40a ... a452f1345c

Author  SHA1        Message                                 Date
Ruben   a452f1345c  Added average loss curve in training    7 months ago
Ruben   3457c8f0f8  Initial working CNN model               7 months ago
50 changed files with 693 additions and 60 deletions
  1. + 0 - 0    MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I11767_masked_brain.nii.nii
  2. + 0 - 0    MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I11879_masked_brain.nii.nii
  3. + 0 - 0    MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I12061_masked_brain.nii.nii
  4. + 0 - 0    MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I13509_masked_brain.nii.nii
  5. + 0 - 0    MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I13807_masked_brain.nii.nii
  6. + 0 - 0    MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I14166_masked_brain.nii.nii
  7. + 0 - 0    MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I14808_masked_brain.nii.nii
  8. + 0 - 0    MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16169_masked_brain.nii.nii
  9. + 0 - 0    MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16238_masked_brain.nii.nii
  10. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16740_masked_brain.nii.nii
  11. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16828_masked_brain.nii.nii
  12. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I17363_masked_brain.nii.nii
  13. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I17415_masked_brain.nii.nii
  14. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I17585_masked_brain.nii.nii
  15. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20080_masked_brain.nii.nii
  16. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20332_masked_brain.nii.nii
  17. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20506_masked_brain.nii.nii
  18. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20771_masked_brain.nii.nii
  19. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I23153_masked_brain.nii.nii
  20. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I23431_masked_brain.nii.nii
  21. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I11771_masked_brain.nii.nii
  22. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I12151_masked_brain.nii.nii
  23. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I13447_masked_brain.nii.nii
  24. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I14114_masked_brain.nii.nii
  25. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I14783_masked_brain.nii.nii
  26. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I16867_masked_brain.nii.nii
  27. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I17508_masked_brain.nii.nii
  28. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I18931_masked_brain.nii.nii
  29. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I20550_masked_brain.nii.nii
  30. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I20726_masked_brain.nii.nii
  31. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23089_masked_brain.nii.nii
  32. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23667_masked_brain.nii.nii
  33. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23798_masked_brain.nii.nii
  34. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23901_masked_brain.nii.nii
  35. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I24641_masked_brain.nii.nii
  36. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I25715_masked_brain.nii.nii
  37. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I26030_masked_brain.nii.nii
  38. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I26940_masked_brain.nii.nii
  39. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I26947_masked_brain.nii.nii
  40. + 0 - 0   MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I28549_masked_brain.nii.nii
  41. + 97 - 44    main.py
  42. + 1 - 1      original_model/mci_train.py
  43. + 10 - 6     original_model/utils/models.py
  44. + 3 - 3      original_model/utils/preprocess.py
  45. + 78 - 0     utils/CNN_methods.py
  46. + 222 - 0    utils/models.py
  47. + 136 - 0    utils/newCNN.py
  48. + 107 - 0    utils/newCNN_Layers.py
  49. + 38 - 5     utils/preprocess.py
  50. + 1 - 1      utils/show_image.py

+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I11767_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I11767_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I11879_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I11879_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I12061_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I12061_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I13509_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I13509_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I13807_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I13807_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I14166_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I14166_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I14808_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I14808_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16169_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16169_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16238_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16238_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16740_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16740_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16828_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16828_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I17363_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I17363_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I17415_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I17415_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I17585_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I17585_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20080_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20080_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20332_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20332_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20506_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20506_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20771_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20771_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I23153_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I23153_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableAD__I23431_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I23431_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I11771_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I11771_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I12151_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I12151_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I13447_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I13447_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I14114_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I14114_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I14783_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I14783_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I16867_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I16867_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I17508_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I17508_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I18931_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I18931_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I20550_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I20550_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I20726_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I20726_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23089_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23089_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23667_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23667_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23798_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23798_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23901_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23901_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I24641_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I24641_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I25715_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I25715_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I26030_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I26030_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I26940_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I26940_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I26947_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I26947_masked_brain.nii.nii


+ 0 - 0
ADNI_volumes_customtemplate_float32/Inf_NaN_stableNL__I28549_masked_brain.nii.nii → MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I28549_masked_brain.nii.nii


+ 97 - 44
main.py

@@ -2,8 +2,9 @@ import torch
 import torchvision
 
 # FOR DATA
-from utils.preprocess import prepare_datasets
+from utils.preprocess import prepare_datasets, prepare_predict
 from utils.show_image import show_image
+from utils.newCNN import CNN_Net
 from torch.utils.data import DataLoader
 from torchvision import datasets
 
@@ -42,63 +43,115 @@ os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # use id from $ nvidia-smi
 # data & training properties:
 val_split = 0.2     # % of val and test, rest will be train
 seed = 12       # TODO Randomize seed
-'''
-target_rows = 91
-target_cols = 109
-depth = 91
-axis = 1
-num_clinical = 2
-CNN_drop_rate = 0.3
-RNN_drop_rate = 0.1
-CNN_w_regularizer = regularizers.l2(2e-2)
-RNN_w_regularizer = regularizers.l2(1e-6)
-CNN_batch_size = 10
-RNN_batch_size = 5
-val_split = 0.2
-optimizer = Adam(lr=1e-5)
-final_layer_size = 5
-'''
+
+# params = {
+#     "target_rows": 91,
+#     "target_cols": 109,
+#     "depth": 91,
+#     "axis": 1,
+#     "num_clinical": 2,
+#     "CNN_drop_rate": 0.3,
+#     "RNN_drop_rate": 0.1,
+#     # "CNN_w_regularizer": regularizers.l2(2e-2),
+#     # "RNN_w_regularizer": regularizers.l2(1e-6),
+#     "CNN_batch_size": 10,
+#     "RNN_batch_size": 5,
+#     "val_split": 0.2,
+#     "final_layer_size": 5
+# }
+
+properties = {
+    "batch_size":4,
+    "padding":0,
+    "dilation":1,
+    "groups":1,
+    "bias":True,
+    "padding_mode":"zeros",
+    "drop_rate":0
+}
 
 
 # Might have to replace datapaths or separate between training and testing
-model_filepath = '//data/data_wnx1/rschuurs/Pytorch_CNN-RNN'
-mri_datapath = './ADNI_volumes_customtemplate_float32/'
-annotations_datapath = './LP_ADNIMERGE.csv'
+model_filepath = '/data/data_wnx1/rschuurs/Pytorch_CNN-RNN'
+CNN_filepath = '/data/data_wnx1/rschuurs/Pytorch_CNN-RNN/cnn_net.pth'
+mri_datapath = '/data/data_wnx1/rschuurs/Pytorch_CNN-RNN/MRI_volumes_customtemplate_float32/'
+annotations_datapath = './data/data_wnx1/rschuurs/Pytorch_CNN-RNN/LP_ADNIMERGE.csv'
 
 # annotations_file = pd.read_csv(annotations_datapath)    # DataFrame
-
 # show_image(17508)
 
 # TODO: Datasets include multiple labels, such as medical info
 training_data, val_data, test_data = prepare_datasets(mri_datapath, val_split, seed)
-batch_size = 64
 
 # Create data loaders
-train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
-test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
-val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=True)
-
-for X, y in train_dataloader:
-    print(f"Shape of X [N, C, H, W]: {X.shape}")
-    print(f"Shape of y: {y.shape} {y.dtype}")
-    break
-
-
-# Display 10 images and labels.
-x = 0
-while x < 10:
-    train_features, train_labels = next(iter(train_dataloader))
-    print(f"Feature batch shape: {train_features.size()}")
-    img = train_features[0].squeeze()
-    image = img[:, :, 40]
-    label = train_labels[0]
-    plt.imshow(image, cmap="gray")
-    plt.show()
-    print(f"Label: {label}")
-    x = x+1
+train_dataloader = DataLoader(training_data, batch_size=properties['batch_size'], shuffle=True)
+test_dataloader = DataLoader(test_data, batch_size=properties['batch_size'], shuffle=True)
+val_dataloader = DataLoader(val_data, batch_size=properties['batch_size'], shuffle=True)
+
+# for X, y in train_dataloader:
+#     print(f"Shape of X [Channels (colors), Y, X, Z]: {X.shape}")   # X & Y are from TOP LOOKING DOWN
+#     print(f"Shape of Y (Dataset?): {y.shape} {y.dtype}")
+#     break
+
+
+# Display 4 images and labels.
+# x = 1
+# while x < 1:
+#     train_features, train_labels = next(iter(train_dataloader))
+#     print(f"Feature batch shape: {train_features.size()}")
+#     img = train_features[0].squeeze()
+#     print(f"Feature batch shape: {img.size()}")
+#     image = img[:, :, 40]
+#     print(f"Feature batch shape: {image.size()}")
+#     label = train_labels[0]
+#     print(f"Label: {label}")
+#     plt.imshow(image, cmap="gray")
+#     plt.show()
+#     x = x+1
+
+
+train = True
+predict = False
+CNN = CNN_Net(train_dataloader, prps=properties, final_layer_size=2)
+CNN.cuda()
+
+# RUN CNN
+if(train):
+    CNN.train_model(train_dataloader, CNN_filepath, epochs=5)
+    CNN.evaluate_model(val_dataloader)
+
+else:
+    CNN.load_state_dict(torch.load(CNN_filepath))
+    CNN.evaluate_model(val_dataloader)
+
+
+# PREDICT MODE TO TEST INDIVIDUAL IMAGES
+if(predict):
+    on = True
+    print("---- Predict mode ----")
+    print("Integer for image")
+    print("x or X for exit")
+
+    while(on):
+        inp = input("Next image: ")
+        if(inp == None or inp.lower() == 'x' or not inp.isdigit()): on = False
+        else:
+            dataloader = DataLoader(prepare_predict(mri_datapath, [inp]), batch_size=properties['batch_size'], shuffle=True)
+            prediction = CNN.predict(dataloader)
+
+            features, labels = next(iter(dataloader), )
+            img = features[0].squeeze()
+            image = img[:, :, 40]
+            print(f"Expected class: {labels}")
+            print(f"Prediction: {prediction}")
+            plt.imshow(image, cmap="gray")
+            plt.show()
 
 print("--- END ---")
 
+
+
+
 # EXTRA
 
 # will I need these params?
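
Note: `batch_size` from the `properties` dict feeds the DataLoaders above; most of the remaining keys are forwarded to `torch.nn.Conv3d` by the layers built in `utils/newCNN_Layers.py`. A minimal standalone sketch of that mapping (editorial illustration, not code from the commit; the 91x109x91 volume shape is taken from the commented-out `params` block):

import torch
import torch.nn as nn

properties = {"batch_size": 4, "padding": 0, "dilation": 1, "groups": 1,
              "bias": True, "padding_mode": "zeros", "drop_rate": 0}

# conv1 of the new model: 1 input channel, 192 filters, kernel (11, 13, 11), stride 4
conv = nn.Conv3d(1, 192, kernel_size=(11, 13, 11), stride=(4, 4, 4),
                 padding=properties["padding"], dilation=properties["dilation"],
                 groups=properties["groups"], bias=properties["bias"],
                 padding_mode=properties["padding_mode"])

x = torch.randn(properties["batch_size"], 1, 91, 109, 91)   # one batch of MRI volumes
print(conv(x).shape)                                        # torch.Size([4, 192, 21, 25, 21])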

+ 1 - 1
original_model/mci_train.py

@@ -51,7 +51,7 @@ optimizer = Adam(lr=1e-5)
 final_layer_size = 5
 
 model_filepath = '//data/data_wnx3/data_wnx1/_Data/AlzheimersDL/CNN+RNN-2class-1cnn+data'
-mri_datapath = '//data/data_wnx3/data_wnx1/_Data/AlzheimersDL/CNN+RNN-2class-1cnn+data/ADNI_volumes_customtemplate_float32'
+mri_datapath = '//data/data_wnx3/data_wnx1/_Data/AlzheimersDL/CNN+RNN-2class-1cnn+data/MRI_volumes_customtemplate_float32'
 
 
 params_dict = { 'CNN_w_regularizer': CNN_w_regularizer, 'RNN_w_regularizer': RNN_w_regularizer,

+ 10 - 6
original_model/utils/models.py

@@ -46,9 +46,9 @@ class CNN_Net ():
     def __init__ (self, params):
         self.params = params
 
-        self.xls = Input (shape = (self.params.num_clinical,),name='input_xls')
-        self.mri = Input (shape = (self.params.image_shape),name='input_mri')
-        self.jac = Input (shape = (self.params.image_shape),name='input_jac')
+        self.xls = Input (shape = (self.params.num_clinical,),name='input_xls')     # MEDICAL DATA
+        self.mri = Input (shape = (self.params.image_shape),name='input_mri')       # MRI SCAN
+        self.jac = Input (shape = (self.params.image_shape),name='input_jac')       # JAC
 
         xalex3D = XAlex3D(w_regularizer = self.params.CNN_w_regularizer, drop_rate = self.params.CNN_drop_rate, final_layer_size=self.params.final_layer_size)
 
@@ -74,8 +74,8 @@ class CNN_Net ():
         self.optimizer = self.params.optimizer
         self.model.compile(optimizer = self.optimizer, loss = 'sparse_categorical_crossentropy', metrics =['acc'])
         self.model.summary()
-
-        history = self.model.fit_generator (data_flow_train,
+
+        history = self.model.fit_generator(data_flow_train,
                    steps_per_epoch = train_samples/self.params.CNN_batch_size,
                    epochs = self.params.epochs,
                    callbacks = callback,
                    callbacks = callback,
@@ -323,7 +323,8 @@ def XAlex3D(w_regularizer = None, drop_rate = 0., final_layer_size = 50) :
         #                     padding="same", kernel_regularizer = w_regularizer)(conv6_concat)
 
         #Flatten 3D conv network representations
-        flat_conv_6 = Reshape((np.prod(K.int_shape(conv6_concat)[1:]),))(conv6_concat)
+        flat_conv_6 = Reshape((np.prod(K.int_shape(conv6_concat)[1:]),))(conv6_concat)
+        print(flat_conv_6.shape)
 
         #2-layer Dense network for clinical features
         vol_fc1 = _fc_bn_relu_drop(64,  w_regularizer = w_regularizer,
@@ -347,6 +348,7 @@ def XAlex3D(w_regularizer = None, drop_rate = 0., final_layer_size = 50) :
         return fc2
     return f
 
+
 ###Define pieces of CNN
 def _fc_bn_relu_drop (units, w_regularizer = None, drop_rate = 0., name = None):
     #Defines Fully connected block (see fig. 3 in paper)
@@ -358,6 +360,7 @@ def _fc_bn_relu_drop (units, w_regularizer = None, drop_rate = 0., name = None):
         return fc
     return f
 
+
 def _conv_bn_relu_pool_drop(filters, height, width, depth, strides=(1, 1, 1), padding = 'same', w_regularizer = None, 
                             drop_rate = None, name = None, pool = False):
    #Defines convolutional block (see fig. 3 in paper)
@@ -372,6 +375,7 @@ def _conv_bn_relu_pool_drop(filters, height, width, depth, strides=(1, 1, 1), pa
        return Dropout(drop_rate) (elu)
    return f
 
+
 def _sepconv_bn_relu_pool_drop (filters, height, width, depth, strides = (1, 1, 1), padding = 'same', depth_multiplier = 1, w_regularizer = None, 
                             drop_rate = None, name = None, pool = False):
     #Defines separable convolutional block (see fig. 3 in paper)

+ 3 - 3
original_model/utils/preprocess.py

@@ -12,7 +12,7 @@ from utils.patientsort import PatientSorter
 ##for 2 class model CNN + RNN ##
 
 class DataLoader:
-    """The DataLoader class is intended to be used on images placed in folder ../ADNI_volumes_customtemplate_float32
+    """The DataLoader class is intended to be used on images placed in folder ../MRI_volumes_customtemplate_float32
 
        naming convention is: class_subjectID_imageType.nii.gz
        masked_brain denotes structural MRI, JD_masked_brain denotes Jacobian Determinant 
@@ -26,7 +26,7 @@ class DataLoader:
 
 
     def __init__(self, target_shape, seed = None):
-        self.mri_datapath = '//data/data_wnx3/data_wnx1/_Data/AlzheimersDL/CNN+RNN-2class-1cnn+data/ADNI_volumes_customtemplate_float32'
+        self.mri_datapath = '//data/data_wnx3/data_wnx1/_Data/AlzheimersDL/CNN+RNN-2class-1cnn+data/MRI_volumes_customtemplate_float32'
         self.xls_datapath = '//data/data_wnx3/data_wnx1/_Data/AlzheimersDL/CNN+RNN-2class-1cnn+data'
         self.target_shape = target_shape
         self.seed = seed
@@ -39,7 +39,7 @@ class DataLoader:
 
 
     def get_filenames (self,mri_datapath):
-        '''Puts filenames in ../ADNI_volumes_customtemplate_float32 in
+        '''Puts filenames in ../MRI_volumes_customtemplate_float32 in
         dictionaries according to class (stableMCI, MCItoAD, stableNL and stableAD)
         with keys corresponding to image modality (mri and JD)
         '''
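
Note: the docstring's naming convention (class_subjectID_imageType.nii.gz) is what the new utils/preprocess.py leans on when it labels volumes by the "NL" / "AD" substrings. A small sketch using one filename from this commit (the parsing lines are an editorial illustration, not repo code):

fname = "Inf_NaN_stableAD__I11767_masked_brain.nii.nii"
label = 1 if "AD" in fname else (0 if "NL" in fname else None)   # mirrors prepare_predict's checks
subject_id = fname.split("__")[1].split("_")[0]
print(label, subject_id)   # 1 I11767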

+ 78 - 0
utils/CNN_methods.py

@@ -0,0 +1,78 @@
+from torch import add
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+
+"""
+Returns a function that convolves (or separably convolves), normalizes, activates (ELU), pools, and applies dropout to its input.
+ 
+Kernel_size = (height, width, depth)
+
+CAN DO SEPARABLE CONVOLUTION IF GROUP = 2!!!! :))))
+"""
+
+def conv_elu_maxpool_drop(in_channel, filters, kernel_size, stride=(1,1,1), padding=0, dilation=1,
+                      groups=1, bias=True, padding_mode='zeros', pool=False, drop_rate=0, sep_conv = False):
+    def f(input):
+
+        # SEPARABLE CONVOLUTION
+        if(sep_conv):
+
+            # SepConv depthwise, Normalizes, and ELU activates
+            sepConvDepthwise = nn.Conv3d(in_channel, filters, kernel_size, stride=stride, padding=padding,
+                                         groups=in_channel, bias=bias, padding_mode=padding_mode)(input)
+
+            # SepConv pointwise
+            # Todo, will stride & padding be correct for this?
+            conv = nn.Conv3d(in_channel, filters, kernel_size=1, stride=stride, padding=padding,
+                                         groups=1, bias=bias, padding_mode=padding_mode)(sepConvDepthwise)
+
+        # CONVOLUTES
+        else:
+            # Convolutes, Normalizes, and ELU activates
+            conv = nn.Conv3d(in_channel, filters, kernel_size, stride=stride, padding=padding, dilation=dilation,
+                             groups=groups, bias=bias, padding_mode=padding_mode)(input)
+
+        normalization = nn.BatchNorm3d(filters)(conv)
+        elu = nn.ELU()(normalization)
+
+        # Pools
+        if (pool):
+            elu = nn.MaxPool3d(kernel_size=3, stride=2, padding=0)(elu)
+
+        return nn.Dropout(p=drop_rate)(elu)
+    return f
+
+
+'''
+Mid_flow in CNN: separably convolves 3 times, adds the residual (initial input) to the result, and activates through ELU()
+'''
+
+def mid_flow(I, drop_rate, filters):
+    in_channel = None   # TODO, IN_CHANNEL
+
+    residual = I        # TODO, DOES THIS ACTUALLY COPY?
+
+    x = conv_elu_maxpool_drop(in_channel, filters, (3,3,3), drop_rate=drop_rate)(I)
+    x = conv_elu_maxpool_drop(in_channel, filters, (3,3,3), drop_rate=drop_rate)(x)
+    x = conv_elu_maxpool_drop(in_channel, filters, (3, 3, 3), drop_rate=drop_rate)(x)
+
+    x = add(x, residual)
+    x = nn.ELU()(x)
+    return x
+
+
+"""
+Returns a function that Fully Connects (FC), normalizes, activates (ELU), and applies dropout to the input.
+"""
+
+def fc_elu_drop(in_features, units, drop_rate=0):
+    def f(input):
+
+        fc = nn.Linear(in_features, out_features=units)(input)
+        fc = nn.BatchNorm3d(units)(fc)          # TODO 3d or 2d???
+        fc = nn.ELU()(fc)
+        fc = nn.Dropout(p=drop_rate)(fc)
+        return fc
+
+    return f
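
Note: these helpers build fresh nn.Conv3d / nn.Linear modules inside the returned closure, so every call creates new random weights and nothing gets registered with an optimizer; the nn.Module-based rewrites in utils/newCNN_Layers.py supersede them. Also, a depthwise separable convolution is conventionally groups = in_channels followed by a 1x1x1 pointwise convolution rather than groups=2. A minimal sketch of that convention (editorial illustration, not code from the commit):

import torch
import torch.nn as nn

class SepConv3d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size):
        super().__init__()
        # depthwise: one spatial filter per input channel
        self.depthwise = nn.Conv3d(in_channels, in_channels, kernel_size,
                                   groups=in_channels, padding='same')
        # pointwise: 1x1x1 convolution to mix channels
        self.pointwise = nn.Conv3d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))

x = torch.randn(1, 8, 16, 16, 16)
print(SepConv3d(8, 32, (3, 3, 3))(x).shape)   # torch.Size([1, 32, 16, 16, 16])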

+ 222 - 0
utils/models.py

@@ -0,0 +1,222 @@
+from torch import device, cuda
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+import utils.CNN_methods as CNN
+
+
+# METHODS: CONV3D, CONV2D, MAXPOOL, LINEAR, ...
+
+
+class CNN_Net(nn.Module):
+
+    # Defines all properties / layers that can be used
+    def __init__(self, mri_volume, params):
+        super().__init__()
+
+        # self.parameters = nn.ParameterList(params)
+        self.model = xalex3D(mri_volume)
+        self.device = device('cuda:0' if cuda.is_available() else 'cpu')
+
+        print("CNN Initialized. Using: " + str(self.device))
+
+
+    # Implements layers with x data, "running an epoch on x"
+    def forward(self, x):
+        x = F.relu(self.model.f(x, []))         # TODO Add Clinical
+        return x
+
+    # Training data
+    def train(self, trainloader, PATH):
+        criterion = nn.CrossEntropyLoss()
+        optimizer = optim.Adam(self.parameters(), lr=1e-5)
+
+        for epoch in range(2):  # loop over the dataset multiple times
+
+            running_loss = 0.0
+            for i, data in enumerate(trainloader, 0):
+                # get the inputs; data is a list of [inputs, labels]
+                inputs, labels = data[0].to(self.device), data[1].to(self.device)
+
+                # zero the parameter gradients
+                optimizer.zero_grad()
+
+                # forward + backward + optimize
+                outputs = self.forward(inputs)
+                loss = criterion(outputs, labels)
+                loss.backward()
+                optimizer.step()
+
+                # print statistics
+                running_loss += loss.item()
+                if i % 2000 == 1999:  # print every 2000 mini-batches
+                    print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}')
+                    running_loss = 0.0
+
+        print('Finished Training')
+
+        torch.save(self.state_dict(), PATH)
+
+
+    def test(self, testloader):
+        correct = 0
+        total = 0
+        # since we're not training, we don't need to calculate the gradients for our outputs
+        with torch.no_grad():
+            for data in testloader:
+                images, labels = data[0].to(self.device), data[1].to(self.device)
+                # calculate outputs by running images through the network
+                outputs = self.forward(images)
+                # the class with the highest energy is what we choose as prediction
+                _, predicted = torch.max(outputs.data, 1)
+                total += labels.size(0)
+                correct += (predicted == labels).sum().item()
+
+        print(f'Accuracy of the network: {100 * correct // total} %')
+
+
+
+'''
+XAlex3D model.
+
+Functions used:
+- conv_elu_maxpool_drop(in_channel, filters, kernel_size, stride=(1,1,1), padding=0, dilation=1,
+                      groups=1, bias=True, padding_mode='zeros', pool=False, drop_rate=0, sep_conv = False)
+'''
+
+# TODO, figure out IN_CHANNEL
+# TODO, in_channel
+class xalex3D(nn.Module):
+    def __init__(self, mri_volume, drop_rate=0, final_layer_size=50):
+        super().__init__()
+        self.drop_rate = drop_rate
+        self.final_layer_size = final_layer_size
+
+        # self.conv1 = CNN.conv_elu_maxpool_drop(len(next(iter(mri_volume))), 192, (11, 13, 11), stride=(4, 4, 4), drop_rate=self.drop_rate, pool=True)(next(iter(mri_volume)))
+        # self.conv2 = CNN.conv_elu_maxpool_drop(self.conv1.shape(), 384, (5, 6, 5), stride=(1, 1, 1), drop_rate=self.drop_rate, pool=True)(self.conv1)
+        # self.conv_mid_3 = CNN.mid_flow(self.conv2.shape(), self.drop_rate, filters=384)
+        # self.groupConv4 = CNN.conv_elu_maxpool_drop(self.conv_mid_3.shape(), 96, (3, 4, 3), stride=(1, 1, 1), drop_rate=self.drop_rate,
+        #                                        pool=True, groups=2)(self.conv_mid_3)
+        # self.groupConv5 = CNN.conv_elu_maxpool_drop(self.groupConv4.shape(), 48, (3, 4, 3), stride=(1, 1, 1), drop_rate=self.drop_rate,
+        #                                        pool=True, groups=2)(self.groupConv4)
+        #
+        # self.fc1 = CNN.fc_elu_drop(self.groupConv5.shape(), 20, drop_rate=self.drop_rate)(self.groupConv5)
+        #
+        # self.fc2 = CNN.fc_elu_drop(self.fc1.shape(), 50, drop_rate=self.drop_rate)(self.fc1)
+
+
+    def f(self, mri_volume, clinical_inputs):
+
+        conv1 = CNN.conv_elu_maxpool_drop(mri_volume.size(), 192, (11, 13, 11), stride=(4, 4, 4), drop_rate=self.drop_rate, pool=True)(mri_volume)
+
+        conv2 = CNN.conv_elu_maxpool_drop(conv1.size(), 384, (5, 6, 5), stride=(1, 1, 1), drop_rate=self.drop_rate, pool=True)(conv1)
+
+        # MIDDLE FLOW, 3 times sepConv & ELU()
+        print(f"Residual: {conv2.shape}")
+        conv_mid_3 = CNN.mid_flow(conv2, self.drop_rate, filters=384)
+
+        # CONV in 2 groups (left & right)
+        groupConv4 = CNN.conv_elu_maxpool_drop(conv_mid_3.size(), 96, (3, 4, 3), stride=(1, 1, 1), drop_rate=self.drop_rate,
+                                               pool=True, groups=2)(conv_mid_3)
+        groupConv5 = CNN.conv_elu_maxpool_drop(groupConv4.size(), 48, (3, 4, 3), stride=(1, 1, 1), drop_rate=self.drop_rate,
+                                               pool=True, groups=2)(groupConv4)
+
+        # FCs
+        fc1 = CNN.fc_elu_drop(groupConv5.size(), 20, drop_rate=self.drop_rate)(groupConv5)
+
+        fc2 = CNN.fc_elu_drop(fc1.size(), 50, drop_rate=self.drop_rate)(fc1)
+
+        return fc2
+
+
+
+
+
+
+"""     LAST PART:
+        
+        # Flatten 3D conv network representations
+        flat_conv_6 = Reshape((np.prod(K.int_shape(conv6_concat)[1:]),))(conv6_concat)
+
+        # 2-layer Dense network for clinical features
+        vol_fc1 = _fc_bn_relu_drop(64, w_regularizer=w_regularizer,
+                                   drop_rate=drop_rate)(clinical_inputs)
+
+        flat_volume = _fc_bn_relu_drop(20, w_regularizer=w_regularizer,
+                                       drop_rate=drop_rate)(vol_fc1)
+
+        # Combine image and clinical features embeddings
+
+        fc1 = _fc_bn_relu_drop(20, w_regularizer, drop_rate=drop_rate, name='final_conv')(flat_conv_6)
+        flat = concatenate([fc1, flat_volume])
+
+        # Final 4D embedding
+
+        fc2 = Dense(units=final_layer_size, activation='linear', kernel_regularizer=w_regularizer, name='features')(
+            flat)  # was linear activation"""
+
+
+''' FULL CODE:
+
+
+    # First layer
+    conv1_left = _conv_bn_relu_pool_drop(192, 11, 13, 11, strides=(4, 4, 4), w_regularizer=w_regularizer,
+                                         drop_rate=drop_rate, pool=True)(mri_volume)
+   
+    # Second layer
+    conv2_left = _conv_bn_relu_pool_drop(384, 5, 6, 5, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
+        conv1_left)
+
+    # Introduce Middle Flow (separable convolutions with a residual connection)
+    print('residual shape ' + str(conv2_left.shape))
+    conv_mid_1 = mid_flow(conv2_left, drop_rate, w_regularizer,
+                          filters=384)  # changed input to conv2_left from conv2_concat
+    
+    # Split channels for grouped-style convolution
+    conv_mid_1_1 = Lambda(lambda x: x[:, :, :, :, :192])(conv_mid_1)
+    conv_mid_1_2 = Lambda(lambda x: x[:, :, :, :, 192:])(conv_mid_1)
+
+    conv5_left = _conv_bn_relu_pool_drop(96, 3, 4, 3, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
+        conv_mid_1_1)
+
+    conv5_right = _conv_bn_relu_pool_drop(96, 3, 4, 3, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
+        conv_mid_1_2)
+
+    conv6_left = _conv_bn_relu_pool_drop(48, 3, 4, 3, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
+        conv5_left)
+
+    conv6_right = _conv_bn_relu_pool_drop(48, 3, 4, 3, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
+        conv5_right)
+
+    conv6_concat = concatenate([conv6_left, conv6_right], axis=-1)
+
+    # convExtra = Conv3D(48, (20,30,20),
+    #                     strides = (1,1,1), kernel_initializer="he_normal",
+    #                     padding="same", kernel_regularizer = w_regularizer)(conv6_concat)
+
+    # Flatten 3D conv network representations
+    flat_conv_6 = Reshape((np.prod(K.int_shape(conv6_concat)[1:]),))(conv6_concat)
+
+    # 2-layer Dense network for clinical features
+    vol_fc1 = _fc_bn_relu_drop(64, w_regularizer=w_regularizer,
+                               drop_rate=drop_rate)(clinical_inputs)
+
+    flat_volume = _fc_bn_relu_drop(20, w_regularizer=w_regularizer,
+                                   drop_rate=drop_rate)(vol_fc1)
+
+    # Combine image and clinical features embeddings
+
+    fc1 = _fc_bn_relu_drop(20, w_regularizer, drop_rate=drop_rate, name='final_conv')(flat_conv_6)
+    # fc2 = _fc_bn_relu_drop (40, w_regularizer, drop_rate = drop_rate) (fc1)
+    flat = concatenate([fc1, flat_volume])
+
+    # Final 4D embedding
+
+    fc2 = Dense(units=final_layer_size, activation='linear', kernel_regularizer=w_regularizer, name='features')(
+        flat)  # was linear activation
+    '''
+
+
+
+
+
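
Note: the residual pattern quoted above (and implemented by mid_flow) only works when the stacked convolutions preserve tensor shape, since add(x, residual) needs both operands to match. A sketch of the same pattern with shape-preserving 'same' padding (editorial illustration; 384 x 17 x 20 x 17 is what conv2 produces for a 91x109x91 input):

import torch
import torch.nn as nn

class Residual3d(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv3d(channels, channels, 3, padding='same'), nn.ELU(),
            nn.Conv3d(channels, channels, 3, padding='same'), nn.ELU(),
            nn.Conv3d(channels, channels, 3, padding='same'))
        self.elu = nn.ELU()

    def forward(self, x):
        return self.elu(self.body(x) + x)   # shapes must match for the add

x = torch.randn(1, 384, 17, 20, 17)
print(Residual3d(384)(x).shape)             # unchanged: torch.Size([1, 384, 17, 20, 17])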

+ 136 - 0
utils/newCNN.py

@@ -0,0 +1,136 @@
+from torch import device, cuda
+import torch
+from torch import add
+import torch.nn as nn
+import utils.newCNN_Layers as CustomLayers
+import torch.nn.functional as F
+import torch.optim as optim
+import utils.CNN_methods as CNN
+import pandas as pd
+import matplotlib.pyplot as plt
+
+
+class CNN_Net(nn.Module):
+    def __init__(self, input, prps, final_layer_size=5):
+        super(CNN_Net, self).__init__()
+        self.final_layer_size = final_layer_size
+        self.device = device('cuda:0' if cuda.is_available() else 'cpu')
+        print("CNN Initialized. Using: " + str(self.device))
+
+        # GETS FIRST IMAGE FOR SIZE
+        data_iter = iter(input)
+        first_batch = next(data_iter)
+        first_features = first_batch[0]
+        image = first_features[0]
+
+        # LAYERS
+        print(f"CNN Model Initialization. Input size: {image.size()}")
+        self.conv1 = CustomLayers.Conv_elu_maxpool_drop(1, 192, (11, 13, 11), stride=(4,4,4), pool=True, prps=prps)
+        self.conv2 = CustomLayers.Conv_elu_maxpool_drop(192, 384, (5, 6, 5), stride=(1,1,1), pool=True, prps=prps)
+        self.conv3_mid_flow = CustomLayers.Mid_flow(384, 384, prps=prps)
+        self.conv4_sepConv = CustomLayers.Conv_elu_maxpool_drop(384, 96,(3, 4, 3), stride=(1,1,1), pool=True, prps=prps,
+                                                                sep_conv=True)
+        self.conv5_sepConv = CustomLayers.Conv_elu_maxpool_drop(96, 48, (3, 4, 3), stride=(1, 1, 1), pool=True,
+                                                                prps=prps, sep_conv=True)
+        self.fc1 = CustomLayers.Fc_elu_drop(113568, 20, prps=prps)      # TODO, concatenate clinical data after this
+        self.fc2 = CustomLayers.Fc_elu_drop(20, final_layer_size, prps=prps)
+
+    # FORWARDS
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.conv2(x)
+        x = self.conv3_mid_flow(x)
+        x = self.conv4_sepConv(x)
+        x = self.conv5_sepConv(x)
+
+
+        # FLATTEN x
+        flatten_size = x.size(1) * x.size(2) * x.size(3) * x.size(4)
+        x = x.view(-1, flatten_size)
+
+        x = self.fc1(x)
+        x = self.fc2(x)
+        return x
+
+    # TRAIN
+    def train_model(self, trainloader, PATH, epochs):
+        self.train()
+        criterion = nn.CrossEntropyLoss(reduction='mean')
+        optimizer = optim.Adam(self.parameters(), lr=1e-5)
+
+        losses = pd.DataFrame(columns=['Epoch', 'Avg_loss'])
+
+        for epoch in range(epochs+1):  # loop over the dataset multiple times
+            print(f"Epoch {epoch}/{epochs}")
+            running_loss = 0.0
+
+            for i, data in enumerate(trainloader, 0):   # loops over batches
+                # get the inputs; data is a list of [inputs, labels]
+                inputs, labels = data[0].to(self.device), data[1].to(self.device)
+
+                # zero the parameter gradients
+                optimizer.zero_grad()
+
+                # forward + backward + optimize
+                outputs = self.forward(inputs)
+                loss = criterion(outputs, labels)   # This loss is the mean of losses for the batch
+                loss.backward()
+                optimizer.step()
+
+                # adds average batch loss to running loss
+                running_loss += loss.item()
+
+            avg_loss = running_loss / len(trainloader)      # Running_loss / number of batches
+            print(f"Avg. loss: {avg_loss}")
+            losses = losses.append({'Epoch':int(epoch), 'Avg_loss':avg_loss}, ignore_index=True)
+
+
+
+            # TODO COMPUTE LOSS ON VALIDATION
+            # TODO ADD TIME PER EPOCH, CALCULATE EXPECTED REMAINING TIME
+
+        print('Finished Training')
+        print(losses)
+
+        # MAKES GRAPH
+        plt.plot(losses['Epoch'], losses['Avg_loss'])
+        plt.xlabel('Epoch')
+        plt.ylabel('Average Loss')
+        plt.title('Average Loss vs Epoch On Training')
+        plt.savefig('avgloss_epoch_curve.png')
+        plt.show()
+
+        torch.save(self.state_dict(), PATH)
+
+    # TEST
+    def evaluate_model(self, testloader):
+        correct = 0
+        total = 0
+        self.eval()
+        # since we're not training, we don't need to calculate the gradients for our outputs
+        with torch.no_grad():
+            for data in testloader:
+                images, labels = data[0].to(self.device), data[1].to(self.device)
+                # calculate outputs by running images through the network
+                outputs = self.forward(images)
+                # the class with the highest energy is what we choose as prediction
+                _, predicted = torch.max(outputs.data, 1)
+                total += labels.size(0)
+                correct += (predicted == labels).sum().item()
+
+        print(f'Accuracy of the network on {total} scans: {100 * correct // total}%')
+        self.train()
+
+
+    # PREDICT
+    def predict(self, loader):
+        self.eval()
+        with torch.no_grad():
+            for data in loader:
+                images, labels = data[0].to(self.device), data[1].to(self.device)
+                outputs = self.forward(images)
+                # the class with the highest energy is what we choose as prediction
+                _, predicted = torch.max(outputs.data, 1)
+        self.train()
+        return predicted
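
Note: fc1's input size (113568) is hard-coded; it equals 48 * 13 * 14 * 13, the conv5 output for a 91x109x91 volume given that pooling never actually runs in Conv_elu_maxpool_drop (see the note under utils/newCNN_Layers.py). A hedged sketch of deriving the size with a dummy forward pass instead, assuming the conv blocks are wrapped in an nn.Sequential:

import torch
import torch.nn as nn

def flat_features(conv_stack, in_shape=(1, 1, 91, 109, 91)):
    """Features per sample after flattening the conv stack's output."""
    with torch.no_grad():
        out = conv_stack(torch.zeros(in_shape))
    return out[0].numel()

# e.g., inside CNN_Net.__init__, before building fc1:
# n = flat_features(nn.Sequential(self.conv1, self.conv2, self.conv3_mid_flow,
#                                 self.conv4_sepConv, self.conv5_sepConv))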

+ 107 - 0
utils/newCNN_Layers.py

@@ -0,0 +1,107 @@
+from torch import device, cuda
+import torch
+from torch import add
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+import utils.CNN_methods as CNN
+import copy
+
+class Conv_elu_maxpool_drop(nn.Module):
+    def __init__(self, input_size, output_size, kernel_size, prps, stride=(1,1,1), pool = False, sep_conv = False, padding = 0):
+        super(Conv_elu_maxpool_drop, self).__init__()
+        self.input_size = input_size
+        self.output_size = output_size
+        self.pool_status = pool
+        self.sep_conv_status = sep_conv
+
+        # LAYERS
+        # TODO Check here, how many groups? just 2? or groups=input_size?
+        if(self.sep_conv_status):
+            self.sepConvDepthwise = nn.Conv3d(input_size, output_size, kernel_size=kernel_size, stride=stride,
+                                              padding=padding, dilation=prps['dilation'], groups=2, bias=prps["bias"], padding_mode=prps["padding_mode"])
+
+        self.conv = nn.Conv3d(input_size, output_size, kernel_size=kernel_size, stride=stride,
+                                          padding=padding, groups=1, bias=prps["bias"], padding_mode=prps["padding_mode"])
+        self.normalization = nn.BatchNorm3d(output_size)
+        self.elu = nn.ELU()
+        self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=0)
+        self.dropout = nn.Dropout(p=prps['drop_rate'])
+
+        self.weight = nn.Parameter(torch.randn(input_size, output_size))
+        self.bias = nn.Parameter(torch.randn(output_size))
+
+
+    def forward(self, x):
+        # print(f"Forward Input: {x.size()}")
+        if(self.sep_conv_status): x = self.sepConvDepthwise(x)
+        else: x = self.conv(x)
+        x = self.normalization(x)
+        x = self.elu(x)
+        if(self.pool_status): self.maxpool(x)   # NOTE: return value is discarded, so pooling is effectively a no-op
+        x = self.dropout(x)
+
+        # return torch.matmul(x, self.weight) + self.bias
+        return x        # TODO WHAT??? WEIGHT & BIAS YES OR NO?
+
+
+
+class Mid_flow(nn.Module):
+    def __init__(self, input_size, output_size, prps):
+        super(Mid_flow, self).__init__()
+        self.input_size = input_size
+        self.output_size = output_size
+
+        # LAYERS
+        self.conv = Conv_elu_maxpool_drop(input_size, output_size, kernel_size=(3,3,3), stride=(1,1,1), sep_conv=True, padding='same', prps=prps)
+        self.elu = nn.ELU()
+
+        self.weight = nn.Parameter(torch.randn(input_size, output_size))
+        self.bias = nn.Parameter(torch.randn(output_size))
+
+
+    def forward(self, x):
+        # print("AT MIDFLOW!")
+        residual = x.clone()
+
+        # print(f"Input: {x.size()}")
+        x = self.conv(x)
+        x = self.conv(x)
+        x = self.conv(x)
+        # print(f"Output: {x.size()}")
+
+        x = add(x, residual)
+        x = self.elu(x)
+
+        # return torch.matmul(x, self.weight) + self.bias       # TODO WHAT??? WEIGHT & BIAS YES OR NO?
+        return x
+
+
+
+class Fc_elu_drop(nn.Module):
+    def __init__(self, input_size, output_size, prps):
+        super(Fc_elu_drop, self).__init__()
+        self.input_size = input_size
+        self.output_size = output_size
+
+        # LAYERS
+        self.linear = nn.Linear(input_size, output_size)
+        self.normalization = nn.BatchNorm1d(output_size)
+        self.elu = nn.ELU()
+        self.dropout = nn.Dropout(p=prps['drop_rate'])
+
+        self.weight = nn.Parameter(torch.randn(input_size, output_size))
+        self.bias = nn.Parameter(torch.randn(output_size))
+
+
+    def forward(self, x):
+        # print("AT FC")
+        # print(f"Forward Input: {x.size()}")
+        x = self.linear(x)
+        # print(f"After Linear: {x.size()}")
+        x = self.normalization(x)
+        x = self.elu(x)
+        x = self.dropout(x)
+
+        # return torch.matmul(x, self.weight) + self.bias
+        return x        # TODO WHAT??? WEIGHT & BIAS YES OR NO?
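
Note: Mid_flow.forward applies the single module self.conv three times, so the three passes share one set of weights, whereas the Keras original built separate separable-conv blocks; the extra self.weight / self.bias Parameters in these classes are never used in forward. A sketch of an unshared three-block mid-flow, reusing Conv_elu_maxpool_drop from this file (editorial illustration):

import torch.nn as nn
from utils.newCNN_Layers import Conv_elu_maxpool_drop

class MidFlowUnshared(nn.Module):
    def __init__(self, channels, prps):
        super().__init__()
        self.convs = nn.Sequential(*[
            Conv_elu_maxpool_drop(channels, channels, kernel_size=(3, 3, 3),
                                  stride=(1, 1, 1), sep_conv=True,
                                  padding='same', prps=prps)
            for _ in range(3)])   # three distinct layers, three weight sets
        self.elu = nn.ELU()

    def forward(self, x):
        return self.elu(self.convs(x) + x)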

+ 38 - 5
utils/preprocess.py

@@ -1,12 +1,12 @@
-# NEEDS TO BE FINISHED
-# TODO CHECK ABOUT IMAGE DIMENSIONS
-# TODO ENSURE ITERATION WORKS
 import glob
 import nibabel as nib
 import numpy as np
 import random
 import torch
 from torch.utils.data import Dataset
+import torchvision.transforms as transforms
+import re
+
 
 
 '''
@@ -17,6 +17,7 @@ def prepare_datasets(mri_dir, val_split=0.2, seed=50):
     rndm = random.Random(seed)
 
     raw_data = glob.glob(mri_dir + "*")
+
     AD_list = []
     NL_list = []
 
@@ -42,14 +43,42 @@ def prepare_datasets(mri_dir, val_split=0.2, seed=50):
     rndm.shuffle(val_list)
     rndm.shuffle(test_list)
 
+    print(f"DATA INITIALIZATION")
+    print(f"Training size: {len(train_list)}")
+    print(f"Validation size: {len(val_list)}")
+    print(f"Test size: {len(test_list)}")
+
+
+    # # TRANSFORM
+    # transform = transforms.Compose([
+    #     transforms.Grayscale(num_output_channels=1)
+    # ])
+
     train_dataset = CustomDataset(train_list)
     val_dataset = CustomDataset(val_list)
     test_dataset = CustomDataset(test_list)
 
     return train_dataset, val_dataset, test_dataset
 
-    # TODO  Normalize data? Later add / Exctract clinical data? Which data?
+    # TODO  Normalize data? Later add / Extract clinical data? Which data?
+
 
+def prepare_predict(mri_dir, IDs):
+
+    raw_data = glob.glob(mri_dir + "*")
+
+    image_list = []
+
+    # Gets all images and prepares them for Dataset
+    for ID in IDs:
+        pattern = re.compile(ID)
+        matches = [item for item in raw_data if pattern.search(item)]
+        if (len(matches) != 1): print("No image found, or more than one")
+        for match in matches:
+            if "NL" in match: image_list.append((match, 0))
+            if "AD" in match: image_list.append((match, 1))
+
+    return CustomDataset(image_list)
 
 
 '''
@@ -98,8 +127,12 @@ class CustomDataset(Dataset):
     def __getitem__(self, idx):     # RETURNS TUPLE WITH IMAGE AND CLASS_ID, BASED ON INDEX IDX
         mri_path, class_id = self.data[idx]
         mri = nib.load(mri_path)
-        mri_data = mri.get_fdata()
+        image = np.asarray(mri.dataobj)
+        mri_data = np.asarray(np.expand_dims(image, axis=0))
+
+        # mri_data = mri.get_fdata()
         # mri_array = np.array(mri)
         # mri_tensor = torch.from_numpy(mri_array)
         # class_id = torch.tensor([class_id]) TODO return tensor or just id (0, 1)??
+
         return mri_data, class_id
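
Note: __getitem__ now reads the raw voxel array via np.asarray(mri.dataobj) and prepends a channel axis, the per-sample (C, H, W, D) layout Conv3d expects. A self-contained sketch with a synthetic volume standing in for one of the 91x109x91 float32 files (editorial illustration):

import nibabel as nib
import numpy as np

img = nib.Nifti1Image(np.zeros((91, 109, 91), dtype=np.float32), affine=np.eye(4))
vol = np.expand_dims(np.asarray(img.dataobj), axis=0)   # add channel axis
print(vol.shape, vol.dtype)                             # (1, 91, 109, 91) float32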

+ 1 - 1
utils/show_image.py

@@ -6,7 +6,7 @@ import matplotlib.pyplot as plt
 '''
 Function to load and show image. If the image is NL, control must be true. 
 '''
-def show_image(image_id, show=True, data_path='./ADNI_volumes_customtemplate_float32/', annotations_path='./LP_ADNIMERGE.csv'):
+def show_image(image_id, show=True, data_path='./MRI_volumes_customtemplate_float32/', annotations_path='./LP_ADNIMERGE.csv'):
     print('Image ID: ' + str(image_id))
 
     annotations_file = pd.read_csv(annotations_path)