Browse files

Improvements to model, CNN trains

Ruben 1 year ago
parent
commit
0941ee17aa
61 changed files with 266 additions and 460 deletions
  1. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I11767_masked_brain.nii.nii
  2. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I11879_masked_brain.nii.nii
  3. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I12061_masked_brain.nii.nii
  4. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I13509_masked_brain.nii.nii
  5. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I13807_masked_brain.nii.nii
  6. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I14166_masked_brain.nii.nii
  7. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I14808_masked_brain.nii.nii
  8. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16169_masked_brain.nii.nii
  9. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16238_masked_brain.nii.nii
  10. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16740_masked_brain.nii.nii
  11. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I16828_masked_brain.nii.nii
  12. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I17363_masked_brain.nii.nii
  13. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I17415_masked_brain.nii.nii
  14. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I17585_masked_brain.nii.nii
  15. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20080_masked_brain.nii.nii
  16. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20332_masked_brain.nii.nii
  17. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20506_masked_brain.nii.nii
  18. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I20771_masked_brain.nii.nii
  19. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I23153_masked_brain.nii.nii
  20. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableAD__I23431_masked_brain.nii.nii
  21. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I11771_masked_brain.nii.nii
  22. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I12151_masked_brain.nii.nii
  23. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I13447_masked_brain.nii.nii
  24. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I14114_masked_brain.nii.nii
  25. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I14783_masked_brain.nii.nii
  26. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I16867_masked_brain.nii.nii
  27. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I17508_masked_brain.nii.nii
  28. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I18931_masked_brain.nii.nii
  29. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I20550_masked_brain.nii.nii
  30. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I20726_masked_brain.nii.nii
  31. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23089_masked_brain.nii.nii
  32. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23667_masked_brain.nii.nii
  33. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23798_masked_brain.nii.nii
  34. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I23901_masked_brain.nii.nii
  35. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I24641_masked_brain.nii.nii
  36. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I25715_masked_brain.nii.nii
  37. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I26030_masked_brain.nii.nii
  38. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I26940_masked_brain.nii.nii
  39. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I26947_masked_brain.nii.nii
  40. BIN  MRI_volumes_customtemplate_float32/Inf_NaN_stableNL__I28549_masked_brain.nii.nii
  41. BIN  ROC.png
  42. BIN  acc.png
  43. BIN  avgloss_epoch_curve.png
  44. BIN  cnn_net.pth
  45. + 21 - 0  cnn_net_data.csv
  46. BIN  first_train_cnn.pth
  47. + 10 - 20  main.py
  48. + 3 - 3  original_model/mci_train.py
  49. + 0 - 0  original_model/utils_old/__init__.py
  50. + 0 - 0  original_model/utils_old/augmentation.py
  51. + 0 - 0  original_model/utils_old/heatmapPlotting.py
  52. + 0 - 0  original_model/utils_old/models.py
  53. + 0 - 0  original_model/utils_old/patientsort.py
  54. + 0 - 0  original_model/utils_old/preprocess.py
  55. + 0 - 0  original_model/utils_old/sepconv3D.py
  56. + 228 - 0  utils/CNN.py
  57. + 4 - 1  utils/CNN_Layers.py
  58. + 0 - 78  utils/CNN_methods.py
  59. + 0 - 222  utils/models.py
  60. + 0 - 136  utils/newCNN.py
  61. BIN  valloss_epoch_curve.png

+ 21 - 0  cnn_net_data.csv

@@ -0,0 +1,21 @@
+,Epoch,Avg_loss,Time,Val_loss
+0,1.0,0.5493318528226279,63.193450927734375,0.6318140625953674
+1,2.0,0.5052056489060226,126.88481521606445,0.615243136882782
+2,3.0,0.5018373664143017,191.29927468299866,0.5647260546684265
+3,4.0,0.48469717265332785,256.35769629478455,0.7602677941322327
+4,5.0,0.49741162923933235,321.498055934906,0.5531934499740601
+5,6.0,0.4794707558687451,386.7943546772003,0.5435163974761963
+6,7.0,0.48868317627212376,452.08525347709656,0.6170985102653503
+7,8.0,0.4829676952755567,517.441656589508,0.5604787468910217
+8,9.0,0.5030312086771993,582.736382484436,0.7099340558052063
+9,10.0,0.4912947150110041,647.9085173606873,0.5861038565635681
+10,11.0,0.4996078980779185,712.9769625663757,0.6088337898254395
+11,12.0,0.4908207041545979,778.0235903263092,0.6034520268440247
+12,13.0,0.4944904490003308,843.1272113323212,0.7052374482154846
+13,14.0,0.4979538926221792,908.1915690898895,0.5868995785713196
+14,15.0,0.48077325710972535,973.2567093372345,0.6026476621627808
+15,16.0,0.495152342956043,1038.3403534889221,0.6178974509239197
+16,17.0,0.48088199594645825,1103.4229621887207,0.5096692442893982
+17,18.0,0.48285331656631914,1168.5112218856812,0.5497284531593323
+18,19.0,0.48006295116202347,1233.6090314388275,0.5811787247657776
+19,20.0,0.47963200437212455,1298.728411436081,0.6435700058937073
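
Note: the logged curves can be re-plotted from this CSV without re-running training. A minimal sketch, assuming the file sits in the working directory where train_model wrote it (column names are taken from the header above):

    import pandas as pd
    import matplotlib.pyplot as plt

    # Load the per-epoch training log written by CNN_Net.train_model
    log = pd.read_csv('./cnn_net_data.csv', index_col=0)

    # Training and validation loss on one figure
    plt.plot(log['Epoch'], log['Avg_loss'], label='Avg. training loss')
    plt.plot(log['Epoch'], log['Val_loss'], label='Validation loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()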

+ 10 - 20  main.py

@@ -4,7 +4,7 @@ import torchvision
 # FOR DATA
 from utils.preprocess import prepare_datasets, prepare_predict
 from utils.show_image import show_image
-from utils.newCNN import CNN_Net
+from utils.CNN import CNN_Net
 from torch.utils.data import DataLoader
 from torchvision import datasets
 
@@ -28,17 +28,6 @@ import glob
 print("--- RUNNING ---")
 print("Pytorch Version: " + torch. __version__)
 
-
-# MAYBE??
-'''
-import sys
-sys.path.append('//data/data_wnx3/data_wnx1/rschuurs/CNN+RNN-2class-1cnn-CLEAN/utils')
-
-import os
-os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" 
-os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # use id from $ nvidia-smi
-'''
-
 # LOADING DATA
 # data & training properties:
 val_split = 0.2     # % of val and test, rest will be train
@@ -61,7 +50,7 @@ seed = 12       # TODO Randomize seed
 # }
 
 properties = {
-    "batch_size":4,
+    "batch_size":6,
     "padding":0,
     "dilation":1,
     "groups":1,
@@ -73,8 +62,9 @@ properties = {
 
 # Might have to replace datapaths or separate between training and testing
 model_filepath = '/data/data_wnx1/rschuurs/Pytorch_CNN-RNN'
-CNN_filepath = '/data/data_wnx1/rschuurs/Pytorch_CNN-RNN/cnn_net.pth'
+CNN_filepath = '/data/data_wnx1/rschuurs/Pytorch_CNN-RNN/cnn_net.pth'       # cnn_net.pth
-mri_datapath = '/data/data_wnx1/rschuurs/Pytorch_CNN-RNN/MRI_volumes_customtemplate_float32/'
+# mri_datapath = '/data/data_wnx1/rschuurs/Pytorch_CNN-RNN/PET_volumes_customtemplate_float32/'   # Small Test
+mri_datapath = '/data/data_wnx1/_Data/AlzheimersDL/CNN+RNN-2class-1cnn+data/PET_volumes_customtemplate_float32/'   # Real data
 annotations_datapath = './data/data_wnx1/rschuurs/Pytorch_CNN-RNN/LP_ADNIMERGE.csv'
 
 # annotations_file = pd.read_csv(annotations_datapath)    # DataFrame
@@ -84,7 +74,7 @@ annotations_datapath = './data/data_wnx1/rschuurs/Pytorch_CNN-RNN/LP_ADNIMERGE.c
 training_data, val_data, test_data = prepare_datasets(mri_datapath, val_split, seed)
 
 # Create data loaders
-train_dataloader = DataLoader(training_data, batch_size=properties['batch_size'], shuffle=True)
+train_dataloader = DataLoader(training_data, batch_size=properties['batch_size'], shuffle=True, drop_last=True)
 test_dataloader = DataLoader(test_data, batch_size=properties['batch_size'], shuffle=True)
 val_dataloader = DataLoader(val_data, batch_size=properties['batch_size'], shuffle=True)
 
@@ -110,19 +100,19 @@ val_dataloader = DataLoader(val_data, batch_size=properties['batch_size'], shuff
 #     x = x+1
 
 
-train = True
+train = False
 predict = False
 CNN = CNN_Net(train_dataloader, prps=properties, final_layer_size=2)
 CNN.cuda()
 
 # RUN CNN
 if(train):
-    CNN.train_model(train_dataloader, CNN_filepath, epochs=5)
+    CNN.train_model(train_dataloader, test_dataloader, CNN_filepath, epochs=20)
-    CNN.evaluate_model(val_dataloader)
+    CNN.evaluate_model(val_dataloader, roc=True)
 
 else:
     CNN.load_state_dict(torch.load(CNN_filepath))
-    CNN.evaluate_model(val_dataloader)
+    CNN.evaluate_model(val_dataloader, roc=True)
 
 
 # PREDICT MODE TO TEST INDIVIDUAL IMAGES
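
Note: a likely motivation for the drop_last=True added to the training loader above is the BatchNorm1d used in the fully connected layers, which cannot normalize a leftover batch of size 1 in training mode. A minimal sketch of that failure mode (the width 20 is illustrative):

    import torch
    import torch.nn as nn

    bn = nn.BatchNorm1d(20)        # same kind of normalization as in Fc_elu_drop
    bn.train()

    bn(torch.randn(6, 20))         # a full batch normalizes fine
    try:
        bn(torch.randn(1, 20))     # a size-1 remainder batch raises in training mode
    except ValueError as e:
        print('BatchNorm on a batch of 1:', e)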

+ 3 - 3  original_model/mci_train.py

@@ -4,9 +4,9 @@ from sklearn.metrics import confusion_matrix
 from sklearn.preprocessing import label_binarize
 from keras import regularizers
 import pickle as pickle
-from utils.preprocess import DataLoader
+from utils_old.preprocess import DataLoader
-from utils.models import Parameters, CNN_Net, RNN_Net
+from utils_old.models import Parameters, CNN_Net, RNN_Net
-from utils.heatmapPlotting import heatmapPlotter
+from utils_old.heatmapPlotting import heatmapPlotter
 from matplotlib import pyplot as plt
 import pandas as pd
 from scipy import interp

+ 0 - 0  original_model/utils/__init__.py → original_model/utils_old/__init__.py
+ 0 - 0  original_model/utils/augmentation.py → original_model/utils_old/augmentation.py
+ 0 - 0  original_model/utils/heatmapPlotting.py → original_model/utils_old/heatmapPlotting.py
+ 0 - 0  original_model/utils/models.py → original_model/utils_old/models.py
+ 0 - 0  original_model/utils/patientsort.py → original_model/utils_old/patientsort.py
+ 0 - 0  original_model/utils/preprocess.py → original_model/utils_old/preprocess.py
+ 0 - 0  original_model/utils/sepconv3D.py → original_model/utils_old/sepconv3D.py


+ 228 - 0  utils/CNN.py

@@ -0,0 +1,228 @@
+from torch import device, cuda
+import torch
+from torch import add
+import torch.nn as nn
+import utils.CNN_Layers as CustomLayers
+import torch.nn.functional as F
+import torch.optim as optim
+import utils.CNN_methods as CNN
+import pandas as pd
+import matplotlib.pyplot as plt
+import time
+import numpy as np
+# from sklearn.metrics import roc_curve, auc
+
+class CNN_Net(nn.Module):
+    def __init__(self, input, prps, final_layer_size=5):
+        super(CNN_Net, self).__init__()
+        self.final_layer_size = final_layer_size
+        self.device = device('cuda:0' if cuda.is_available() else 'cpu')
+        print("CNN Initialized. Using: " + str(self.device))
+
+        # GETS FIRST IMAGE FOR SIZE
+        data_iter = iter(input)
+        first_batch = next(data_iter)
+        first_features = first_batch[0]
+        image = first_features[0]
+
+        # LAYERS
+        print(f"CNN Model Initialization. Input size: {image.size()}")
+        self.conv1 = CustomLayers.Conv_elu_maxpool_drop(1, 192, (11, 13, 11), stride=(4,4,4), pool=True, prps=prps)
+        self.conv2 = CustomLayers.Conv_elu_maxpool_drop(192, 384, (5, 6, 5), stride=(1,1,1), pool=True, prps=prps)
+        self.conv3_mid_flow = CustomLayers.Mid_flow(384, 384, prps=prps)
+        self.conv4_sepConv = CustomLayers.Conv_elu_maxpool_drop(384, 96,(3, 4, 3), stride=(1,1,1), pool=True, prps=prps,
+                                                                sep_conv=True)
+        self.conv5_sepConv = CustomLayers.Conv_elu_maxpool_drop(96, 48, (3, 4, 3), stride=(1, 1, 1), pool=True,
+                                                                prps=prps, sep_conv=True)
+        self.fc1 = CustomLayers.Fc_elu_drop(113568, 20, prps=prps, softmax=False)      # TODO, concatenate clinical data after this
+        self.fc2 = CustomLayers.Fc_elu_drop(20, final_layer_size, prps=prps, softmax=True)  # For now this works as output layer, though may be incorrect
+
+    # FORWARDS
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.conv2(x)
+        x = self.conv3_mid_flow(x)
+        x = self.conv4_sepConv(x)
+        x = self.conv5_sepConv(x)
+
+        # FLATTEN x
+        flatten_size = x.size(1) * x.size(2) * x.size(3) * x.size(4)
+        x = x.view(-1, flatten_size)
+
+        x = self.fc1(x)
+        x = self.fc2(x)
+        return x
+
+    # TRAIN
+    def train_model(self, trainloader, testloader, PATH, epochs):
+        self.train()
+        criterion = nn.CrossEntropyLoss(reduction='mean')
+        optimizer = optim.Adam(self.parameters(), lr=1e-5)
+
+        losses = pd.DataFrame(columns=['Epoch', 'Avg_loss', 'Time'])
+        start_time = time.time()  # seconds
+
+        for epoch in range(epochs):  # loop over the dataset multiple times
+            epoch += 1
+
+            # Estimate & count training time
+            t = time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time))
+            t_remain = time.strftime("%H:%M:%S", time.gmtime((time.time() - start_time)/epoch * epochs))
+            print(f"{epoch/epochs * 100} || {epoch}/{epochs} || Time: {t}/{t_remain}")
+
+            running_loss = 0.0
+
+            # Batches & training
+            for i, data in enumerate(trainloader, 0):
+                # get the inputs; data is a list of [inputs, labels]
+                inputs, labels = data[0].to(self.device), data[1].to(self.device)
+
+                # zero the parameter gradients
+                optimizer.zero_grad()
+
+                # forward + backward + optimize
+                outputs = self.forward(inputs)
+                loss = criterion(outputs, labels)   # This loss is the mean of losses for the batch
+                loss.backward()
+                optimizer.step()
+
+                # adds average batch loss to running loss
+                running_loss += loss.item()
+
+                # mini-batches for progress
+                if(i%10==0 and i!=0):
+                    print(f"{i}/{len(trainloader)}, temp. loss:{running_loss / len(trainloader)}")
+
+            # average loss
+            avg_loss = running_loss / len(trainloader)      # Running_loss / number of batches
+            print(f"Avg. loss: {avg_loss}")
+
+            # loss on validation
+            val_loss = self.evaluate_model(testloader, roc=False)
+
+            losses = losses.append({'Epoch':int(epoch), 'Avg_loss':avg_loss, 'Val_loss':val_loss, 'Time':time.time() - start_time}, ignore_index=True)
+
+
+        print('Finished Training')
+        losses.to_csv('./cnn_net_data.csv')
+
+        # MAKES EPOCH VS AVG LOSS GRAPH
+        plt.plot(losses['Epoch'], losses['Avg_loss'])
+        plt.xlabel('Epoch')
+        plt.ylabel('Average Loss')
+        plt.title('Average Loss vs Epoch On Training')
+        plt.savefig('./avgloss_epoch_curve.png')
+        plt.show()
+
+        # MAKES EPOCH VS VALIDATION LOSS GRAPH
+        plt.plot(losses['Epoch'], losses['Val_loss'])
+        plt.xlabel('Epoch')
+        plt.ylabel('Validation Loss')
+        plt.title('Validation Loss vs Epoch On Training')
+        plt.savefig('./valloss_epoch_curve.png')
+        plt.show()
+
+        torch.save(self.state_dict(), PATH)
+        print("Model saved")
+
+    # TEST
+    def evaluate_model(self, testloader, roc):
+        correct = 0
+        total = 0
+
+        predictions = []
+        true_labels = []
+
+        criterion = nn.CrossEntropyLoss(reduction='mean')
+        self.eval()
+        # since we're not training, we don't need to calculate the gradients for our outputs
+        with torch.no_grad():
+            for data in testloader:
+                images, labels = data[0].to(self.device), data[1].to(self.device)
+                # calculate outputs by running images through the network
+                outputs = self.forward(images)
+                # the class with the highest energy is what we choose as prediction
+
+                loss = criterion(outputs, labels)  # mean loss from batch
+
+                # Gets accuracy
+                _, predicted = torch.max(outputs.data, 1)
+                total += labels.size(0)
+                correct += (predicted == labels).sum().item()
+
+                # Saves predictions and labels for ROC
+                if(roc):
+                    predictions.extend(outputs.data[:,1].cpu().numpy())     # Grabs probability of positive
+                    true_labels.extend(labels.cpu().numpy())
+
+        print(f'Accuracy of the network on {total} scans: {100 * correct // total}%')
+
+        if(not roc): print(f'Validation loss: {loss.item()}')
+        else:
+            # ROC
+            thresholds = np.linspace(0, 1, num=50)
+            tpr = []
+            fpr = []
+            acc = []
+
+            predictions = np.array(predictions)     # convert to ndarray so the >= comparison below broadcasts
+            true_labels = np.array(true_labels)
+
+            for threshold in thresholds:
+                # Thresholding the predictions (meaning all predictions above threshold are considered positive)
+                thresholded_predictions = (predictions >= threshold).astype(int)
+
+                # Calculating true positives, false positives, true negatives, false negatives
+                true_positives = np.sum((thresholded_predictions == 1) & (true_labels == 1))
+                false_positives = np.sum((thresholded_predictions == 1) & (true_labels == 0))
+                true_negatives = np.sum((thresholded_predictions == 0) & (true_labels == 0))
+                false_negatives = np.sum((thresholded_predictions == 0) & (true_labels == 1))
+
+                accuracy  = (true_positives + true_negatives) / (true_positives + false_positives + true_negatives + false_negatives)
+
+                # Calculate TPR and FPR
+                tpr.append(true_positives / (true_positives + false_negatives))
+                fpr.append(false_positives / (false_positives + true_negatives))
+                acc.append(accuracy)
+
+
+            plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve')
+            plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+            plt.xlim([0.0, 1.0])
+            plt.ylim([0.0, 1.0])
+
+            plt.xlabel('False Positive Rate (1 - Specificity)')
+            plt.ylabel('True Positive Rate (Sensitivity)')
+            plt.title('Receiver Operating Characteristic (ROC) Curve')
+            plt.legend(loc="lower right")
+            plt.savefig('./ROC.png')
+            plt.show()
+
+            plt.plot(thresholds, acc)
+            plt.xlabel('Thresholds')
+            plt.ylabel('Accuracy')
+            plt.title('Accuracy vs thresholds')
+            plt.savefig('./acc.png')
+            plt.show()
+
+
+            # ROC ATTEMPT 2
+            # fprRoc, tprRoc = roc_curve(true_labels, predictions)
+            # plt.plot(fprRoc, tprRoc)
+
+        self.train()
+
+        return loss.item()      # note: this is the loss of the final batch only, not an average over the loader
+
+
+    # PREDICT
+    def predict(self, loader):
+        self.eval()
+        with torch.no_grad():
+            for data in loader:
+                images, labels = data[0].to(self.device), data[1].to(self.device)
+                outputs = self.forward(images)
+                # the class with the highest energy is what we choose as prediction
+                _, predicted = torch.max(outputs.data, 1)
+        self.train()
+        return predicted
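
Note: the hand-rolled threshold sweep in evaluate_model can be cross-checked against scikit-learn, as the commented-out "ROC ATTEMPT 2" suggests. A minimal sketch with stand-in data; in evaluate_model, true_labels and predictions are collected from the loader as shown above:

    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.metrics import roc_curve, auc

    # Stand-in scores and labels; the real ones come from the network outputs
    true_labels = np.array([0, 0, 1, 1, 0, 1])
    predictions = np.array([0.2, 0.4, 0.8, 0.3, 0.1, 0.9])

    fpr, tpr, thresholds = roc_curve(true_labels, predictions)
    print('AUC:', auc(fpr, tpr))

    plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve')
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlabel('False Positive Rate (1 - Specificity)')
    plt.ylabel('True Positive Rate (Sensitivity)')
    plt.legend(loc='lower right')
    plt.show()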

+ 4 - 1  utils/newCNN_Layers.py → utils/CNN_Layers.py

@@ -79,7 +79,7 @@ class Mid_flow(nn.Module):
 
 
 class Fc_elu_drop(nn.Module):
-    def __init__(self, input_size, output_size, prps):
+    def __init__(self, input_size, output_size, softmax, prps):
         super(Fc_elu_drop, self).__init__()
         self.input_size = input_size
         self.output_size = output_size
@@ -89,6 +89,8 @@ class Fc_elu_drop(nn.Module):
         self.normalization = nn.BatchNorm1d(output_size)
         self.elu = nn.ELU()
         self.dropout = nn.Dropout(p=prps['drop_rate'])
+        self.softmax_status = softmax
+        if(softmax): self.softmax = nn.Softmax(dim=1)   # dim=1 avoids the implicit-dim deprecation warning
 
         self.weight = nn.Parameter(torch.randn(input_size, output_size))
         self.bias = nn.Parameter(torch.randn(output_size))
@@ -102,6 +104,7 @@ class Fc_elu_drop(nn.Module):
         x = self.normalization(x)
         x = self.elu(x)
         x = self.dropout(x)
+        if(self.softmax_status): x = self.softmax(x)
 
         # return torch.matmul(x, self.weight) + self.bias
         return x        # TODO WHAT??? WEIGHT & BIAS YES OR NO?
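
Note: one caveat with the softmax flag added above: nn.CrossEntropyLoss already applies log-softmax to its input internally, so feeding it softmax outputs normalizes twice and dampens gradients. The usual pattern is to pass raw logits to the loss and apply softmax only where probabilities are needed; a minimal sketch:

    import torch
    import torch.nn as nn

    logits = torch.randn(4, 2)                # raw final-layer outputs
    labels = torch.tensor([0, 1, 1, 0])

    criterion = nn.CrossEntropyLoss()
    loss = criterion(logits, labels)          # log-softmax happens inside the loss

    probs = torch.softmax(logits, dim=1)      # softmax only for reporting/ROC scores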

+ 0 - 78  utils/CNN_methods.py

@@ -1,78 +0,0 @@
-from torch import add
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.optim as optim
-
-"""
-Returns a function that convolutes or separable convolutes, normalizes, activates (ELU), pools and dropouts input.
- 
-Kernel_size = (height, width, depth)
-
-CAN DO SEPARABLE CONVOLUTION IF GROUP = 2!!!! :))))
-"""
-
-def conv_elu_maxpool_drop(in_channel, filters, kernel_size, stride=(1,1,1), padding=0, dilation=1,
-                      groups=1, bias=True, padding_mode='zeros', pool=False, drop_rate=0, sep_conv = False):
-    def f(input):
-
-        # SEPARABLE CONVOLUTION
-        if(sep_conv):
-
-            # SepConv depthwise, Normalizes, and ELU activates
-            sepConvDepthwise = nn.Conv3d(in_channel, filters, kernel_size, stride=stride, padding=padding,
-                                         groups=in_channel, bias=bias, padding_mode=padding_mode)(input)
-
-            # SepConv pointwise
-            # Todo, will stride & padding be correct for this?
-            conv = nn.Conv3d(in_channel, filters, kernel_size=1, stride=stride, padding=padding,
-                                         groups=1, bias=bias, padding_mode=padding_mode)(sepConvDepthwise)
-
-        # CONVOLUTES
-        else:
-            # Convolutes, Normalizes, and ELU activates
-            conv = nn.Conv3d(in_channel, filters, kernel_size, stride=stride, padding=padding, dilation=dilation,
-                             groups=groups, bias=bias, padding_mode=padding_mode)(input)
-
-        normalization = nn.BatchNorm3d(filters)(conv)
-        elu = nn.ELU()(normalization)
-
-        # Pools
-        if (pool):
-            elu = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)(elu)
-
-        return nn.Dropout(p=drop_rate)(elu)
-    return f
-
-
-'''
-Mid_flow in CNN. sep_convolutes 3 times, adds residual (initial input) to 3 times convoluted, and activates through ELU()
-'''
-
-def mid_flow(I, drop_rate, filters):
-    in_channel = None   # TODO, IN_CHANNEL
-
-    residual = I        # TODO, DOES THIS ACTUALLY COPY?
-
-    x = conv_elu_maxpool_drop(in_channel, filters, (3,3,3), drop_rate=drop_rate)(I)
-    x = conv_elu_maxpool_drop(in_channel, filters, (3,3,3), drop_rate=drop_rate)(x)
-    x = conv_elu_maxpool_drop(in_channel, filters, (3, 3, 3), drop_rate=drop_rate)(x)
-
-    x = add(x, residual)
-    x = nn.ELU()(x)
-    return x
-
-
-"""
-Returns a function that Fully Connects (FC), normalizes, activates (ELU), and dropouts input.
-"""
-
-def fc_elu_drop(in_features, units, drop_rate=0):
-    def f(input):
-
-        fc = nn.Linear(in_features, out_features=units)(input)
-        fc = nn.BatchNorm3d(units)(fc)          # TODO 3d or 2d???
-        fc = nn.ELU()(fc)
-        fc = nn.Dropout(p=drop_rate)
-        return fc
-
-    return f
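
Note: the deleted helper above attempted a depthwise-separable 3-D convolution; its replacement lives in utils/CNN_Layers.py as Conv_elu_maxpool_drop(..., sep_conv=True). For reference, a minimal sketch of the standard depthwise-then-pointwise factorization it was aiming for:

    import torch
    import torch.nn as nn

    in_ch, out_ch = 4, 8
    depthwise = nn.Conv3d(in_ch, in_ch, kernel_size=3, padding=1, groups=in_ch)  # one filter per input channel
    pointwise = nn.Conv3d(in_ch, out_ch, kernel_size=1)                          # 1x1x1 channel mixing

    x = torch.randn(2, in_ch, 16, 16, 16)
    y = pointwise(depthwise(x))
    print(y.shape)    # torch.Size([2, 8, 16, 16, 16])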

+ 0 - 222  utils/models.py

@@ -1,222 +0,0 @@
-from torch import device, cuda
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.optim as optim
-import utils.CNN_methods as CNN
-
-
-# METHODS: CONV3D, CONV2D, MAXPOOL, LINEAR, ...
-
-
-class CNN_Net(nn.Module):
-
-    # Defines all properties / layers that can be used
-    def __init__(self, mri_volume, params):
-        super().__init__()
-
-        # self.parameters = nn.ParameterList(params)
-        self.model = xalex3D(mri_volume)
-        self.device = device('cuda:0' if cuda.is_available() else 'cpu')
-
-        print("CNN Initialized. Using: " + str(self.device))
-
-
-    # Implements layers with x data, "running an epoch on x"
-    def forward(self, x):
-        x = F.relu(self.model.f(x, []))         # TODO Add Clinical
-        return x
-
-    # Training data
-    def train(self, trainloader, PATH):
-        criterion = nn.CrossEntropyLoss()
-        optimizer = optim.Adam(self.parameters(), lr=1e-5)
-
-        for epoch in range(2):  # loop over the dataset multiple times
-
-            running_loss = 0.0
-            for i, data in enumerate(trainloader, 0):
-                # get the inputs; data is a list of [inputs, labels]
-                inputs, labels = data[0].to(self.device), data[1].to(self.device)
-
-                # zero the parameter gradients
-                optimizer.zero_grad()
-
-                # forward + backward + optimize
-                outputs = self.forward(inputs)
-                loss = criterion(outputs, labels)
-                loss.backward()
-                optimizer.step()
-
-                # print statistics
-                running_loss += loss.item()
-                if i % 2000 == 1999:  # print every 2000 mini-batches
-                    print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}')
-                    running_loss = 0.0
-
-        print('Finished Training')
-
-        torch.save(self.state_dict(), PATH)
-
-
-    def test(self, testloader):
-        correct = 0
-        total = 0
-        # since we're not training, we don't need to calculate the gradients for our outputs
-        with torch.no_grad():
-            for data in testloader:
-                images, labels = data[0].to(self.device), data[1].to(self.devie)
-                # calculate outputs by running images through the network
-                outputs = self.forward(images)
-                # the class with the highest energy is what we choose as prediction
-                _, predicted = torch.max(outputs.data, 1)
-                total += labels.size(0)
-                correct += (predicted == labels).sum().item()
-
-        print(f'Accuracy of the network: {100 * correct // total} %')
-
-
-
-'''
-XAlex3D model.
-
-Functions used:
-- conv_elu_maxpool_drop(in_channel, filters, kernel_size, stride=(1,1,1), padding=0, dilation=1,
-                      groups=1, bias=True, padding_mode='zeros', pool=False, drop_rate=0, sep_conv = False)
-'''
-
-# TODO, figure out IN_CHANNEL
-# TODO, in_channel
-class xalex3D(nn.Module):
-    def __init__(self, mri_volume, drop_rate=0, final_layer_size=50):
-        self.drop_rate = drop_rate
-        self.final_layer_size = final_layer_size
-
-        # self.conv1 = CNN.conv_elu_maxpool_drop(len(next(iter(mri_volume))), 192, (11, 13, 11), stride=(4, 4, 4), drop_rate=self.drop_rate, pool=True)(next(iter(mri_volume)))
-        # self.conv2 = CNN.conv_elu_maxpool_drop(self.conv1.shape(), 384, (5, 6, 5), stride=(1, 1, 1), drop_rate=self.drop_rate, pool=True)(self.conv1)
-        # self.conv_mid_3 = CNN.mid_flow(self.conv2.shape(), self.drop_rate, filters=384)
-        # self.groupConv4 = CNN.conv_elu_maxpool_drop(self.conv_mid_3.shape(), 96, (3, 4, 3), stride=(1, 1, 1), drop_rate=self.drop_rate,
-        #                                        pool=True, groups=2)(self.conv_mid_3)
-        # self.groupConv5 = CNN.conv_elu_maxpool_drop(self.groupConv4.shape(), 48, (3, 4, 3), stride=(1, 1, 1), drop_rate=self.drop_rate,
-        #                                        pool=True, groups=2)(self.groupConv4)
-        #
-        # self.fc1 = CNN.fc_elu_drop(self.groupConv5.shape(), 20, drop_rate=self.drop_rate)(self.groupConv5)
-        #
-        # self.fc2 = CNN.fc_elu_drop(self.fc1.shape(), 50, drop_rate=self.drop_rate)(self.fc1)
-
-
-    def f(self, mri_volume, clinical_inputs):
-
-        conv1 = CNN.conv_elu_maxpool_drop(mri_volume.size(), 192, (11, 13, 11), stride=(4, 4, 4), drop_rate=self.drop_rate, pool=True)(mri_volume)
-
-        conv2 = CNN.conv_elu_maxpool_drop(conv1.size(), 384, (5, 6, 5), stride=(1, 1, 1), drop_rate=self.drop_rate, pool=True)(conv1)
-
-        # MIDDLE FLOW, 3 times sepConv & ELU()
-        print(f"Residual: {conv2.shape}")
-        conv_mid_3 = CNN.mid_flow(conv2, self.drop_rate, filters=384)
-
-        # CONV in 2 groups (left & right)
-        groupConv4 = CNN.conv_elu_maxpool_drop(conv_mid_3.size(), 96, (3, 4, 3), stride=(1, 1, 1), drop_rate=self.drop_rate,
-                                               pool=True, groups=2)(conv_mid_3)
-        groupConv5 = CNN.conv_elu_maxpool_drop(groupConv4.size(), 48, (3, 4, 3), stride=(1, 1, 1), drop_rate=self.drop_rate,
-                                               pool=True, groups=2)(groupConv4)
-
-        # FCs
-        fc1 = CNN.fc_elu_drop(groupConv5.size(), 20, drop_rate=self.drop_rate)(groupConv5)
-
-        fc2 = CNN.fc_elu_drop(fc1.size(), 50, drop_rate=self.drop_rate)(fc1)
-
-        return fc2
-
-
-
-
-
-
-"""     LAST PART:
-        
-        # Flatten 3D conv network representations
-        flat_conv_6 = Reshape((np.prod(K.int_shape(conv6_concat)[1:]),))(conv6_concat)
-
-        # 2-layer Dense network for clinical features
-        vol_fc1 = _fc_bn_relu_drop(64, w_regularizer=w_regularizer,
-                                   drop_rate=drop_rate)(clinical_inputs)
-
-        flat_volume = _fc_bn_relu_drop(20, w_regularizer=w_regularizer,
-                                       drop_rate=drop_rate)(vol_fc1)
-
-        # Combine image and clinical features embeddings
-
-        fc1 = _fc_bn_relu_drop(20, w_regularizer, drop_rate=drop_rate, name='final_conv')(flat_conv_6)
-        flat = concatenate([fc1, flat_volume])
-
-        # Final 4D embedding
-
-        fc2 = Dense(units=final_layer_size, activation='linear', kernel_regularizer=w_regularizer, name='features')(
-            flat)  # was linear activation"""
-
-
-''' FULL CODE:
-
-
-    # First layer
-    conv1_left = _conv_bn_relu_pool_drop(192, 11, 13, 11, strides=(4, 4, 4), w_regularizer=w_regularizer,
-                                         drop_rate=drop_rate, pool=True)(mri_volume)
-   
-    # Second layer
-    conv2_left = _conv_bn_relu_pool_drop(384, 5, 6, 5, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
-        conv1_left)
-
-    # Introduce Middle Flow (separable convolutions with a residual connection)
-    print('residual shape ' + str(conv2_left.shape))
-    conv_mid_1 = mid_flow(conv2_left, drop_rate, w_regularizer,
-                          filters=384)  # changed input to conv2_left from conv2_concat
-    
-    # Split channels for grouped-style convolution
-    conv_mid_1_1 = Lambda(lambda x: x[:, :, :, :, :192])(conv_mid_1)
-    conv_mid_1_2 = Lambda(lambda x: x[:, :, :, :, 192:])(conv_mid_1)
-
-    conv5_left = _conv_bn_relu_pool_drop(96, 3, 4, 3, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
-        conv_mid_1_1)
-
-    conv5_right = _conv_bn_relu_pool_drop(96, 3, 4, 3, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
-        conv_mid_1_2)
-
-    conv6_left = _conv_bn_relu_pool_drop(48, 3, 4, 3, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
-        conv5_left)
-
-    conv6_right = _conv_bn_relu_pool_drop(48, 3, 4, 3, w_regularizer=w_regularizer, drop_rate=drop_rate, pool=True)(
-        conv5_right)
-
-    conv6_concat = concatenate([conv6_left, conv6_right], axis=-1)
-
-    # convExtra = Conv3D(48, (20,30,20),
-    #                     strides = (1,1,1), kernel_initializer="he_normal",
-    #                     padding="same", kernel_regularizer = w_regularizer)(conv6_concat)
-
-    # Flatten 3D conv network representations
-    flat_conv_6 = Reshape((np.prod(K.int_shape(conv6_concat)[1:]),))(conv6_concat)
-
-    # 2-layer Dense network for clinical features
-    vol_fc1 = _fc_bn_relu_drop(64, w_regularizer=w_regularizer,
-                               drop_rate=drop_rate)(clinical_inputs)
-
-    flat_volume = _fc_bn_relu_drop(20, w_regularizer=w_regularizer,
-                                   drop_rate=drop_rate)(vol_fc1)
-
-    # Combine image and clinical features embeddings
-
-    fc1 = _fc_bn_relu_drop(20, w_regularizer, drop_rate=drop_rate, name='final_conv')(flat_conv_6)
-    # fc2 = _fc_bn_relu_drop (40, w_regularizer, drop_rate = drop_rate) (fc1)
-    flat = concatenate([fc1, flat_volume])
-
-    # Final 4D embedding
-
-    fc2 = Dense(units=final_layer_size, activation='linear', kernel_regularizer=w_regularizer, name='features')(
-        flat)  # was linear activation
-    '''
-
-
-
-
-

+ 0 - 136  utils/newCNN.py

@@ -1,136 +0,0 @@
-from torch import device, cuda
-import torch
-from torch import add
-import torch.nn as nn
-import utils.newCNN_Layers as CustomLayers
-import torch.nn.functional as F
-import torch.optim as optim
-import utils.CNN_methods as CNN
-import pandas as pd
-import matplotlib.pyplot as plt
-
-
-class CNN_Net(nn.Module):
-    def __init__(self, input, prps, final_layer_size=5):
-        super(CNN_Net, self).__init__()
-        self.final_layer_size = final_layer_size
-        self.device = device('cuda:0' if cuda.is_available() else 'cpu')
-        print("CNN Initialized. Using: " + str(self.device))
-
-        # GETS FIRST IMAGE FOR SIZE
-        data_iter = iter(input)
-        first_batch = next(data_iter)
-        first_features = first_batch[0]
-        image = first_features[0]
-
-        # LAYERS
-        print(f"CNN Model Initialization. Input size: {image.size()}")
-        self.conv1 = CustomLayers.Conv_elu_maxpool_drop(1, 192, (11, 13, 11), stride=(4,4,4), pool=True, prps=prps)
-        self.conv2 = CustomLayers.Conv_elu_maxpool_drop(192, 384, (5, 6, 5), stride=(1,1,1), pool=True, prps=prps)
-        self.conv3_mid_flow = CustomLayers.Mid_flow(384, 384, prps=prps)
-        self.conv4_sepConv = CustomLayers.Conv_elu_maxpool_drop(384, 96,(3, 4, 3), stride=(1,1,1), pool=True, prps=prps,
-                                                                sep_conv=True)
-        self.conv5_sepConv = CustomLayers.Conv_elu_maxpool_drop(96, 48, (3, 4, 3), stride=(1, 1, 1), pool=True,
-                                                                prps=prps, sep_conv=True)
-        self.fc1 = CustomLayers.Fc_elu_drop(113568, 20, prps=prps)      # TODO, concatenate clinical data after this
-        self.fc2 = CustomLayers.Fc_elu_drop(20, final_layer_size, prps=prps)
-
-    # FORWARDS
-    def forward(self, x):
-        x = self.conv1(x)
-        x = self.conv2(x)
-        x = self.conv3_mid_flow(x)
-        x = self.conv4_sepConv(x)
-        x = self.conv5_sepConv(x)
-
-
-        # FLATTEN x
-        flatten_size = x.size(1) * x.size(2) * x.size(3) * x.size(4)
-        x = x.view(-1, flatten_size)
-
-        x = self.fc1(x)
-        x = self.fc2(x)
-        return x
-
-    # TRAIN
-    def train_model(self, trainloader, PATH, epochs):
-        self.train()
-        criterion = nn.CrossEntropyLoss(reduction='mean')
-        optimizer = optim.Adam(self.parameters(), lr=1e-5)
-
-        losses = pd.DataFrame(columns=['Epoch', 'Avg_loss'])
-
-        for epoch in range(epochs+1):  # loop over the dataset multiple times
-            print(f"Epoch {epoch}/{epochs}")
-            running_loss = 0.0
-
-            for i, data in enumerate(trainloader, 0):   # loops over batches
-                # get the inputs; data is a list of [inputs, labels]
-                inputs, labels = data[0].to(self.device), data[1].to(self.device)
-
-                # zero the parameter gradients
-                optimizer.zero_grad()
-
-                # forward + backward + optimize
-                outputs = self.forward(inputs)
-                loss = criterion(outputs, labels)   # This loss is the mean of losses for the batch
-                loss.backward()
-                optimizer.step()
-
-                # adds average batch loss to running loss
-                running_loss += loss.item()
-
-            avg_loss = running_loss / len(trainloader)      # Running_loss / number of batches
-            print(f"Avg. loss: {avg_loss}")
-            losses = losses.append({'Epoch':int(epoch), 'Avg_loss':avg_loss}, ignore_index=True)
-
-
-
-            # TODO COMPUTE LOSS ON VALIDATION
-            # TODO ADD TIME PER EPOCH, CALCULATE EXPECTED REMAINING TIME
-
-        print('Finished Training')
-        print(losses)
-
-        # MAKES GRAPH
-        plt.plot(losses['Epoch'], losses['Avg_loss'])
-        plt.xlabel('Epoch')
-        plt.ylabel('Average Loss')
-        plt.title('Average Loss vs Epoch On Training')
-        plt.show()
-
-        plt.savefig('avgloss_epoch_curve.png')
-
-        torch.save(self.state_dict(), PATH)
-
-    # TEST
-    def evaluate_model(self, testloader):
-        correct = 0
-        total = 0
-        self.eval()
-        # since we're not training, we don't need to calculate the gradients for our outputs
-        with torch.no_grad():
-            for data in testloader:
-                images, labels = data[0].to(self.device), data[1].to(self.device)
-                # calculate outputs by running images through the network
-                outputs = self.forward(images)
-                # the class with the highest energy is what we choose as prediction
-                _, predicted = torch.max(outputs.data, 1)
-                total += labels.size(0)
-                correct += (predicted == labels).sum().item()
-
-        print(f'Accuracy of the network on {total} scans: {100 * correct // total}%')
-        self.train()
-
-
-    # PREDICT
-    def predict(self, loader):
-        self.eval()
-        with torch.no_grad():
-            for data in loader:
-                images, labels = data[0].to(self.device), data[1].to(self.device)
-                outputs = self.forward(images)
-                # the class with the highest energy is what we choose as prediction
-                _, predicted = torch.max(outputs.data, 1)
-        self.train()
-        return predicted
