import torch
import talos

# FOR DATA
from utils.preprocess import prepare_datasets
from utils.train_methods import train, load, evaluate, predict
from utils.CNN import CNN_Net
from torch.utils.data import DataLoader
from torchvision import datasets
from sklearn.model_selection import KFold

# GENERAL PURPOSE
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import platform
import time

current_time = time.localtime()
print(time.strftime("%Y-%m-%d_%H:%M", current_time))
print("--- RUNNING ---")
print("PyTorch Version: " + torch.__version__)
print("Python Version: " + platform.python_version())

# LOADING DATA
val_split = 0.2  # fraction used for each of val and test; the rest is train
seed = 12        # TODO: randomize seed (see the set_seed sketch at the end of this file)

# Hyperparameter search space: tuple entries are ranges, list entries are
# discrete choices; concrete values are drawn from this dict during a sweep.
params = {
    "batch_size": (15, 40, 5),
    "padding": 0,
    "dilation": 1,
    "groups": 1,
    "bias": True,
    "padding_mode": "zeros",
    "drop_rate": [0, 0.1, 0.2],
    "epochs": (10, 30, 5),
    "lr": [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6],
    "momentum": [0.99, 0.97, 0.95, 0.9],
    "weight_decay": [1e-3, 1e-4, 1e-5, 0],
    # "optimizer": 'adam',
}

model_filepath = '/data/data_wnx1/rschuurs/Pytorch_CNN-RNN'
CNN_filepath = '/data/data_wnx1/rschuurs/Pytorch_CNN-RNN/cnn_net.pth'  # cnn_net.pth

# small dataset
# mri_datapath = '/data/data_wnx1/rschuurs/Pytorch_CNN-RNN/PET_volumes_customtemplate_float32/'  # small test
# big dataset
mri_datapath = '/data/data_wnx1/_Data/AlzheimersDL/CNN+RNN-2class-1cnn+data/PET_volumes_customtemplate_float32/'  # real data
annotations_datapath = './data/data_wnx1/rschuurs/Pytorch_CNN-RNN/LP_ADNIMERGE.csv'

# annotations_file = pd.read_csv(annotations_datapath)  # DataFrame
# show_image(17508)

# TODO: datasets include multiple labels, such as medical info
training_data, val_data, test_data = prepare_datasets(mri_datapath, val_split, seed)
training_data_list = list(training_data)
val_data_list = list(val_data)
test_data_list = list(test_data)

# Create data loaders. params['batch_size'] is a search range, not a single
# value, so pick one concrete batch size here; the sweep sketch further down
# is what iterates over the range.
batch_size = params["batch_size"][0]
train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True, drop_last=True)
val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=True)    # used during training
test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=True)  # used at the end for graphs

# Loads a few images as a sanity check. Disabled: with x = 0 the loop body
# never runs; raise the bound (e.g. "while x < 3") to preview a few slices.
x = 0
while x < 0:
    train_features, train_labels = next(iter(train_dataloader))
    # print(f"Feature batch shape: {train_features.size()}")
    img = train_features[0].squeeze()
    print(f"Feature shape: {img.size()}")
    image = img[:, :, 40]  # axial slice through the middle of the 3D volume
    print(f"Slice shape: {image.size()}")
    label = train_labels[0]
    print(f"Label: {label}")
    plt.imshow(image, cmap="gray")
    plt.savefig(f"./Image{x}_IS:{label}.png")
    plt.show()
    x = x + 1

# epochs = 20
roc = True

CNN = CNN_Net(prps=params, final_layer_size=2)
CNN.cuda()

# scan_object = talos.Scan(  # unfinished sweep; see the grid-search sketch after the EXTRA block
train(CNN, train_dataloader, val_dataloader, CNN_filepath, params=params, graphs=True)
# load(CNN, CNN_filepath)
evaluate(CNN, test_dataloader)
predict(CNN, test_dataloader)

# EXTRA
#
# PREDICT MODE TO TEST INDIVIDUAL IMAGES
# if(predict):
#     on = True
#     print("---- Predict mode ----")
#     print("Integer for image")
#     print("x or X for exit")
#
#     while(on):
#         inp = input("Next image: ")
#         if(inp == None or inp.lower() == 'x' or not inp.isdigit()): on = False
#         else:
#             dataloader = DataLoader(prepare_predict(mri_datapath, [inp]), batch_size=batch_size, shuffle=True)
#             prediction = CNN.predict(dataloader)
#
#             features, labels = next(iter(dataloader))
#             img = features[0].squeeze()
#             image = img[:, :, 40]
#             print(f"Expected class: {labels}")
#             print(f"Prediction: {prediction}")
#             plt.imshow(image, cmap="gray")
#             plt.show()

print("--- END ---")
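# ---------------------------------------------------------------------------
# OPTIONAL: manual hyperparameter sweep. A minimal sketch, since the
# talos.Scan call above was never finished. Assumptions not confirmed by this
# repo: tuple entries in `params` mean (min, max, n_steps) as in talos, list
# entries are discrete choices, train()/evaluate() accept the arguments used
# above, and evaluate() returns a scalar validation score. The full grid is
# huge, so in practice you would sample a few combinations rather than
# exhausting it.
import itertools

def expand_space(entry):
    """Turn one `params` entry into a list of concrete candidate values."""
    if isinstance(entry, tuple) and len(entry) == 3:  # assumed (min, max, n_steps)
        lo, hi, n_steps = entry
        return [type(lo)(v) for v in np.linspace(lo, hi, int(n_steps))]
    if isinstance(entry, list):                       # discrete choices
        return entry
    return [entry]                                    # fixed value

def grid_search(search_space, training_data, val_data):
    """Train one model per combination; return (best_score, best_params)."""
    keys = list(search_space)
    best_score, best_params = float("-inf"), None
    for combo in itertools.product(*(expand_space(search_space[k]) for k in keys)):
        candidate = dict(zip(keys, combo))
        bs = int(candidate["batch_size"])
        loader_train = DataLoader(training_data, batch_size=bs, shuffle=True, drop_last=True)
        loader_val = DataLoader(val_data, batch_size=bs, shuffle=True)
        net = CNN_Net(prps=candidate, final_layer_size=2).cuda()
        train(net, loader_train, loader_val, CNN_filepath, params=candidate, graphs=False)
        score = evaluate(net, loader_val)             # assumed to return a scalar
        if score is not None and score > best_score:
            best_score, best_params = score, candidate
    return best_score, best_params
# ---------------------------------------------------------------------------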
{prediction}") # plt.imshow(image, cmap="gray") # plt.show() # # print("--- END ---") # params = { # "target_rows": 91, # "target_cols": 109, # "depth": 91, # "axis": 1, # "num_clinical": 2, # "CNN_drop_rate": 0.3, # "RNN_drop_rate": 0.1, # # "CNN_w_regularizer": regularizers.l2(2e-2), # # "RNN_w_regularizer": regularizers.l2(1e-6), # "CNN_batch_size": 10, # "RNN_batch_size": 5, # "val_split": 0.2, # "final_layer_size": 5 # } ''' params_dict = { 'CNN_w_regularizer': CNN_w_regularizer, 'RNN_w_regularizer': RNN_w_regularizer, 'CNN_batch_size': CNN_batch_size, 'RNN_batch_size': RNN_batch_size, 'CNN_drop_rate': CNN_drop_rate, 'epochs': 30, 'gpu': "/gpu:0", 'model_filepath': model_filepath, 'image_shape': (target_rows, target_cols, depth, axis), 'num_clinical': num_clinical, 'final_layer_size': final_layer_size, 'optimizer': optimizer, 'RNN_drop_rate': RNN_drop_rate,} params = Parameters(params_dict) # WHAT WAS THIS AGAIN? seeds = [np.random.randint(1, 5000) for _ in range(1)] # READ THIS TO UNDERSTAND TRAIN VS VALIDATION DATA def evaluate_net (seed): n_classes = 2 data_loader = DataLoader((target_rows, target_cols, depth, axis), seed = seed) train_data, val_data, test_data,rnn_HdataT1,rnn_HdataT2,rnn_HdataT3,rnn_AdataT1,rnn_AdataT2,rnn_AdataT3, test_mri_nonorm = data_loader.get_train_val_test(val_split, mri_datapath) print('Length Val Data[0]: ',len(val_data[0])) '''