123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138 |
- import glob
- import nibabel as nib
- import numpy as np
- import random
- import torch
- from torch.utils.data import Dataset
- import torchvision.transforms as transforms
- import re
def prepare_datasets(mri_dir, val_split=0.2, seed=50):
    """Split the MRI images found under mri_dir into train/val/test CustomDatasets.

    NOTE(review): the glob pattern is built by plain string concatenation, so
    this assumes mri_dir already ends with a path separator — confirm with callers.

    Returns (train_dataset, val_dataset, test_dataset).
    """
    rng = random.Random(seed)
    raw_data = glob.glob(mri_dir + "*")

    # Bucket paths by the diagnosis tag embedded in the filename; "NL" takes
    # precedence when a path happens to contain both substrings.
    NL_list = [path for path in raw_data if "NL" in path]
    AD_list = [path for path in raw_data if "NL" not in path and "AD" in path]

    print("--- DATA INFO ---")
    print(f"Amount of images: {len(raw_data)}")
    # TODO Check that image is in CSV?
    print(f"Total AD: {len(AD_list)}")
    print(f"Total NL: {len(NL_list)}")

    # Shuffle within each class before splitting so the split is random but
    # reproducible for a fixed seed; shuffle order is significant.
    rng.shuffle(AD_list)
    rng.shuffle(NL_list)
    train_list, val_list, test_list = get_train_val_test(AD_list, NL_list, val_split)
    rng.shuffle(train_list)
    rng.shuffle(val_list)
    rng.shuffle(test_list)

    print("DATA INITIALIZATION")
    print(f"Training size: {len(train_list)}")
    print(f"Validation size: {len(val_list)}")
    print(f"Test size: {len(test_list)}")

    # TODO Normalize data? Later add / Extract clinical data? Which data?
    return (CustomDataset(train_list),
            CustomDataset(val_list),
            CustomDataset(test_list))
- # TODO Normalize data? Later add / Extract clinical data? Which data?
def prepare_predict(mri_dir, IDs):
    """Build a CustomDataset of the images under mri_dir whose paths contain an ID.

    Each ID is expected to match exactly one image; a warning is printed (but
    processing continues) when zero or several images match.

    Labels: NL -> 0, AD -> 1, with NL taking precedence — consistent with
    prepare_datasets. (The original used two independent `if` tests, which
    appended a path containing both substrings twice with conflicting labels.)
    """
    raw_data = glob.glob(mri_dir + "*")
    image_list = []
    for ID in IDs:
        # re.escape: IDs are literal identifiers, not regex patterns, so
        # characters like '.' or '+' in an ID must not act as metacharacters.
        pattern = re.compile(re.escape(ID))
        matches = [item for item in raw_data if pattern.search(item)]
        if len(matches) != 1:
            print("No image found, or more than one")
        for match in matches:
            if "NL" in match:
                image_list.append((match, 0))
            elif "AD" in match:
                image_list.append((match, 1))
    return CustomDataset(image_list)
def get_train_val_test(AD_list, NL_list, val_split):
    """Split per-class path lists into labeled train/val/test lists.

    val_split is the test fraction of each class, and also the validation
    fraction of what remains after the test images are removed.
    Labels: AD -> 1, NL -> 0.

    Returns (train_list, val_list, test_list), each [(image_path, class_id), ...],
    with AD entries preceding NL entries in every list.
    """
    def _split(paths, class_id):
        # One class's split: val first, then test, then the remainder as train.
        n_test = int(len(paths) * val_split)
        n_val = int((len(paths) - n_test) * val_split)
        # BUG FIX: the original sliced the test set as paths[n_val:n_test],
        # which under-fills test whenever n_val < n_test and silently leaks
        # the missing images into train. Use contiguous non-overlapping ranges.
        val = [(p, class_id) for p in paths[:n_val]]
        test = [(p, class_id) for p in paths[n_val:n_val + n_test]]
        train = [(p, class_id) for p in paths[n_val + n_test:]]
        return train, val, test

    ad_train, ad_val, ad_test = _split(AD_list, 1)
    nl_train, nl_val, nl_test = _split(NL_list, 0)
    return ad_train + nl_train, ad_val + nl_val, ad_test + nl_test
class CustomDataset(Dataset):
    """Dataset over (mri_path, class_id) tuples; NIfTI volumes are loaded lazily
    in __getitem__, so construction is cheap."""

    def __init__(self, samples):
        # samples: list of (image_path, class_id) tuples.
        # (Parameter renamed from `list`, which shadowed the builtin.)
        self.data = samples

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return (mri_data, class_id) for index idx.

        mri_data is the voxel array read via nibabel with a leading channel
        axis of size 1, i.e. shape (1, *volume_shape).
        """
        mri_path, class_id = self.data[idx]
        mri = nib.load(mri_path)
        image = np.asarray(mri.dataobj)
        # Add a channel dimension so downstream conv layers get (C, ...) input.
        # (np.expand_dims already returns an ndarray; the original's extra
        # np.asarray wrapper was redundant.)
        mri_data = np.expand_dims(image, axis=0)
        # NOTE(review): class_id is returned as a plain int (0 or 1), not a
        # tensor — the DataLoader default collate will batch it into a tensor.
        return mri_data, class_id
|