datasets.py

# NEEDS TO BE FINISHED
# TODO CHECK ABOUT IMAGE DIMENSIONS
# TODO ENSURE ITERATION WORKS
import glob
import random

import nibabel as nib
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
'''
Prepares CustomDatasets for training, validating, and testing CNN
'''
def prepare_datasets(mri_dir, xls_file, val_split=0.2, seed=50):
    rndm = random.Random(seed)
    xls_data = pd.read_csv(xls_file).set_index('Image Data ID')
    raw_data = glob.glob(mri_dir + "*")
    AD_list = []
    NL_list = []

    # TODO Check that image is in CSV?
    for image in raw_data:
        if "NL" in image:
            NL_list.append(image)
        elif "AD" in image:
            AD_list.append(image)

    rndm.shuffle(AD_list)
    rndm.shuffle(NL_list)

    train_list, val_list, test_list = get_train_val_test(AD_list, NL_list, val_split)

    rndm.shuffle(train_list)
    rndm.shuffle(val_list)
    rndm.shuffle(test_list)

    train_dataset = ADNIDataset(train_list, xls_data)
    val_dataset = ADNIDataset(val_list, xls_data)
    test_dataset = ADNIDataset(test_list, xls_data)

    return train_dataset, val_dataset, test_dataset

# TODO Normalize data? Later add / Extract clinical data? Which data?
'''
Returns train_list, val_list and test_list in format [(image, id), ...] each
'''
def get_train_val_test(AD_list, NL_list, val_split):
    train_list, val_list, test_list = [], [], []

    # Number of scans reserved for testing, then for validation out of the remainder
    num_test_ad = int(len(AD_list) * val_split)
    num_test_nl = int(len(NL_list) * val_split)

    num_val_ad = int((len(AD_list) - num_test_ad) * val_split)
    num_val_nl = int((len(NL_list) - num_test_nl) * val_split)

    # Sets up ADs (class_id 1): the first num_val_ad scans go to validation,
    # the next num_test_ad to testing, and the rest to training
    for image in AD_list[0:num_val_ad]:
        val_list.append((image, 1))
    for image in AD_list[num_val_ad:num_val_ad + num_test_ad]:
        test_list.append((image, 1))
    for image in AD_list[num_val_ad + num_test_ad:]:
        train_list.append((image, 1))

    # Sets up NLs (class_id 0) with the same scheme
    for image in NL_list[0:num_val_nl]:
        val_list.append((image, 0))
    for image in NL_list[num_val_nl:num_val_nl + num_test_nl]:
        test_list.append((image, 0))
    for image in NL_list[num_val_nl + num_test_nl:]:
        train_list.append((image, 0))

    return train_list, val_list, test_list
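
# Example (illustrative numbers only): with 100 AD scans and val_split=0.2,
# num_test_ad = int(100 * 0.2) = 20 and num_val_ad = int((100 - 20) * 0.2) = 16,
# so the AD scans split into 16 validation, 20 test, and 64 training entries.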
class ADNIDataset(Dataset):
    def __init__(self, mri, xls: pd.DataFrame):
        self.mri_data = mri  # DATA IS A LIST WITH TUPLES (image_dir, class_id)
        self.xls_data = xls

    def __len__(self):
        return len(self.mri_data)

    def _xls_to_tensor(self, xls_data: pd.Series):
        # Get used data
        #data = xls_data.loc[['Sex', 'Age (current)', 'PTID', 'DXCONFID (1=uncertain, 2= mild, 3= moderate, 4=high confidence)', 'Alz_csf']]
        data = xls_data.loc[['Sex', 'Age (current)']]
        data = data.replace({'M': 0, 'F': 1})

        # Convert to tensor
        xls_tensor = torch.tensor(data.values.astype(float))
        return xls_tensor
    def __getitem__(self, idx):  # RETURNS TUPLE WITH IMAGE AND CLASS_ID, BASED ON INDEX IDX
        mri_path, class_id = self.mri_data[idx]
        mri = nib.load(mri_path)
        mri_data = mri.get_fdata()

        # Clinical row is selected by position, so this assumes xls_data rows are
        # in the same order as mri_data (TODO: look the row up by image ID instead)
        xls = self.xls_data.iloc[idx]

        # Convert xls data to tensor
        xls_tensor = self._xls_to_tensor(xls)
        mri_tensor = torch.from_numpy(mri_data).unsqueeze(0)

        class_id = torch.tensor([class_id])
        # Convert to one-hot and squeeze
        class_id = torch.nn.functional.one_hot(class_id, num_classes=2).squeeze(0)

        # Convert to float
        mri_tensor = mri_tensor.float()
        xls_tensor = xls_tensor.float()
        class_id = class_id.float()

        return (mri_tensor, xls_tensor), class_id
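
# A single sample is ((mri_tensor, xls_tensor), class_id). Assuming each NIfTI
# file holds a 3D volume of shape (D, H, W), mri_tensor is float32 of shape
# (1, D, H, W), xls_tensor is float32 of shape (2,) for [Sex, Age (current)],
# and class_id is a one-hot float32 tensor of shape (2,).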

def initalize_dataloaders(training_data, val_data, test_data, cuda_device=torch.device('cuda:0'), batch_size=64):
    train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
    test_dataloader = DataLoader(test_data, batch_size=(batch_size // 4), shuffle=True)
    val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=True)
    return train_dataloader, val_dataloader, test_dataloader
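
# Minimal usage sketch of how the pieces above fit together. The directory and
# CSV paths are placeholders, and batching assumes all MRI volumes share the
# same dimensions (see the TODO at the top of the file).
if __name__ == '__main__':
    train_set, val_set, test_set = prepare_datasets(
        mri_dir='/path/to/mri_scans/',          # placeholder MRI directory
        xls_file='/path/to/clinical_data.csv',  # placeholder clinical CSV
        val_split=0.2,
        seed=50,
    )
    train_dl, val_dl, test_dl = initalize_dataloaders(train_set, val_set, test_set, batch_size=64)

    # Each batch mirrors ADNIDataset.__getitem__: ((mri, xls), one-hot labels)
    for (mri_batch, xls_batch), labels in train_dl:
        print(mri_batch.shape, xls_batch.shape, labels.shape)
        break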