@@ -109,29 +109,42 @@ def test_model(model, test_loader, cuda_device=torch.device('cuda:0')):
     predictions = []
     actual = []
+
+    max_preds = []
+    max_actuals = []
 
     with torch.no_grad():
         length = len(test_loader)
         for i, data in tqdm(enumerate(test_loader, 0), total=length, desc="Testing", unit="batch"):
-            mri, xls, label = data
+            mri, xls, labels = data
 
             mri = mri.to(cuda_device).float()
             xls = xls.to(cuda_device).float()
-            label = label.to(cuda_device).float()
+            labels = labels.to(cuda_device).float()
 
             outputs = model((mri, xls))
 
-            _, predicted = torch.max(outputs.data, 1)
-            _, labels = torch.max(label.data, 1)
+            _, m_predicted = torch.max(outputs.data, 1)
+            _, m_labels = torch.max(labels.data, 1)
 
-            incorrect += (predicted != labels).sum().item()
-            correct += (predicted == labels).sum().item()
+            incorrect += (m_predicted != m_labels).sum().item()
+            correct += (m_predicted == m_labels).sum().item()
+
+            #We just want the positive class, since there are only 2 classes and we use softmax
+            pos_outputs = outputs[:, 1]
+            pos_labels = labels[:, 1]
 
-            predictions.extend(predicted.tolist())
-            actual.extend(labels.tolist())
+            predictions.extend(pos_outputs.tolist())
+            actual.extend(pos_labels.tolist())
 
-    return predictions, actual, correct, incorrect
+            _, max_pred = torch.max(outputs.data, 1)
+            _, max_actual = torch.max(labels.data, 1)
+
+            max_preds.extend(max_pred.tolist())
+            max_actuals.extend(max_actual.tolist())
+
+    return predictions, actual, correct, incorrect, max_preds, max_actuals
 
 
 def initalize_dataloaders(mri_path, xls_path, val_split, seed, cuda_device=torch.device('cuda:0'), batch_size=64):
     training_data, val_data, test_data = prepare_datasets(mri_path, xls_path, val_split, seed)
 
@@ -140,7 +153,7 @@ def initalize_dataloaders(mri_path, xls_path, val_split, seed, cuda_device=torch
     test_dataloader = DataLoader(test_data, batch_size=(batch_size // 4), shuffle=True, generator=torch.Generator(device=cuda_device))
     val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=True, generator=torch.Generator(device=cuda_device))
 
-    return train_dataloader, val_dataloader, test_dataloader
+    return train_dataloader, val_dataloader, test_dataloader, test_data
 
 
 def plot_results(train_acc, train_loss, val_acc, val_loss, model_name, timestamp, plot_path):
@@ -188,10 +201,49 @@ def plot_roc_curve(predicted, actual, model_name, timestamp, plot_path):
     np.array(actual, dtype=np.float64)
 
     fpr, tpr, _ = roc_curve(actual, predicted)
-    print(fpr, tpr)
     auc = roc_auc_score(actual, predicted)
 
     plt.figure()
     RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=auc).plot()
     plt.savefig(plot_path + model_name + "_t-" + timestamp + "_roc_curve.png")
     plt.close()
+
+
+def plot_image_selection(model, test_set, model_name, timestamp, plot_path, cuda_device=torch.device('cuda:0')):
+    #Plot a bevy of random images from the test set and their predictions for the positive class
+    if not os.path.exists(plot_path):
+        os.makedirs(plot_path)
+
+    #Get random images
+    images = []
+
+    for i in range(8):
+        images.append(test_set[np.random.randint(0, len(test_set))])
+
+    #Now that we have our images, create a subplot for each image
+    plt.figure()
+    fig, axs = plt.subplots(2, 4)
+
+    for i, image in enumerate(images):
+        mri, xls, label = image
+
+
+        mri = mri.to(cuda_device).float()
+        xls = xls.to(cuda_device).float()
+        label = label[1]
+
+        mri = mri.unsqueeze(0)
+        xls = xls.unsqueeze(0)
+
+        output = model((mri, xls))
+
+        prediction = output[:, 1]
+
+        sliced_image = torch.permute(torch.select(torch.squeeze(mri, 0), 3, 80), (1, 2, 0)).cpu().numpy()
+        axs[i // 4, i % 4].imshow(sliced_image, cmap="gray")
+        axs[i // 4, i % 4].set_title("Pr: " + str(round(prediction.item(), 3)) + ", \nAc: " + str(label.item()))
+
+    plt.savefig(plot_path + model_name + "_t-" + timestamp + "_image_selection.png")
+    plt.close()
+
+
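
Not part of the patch: a minimal sketch of how the widened return signatures above might be wired together by a caller, assuming model, model_name, timestamp, plot_path, mri_path, and xls_path are already in scope; the val_split and seed values are illustrative placeholders only.

    # Hypothetical driver code; argument values are placeholders, not taken from the repo.
    train_loader, val_loader, test_loader, test_set = initalize_dataloaders(
        mri_path, xls_path, val_split=0.2, seed=42)

    preds, actual, correct, incorrect, max_preds, max_actuals = test_model(model, test_loader)

    # Soft positive-class scores feed the ROC curve...
    plot_roc_curve(preds, actual, model_name, timestamp, plot_path)

    # ...while the argmax counts give a plain accuracy figure.
    print("accuracy:", correct / (correct + incorrect))

    # The extra test_data return value is what plot_image_selection consumes.
    plot_image_selection(model, test_set, model_name, timestamp, plot_path)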