123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132 |
- import torch
- import torchvision
- # FOR DATA
- from utils.preprocess import prepare_datasets
- from utils.show_image import show_image
- from torch.utils.data import DataLoader
- from torchvision import datasets
- from torch import nn
- import torch.nn.functional as F
- from torchvision.transforms import ToTensor
- # import nonechucks as nc # Used to load data in pytorch even when images are corrupted / unavailable (skips them)
- # FOR IMAGE VISUALIZATION
- import nibabel as nib
- # GENERAL PURPOSE
- import os
- import pandas as pd
- import numpy as np
- import matplotlib.pyplot as plt
- import glob
- from datetime import datetime
- # FOR TRAINING
- import torch.optim as optim
- import utils.models as models
- import utils.layers as ly
- from tqdm import tqdm
# --- Runtime configuration (module-level side effects) ---
# Pin all new torch tensors to the second GPU. Every torch.Generator created
# later (e.g. in the DataLoaders) must live on this same device.
cuda_device = torch.device('cuda:1')
torch.set_default_device(cuda_device)

print("--- RUNNING ---")
print("Pytorch Version: " + torch.__version__)

# Data & training properties
val_split = 0.2  # fraction held out for val and for test; the rest is train
runs = 1
epochs = 10
# One timestamp shared by all runs of this invocation. The original bound
# both `time_stamp` and `timestamp`; keep both names so any later reference
# to either keeps working.
time_stamp = timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
# One independent random seed per run (reproducible splits per run).
seeds = [np.random.randint(0, 1000) for _ in range(runs)]

# Paths
mri_datapath = '/data/data_wnx1/_Data/AlzheimersDL/CNN+RNN-2class-1cnn+data/PET_volumes_customtemplate_float32/'
local_path = '/export/home/nschense/alzheimers/Pytorch_CNN-RNN'
xls_path = local_path + '/LP_ADNIMERGE.csv'
# BUG FIX: the original concatenated without a separator, yielding
# '.../Pytorch_CNN-RNNsaved_models/'. Join with '/' exactly like xls_path.
saved_model_path = local_path + '/saved_models/'
- # TODO: Datasets include multiple labels, such as medical info
def evaluate_model(seed):
    """Train CNN_Net on one train/val/test split and report test accuracy.

    Reads module-level globals: mri_datapath, xls_path, val_split, epochs,
    cuda_device.

    Args:
        seed: integer random seed passed to prepare_datasets so the
            train/val/test split is reproducible for this run.
    """
    training_data, val_data, test_data = prepare_datasets(mri_datapath, xls_path, val_split, seed)

    batch_size = 64
    # The generators must be created on cuda_device because
    # torch.set_default_device(cuda_device) is in effect at module level.
    train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True,
                                  generator=torch.Generator(device=cuda_device))
    test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=True,
                                 generator=torch.Generator(device=cuda_device))
    val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=True,
                                generator=torch.Generator(device=cuda_device))

    # Each dataset item is a 3-tuple: (mri volume, xls feature vector, label).
    print("Shape of MRI Data: ", training_data[0][0].shape)
    print("Shape of XLS Data: ", training_data[0][1].shape)
    print("Length of Training Data: ", len(train_dataloader))

    print("--- INITIALIZING MODEL ---")
    # CNN_Net(1, 2, 0.5): presumably (in_channels, num_classes, dropout) —
    # TODO confirm against utils/models.py.
    model_CNN = models.CNN_Net(1, 2, 0.5).to(cuda_device)
    criterion = nn.BCELoss()  # expects probabilities and float targets
    optimizer = optim.Adam(model_CNN.parameters(), lr=0.001)
    print("Seed: ", seed)

    print("--- TRAINING MODEL ---")
    for epoch in range(epochs):
        running_loss = 0.0
        length = len(train_dataloader)
        for i, data in tqdm(enumerate(train_dataloader), total=length,
                            desc="Epoch " + str(epoch), unit="batch"):
            mri, xls, label = data
            optimizer.zero_grad()
            mri = mri.to(cuda_device).float()
            xls = xls.to(cuda_device).float()
            label = label.to(cuda_device).float()
            # The model consumes the (mri, xls) pair as a single tuple input.
            outputs = model_CNN((mri, xls))
            loss = criterion(outputs, label)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if i % 1000 == 999:
                # Report average loss over the last 1000 batches, then reset.
                print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 1000))
                running_loss = 0.0

    # --- Evaluation on the held-out test set ---
    # BUG FIX: the original unpacked 2-tuples (images, labels) and called
    # model_CNN(images), but every batch is a 3-tuple (mri, xls, label) and
    # the model takes the (mri, xls) pair — mirrored from the training loop.
    # Also switch to eval mode so the 0.5 dropout is disabled during testing.
    correct = 0
    total = 0
    model_CNN.eval()
    with torch.no_grad():
        for mri, xls, label in test_dataloader:
            mri = mri.to(cuda_device).float()
            xls = xls.to(cuda_device).float()
            label = label.to(cuda_device)
            outputs = model_CNN((mri, xls))
            _, predicted = torch.max(outputs.data, 1)
            # Targets look one-hot (BCELoss over 2 outputs); reduce 2-D
            # targets to class indices — TODO confirm label encoding.
            truth = label.argmax(dim=1) if label.dim() > 1 else label
            total += truth.size(0)
            correct += (predicted == truth).sum().item()
    print("Model Accuracy: ", 100 * correct / total)
-
# Run one full train/evaluate cycle for each sampled seed.
for run_seed in seeds:
    evaluate_model(run_seed)
print("--- END ---")
|