Continuing work on model in preparation for ensembles

Nicholas Schense · 5 months ago · parent commit 7a0ea3c2dc

+ 3 - 1
config.toml

@@ -14,7 +14,9 @@ max_epochs = 30
 validation_split = 0.3
 
 [model]
-name = 'alzheimers+cnn'
+name = 'cnn-ensemble1'
+image_channels = 1
+clin_data_channels = 2
 
 [hyperparameters]
 batch_size = 64
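
The two new [model] keys make the input split explicit: one image channel for the MRI volume and two clinical-data channels. A minimal sketch of how this section is consumed, assuming a config.toml in the working directory; only the [model] keys come from this commit:

import tomli as toml

# Load the TOML config the same way train_cnn.py does
with open('config.toml', 'rb') as f:
    config = toml.load(f)

image_channels = config['model']['image_channels']          # 1 MRI channel
clin_data_channels = config['model']['clin_data_channels']  # 2 clinical features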

+ 24 - 14
train_cnn.py

@@ -2,12 +2,9 @@
 import torch
 import torch.nn as nn
 import torch.optim as optim
-import torchvision
 
 #GENERAL USE
-import numpy as np
-import pandas as pd
-from datetime import datetime
+import random as rand
 
 #SYSTEM
 import tomli as toml
@@ -18,6 +15,8 @@ from sklearn.model_selection import train_test_split
 
 #CUSTOM MODULES
 import utils.models.cnn as cnn
+from utils.data.datasets import prepare_datasets, initalize_dataloaders
+import utils.training as train
 
 #CONFIGURATION
 if os.getenv('ADL_CONFIG_PATH') is None:
@@ -27,14 +26,25 @@ else:
     with open(os.getenv('ADL_CONFIG_PATH'), 'rb') as f:
         config = toml.load(f)
 
-
-#Set up the model
-model = cnn.CNN(config)
-criterion = nn.BCELoss()
-optimizer = optim.Adam(model.parameters(), lr = config['training']['learning_rate'])
-
-#Load datasets
-
-
-
+for i in range(config['training']['runs']):
+    #Set up the model
+    model = cnn.CNN(config['model']['image_channels'], config['model']['clin_data_channels'], config['hyperparameters']['droprate']).float()
+    criterion = nn.BCELoss()
+    optimizer = optim.Adam(model.parameters(), lr=config['hyperparameters']['learning_rate'])
+
+    #Generate a fresh seed for each run so ensemble members differ
+    seed = rand.randint(0, 1000)
+
+    #Prepare data
+    train_dataset, val_dataset, test_dataset = prepare_datasets(config['paths']['mri_data'], config['paths']['xls_data'], config['dataset']['validation_split'], seed)
+    #Pass batch_size by keyword so it is not swallowed by the cuda_device parameter
+    train_dataloader, val_dataloader, test_dataloader = initalize_dataloaders(train_dataset, val_dataset, test_dataset, batch_size=config['hyperparameters']['batch_size'])
+
+    #Train the model
+    history = train.train_model(model, train_dataloader, val_dataloader, criterion, optimizer, config)
+
+    #Save each trained run under the configured model name
+    if not os.path.exists(config['paths']['model_output'] + "/" + str(config['model']['name'])):
+        os.makedirs(config['paths']['model_output'] + "/" + str(config['model']['name']))
+
+    torch.save(model, config['paths']['model_output'] + "/" + str(config['model']['name']) + "/" + str(i) + "_" + "s-" + str(seed) + ".pt")
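
This loop trains config['training']['runs'] independent models, each with its own seed, and saves every run under the model's output directory, which sets up the ensemble the commit message points toward. A sketch of how the saved members could later be combined by averaging predictions; nothing below is part of this commit, and the call signature member(mri, xls) is my assumption about the model's forward, matching how the dataset yields (mri_tensor, xls_tensor):

import glob
import torch

# Collect every saved run for the configured model name
model_dir = config['paths']['model_output'] + "/" + str(config['model']['name'])
members = [torch.load(path) for path in glob.glob(model_dir + "/*.pt")]
for member in members:
    member.eval()  # disable dropout for inference

def ensemble_predict(members, mri, xls):
    #Average the members' outputs to form the ensemble prediction
    with torch.no_grad():
        preds = [member(mri, xls) for member in members]
    return torch.stack(preds).mean(dim=0)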
 
 

BIN
utils/__pycache__/training.cpython-38.pyc


BIN
utils/data/__pycache__/datasets.cpython-38.pyc


+ 4 - 3
utils/data/datasets.py

@@ -120,12 +120,13 @@ class ADNIDataset(Dataset):
         class_id = torch.tensor([class_id])
         #Convert to one-hot and squeeze
         class_id = torch.nn.functional.one_hot(class_id, num_classes=2).squeeze(0)
+
 
         return (mri_tensor, xls_tensor), class_id
 
 
 def initalize_dataloaders(training_data, val_data, test_data, cuda_device=torch.device('cuda:0'), batch_size=64):
-    train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True, generator=torch.Generator(device=cuda_device))
-    test_dataloader = DataLoader(test_data, batch_size=(batch_size // 4), shuffle=True, generator=torch.Generator(device=cuda_device))
-    val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=True, generator=torch.Generator(device=cuda_device))
+    train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
+    test_dataloader = DataLoader(test_data, batch_size=(batch_size // 4), shuffle=True)
+    val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=True)
     return train_dataloader, val_dataloader, test_dataloader
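
Dropping the CUDA-device generator means shuffle order now comes from PyTorch's default RNG; DataLoader's sampler draws indices on the CPU, which is likely why the device-bound generator had to go. If reproducible shuffling per run is wanted, the per-run seed from train_cnn.py could be threaded through as a CPU generator. A sketch only; the extra seed parameter is my assumption, not part of this commit:

import torch
from torch.utils.data import DataLoader

def initalize_dataloaders(training_data, val_data, test_data, batch_size=64, seed=None):
    #A CPU generator seeded per run keeps shuffle order reproducible
    generator = torch.Generator()
    if seed is not None:
        generator.manual_seed(seed)
    train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True, generator=generator)
    val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=True, generator=generator)
    test_dataloader = DataLoader(test_data, batch_size=(batch_size // 4), shuffle=True, generator=generator)
    return train_dataloader, val_dataloader, test_dataloader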

BIN
utils/models/__pycache__/cnn.cpython-38.pyc


BIN
utils/models/__pycache__/layers.cpython-38.pyc


+ 1 - 1
utils/models/cnn.py

@@ -3,7 +3,7 @@ from torchvision.transforms import ToTensor
 import os
 import pandas as pd
 import numpy as np
-import layers as ly
+import utils.models.layers as ly
 
 import torch
 import torchvision
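
The absolute import fixes the ModuleNotFoundError that `import layers as ly` raises once cnn.py is imported as utils.models.cnn rather than run as a script from its own directory. A package-relative spelling would work equally well, since layers.py lives in the same utils.models package:

# Equivalent relative import within the utils.models package
from . import layers as ly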

+ 1 - 1
utils/training.py

@@ -60,7 +60,7 @@ def train_model(model, train_loader, val_loader, criterion, optimizer, config):
     history = pd.DataFrame(columns = ["Epoch", "Train Loss", "Val Loss", "Train Acc","Val Acc"]).set_index("Epoch")
 
 
-    for epoch in range(config["training"]["epochs"]):
+    for epoch in range(config["training"]["max_epochs"]):
         train_loss, val_loss = train_epoch(model, train_loader, val_loader, criterion, optimizer)
         if config["operation"]["silent"] is False: print(f"Epoch {epoch + 1} - Train Loss: {train_loss} - Val Loss: {val_loss}")
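
The rename aligns train_model with the max_epochs key that config.toml actually defines; the old epochs lookup would raise a KeyError at the first epoch. A small guard, purely a sketch and not part of this commit, that makes such config/code drift fail with a clearer message:

def require(config, section, key):
    #Fail fast with a readable message when config.toml and code drift apart
    try:
        return config[section][key]
    except KeyError:
        raise KeyError(f"config.toml is missing [{section}] {key}") from None

max_epochs = require(config, "training", "max_epochs")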