@@ -33,8 +33,8 @@ def train_model(model, seed, timestamp, epochs, train_loader, val_loader, saved_
         train_corr = 0
 
         #Training
-        length = len(train_loader)
-        for _, data in tqdm(enumerate(train_loader, 0), total=length, desc="Epoch " + str(epoch), unit="batch"):
+        train_length = len(train_loader)
+        for _, data in tqdm(enumerate(train_loader, 0), total=train_length, desc="Epoch " + str(epoch), unit="batch"):
             mri, xls, label = data
 
             optimizer.zero_grad()
@@ -58,7 +58,7 @@ def train_model(model, seed, timestamp, epochs, train_loader, val_loader, saved_
             train_corr += (predicted == labels).sum().item()
             train_incc += (predicted != labels).sum().item()
 
-        train_losses.append(train_loss)
+        train_losses.append(train_loss / train_length)
         train_accs.append(train_corr / (train_corr + train_incc))
 
 
@@ -68,6 +68,7 @@ def train_model(model, seed, timestamp, epochs, train_loader, val_loader, saved_
         val_incc = 0
         val_corr = 0
 
+        val_length = len(val_loader)
         for _, data in enumerate(val_loader, 0):
             mri, xls, label = data
 
@@ -86,7 +87,7 @@ def train_model(model, seed, timestamp, epochs, train_loader, val_loader, saved_
             val_corr += (predicted == labels).sum().item()
             val_incc += (predicted != labels).sum().item()
 
-        val_losses.append(val_loss)
+        val_losses.append(val_loss / val_length)
         val_accs.append(val_corr / (val_corr + val_incc))
 
         epoch_number += 1
@@ -111,7 +112,7 @@ def test_model(model, test_loader, cuda_device=torch.device('cuda:0')):
    print("--- TESTING MODEL ---")
    #Test model
    correct = 0
-    total = 0
+    incorrect = 0
 
    with torch.no_grad():
        length = len(test_loader)
@@ -127,10 +128,10 @@ def test_model(model, test_loader, cuda_device=torch.device('cuda:0')):
            _, predicted = torch.max(outputs.data, 1)
            _, labels = torch.max(label.data, 1)
 
-            total += labels.size(0)
+            incorrect += (predicted != labels).sum().item()
            correct += (predicted == labels).sum().item()
 
-    print("Model Accuracy: ", 100 * correct / total)
+    print("Model Accuracy: ", 100 * correct / (correct + incorrect))
 
def initalize_dataloaders(mri_path, xls_path, val_split, seed, cuda_device=torch.device('cuda:0')):
    training_data, val_data, test_data = prepare_datasets(mri_path, xls_path, val_split, seed)
@@ -145,6 +146,8 @@ def initalize_dataloaders(mri_path, xls_path, val_split, seed, cuda_device=torch
 
def plot_results(train_acc, train_loss, val_acc, val_loss, model_name, timestamp, plot_path):
    #Create 2 plots, one for accuracy and one for loss
+    if not os.path.exists(plot_path):
+        os.makedirs(plot_path)
 
    #Accuracy Plot
    plt.figure()