from torch import device, cuda
import torch
import torch.nn as nn
import torch.optim as optim
import utils.newCNN_Layers as CustomLayers


class CNN_Net(nn.Module):
    def __init__(self, dataloader, prps, final_layer_size=5):
        super(CNN_Net, self).__init__()
        self.final_layer_size = final_layer_size
        self.device = device('cuda:0' if cuda.is_available() else 'cpu')
        print(f"CNN initialized. Using: {self.device}")

        # GETS FIRST IMAGE FOR SIZE
        first_batch = next(iter(dataloader))
        image = first_batch[0][0]
        print(f"CNN model initialization. Input size: {image.size()}")

        # LAYERS
        self.conv1 = CustomLayers.Conv_elu_maxpool_drop(1, 192, (11, 13, 11), stride=(4, 4, 4), pool=True, prps=prps)
        self.conv2 = CustomLayers.Conv_elu_maxpool_drop(192, 384, (5, 6, 5), stride=(1, 1, 1), pool=True, prps=prps)
        self.conv3_mid_flow = CustomLayers.Mid_flow(384, 384, prps=prps)
        self.conv4_sepConv = CustomLayers.Conv_elu_maxpool_drop(384, 96, (3, 4, 3), stride=(1, 1, 1), pool=True,
                                                                prps=prps, sep_conv=True)
        self.conv5_sepConv = CustomLayers.Conv_elu_maxpool_drop(96, 48, (3, 4, 3), stride=(1, 1, 1), pool=True,
                                                                prps=prps, sep_conv=True)
        self.fc1 = CustomLayers.Fc_elu_drop(113568, 20, prps=prps)  # TODO: concatenate clinical data after this
        self.fc2 = CustomLayers.Fc_elu_drop(20, final_layer_size, prps=prps)
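
        # NOTE: fc1's in_features (113568) is hard-coded for one specific input
        # volume size. If the scan dimensions change, this value must be
        # recomputed, e.g. by passing a dummy tensor through the conv stack
        # once and reading torch.flatten(out, 1).size(1).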

    # FORWARD
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3_mid_flow(x)
        x = self.conv4_sepConv(x)
        x = self.conv5_sepConv(x)
        # Flatten everything except the batch dimension before the FC layers.
        x = torch.flatten(x, start_dim=1)
        x = self.fc1(x)
        x = self.fc2(x)
        return x

    # TRAIN
    def train_model(self, trainloader, PATH, epochs):
        self.train()
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(self.parameters(), lr=1e-5)

        for epoch in range(epochs):  # loop over the dataset multiple times
            print(f"Training... epoch {epoch + 1}/{epochs}")
            running_loss = 0.0
            for i, data in enumerate(trainloader, 0):
                # get the inputs; data is a list of [inputs, labels]
                inputs, labels = data[0].to(self.device), data[1].to(self.device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward + backward + optimize
                outputs = self(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                # print statistics
                running_loss += loss.item()
                if i % 2000 == 1999:  # print every 2000 mini-batches
                    print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}')
                    running_loss = 0.0

        print('Finished Training')
        torch.save(self.state_dict(), PATH)

    # TEST
    def evaluate_model(self, testloader):
        correct = 0
        total = 0
        self.eval()
        # since we're not training, we don't need to calculate gradients
        with torch.no_grad():
            for data in testloader:
                images, labels = data[0].to(self.device), data[1].to(self.device)
                # calculate outputs by running images through the network
                outputs = self(images)
                # the class with the highest score is what we choose as prediction
                _, predicted = torch.max(outputs, 1)
                total += labels.size(0)
                print(f"Predicted class vals: {predicted}")
                correct += (predicted == labels).sum().item()
        print(f'Accuracy of the network on {total} scans: {100 * correct // total}%')
        self.train()

    # PREDICT
    def predict(self, loader):
        self.eval()
        predictions = []
        with torch.no_grad():
            for data in loader:
                images = data[0].to(self.device)
                outputs = self(images)
                # the class with the highest score is what we choose as prediction
                _, predicted = torch.max(outputs, 1)
                predictions.append(predicted)
        self.train()
        # return predictions for every batch, not just the last one
        return torch.cat(predictions)
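

# Usage sketch (illustrative, not part of the original module). It assumes
# `prps` is the hyperparameter dict that utils.newCNN_Layers expects, and that
# the DataLoaders yield (volume, label) batches of single-channel 3D scans
# whose post-conv flattened size matches fc1's 113568 input features.
# `train_loader` and `test_loader` are placeholder names.
#
#     model = CNN_Net(train_loader, prps, final_layer_size=5)
#     model.to(model.device)
#     model.train_model(train_loader, PATH='cnn_net.pth', epochs=20)
#     model.evaluate_model(test_loader)
#     predictions = model.predict(test_loader)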