initial commit to git0 psuf

Zan Klanecek committed 2 years ago
commit fd17827f49

+ 22 - 0
5-naloga-cnn-klasifikacija-covid-slik/README.md

@@ -0,0 +1,22 @@
+# Instructions
+
+## I.) Training and testing the models
+### 1. datareader.py
+For reading/opening the images - do not modify.
+### 2. model.py
+Architecture of the convolutional network - modify only the ```__init__``` and ```forward``` functions.
+### 3. train.py
+Trains the model and simultaneously checks its performance on the validation set. If needed, change how the model is evaluated (currently the final probability of belonging to class 0/1 is defined as the mean of the probabilities of the 10 central slices; a short numeric sketch of this rule is shown right after this README).
+### 4. test.py
+Tests the model on the test set. Do not change ```batch_size=10``` and ```shuffle=False```.
+### 5. run.py
+Example of running training and testing on Marvin. For local use, change ```main_path_to_data```.
+
+## II.) Model interpretation
+
+### 1. visualize_filters.py
+Visualization of the filters of the trained model.
+### 2. visualize_convolutions.py
+Visualization of the convolution outputs for an input image at different depths of the network.
+### 3. visualize_saliency.py
+Saliency map of the contribution to the probability of severe infection.
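
To make the evaluation rule from point I.3 concrete: a minimal numeric sketch of averaging the per-slice probabilities of one patient into a single class-1 probability. The numbers are invented for illustration; the actual implementation lives in train.py and test.py below.

```python
import numpy as np

# Hypothetical sigmoid outputs for the 10 central slices of one patient
slice_probs = np.array([0.62, 0.71, 0.55, 0.68, 0.74, 0.60, 0.66, 0.59, 0.70, 0.65])

# Final per-patient probability of class 1 = mean over the 10 central slices
patient_prob = slice_probs.mean()
print(patient_prob)  # 0.65
```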

+ 21 - 0
5-naloga-cnn-klasifikacija-covid-slik/datareader.py

@@ -0,0 +1,21 @@
+import numpy as np
+import torch.utils.data as tdata
+import os
+
+class DataReader(tdata.Dataset):
+    def __init__(self, main_path_to_data, data_info):
+        super(DataReader, self).__init__()
+        self.data = data_info 
+        self.num_sample = len(self.data)
+        self.main_path_to_data = main_path_to_data
+
+    def __len__(self):
+        return self.num_sample
+        
+    def __getitem__(self, n):
+        filename, label = self.data[n] # each entry of data_info is a (filename, label) pair
+        path_to_file = os.path.join(self.main_path_to_data, filename)
+        img = np.load(path_to_file) # slices are stored as preprocessed .npy arrays
+
+        return img, np.float32([label])
+    
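
A minimal usage sketch for DataReader, assuming hypothetical file names and labels; dummy .npy slices (here 1x224x224, the real preprocessed slices may have a different shape) are written to a temporary folder only so the example runs on its own.

```python
import os
import tempfile

import numpy as np
from torch.utils.data import DataLoader

from datareader import DataReader

# Write two dummy single-channel slices to a temporary folder
tmp_dir = tempfile.mkdtemp()
data_info = [("patient0_slice0.npy", 0), ("patient1_slice0.npy", 1)]  # hypothetical (filename, label) pairs
for filename, _ in data_info:
    np.save(os.path.join(tmp_dir, filename), np.zeros((1, 224, 224), dtype=np.float32))

reader = DataReader(tmp_dir, data_info)
loader = DataLoader(reader, batch_size=2, shuffle=False)

images, labels = next(iter(loader))
print(images.shape, labels.shape)  # torch.Size([2, 1, 224, 224]) torch.Size([2, 1])
```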

+ 26 - 0
5-naloga-cnn-klasifikacija-covid-slik/model.py

@@ -0,0 +1,26 @@
+import torch
+import torchvision.models as tmodels
+import torch.nn as nn
+
+class ModelCT(nn.Module):
+    def __init__(self):
+        super(ModelCT, self).__init__()
+        self.backbone = tmodels.resnet18(pretrained=True) # ImageNet-pretrained ResNet18 backbone
+        self.backbone.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) # accept single-channel CT slices
+        self.convolution2d = nn.Conv2d(512, 1, kernel_size=(1, 1), stride=(1, 1), bias=True) # 1x1 conv: 512 feature maps -> single logit map
+        self.fc_maxpool = nn.AdaptiveMaxPool2d((1, 1)) # global max pooling -> one logit per image
+            
+    def forward(self, x):
+        x = self.backbone.conv1(x)
+        x = self.backbone.bn1(x) 
+        x = self.backbone.relu(x)
+        x = self.backbone.maxpool(x)
+        x = self.backbone.layer1(x)
+        x = self.backbone.layer2(x)
+        x = self.backbone.layer3(x)
+        x = self.backbone.layer4(x)
+        x = self.convolution2d(x)
+        x = self.fc_maxpool(x)
+        x = torch.flatten(x, 1)
+        
+        return x

+ 43 - 0
5-naloga-cnn-klasifikacija-covid-slik/run.py

@@ -0,0 +1,43 @@
+from train import Training
+from test import Testing
+from model import ModelCT
+import json
+import os
+
+if __name__ == '__main__': 
+
+    main_path_to_data = "/data/PSUF_naloge/5-naloga/processed" # folder containing all the processed data
+    path_to_model = "trained_models/testrun123" # folder where the trained model weights and the AUC/loss history from training will be saved
+    
+    # Load the lists for the training, validation and test sets
+    with open(os.path.join(main_path_to_data, "train_info.json")) as f:
+        train_info = json.load(f)
+    with open(os.path.join(main_path_to_data, "val_info.json")) as f:
+        valid_info = json.load(f)
+    with open(os.path.join(main_path_to_data, "test_info.json")) as f:
+        test_info = json.load(f)
+
+    # Select the model
+    my_model = ModelCT()
+
+    # Set the hyperparameters in a dictionary
+    hyperparameters = {}
+    hyperparameters['learning_rate'] = 0.2e-3 # learning rate
+    hyperparameters['weight_decay'] = 0.0001 # weight decay
+    hyperparameters['total_epoch'] = 10 # total number of epochs
+    hyperparameters['multiplicator'] = 0.95 # after each epoch the learning rate is multiplied by this factor
+    
+    # Create the training and testing classes
+    TrainClass = Training(main_path_to_data)
+    TestClass = Testing(main_path_to_data)
+
+    # Train the model with the selected hyperparameters
+    aucs, losses = TrainClass.train(train_info, valid_info, my_model, hyperparameters, path_to_model)
+
+    # Path to the trained model weights
+    path_to_model_weights = os.path.join(path_to_model, "trained_model_weights.pth")
+    
+    # Test the model on the test set
+    auc, fpr, tpr, thresholds, trues, predictions = TestClass.test(test_info, my_model, path_to_model_weights)
+
+    print("Test set AUC result: ", auc)

+ 57 - 0
5-naloga-cnn-klasifikacija-covid-slik/test.py

@@ -0,0 +1,57 @@
+import numpy as np
+import torch
+from torch.utils.data import DataLoader
+from datareader import DataReader
+from sklearn.metrics import roc_auc_score, roc_curve
+
+class Testing():
+    def __init__(self, main_path_to_data):
+        self.main_path_to_data = main_path_to_data
+        
+    def test(self, test_info, model, path_to_model_weights):
+        """Function for testing the model on test_info.
+
+        Args:
+            test_info (list): list of paths to 10 central slices per patient (ordered)
+            model (nn.Module): architecture of the model
+            path_to_model_weights (string): absolute path to the model weights
+
+        Returns:
+            auc (float): AUC calculated on test set
+            fpr (ndarray): increasing false positive rates such that element i is the false positive rate of predictions with score >= thresholds[i]
+            tpr (ndarray): increasing true positive rates such that element i is the true positive rate of predictions with score >= thresholds[i]
+            thresholds (ndarray): decreasing thresholds on the decision function used to compute fpr and tpr
+            trues (list): ground truth labels (0/1)
+            predictions (list): predicted probabilities in [0, 1]
+        """
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        # 1. Load trained model and set it to eval mode
+        model.to(device)
+        model.load_state_dict(torch.load(path_to_model_weights))
+        model.eval()
+        
+        # 2. Create dataloader
+        test_datareader = DataReader(self.main_path_to_data, test_info)
+        test_generator = DataLoader(test_datareader, batch_size=10, shuffle=False, pin_memory = True, num_workers=2)
+
+        # 3. Calculate metrics
+        predictions = []
+        trues = []
+        
+        for item_test in test_generator:
+            # Load images (x) and labels (y)
+            x, y = item_test
+            x = x.to(device)
+            y = y.to(device)
+            
+            # Forward pass
+            with torch.no_grad():
+                y_hat = model.forward(x)
+                y_hat = torch.sigmoid(y_hat) # Training uses BCEWithLogitsLoss (the sigmoid is built into the loss), so the sigmoid has to be applied explicitly here
+            predictions.append(np.mean(y_hat.cpu().numpy())) # Mean probability over the 10 central slices of one patient
+            trues.append(y.cpu().numpy()[0])
+            
+        auc = roc_auc_score(trues, predictions)
+        fpr, tpr, thresholds = roc_curve(trues, predictions, pos_label=1)
+        
+        return auc, fpr, tpr, thresholds, trues, predictions
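
The auc, fpr and tpr returned by Testing.test can be turned into a standard ROC plot. A minimal sketch, assuming matplotlib is available; the trues and predictions below are invented numbers standing in for the values collected on the real test set.

```python
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, roc_curve

# Hypothetical ground-truth labels and per-patient probabilities
trues = [0, 0, 1, 1, 0, 1]
predictions = [0.2, 0.4, 0.8, 0.6, 0.3, 0.9]

auc = roc_auc_score(trues, predictions)
fpr, tpr, thresholds = roc_curve(trues, predictions, pos_label=1)

plt.figure()
plt.plot(fpr, tpr, label="ROC (AUC = %.3f)" % auc)
plt.plot([0, 1], [0, 1], linestyle="--", label="chance")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.legend()
plt.savefig("roc_curve.png")
```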

+ 124 - 0
5-naloga-cnn-klasifikacija-covid-slik/train.py

@@ -0,0 +1,124 @@
+import numpy as np
+import torch
+from torch.utils.data import DataLoader
+from datareader import DataReader
+from sklearn.metrics import roc_auc_score
+import time
+import os
+
+class Training():
+    def __init__(self, main_path_to_data):
+        self.main_path_to_data = main_path_to_data
+
+    def train(self, train_info, valid_info, model, hyperparameters, path_to_model):
+        """Function for training the model on train_info. 
+
+        Args:
+            train_info (list): list of paths to one central slice per patient (shuffled)
+            valid_info (list): list of paths to 10 central slices per patient (ordered)
+            model (nn.Module): architecture of the model
+            hyperparameters (dictionary): dictionary of hyperparameters (learning rate, weight decay, multiplicator)
+            path_to_model (string): absolute path to the folder where outputs will be saved
+
+        Returns:
+            aucs (ndarray): array of AUCs during validation (one AUC per epoch)
+            losses (ndarray): array of losses during training (one running loss per epoch)
+        """
+        # 0. Check which device is available
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        
+        # 1. Create folder to save the model weights, aucs and losses
+        try:
+            os.mkdir(path_to_model)
+        except FileExistsError: # folder already exists, reuse it (files from a previous run may be overwritten)
+            pass
+        
+        # 2. Load hyperparameters
+        learning_rate = hyperparameters['learning_rate']
+        weight_decay = hyperparameters['weight_decay']
+        total_epoch = hyperparameters['total_epoch']
+        multiplicator = hyperparameters['multiplicator']
+        
+        # 3. Create train and validation generators, batch_size = 10 for validation generator (10 central slices)
+        train_datareader = DataReader(self.main_path_to_data, train_info)
+        train_generator = DataLoader(train_datareader, batch_size=16, shuffle=True, pin_memory=True, num_workers=2)
+        
+        valid_datareader = DataReader(self.main_path_to_data, valid_info)
+        valid_generator = DataLoader(valid_datareader, batch_size=10, shuffle=False, pin_memory=True, num_workers=2)
+        
+        # 4. Move model to the available device
+        model.to(device)
+        
+        # 5. Define criterion function, optimizer and scheduler
+        criterion = torch.nn.BCEWithLogitsLoss()
+        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
+        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, multiplicator, last_epoch=-1)
+        
+        # 6. Create lists for tracking the AUC and loss during training
+        aucs = []
+        losses = []
+        best_auc = -np.inf
+        
+        # 7. Run training
+        for epoch in range(total_epoch):
+            start = time.time()
+            print('Epoch: %d/%d' % (epoch + 1, total_epoch))
+            
+            running_loss = 0
+            # A) Train model
+            model.train()  # put model in training mode
+            for item_train in train_generator: 
+                # Load images (x) and labels (y)
+                x, y = item_train
+                x = x.to(device)
+                y = y.to(device)
+
+                # Forward pass          
+                optimizer.zero_grad()
+                y_hat = model.forward(x)
+                loss = criterion(y_hat, y)
+                
+                # Backward pass
+                loss.backward()
+                optimizer.step()
+                
+                # Track loss change
+                running_loss += loss.item()
+            
+            # B) Validate model
+            predictions = []
+            trues = []
+            
+            model.eval() # put model in eval mode
+            for item_valid in valid_generator:
+                # Load images (x) and labels (y)
+                x, y = item_valid
+                x = x.to(device)
+                y = y.to(device)
+                
+                # Forward pass
+                with torch.no_grad():
+                    y_hat = model.forward(x)
+                    y_hat = torch.sigmoid(y_hat) # Training uses BCEWithLogitsLoss (the sigmoid is built into the loss), so the sigmoid has to be applied explicitly here
+                
+                predictions.append(np.mean(y_hat.cpu().numpy())) # Calculate mean of 10 predictions
+                trues.append(int(y.cpu().numpy()[0]))
+        
+            auc = roc_auc_score(trues, predictions)
+            
+            # C) Track changes, update LR, save best model
+            print("AUC: ", auc, ", Running loss: ", running_loss/len(train_generator), ", Time: ", time.time()-start)
+            
+            if auc > best_auc:
+                torch.save(model.state_dict(), os.path.join(path_to_model, 'trained_model_weights.pth'))
+                best_auc = auc         
+
+            
+            aucs.append(auc)
+            losses.append(running_loss/len(train_generator))
+            scheduler.step()
+            
+        np.save(os.path.join(path_to_model, 'AUCS.npy'), np.array(aucs))
+        np.save(os.path.join(path_to_model, 'LOSSES.npy'), np.array(losses))
+        
+        return aucs, losses
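
Training.train saves the per-epoch validation AUC and running training loss as AUCS.npy and LOSSES.npy inside path_to_model. A minimal sketch for inspecting them afterwards, assuming training has already been run as in run.py (so the folder trained_models/testrun123 exists) and matplotlib is available.

```python
import os

import numpy as np
import matplotlib.pyplot as plt

path_to_model = "trained_models/testrun123"  # folder used in run.py
aucs = np.load(os.path.join(path_to_model, "AUCS.npy"))
losses = np.load(os.path.join(path_to_model, "LOSSES.npy"))

# Validation AUC and running training loss, one value per epoch
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(aucs)
ax1.set_xlabel("epoch")
ax1.set_ylabel("validation AUC")
ax2.plot(losses)
ax2.set_xlabel("epoch")
ax2.set_ylabel("running training loss")
fig.savefig("training_curves.png")
```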

+ 0 - 0
5-naloga-cnn-klasifikacija-covid-slik/trained_models/save_your_models_locally_here.txt