# cnn.py

import torch
from torch import nn

import utils.models.layers as ly


class Parameters:
    """Container for the hyperparameters read from a parameter dictionary."""

    def __init__(self, param_dict):
        self.CNN_w_regularizer = param_dict["CNN_w_regularizer"]
        self.RNN_w_regularizer = param_dict["RNN_w_regularizer"]
        self.CNN_batch_size = param_dict["CNN_batch_size"]
        self.RNN_batch_size = param_dict["RNN_batch_size"]
        self.CNN_drop_rate = param_dict["CNN_drop_rate"]
        self.RNN_drop_rate = param_dict["RNN_drop_rate"]
        self.epochs = param_dict["epochs"]
        self.gpu = param_dict["gpu"]
        self.model_filepath = param_dict["model_filepath"] + "/net.h5"
        self.num_clinical = param_dict["num_clinical"]
        self.image_shape = param_dict["image_shape"]
        self.final_layer_size = param_dict["final_layer_size"]
        self.optimizer = param_dict["optimizer"]
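
# Example (hypothetical values, for illustration only): Parameters expects a
# dict containing exactly the keys read above, e.g.
#
#     params = Parameters({
#         "CNN_w_regularizer": 1e-4,
#         "RNN_w_regularizer": 1e-4,
#         "CNN_batch_size": 8,
#         "RNN_batch_size": 8,
#         "CNN_drop_rate": 0.1,
#         "RNN_drop_rate": 0.1,
#         "epochs": 50,
#         "gpu": True,
#         "model_filepath": "output",  # "/net.h5" is appended automatically
#         "num_clinical": 2,
#         "image_shape": (91, 109, 91),
#         "final_layer_size": 20,
#         "optimizer": "Adam",
#     })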


class CNN(nn.Module):
    def __init__(self, image_channels, clin_data_channels, droprate):
        super().__init__()

        # Image Section
        self.image_section = CNN_Image_Section(image_channels, droprate)

        # Data Layers, fully connected
        self.fc_clin1 = ly.FullConnBlock(clin_data_channels, 64, droprate=droprate)
        self.fc_clin2 = ly.FullConnBlock(64, 20, droprate=droprate)

        # Final Dense Layers
        self.dense1 = nn.Linear(40, 5)
        self.dense2 = nn.Linear(5, 2)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        image, clin_data = x

        image = self.image_section(image)

        clin_data = self.fc_clin1(clin_data)
        clin_data = self.fc_clin2(clin_data)

        x = torch.cat((image, clin_data), dim=1)
        x = self.dense1(x)
        x = self.dense2(x)
        x = self.softmax(x)
        return x


class CNN_Image_Section(nn.Module):
    def __init__(self, image_channels, droprate):
        super().__init__()

        # Initial Convolutional Blocks
        self.conv1 = ly.ConvBlock(
            image_channels,
            192,
            (11, 13, 11),
            stride=(4, 4, 4),
            droprate=droprate,
            pool=False,
        )
        self.conv2 = ly.ConvBlock(192, 384, (5, 6, 5), droprate=droprate, pool=False)

        # Midflow Block
        self.midflow = ly.MidFlowBlock(384, droprate)

        # Split Convolutional Block
        self.splitconv = ly.SplitConvBlock(384, 192, 96, 1, droprate)

        # Fully Connected Block
        self.fc_image = ly.FullConnBlock(227136, 20, droprate=droprate)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.midflow(x)
        x = self.splitconv(x)
        x = torch.flatten(x, 1)
        x = self.fc_image(x)
        return x
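

if __name__ == "__main__":
    # Hypothetical smoke test (not part of the original file): build the model
    # with assumed channel counts and report its size. A full forward pass,
    # model((image, clin_data)), additionally needs image spatial dimensions
    # chosen so that CNN_Image_Section flattens to the 227136 features
    # expected by fc_image.
    model = CNN(image_channels=1, clin_data_channels=2, droprate=0.1)
    n_params = sum(p.numel() for p in model.parameters())
    print(f"CNN has {n_params:,} parameters")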