SeekTransformModule.py 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503
import os
from collections import deque

import numpy as np
import scipy
from scipy.spatial.distance import cdist
from scipy.spatial.transform import Rotation as R

import qt
import slicer
import vtk
from DICOMLib import DICOMUtils
from slicer.ScriptedLoadableModule import *

# exec(open("C:/Users/lkomar/Documents/Prostata/FirstTryRegister.py").read())
  13. class SeekTransformModule(ScriptedLoadableModule):
  14. """
  15. Module description shown in the module panel.
  16. """
  17. def __init__(self, parent):
  18. ScriptedLoadableModule.__init__(self, parent)
  19. self.parent.title = "Seek Transform module"
  20. self.parent.categories = ["Image Processing"]
  21. self.parent.contributors = ["Luka Komar (Onkološki Inštitut Ljubljana, Fakulteta za Matematiko in Fiziko Ljubljana)"]
  22. self.parent.helpText = "This module applies rigid transformations to CBCT volumes based on reference CT volumes."
  23. self.parent.acknowledgementText = "Supported by doc. Primož Peterlin & prof. Andrej Studen"
  24. class SeekTransformModuleWidget(ScriptedLoadableModuleWidget):
  25. """
  26. GUI of the module.
  27. """
  28. def setup(self):
  29. ScriptedLoadableModuleWidget.setup(self)
  30. # Dropdown menu za izbiro metode
  31. self.rotationMethodComboBox = qt.QComboBox()
  32. self.rotationMethodComboBox.addItems(["Kabsch", "Horn", "Quaternion"])
  33. self.layout.addWidget(self.rotationMethodComboBox)
  34. # Load button
  35. self.applyButton = qt.QPushButton("Find markers and transform")
  36. self.applyButton.toolTip = "Finds markers, computes optimal rigid transform and applies it to CBCT volumes."
  37. self.applyButton.enabled = True
  38. self.layout.addWidget(self.applyButton)
  39. # Connect button to logic
  40. self.applyButton.connect('clicked(bool)', self.onApplyButton)
  41. self.layout.addStretch(1)
  42. def onApplyButton(self):
  43. logic = MyTransformModuleLogic()
  44. selectedMethod = self.rotationMethodComboBox.currentText #izberi metodo izračuna rotacije
  45. logic.run(selectedMethod)
  46. class MyTransformModuleLogic(ScriptedLoadableModuleLogic):
  47. """
  48. Core logic of the module.
  49. """
  50. def run(self, selectedMethod):
  51. def group_points(points, threshold):
  52. # Function to group points that are close to each other
  53. grouped_points = []
  54. while points:
  55. point = points.pop() # Take one point from the list
  56. group = [point] # Start a new group
  57. # Find all points close to this one
  58. distances = cdist([point], points) # Calculate distances from this point to others
  59. close_points = [i for i, dist in enumerate(distances[0]) if dist < threshold]
  60. # Add the close points to the group
  61. group.extend([points[i] for i in close_points])
  62. # Remove the grouped points from the list
  63. points = [point for i, point in enumerate(points) if i not in close_points]
  64. # Add the group to the result
  65. grouped_points.append(group)
  66. return grouped_points
  67. def region_growing(image_data, seed, intensity_threshold, max_distance):
  68. dimensions = image_data.GetDimensions()
  69. visited = set()
  70. region = []
  71. queue = deque([seed])
  72. while queue:
  73. x, y, z = queue.popleft()
  74. if (x, y, z) in visited:
  75. continue
  76. visited.add((x, y, z))
  77. voxel_value = image_data.GetScalarComponentAsDouble(x, y, z, 0)
  78. if voxel_value >= intensity_threshold:
  79. region.append((x, y, z))
  80. # Add neighbors within bounds
  81. for dx, dy, dz in [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1)]:
  82. nx, ny, nz = x + dx, y + dy, z + dz
  83. if 0 <= nx < dimensions[0] and 0 <= ny < dimensions[1] and 0 <= nz < dimensions[2]:
  84. if (nx, ny, nz) not in visited:
  85. queue.append((nx, ny, nz))
  86. return region
  87. def detect_points_region_growing(volume_name, intensity_threshold=3000, x_min=90, x_max=380, y_min=190, y_max=380, z_min=80, z_max=120, max_distance=9, centroid_merge_threshold=5):
  88. volume_node = slicer.util.getNode(volume_name)
  89. if not volume_node:
  90. raise RuntimeError(f"Volume {volume_name} not found.")
  91. image_data = volume_node.GetImageData()
  92. matrix = vtk.vtkMatrix4x4()
  93. volume_node.GetIJKToRASMatrix(matrix)
  94. dimensions = image_data.GetDimensions()
  95. detected_regions = []
  96. # Check if it's CT or CBCT
  97. is_cbct = "cbct" in volume_name.lower()
  98. if is_cbct:
  99. valid_x_min, valid_x_max = 0, dimensions[0] - 1
  100. valid_y_min, valid_y_max = 0, dimensions[1] - 1
  101. valid_z_min, valid_z_max = 0, dimensions[2] - 1
  102. else:
  103. valid_x_min, valid_x_max = max(x_min, 0), min(x_max, dimensions[0] - 1)
  104. valid_y_min, valid_y_max = max(y_min, 0), min(y_max, dimensions[1] - 1)
  105. valid_z_min, valid_z_max = max(z_min, 0), min(z_max, dimensions[2] - 1)
  106. visited = set()
  107. def grow_region(x, y, z):
  108. if (x, y, z) in visited:
  109. return None
  110. voxel_value = image_data.GetScalarComponentAsDouble(x, y, z, 0)
  111. if voxel_value < intensity_threshold:
  112. return None
  113. region = region_growing(image_data, (x, y, z), intensity_threshold, max_distance=max_distance)
  114. if region:
  115. for point in region:
  116. visited.add(tuple(point))
  117. return region
  118. return None
  119. regions = []
  120. for z in range(valid_z_min, valid_z_max + 1):
  121. for y in range(valid_y_min, valid_y_max + 1):
  122. for x in range(valid_x_min, valid_x_max + 1):
  123. region = grow_region(x, y, z)
  124. if region:
  125. regions.append(region)
  126. # Collect centroids using intensity-weighted average
  127. centroids = []
  128. for region in regions:
  129. points = np.array([matrix.MultiplyPoint([*point, 1])[:3] for point in region])
  130. intensities = np.array([image_data.GetScalarComponentAsDouble(*point, 0) for point in region])
  131. if intensities.sum() > 0:
  132. weighted_centroid = np.average(points, axis=0, weights=intensities)
  133. max_intensity = intensities.max()
  134. centroids.append((np.round(weighted_centroid, 2), max_intensity))
  135. unique_centroids = []
  136. for centroid, intensity in centroids:
  137. if not any(np.linalg.norm(centroid - existing_centroid) < centroid_merge_threshold for existing_centroid, _ in unique_centroids):
  138. unique_centroids.append((centroid, intensity))
  139. markups_node = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLMarkupsFiducialNode", f"Markers_{volume_name}")
  140. for centroid, intensity in unique_centroids:
  141. markups_node.AddControlPoint(*centroid)
  142. #print(f"Detected Centroid (RAS): {centroid}, Max Intensity: {intensity}")
  143. return unique_centroids
  144. def compute_Kabsch_rotation(moving_points, fixed_points):
  145. """
  146. Computes the optimal rotation matrix to align moving_points to fixed_points.
  147. Parameters:
  148. moving_points (list or ndarray): List of points to be rotated CBCT
  149. fixed_points (list or ndarray): List of reference points CT
  150. Returns:
  151. ndarray: Optimal rotation matrix.
  152. """
  153. assert len(moving_points) == len(fixed_points), "Point lists must be the same length."
  154. # Convert to numpy arrays
  155. moving = np.array(moving_points)
  156. fixed = np.array(fixed_points)
  157. # Compute centroids
  158. centroid_moving = np.mean(moving, axis=0)
  159. centroid_fixed = np.mean(fixed, axis=0)
  160. # Center the points
  161. moving_centered = moving - centroid_moving
  162. fixed_centered = fixed - centroid_fixed
  163. # Compute covariance matrix
  164. H = np.dot(moving_centered.T, fixed_centered)
  165. # SVD decomposition
  166. U, _, Vt = np.linalg.svd(H)
  167. Rotate_optimal = np.dot(Vt.T, U.T)
  168. # Correct improper rotation (reflection)
  169. if np.linalg.det(Rotate_optimal) < 0:
  170. Vt[-1, :] *= -1
  171. Rotate_optimal = np.dot(Vt.T, U.T)
  172. return Rotate_optimal
  173. def compute_Horn_rotation(moving_points, fixed_points):
  174. """
  175. Computes the optimal rotation matrix using Horn's method.
  176. Parameters:
  177. moving_points (list or ndarray): List of points to be rotated, CBCT
  178. fixed_points (list or ndarray): List of reference points, CT
  179. Returns:
  180. ndarray: Optimal rotation matrix.
  181. """
  182. assert len(moving_points) == len(fixed_points), "Point lists must be the same length."
  183. moving = np.array(moving_points)
  184. fixed = np.array(fixed_points)
  185. # Compute centroids
  186. centroid_moving = np.mean(moving, axis=0)
  187. centroid_fixed = np.mean(fixed, axis=0)
  188. # Center the points
  189. moving_centered = moving - centroid_moving
  190. fixed_centered = fixed - centroid_fixed
  191. # Compute cross-dispersion matrix
  192. H = np.dot(moving_centered.T, fixed_centered)
  193. # Compute SVD of H
  194. U, _, Vt = np.linalg.svd(H)
  195. # Compute rotation matrix
  196. R = np.dot(Vt.T, U.T)
  197. # Ensure a proper rotation (avoid reflection)
  198. if np.linalg.det(R) < 0:
  199. Vt[-1, :] *= -1
  200. R = np.dot(Vt.T, U.T)
  201. return R
  202. def compute_quaternion_rotation(moving_points, fixed_points):
  203. """
  204. Computes the optimal rotation matrix using quaternions.
  205. Parameters:
  206. moving_points (list or ndarray): List of points to be rotated.
  207. fixed_points (list or ndarray): List of reference points.
  208. Returns:
  209. ndarray: Optimal rotation matrix.
  210. """
  211. assert len(moving_points) == len(fixed_points), "Point lists must be the same length."
  212. moving = np.array(moving_points)
  213. fixed = np.array(fixed_points)
  214. # Compute centroids
  215. centroid_moving = np.mean(moving, axis=0)
  216. centroid_fixed = np.mean(fixed, axis=0)
  217. # Center the points
  218. moving_centered = moving - centroid_moving
  219. fixed_centered = fixed - centroid_fixed
  220. # Construct the cross-dispersion matrix
  221. M = np.dot(moving_centered.T, fixed_centered)
  222. # Construct the N matrix for quaternion solution
  223. A = M - M.T
  224. delta = np.array([A[1, 2], A[2, 0], A[0, 1]])
  225. trace = np.trace(M)
  226. N = np.zeros((4, 4))
  227. N[0, 0] = trace
  228. N[1:, 0] = delta
  229. N[0, 1:] = delta
  230. N[1:, 1:] = M + M.T - np.eye(3) * trace
  231. # Compute the eigenvector corresponding to the maximum eigenvalue
  232. eigvals, eigvecs = np.linalg.eigh(N)
  233. q_optimal = eigvecs[:, np.argmax(eigvals)] # Optimal quaternion
  234. # Convert quaternion to rotation matrix
  235. w, x, y, z = q_optimal
  236. R = np.array([
  237. [1 - 2*(y**2 + z**2), 2*(x*y - z*w), 2*(x*z + y*w)],
  238. [2*(x*y + z*w), 1 - 2*(x**2 + z**2), 2*(y*z - x*w)],
  239. [2*(x*z - y*w), 2*(y*z + x*w), 1 - 2*(x**2 + y**2)]
  240. ])
  241. return R
  242. def compute_translation(moving_points, fixed_points, rotation_matrix):
  243. """
  244. Computes the translation vector to align moving_points to fixed_points given a rotation matrix.
  245. Parameters:
  246. moving_points (list or ndarray): List of points to be translated.
  247. fixed_points (list or ndarray): List of reference points.
  248. rotation_matrix (ndarray): Rotation matrix.
  249. Returns:
  250. ndarray: Translation vector.
  251. """
  252. # Convert to numpy arrays
  253. moving = np.array(moving_points)
  254. fixed = np.array(fixed_points)
  255. # Compute centroids
  256. centroid_moving = np.mean(moving, axis=0)
  257. centroid_fixed = np.mean(fixed, axis=0)
  258. # Compute translation
  259. translation = centroid_fixed - np.dot(centroid_moving, rotation_matrix)
  260. return translation
  261. def create_vtk_transform(rotation_matrix, translation_vector):
  262. """
  263. Creates a vtkTransform from a rotation matrix and a translation vector.
  264. """
  265. # Create a 4x4 transformation matrix
  266. transform_matrix = np.eye(4) # Start with an identity matrix
  267. transform_matrix[:3, :3] = rotation_matrix # Set rotation part
  268. transform_matrix[:3, 3] = translation_vector # Set translation part
  269. # Convert to vtkMatrix4x4
  270. vtk_matrix = vtk.vtkMatrix4x4()
  271. for i in range(4):
  272. for j in range(4):
  273. vtk_matrix.SetElement(i, j, transform_matrix[i, j])
  274. print("Transform matrix: ")
  275. print(vtk_matrix)
  276. # Create vtkTransform and set the matrix
  277. transform = vtk.vtkTransform()
  278. transform.SetMatrix(vtk_matrix)
  279. return transform
  280. # Initialize lists and dictionary
  281. cbct_list = []
  282. ct_list = []
  283. volume_points_dict = {}
  284. # Process loaded volumes
  285. for volumeNode in slicer.util.getNodesByClass("vtkMRMLScalarVolumeNode"):
  286. volumeName = volumeNode.GetName()
  287. shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene)
  288. imageItem = shNode.GetItemByDataNode(volumeNode)
  289. #modality = shNode.GetItemAttribute(imageItem, 'DICOM.Modality')
  290. #print(modality)
  291. # Check if the volume is loaded into the scene
  292. if not slicer.mrmlScene.IsNodePresent(volumeNode):
  293. print(f"Volume {volumeName} not present in the scene.")
  294. continue
  295. # Determine scan type
  296. if "cbct" in volumeName.lower():
  297. cbct_list.append(volumeName)
  298. scan_type = "CBCT"
  299. else:
  300. ct_list.append(volumeName)
  301. scan_type = "CT"
  302. # Detect points using region growing
  303. grouped_points = detect_points_region_growing(volumeName, intensity_threshold=3000)
  304. volume_points_dict[(scan_type, volumeName)] = grouped_points
  305. # Print the results
  306. # print(f"\nCBCT Volumes: {cbct_list}")
  307. # print(f"CT Volumes: {ct_list}")
  308. # print("\nDetected Points by Volume:")
  309. # for (scan_type, vol_name), points in volume_points_dict.items():
  310. # print(f"{scan_type} Volume '{vol_name}': {len(points)} points detected.")
  311. if cbct_list and ct_list:
  312. # Izberi prvi CT volumen kot referenco
  313. ct_volume_name = ct_list[0]
  314. ct_points = [centroid for centroid, _ in volume_points_dict[("CT", ct_volume_name)]]
  315. if len(ct_points) < 3:
  316. print("CT volumen nima dovolj točk za registracijo.")
  317. else:
  318. print("CT points: ", np.array(ct_points))
  319. for cbct_volume_name in cbct_list:
  320. # Izvleci točke za trenutni CBCT volumen
  321. cbct_points = [centroid for centroid, _ in volume_points_dict[("CBCT", cbct_volume_name)]]
  322. print(f"\nProcessing CBCT Volume: {cbct_volume_name}")
  323. if len(cbct_points) < 3:
  324. print(f"CBCT Volume '{cbct_volume_name}' nima dovolj točk za registracijo.")
  325. continue
  326. #print("CBCT points: ", np.array(cbct_points))
  327. # Display the results for the current CBCT volume
  328. # print("\nSVD Method:")
  329. # print("Rotation Matrix:\n", svd_rotation_matrix)
  330. # print("Translation Vector:\n", svd_translation_vector)
  331. # print("\nHorn Method:")
  332. # print("Rotation Matrix:\n", horn_rotation_matrix)
  333. # print("Translation Vector:\n", horn_translation_vector)
  334. # print("\nQuaternion Method:")
  335. # print("Rotation Matrix:\n", quaternion_rotation_matrix)
  336. # print("Translation Vector:\n", quaternion_translation_vector)
  337. # Izberi metodo glede na uporabnikov izbor
  338. if selectedMethod == "SVD":
  339. chosen_rotation_matrix = compute_Kabsch_rotation(cbct_points, ct_points)
  340. chosen_translation_vector = compute_translation(cbct_points, ct_points, chosen_rotation_matrix)
  341. print("\nKabsch Method:")
  342. print("Rotation Matrix:\n", chosen_rotation_matrix)
  343. print("Translation Vector:\n", chosen_translation_vector)
  344. elif selectedMethod == "Horn":
  345. chosen_rotation_matrix = compute_Horn_rotation(cbct_points, ct_points)
  346. chosen_translation_vector = compute_translation(cbct_points, ct_points, chosen_rotation_matrix)
  347. print("\nHorn Method:")
  348. print("Rotation Matrix:\n", chosen_rotation_matrix)
  349. print("Translation Vector:\n", chosen_translation_vector)
  350. elif selectedMethod == "Quaternion":
  351. chosen_rotation_matrix = compute_quaternion_rotation(cbct_points, ct_points)
  352. chosen_translation_vector = compute_translation(cbct_points, ct_points, chosen_rotation_matrix)
  353. print("\nQuaternion Method:")
  354. print("Rotation Matrix:\n", chosen_rotation_matrix)
  355. print("Translation Vector:\n", chosen_translation_vector)
  356. imeTransformNoda = cbct_volume_name + " Transform"
  357. # Ustvari vtkTransformNode in ga poveži z CBCT volumenom
  358. transform_node = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTransformNode", imeTransformNoda)
  359. # Kliči funkcijo, ki uporabi matriki
  360. vtk_transform = create_vtk_transform(chosen_rotation_matrix, chosen_translation_vector)
  361. # Dodaj transform v node
  362. transform_node.SetAndObserveTransformToParent(vtk_transform)
  363. # Pridobi CBCT volumen in aplikacijo transformacije
  364. cbct_volume_node = slicer.util.getNode(cbct_volume_name)
  365. cbct_volume_node.SetAndObserveTransformNodeID(transform_node.GetID()) # Pripni transform node na volumen
  366. # Uporabi transformacijo na volumnu (fizična aplikacija)
  367. slicer.vtkSlicerTransformLogic().hardenTransform(cbct_volume_node) # Uporabi transform na volumen
  368. print("Transform uspešen na", cbct_volume_name)
  369. #transformed_cbct_image = create_vtk_transform(cbct_image_sitk, chosen_rotation_matrix, chosen_translation_vector)
  370. else:
  371. print("CBCT ali CT volumen ni bil najden.")
  372. # def compute_rigid_transform(moving_points, fixed_points):
  373. # assert len(moving_points) == len(fixed_points), "Point lists must be the same length."
  374. # # Convert to numpy arrays
  375. # moving = np.array(moving_points)
  376. # fixed = np.array(fixed_points)
  377. # # Compute centroids
  378. # centroid_moving = np.mean(moving, axis=0)
  379. # centroid_fixed = np.mean(fixed, axis=0)
  380. # # Center the points
  381. # moving_centered = moving - centroid_moving
  382. # fixed_centered = fixed - centroid_fixed
  383. # # Compute covariance matrix
  384. # H = np.dot(moving_centered.T, fixed_centered)
  385. # # SVD decomposition
  386. # U, _, Vt = np.linalg.svd(H)
  387. # Rotate_optimal = np.dot(Vt.T, U.T)
  388. # # Correct improper rotation (reflection)
  389. # if np.linalg.det(Rotate_optimal) < 0:
  390. # Vt[-1, :] *= -1
  391. # Rotate_optimal = np.dot(Vt.T, U.T)
  392. # # Compute translation
  393. # translation = centroid_fixed - np.dot(centroid_moving, Rotate_optimal)
  394. # return Rotate_optimal, translation