# FirstTryLinear.txt (16 KB) — code listing; garbled line-number column removed.
  1. import os
  2. import numpy as np
  3. import scipy
  4. from scipy.spatial.distance import cdist
  5. from scipy.spatial.transform import Rotation as R
  6. import slicer
  7. from DICOMLib import DICOMUtils
  8. from collections import deque
  9. import SimpleITK as sitk
  10. import sitkUtils
  11. #exec(open("C:/Users/lkomar/Documents/Prostata/FirstTryRegister.py").read())
  12. # Define a threshold for grouping nearby points (in voxel space)
  13. #distance_threshold = 4 # This can be adjusted based on your dataset
  14. # Function to group points that are close to each other
  15. def group_points(points, threshold):
  16. grouped_points = []
  17. while points:
  18. point = points.pop() # Take one point from the list
  19. group = [point] # Start a new group
  20. # Find all points close to this one
  21. distances = cdist([point], points) # Calculate distances from this point to others
  22. close_points = [i for i, dist in enumerate(distances[0]) if dist < threshold]
  23. # Add the close points to the group
  24. group.extend([points[i] for i in close_points])
  25. # Remove the grouped points from the list
  26. points = [point for i, point in enumerate(points) if i not in close_points]
  27. # Add the group to the result
  28. grouped_points.append(group)
  29. return grouped_points
  30. def region_growing(image_data, seed, intensity_threshold, max_distance):
  31. dimensions = image_data.GetDimensions()
  32. visited = set()
  33. region = []
  34. queue = deque([seed])
  35. while queue:
  36. x, y, z = queue.popleft()
  37. if (x, y, z) in visited:
  38. continue
  39. visited.add((x, y, z))
  40. voxel_value = image_data.GetScalarComponentAsDouble(x, y, z, 0)
  41. if voxel_value >= intensity_threshold:
  42. region.append((x, y, z))
  43. # Add neighbors within bounds
  44. for dx, dy, dz in [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1)]:
  45. nx, ny, nz = x + dx, y + dy, z + dz
  46. if 0 <= nx < dimensions[0] and 0 <= ny < dimensions[1] and 0 <= nz < dimensions[2]:
  47. if (nx, ny, nz) not in visited:
  48. queue.append((nx, ny, nz))
  49. return region
  50. def detect_points_region_growing(volume_name, intensity_threshold=3000, x_min=90, x_max=380, y_min=190, y_max=380, z_min=80, z_max=120, max_distance=9, centroid_merge_threshold=5):
  51. volume_node = slicer.util.getNode(volume_name)
  52. if not volume_node:
  53. raise RuntimeError(f"Volume {volume_name} not found.")
  54. image_data = volume_node.GetImageData()
  55. matrix = vtk.vtkMatrix4x4()
  56. volume_node.GetIJKToRASMatrix(matrix)
  57. dimensions = image_data.GetDimensions()
  58. detected_regions = []
  59. # Check if it's CT or CBCT
  60. is_cbct = "cbct" in volume_name.lower()
  61. if is_cbct:
  62. valid_x_min, valid_x_max = 0, dimensions[0] - 1
  63. valid_y_min, valid_y_max = 0, dimensions[1] - 1
  64. valid_z_min, valid_z_max = 0, dimensions[2] - 1
  65. else:
  66. valid_x_min, valid_x_max = max(x_min, 0), min(x_max, dimensions[0] - 1)
  67. valid_y_min, valid_y_max = max(y_min, 0), min(y_max, dimensions[1] - 1)
  68. valid_z_min, valid_z_max = max(z_min, 0), min(z_max, dimensions[2] - 1)
  69. visited = set()
  70. def grow_region(x, y, z):
  71. if (x, y, z) in visited:
  72. return None
  73. voxel_value = image_data.GetScalarComponentAsDouble(x, y, z, 0)
  74. if voxel_value < intensity_threshold:
  75. return None
  76. region = region_growing(image_data, (x, y, z), intensity_threshold, max_distance=max_distance)
  77. if region:
  78. for point in region:
  79. visited.add(tuple(point))
  80. return region
  81. return None
  82. regions = []
  83. for z in range(valid_z_min, valid_z_max + 1):
  84. for y in range(valid_y_min, valid_y_max + 1):
  85. for x in range(valid_x_min, valid_x_max + 1):
  86. region = grow_region(x, y, z)
  87. if region:
  88. regions.append(region)
  89. # Collect centroids using intensity-weighted average
  90. centroids = []
  91. for region in regions:
  92. points = np.array([matrix.MultiplyPoint([*point, 1])[:3] for point in region])
  93. intensities = np.array([image_data.GetScalarComponentAsDouble(*point, 0) for point in region])
  94. if intensities.sum() > 0:
  95. weighted_centroid = np.average(points, axis=0, weights=intensities)
  96. max_intensity = intensities.max()
  97. centroids.append((np.round(weighted_centroid, 2), max_intensity))
  98. unique_centroids = []
  99. for centroid, intensity in centroids:
  100. if not any(np.linalg.norm(centroid - existing_centroid) < centroid_merge_threshold for existing_centroid, _ in unique_centroids):
  101. unique_centroids.append((centroid, intensity))
  102. markups_node = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLMarkupsFiducialNode", f"Markers_{volume_name}")
  103. for centroid, intensity in unique_centroids:
  104. markups_node.AddControlPoint(*centroid)
  105. #print(f"Detected Centroid (RAS): {centroid}, Max Intensity: {intensity}")
  106. return unique_centroids
  107. def compute_Kabsch_rotation(moving_points, fixed_points):
  108. """
  109. Computes the optimal rotation matrix to align moving_points to fixed_points.
  110. Parameters:
  111. moving_points (list or ndarray): List of points to be rotated CBCT
  112. fixed_points (list or ndarray): List of reference points CT
  113. Returns:
  114. ndarray: Optimal rotation matrix.
  115. """
  116. assert len(moving_points) == len(fixed_points), "Point lists must be the same length."
  117. # Convert to numpy arrays
  118. moving = np.array(moving_points)
  119. fixed = np.array(fixed_points)
  120. # Compute centroids
  121. centroid_moving = np.mean(moving, axis=0)
  122. centroid_fixed = np.mean(fixed, axis=0)
  123. # Center the points
  124. moving_centered = moving - centroid_moving
  125. fixed_centered = fixed - centroid_fixed
  126. # Compute covariance matrix
  127. H = np.dot(moving_centered.T, fixed_centered)
  128. # SVD decomposition
  129. U, _, Vt = np.linalg.svd(H)
  130. Rotate_optimal = np.dot(Vt.T, U.T)
  131. # Correct improper rotation (reflection)
  132. if np.linalg.det(Rotate_optimal) < 0:
  133. Vt[-1, :] *= -1
  134. Rotate_optimal = np.dot(Vt.T, U.T)
  135. return Rotate_optimal
  136. def compute_Horn_rotation(moving_points, fixed_points):
  137. """
  138. Computes the optimal rotation matrix using Horn's method.
  139. Parameters:
  140. moving_points (list or ndarray): List of points to be rotated, CBCT
  141. fixed_points (list or ndarray): List of reference points, CT
  142. Returns:
  143. ndarray: Optimal rotation matrix.
  144. """
  145. assert len(moving_points) == len(fixed_points), "Point lists must be the same length."
  146. moving = np.array(moving_points)
  147. fixed = np.array(fixed_points)
  148. # Compute centroids
  149. centroid_moving = np.mean(moving, axis=0)
  150. centroid_fixed = np.mean(fixed, axis=0)
  151. # Center the points
  152. moving_centered = moving - centroid_moving
  153. fixed_centered = fixed - centroid_fixed
  154. # Compute cross-dispersion matrix
  155. H = np.dot(moving_centered.T, fixed_centered)
  156. # Compute SVD of H
  157. U, _, Vt = np.linalg.svd(H)
  158. # Compute rotation matrix
  159. R = np.dot(Vt.T, U.T)
  160. # Ensure a proper rotation (avoid reflection)
  161. if np.linalg.det(R) < 0:
  162. Vt[-1, :] *= -1
  163. R = np.dot(Vt.T, U.T)
  164. return R
  165. def compute_quaternion_rotation(moving_points, fixed_points):
  166. """
  167. Computes the optimal rotation matrix using quaternions.
  168. Parameters:
  169. moving_points (list or ndarray): List of points to be rotated.
  170. fixed_points (list or ndarray): List of reference points.
  171. Returns:
  172. ndarray: Optimal rotation matrix.
  173. """
  174. assert len(moving_points) == len(fixed_points), "Point lists must be the same length."
  175. moving = np.array(moving_points)
  176. fixed = np.array(fixed_points)
  177. # Compute centroids
  178. centroid_moving = np.mean(moving, axis=0)
  179. centroid_fixed = np.mean(fixed, axis=0)
  180. # Center the points
  181. moving_centered = moving - centroid_moving
  182. fixed_centered = fixed - centroid_fixed
  183. # Construct the cross-dispersion matrix
  184. M = np.dot(moving_centered.T, fixed_centered)
  185. # Construct the N matrix for quaternion solution
  186. A = M - M.T
  187. delta = np.array([A[1, 2], A[2, 0], A[0, 1]])
  188. trace = np.trace(M)
  189. N = np.zeros((4, 4))
  190. N[0, 0] = trace
  191. N[1:, 0] = delta
  192. N[0, 1:] = delta
  193. N[1:, 1:] = M + M.T - np.eye(3) * trace
  194. # Compute the eigenvector corresponding to the maximum eigenvalue
  195. eigvals, eigvecs = np.linalg.eigh(N)
  196. q_optimal = eigvecs[:, np.argmax(eigvals)] # Optimal quaternion
  197. # Convert quaternion to rotation matrix
  198. w, x, y, z = q_optimal
  199. R = np.array([
  200. [1 - 2*(y**2 + z**2), 2*(x*y - z*w), 2*(x*z + y*w)],
  201. [2*(x*y + z*w), 1 - 2*(x**2 + z**2), 2*(y*z - x*w)],
  202. [2*(x*z - y*w), 2*(y*z + x*w), 1 - 2*(x**2 + y**2)]
  203. ])
  204. return R
  205. def compute_translation(moving_points, fixed_points, rotation_matrix):
  206. """
  207. Computes the translation vector to align moving_points to fixed_points given a rotation matrix.
  208. Parameters:
  209. moving_points (list or ndarray): List of points to be translated.
  210. fixed_points (list or ndarray): List of reference points.
  211. rotation_matrix (ndarray): Rotation matrix.
  212. Returns:
  213. ndarray: Translation vector.
  214. """
  215. # Convert to numpy arrays
  216. moving = np.array(moving_points)
  217. fixed = np.array(fixed_points)
  218. # Compute centroids
  219. centroid_moving = np.mean(moving, axis=0)
  220. centroid_fixed = np.mean(fixed, axis=0)
  221. # Compute translation
  222. translation = centroid_fixed - np.dot(centroid_moving, rotation_matrix)
  223. return translation
  224. # def apply_transform(points, rotation_matrix, translation_vector):
  225. # points = np.array(points)
  226. # transformed_points = np.dot(points, rotation_matrix.T) + translation_vector
  227. # return transformed_points
  228. def apply_transform(volume, rotation_matrix, translation_vector):
  229. """
  230. Transforms a 3D volume using a given rotation matrix and translation vector.
  231. Parameters:
  232. volume (sitk.Image): Input 3D volume to be transformed.
  233. rotation_matrix (ndarray): 3x3 rotation matrix.
  234. translation_vector (ndarray): 1x3 translation vector.
  235. Returns:
  236. sitk.Image: Transformed 3D volume.
  237. """
  238. # Create an affine transform
  239. transform = sitk.AffineTransform(3)
  240. transform.SetMatrix(rotation_matrix.flatten())
  241. translation_vector = translation_vector.tolist()
  242. transform.SetTranslation(translation_vector)
  243. # Resample the volume
  244. resampler = sitk.ResampleImageFilter()
  245. resampler.SetReferenceImage(volume)
  246. resampler.SetInterpolator(sitk.sitkLinear) # Linear interpolation
  247. resampler.SetTransform(transform)
  248. resampler.SetDefaultPixelValue(0) # Background value for areas outside the original volume
  249. transformed_volume = resampler.Execute(volume)
  250. return transformed_volume
  251. # Initialize lists and dictionary
  252. cbct_list = []
  253. ct_list = []
  254. volume_points_dict = {}
  255. # Process loaded volumes
  256. for volumeNode in slicer.util.getNodesByClass("vtkMRMLScalarVolumeNode"):
  257. volumeName = volumeNode.GetName()
  258. shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene)
  259. imageItem = shNode.GetItemByDataNode(volumeNode)
  260. modality = shNode.GetItemAttribute(imageItem, 'DICOM.Modality')
  261. #print(modality)
  262. # Check if the volume is loaded into the scene
  263. if not slicer.mrmlScene.IsNodePresent(volumeNode):
  264. print(f"Volume {volumeName} not present in the scene.")
  265. continue
  266. # Determine scan type
  267. if "cbct" in volumeName.lower():
  268. cbct_list.append(volumeName)
  269. scan_type = "CBCT"
  270. else:
  271. ct_list.append(volumeName)
  272. scan_type = "CT"
  273. # Detect points using region growing
  274. grouped_points = detect_points_region_growing(volumeName, intensity_threshold=3000)
  275. volume_points_dict[(scan_type, volumeName)] = grouped_points
  276. # Print the results
  277. # print(f"\nCBCT Volumes: {cbct_list}")
  278. # print(f"CT Volumes: {ct_list}")
  279. # print("\nDetected Points by Volume:")
  280. # for (scan_type, vol_name), points in volume_points_dict.items():
  281. # print(f"{scan_type} Volume '{vol_name}': {len(points)} points detected.")
  282. if cbct_list and ct_list:
  283. # Izberi prvi CT volumen kot referenco
  284. ct_volume_name = ct_list[0]
  285. ct_points = [centroid for centroid, _ in volume_points_dict[("CT", ct_volume_name)]]
  286. if len(ct_points) < 3:
  287. print("CT volumen nima dovolj točk za registracijo.")
  288. else:
  289. print("CT points: ", np.array(ct_points))
  290. for cbct_volume_name in cbct_list:
  291. # Izvleci točke za trenutni CBCT volumen
  292. cbct_points = [centroid for centroid, _ in volume_points_dict[("CBCT", cbct_volume_name)]]
  293. print(f"\nProcessing CBCT Volume: {cbct_volume_name}")
  294. if len(cbct_points) < 3:
  295. print(f"CBCT Volume '{cbct_volume_name}' nima dovolj točk za registracijo.")
  296. continue
  297. #print("CBCT points: ", np.array(cbct_points))
  298. # Compute rotation matrices using different methods
  299. svd_rotation_matrix = compute_Kabsch_rotation(cbct_points, ct_points)
  300. horn_rotation_matrix = compute_Horn_rotation(cbct_points, ct_points)
  301. quaternion_rotation_matrix = compute_quaternion_rotation(cbct_points, ct_points)
  302. # Compute translations for each method
  303. svd_translation_vector = compute_translation(cbct_points, ct_points, svd_rotation_matrix)
  304. horn_translation_vector = compute_translation(cbct_points, ct_points, horn_rotation_matrix)
  305. quaternion_translation_vector = compute_translation(cbct_points, ct_points, quaternion_rotation_matrix)
  306. # Display the results for the current CBCT volume
  307. print("\nSVD Method:")
  308. print("Rotation Matrix:\n", svd_rotation_matrix)
  309. print("Translation Vector:\n", svd_translation_vector)
  310. print("\nHorn Method:")
  311. print("Rotation Matrix:\n", horn_rotation_matrix)
  312. print("Translation Vector:\n", horn_translation_vector)
  313. print("\nQuaternion Method:")
  314. print("Rotation Matrix:\n", quaternion_rotation_matrix)
  315. print("Translation Vector:\n", quaternion_translation_vector)
  316. # Apply chosen transformation (select one method manually)
  317. chosen_rotation_matrix = svd_rotation_matrix # Change to horn_rotation_matrix or quaternion_rotation_matrix if needed
  318. chosen_translation_vector = svd_translation_vector #also change here
  319. # Pridobitev CBCT slike kot SimpleITK
  320. cbct_volume_node = slicer.util.getNode(cbct_volume_name)
  321. cbct_image_sitk = sitkUtils.PullVolumeFromSlicer(cbct_volume_node)
  322. transformed_cbct_image = apply_transform(cbct_image_sitk, chosen_rotation_matrix, chosen_translation_vector)
  323. print(f"\nTransformed Volume '{cbct_volume_name}' using SVD Method")
  324. # Shranimo nazaj v Slicer
  325. sitkUtils.PushVolumeToSlicer(transformed_cbct_image, name=f"Transformed_{cbct_volume_name}")
  326. else:
  327. print("CBCT ali CT volumen ni bil najden.")
  328. # def compute_rigid_transform(moving_points, fixed_points):
  329. # assert len(moving_points) == len(fixed_points), "Point lists must be the same length."
  330. # # Convert to numpy arrays
  331. # moving = np.array(moving_points)
  332. # fixed = np.array(fixed_points)
  333. # # Compute centroids
  334. # centroid_moving = np.mean(moving, axis=0)
  335. # centroid_fixed = np.mean(fixed, axis=0)
  336. # # Center the points
  337. # moving_centered = moving - centroid_moving
  338. # fixed_centered = fixed - centroid_fixed
  339. # # Compute covariance matrix
  340. # H = np.dot(moving_centered.T, fixed_centered)
  341. # # SVD decomposition
  342. # U, _, Vt = np.linalg.svd(H)
  343. # Rotate_optimal = np.dot(Vt.T, U.T)
  344. # # Correct improper rotation (reflection)
  345. # if np.linalg.det(Rotate_optimal) < 0:
  346. # Vt[-1, :] *= -1
  347. # Rotate_optimal = np.dot(Vt.T, U.T)
  348. # # Compute translation
  349. # translation = centroid_fixed - np.dot(centroid_moving, Rotate_optimal)
  350. # return Rotate_optimal, translation