SeekTransformModule.py

import os
import numpy as np
import scipy
from scipy.spatial.distance import cdist
from scipy.spatial.transform import Rotation as R
import slicer
from DICOMLib import DICOMUtils
from collections import deque
import vtk
from slicer.ScriptedLoadableModule import *
import qt
import matplotlib.pyplot as plt
import csv

#exec(open("C:/Users/lkomar/Documents/Prostata/FirstTryRegister.py").read())


class SeekTransformModule(ScriptedLoadableModule):
    """
    Module description shown in the module panel.
    """
    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        self.parent.title = "Seek Transform module"
        self.parent.categories = ["Image Processing"]
        self.parent.contributors = ["Luka Komar (Onkološki Inštitut Ljubljana, Fakulteta za Matematiko in Fiziko Ljubljana)"]
        self.parent.helpText = "This module applies rigid transformations to CBCT volumes based on reference CT volumes."
        self.parent.acknowledgementText = "Supported by doc. Primož Peterlin & prof. Andrej Studen"


class SeekTransformModuleWidget(ScriptedLoadableModuleWidget):
    """
    GUI of the module.
    """
    def setup(self):
        ScriptedLoadableModuleWidget.setup(self)

        # Dropdown menu for selecting the rotation method
        self.rotationMethodComboBox = qt.QComboBox()
        self.rotationMethodComboBox.addItems(["Kabsch", "Horn", "Iterative Closest Point (Kabsch)"])
        self.layout.addWidget(self.rotationMethodComboBox)

        # Checkboxes for the transformation components
        self.rotationCheckBox = qt.QCheckBox("Rotation")
        self.rotationCheckBox.setChecked(True)
        self.layout.addWidget(self.rotationCheckBox)

        self.translationCheckBox = qt.QCheckBox("Translation")
        self.translationCheckBox.setChecked(True)
        self.layout.addWidget(self.translationCheckBox)

        self.scalingCheckBox = qt.QCheckBox("Scaling")
        self.scalingCheckBox.setChecked(True)
        self.layout.addWidget(self.scalingCheckBox)

        # Apply button
        self.applyButton = qt.QPushButton("Find markers and transform")
        self.applyButton.toolTip = "Finds markers, computes optimal rigid transform and applies it to CBCT volumes."
        self.applyButton.enabled = True
        self.layout.addWidget(self.applyButton)

        # Connect button to logic
        self.applyButton.connect('clicked(bool)', self.onApplyButton)

        self.layout.addStretch(1)

    def onApplyButton(self):
        logic = MyTransformModuleLogic()
        selectedMethod = self.rotationMethodComboBox.currentText  # selected rotation-computation method

        # Read the checkbox states
        applyRotation = self.rotationCheckBox.isChecked()
        applyTranslation = self.translationCheckBox.isChecked()
        applyScaling = self.scalingCheckBox.isChecked()

        # Call the logic with the selected settings
        logic.run(selectedMethod, applyRotation, applyTranslation, applyScaling)


class MyTransformModuleLogic(ScriptedLoadableModuleLogic):
    """
    Core logic of the module.
    """
    def run(self, selectedMethod, applyRotation, applyTranslation, applyScaling):
        print("Calculating...")

        def group_points(points, threshold):
            # Group points that are close to each other
            grouped_points = []
            while points:
                point = points.pop()  # Take one point from the list
                group = [point]  # Start a new group
                if points:
                    # Find all points close to this one
                    distances = cdist([point], points)  # Distances from this point to the remaining ones
                    close_points = [i for i, dist in enumerate(distances[0]) if dist < threshold]
                    # Add the close points to the group
                    group.extend([points[i] for i in close_points])
                    # Remove the grouped points from the list
                    points = [p for i, p in enumerate(points) if i not in close_points]
                # Add the group to the result
                grouped_points.append(group)
            return grouped_points
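
        # Illustrative sketch (added for documentation only, never called by run()):
        # with a 2 mm threshold, three nearby points collapse into one group and a
        # distant point forms its own group.
        def _example_group_points():
            pts = [[0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.0, 0.7, 0.0], [50.0, 0.0, 0.0]]
            groups = group_points(list(pts), threshold=2.0)  # copy, since group_points mutates its input
            assert len(groups) == 2  # one cluster of three points, one singleton
            return groups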

        def region_growing(image_data, seed, intensity_threshold, max_distance):
            dimensions = image_data.GetDimensions()
            visited = set()
            region = []
            queue = deque([seed])
            while queue:
                x, y, z = queue.popleft()
                if (x, y, z) in visited:
                    continue
                visited.add((x, y, z))
                voxel_value = image_data.GetScalarComponentAsDouble(x, y, z, 0)
                if voxel_value >= intensity_threshold:
                    region.append((x, y, z))
                    # Add neighbors within bounds
                    for dx, dy, dz in [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1)]:
                        nx, ny, nz = x + dx, y + dy, z + dz
                        if 0 <= nx < dimensions[0] and 0 <= ny < dimensions[1] and 0 <= nz < dimensions[2]:
                            if (nx, ny, nz) not in visited:
                                queue.append((nx, ny, nz))
            return region
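
        # Illustrative sketch (documentation only, not invoked by run()): builds a tiny
        # synthetic vtkImageData volume with a 2-voxel bright blob and checks that the
        # 6-connected region growing returns exactly those two voxels.
        def _example_region_growing():
            img = vtk.vtkImageData()
            img.SetDimensions(5, 5, 5)
            img.AllocateScalars(vtk.VTK_DOUBLE, 1)
            for z in range(5):
                for y in range(5):
                    for x in range(5):
                        img.SetScalarComponentFromDouble(x, y, z, 0, 0.0)
            img.SetScalarComponentFromDouble(2, 2, 2, 0, 5000.0)
            img.SetScalarComponentFromDouble(3, 2, 2, 0, 5000.0)
            region = region_growing(img, (2, 2, 2), intensity_threshold=3000, max_distance=9)
            assert sorted(region) == [(2, 2, 2), (3, 2, 2)]
            return region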

        def compute_optimal_scaling_per_axis(moving_points, fixed_points):
            """Computes optimal scaling factors for each axis (X, Y, Z) to align moving points (CBCT) to fixed points (CT).

            Args:
                moving_points (list of lists): List of (x, y, z) moving points (CBCT).
                fixed_points (list of lists): List of (x, y, z) fixed points (CT).

            Returns:
                tuple: Scaling factors (sx, sy, sz).
            """
            moving_points_np = np.array(moving_points)
            fixed_points_np = np.array(fixed_points)

            # Compute centroids
            centroid_moving = np.mean(moving_points_np, axis=0)
            centroid_fixed = np.mean(fixed_points_np, axis=0)

            # Compute absolute distances of each point from its centroid along each axis
            distances_moving = np.abs(moving_points_np - centroid_moving)
            distances_fixed = np.abs(fixed_points_np - centroid_fixed)

            # Compute scaling factors as the ratio of mean absolute distances per axis
            scale_factors = np.mean(distances_fixed, axis=0) / np.mean(distances_moving, axis=0)
            return tuple(scale_factors)

        def compute_scaling(cbct_points, scaling_factors):
            """Applies non-uniform scaling to CBCT points.

            Args:
                cbct_points (list of lists): List of (x, y, z) points.
                scaling_factors (tuple): Scaling factors (sx, sy, sz) for each axis.

            Returns:
                list: Scaled CBCT points.
            """
            sx, sy, sz = scaling_factors  # Extract scaling factors
            scaling_matrix = np.diag([sx, sy, sz])  # Create diagonal scaling matrix
            cbct_points_np = np.array(cbct_points)  # Convert to numpy array
            scaled_points = cbct_points_np @ scaling_matrix.T  # Apply scaling
            return scaled_points.tolist()  # Convert back to list
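
        # Illustrative sketch (documentation only, not invoked by run()): per-axis
        # scaling factors estimated from two synthetic point sets, then applied; the
        # factors are the per-axis ratios of mean absolute deviations from the centroid.
        def _example_scaling():
            cbct = [[0.0, 0.0, 0.0], [2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]
            ct = [[0.0, 0.0, 0.0], [4.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]
            factors = compute_optimal_scaling_per_axis(cbct, ct)
            assert np.allclose(factors, (2.0, 1.0, 1.0))  # X spread is twice as large in CT
            scaled = compute_scaling(cbct, factors)
            return scaled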

        def compute_Kabsch_rotation(moving_points, fixed_points):
            """
            Computes the optimal rotation matrix to align moving_points to fixed_points.

            Parameters:
                moving_points (list or ndarray): List of points to be rotated (CBCT).
                fixed_points (list or ndarray): List of reference points (CT).

            Returns:
                ndarray: Optimal rotation matrix.
            """
            assert len(moving_points) == len(fixed_points), "Point lists must be the same length."

            # Convert to numpy arrays
            moving = np.array(moving_points)
            fixed = np.array(fixed_points)

            # Compute centroids
            centroid_moving = np.mean(moving, axis=0)
            centroid_fixed = np.mean(fixed, axis=0)

            # Center the points
            moving_centered = moving - centroid_moving
            fixed_centered = fixed - centroid_fixed

            # Compute covariance matrix
            H = np.dot(moving_centered.T, fixed_centered)

            # SVD decomposition
            U, _, Vt = np.linalg.svd(H)
            Rotate_optimal = np.dot(Vt.T, U.T)

            # Correct improper rotation (reflection)
            if np.linalg.det(Rotate_optimal) < 0:
                Vt[-1, :] *= -1
                Rotate_optimal = np.dot(Vt.T, U.T)

            return Rotate_optimal
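
        # Illustrative sketch (documentation only, not invoked by run()): the Kabsch
        # step should recover a known 90-degree rotation about the Z axis when the
        # fixed points are exactly the rotated moving points.
        def _example_kabsch_rotation():
            R_true = np.array([[0.0, -1.0, 0.0],
                               [1.0,  0.0, 0.0],
                               [0.0,  0.0, 1.0]])
            moving = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 1.0, 1.0]])
            fixed = moving @ R_true.T  # fixed_i = R_true @ moving_i
            R_est = compute_Kabsch_rotation(moving, fixed)
            assert np.allclose(R_est, R_true, atol=1e-6)
            return R_est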

        def compute_Horn_rotation(moving_points, fixed_points):
            """
            Computes the optimal rotation matrix using quaternions (Horn's method).

            Parameters:
                moving_points (list or ndarray): List of points to be rotated.
                fixed_points (list or ndarray): List of reference points.

            Returns:
                ndarray: Optimal rotation matrix.
            """
            assert len(moving_points) == len(fixed_points), "Point lists must be the same length."

            moving = np.array(moving_points)
            fixed = np.array(fixed_points)

            # Compute centroids
            centroid_moving = np.mean(moving, axis=0)
            centroid_fixed = np.mean(fixed, axis=0)

            # Center the points
            moving_centered = moving - centroid_moving
            fixed_centered = fixed - centroid_fixed

            # Construct the cross-dispersion matrix
            M = np.dot(moving_centered.T, fixed_centered)

            # Construct the N matrix for the quaternion solution
            A = M - M.T
            delta = np.array([A[1, 2], A[2, 0], A[0, 1]])
            trace = np.trace(M)

            N = np.zeros((4, 4))
            N[0, 0] = trace
            N[1:, 0] = delta
            N[0, 1:] = delta
            N[1:, 1:] = M + M.T - np.eye(3) * trace

            # Compute the eigenvector corresponding to the maximum eigenvalue
            eigvals, eigvecs = np.linalg.eigh(N)
            q_optimal = eigvecs[:, np.argmax(eigvals)]  # Optimal quaternion

            # Convert quaternion to rotation matrix
            w, x, y, z = q_optimal
            R = np.array([
                [1 - 2*(y**2 + z**2), 2*(x*y - z*w),       2*(x*z + y*w)],
                [2*(x*y + z*w),       1 - 2*(x**2 + z**2), 2*(y*z - x*w)],
                [2*(x*z - y*w),       2*(y*z + x*w),       1 - 2*(x**2 + y**2)]
            ])
            return R
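
        # Illustrative sketch (documentation only, not invoked by run()): Horn's
        # quaternion method should agree with the Kabsch result on the same
        # noise-free, exactly-rotated point set.
        def _example_horn_rotation():
            angle = np.deg2rad(30.0)
            R_true = np.array([[np.cos(angle), -np.sin(angle), 0.0],
                               [np.sin(angle),  np.cos(angle), 0.0],
                               [0.0,            0.0,           1.0]])
            moving = np.array([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0], [5.0, 5.0, 0.0]])
            fixed = moving @ R_true.T
            R_horn = compute_Horn_rotation(moving, fixed)
            R_kabsch = compute_Kabsch_rotation(moving, fixed)
            assert np.allclose(R_horn, R_true, atol=1e-6)
            assert np.allclose(R_horn, R_kabsch, atol=1e-6)
            return R_horn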

        def icp_algorithm(moving_points, fixed_points, max_iterations=100, tolerance=1e-5):
            """
            Iterative Closest Point (ICP) algorithm to align moving_points to fixed_points.

            Parameters:
                moving_points (list or ndarray): List of points to be aligned.
                fixed_points (list or ndarray): List of reference points.
                max_iterations (int): Maximum number of iterations.
                tolerance (float): Convergence tolerance.

            Returns:
                ndarray: Transformed moving points.
                ndarray: Optimal rotation matrix.
                ndarray: Optimal translation vector.
            """
            # Convert to numpy arrays
            moving = np.array(moving_points)
            fixed = np.array(fixed_points)

            # Initialize transformation
            R = np.eye(3)  # Identity matrix for rotation
            t = np.zeros(3)  # Zero vector for translation
            prev_error = np.inf  # Initialize previous error to a large value

            for iteration in range(max_iterations):
                # Step 1: Find the nearest neighbors (correspondences)
                distances = np.linalg.norm(moving[:, np.newaxis] - fixed, axis=2)
                nearest_indices = np.argmin(distances, axis=1)
                nearest_points = fixed[nearest_indices]

                # Step 2: Compute the optimal rotation and translation
                R_new = compute_Kabsch_rotation(moving, nearest_points)
                centroid_moving = np.mean(moving, axis=0)
                centroid_fixed = np.mean(nearest_points, axis=0)
                t_new = centroid_fixed - np.dot(R_new, centroid_moving)

                # Step 3: Apply the transformation
                moving = np.dot(moving, R_new.T) + t_new

                # Update the cumulative transformation
                R = np.dot(R_new, R)
                t = np.dot(R_new, t) + t_new

                # Step 4: Check for convergence
                mean_error = np.mean(np.linalg.norm(moving - nearest_points, axis=1))
                if np.abs(prev_error - mean_error) < tolerance:
                    print(f"ICP converged after {iteration + 1} iterations.")
                    break
                prev_error = mean_error
            else:
                print(f"ICP reached maximum iterations ({max_iterations}).")

            return moving, R, t
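
        # Illustrative sketch (documentation only, not invoked by run()): ICP applied
        # to a point set that was rotated and shifted should return the aligned points
        # close to the fixed ones, together with the cumulative rotation and translation.
        def _example_icp():
            angle = np.deg2rad(10.0)
            R_true = np.array([[np.cos(angle), -np.sin(angle), 0.0],
                               [np.sin(angle),  np.cos(angle), 0.0],
                               [0.0,            0.0,           1.0]])
            fixed = np.array([[0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]])
            moving = fixed @ R_true.T + np.array([1.0, -2.0, 3.0])  # rotate and shift the fixed points
            aligned, R_est, t_est = icp_algorithm(moving, fixed, max_iterations=50, tolerance=1e-8)
            assert np.allclose(aligned, fixed, atol=1e-3)
            return R_est, t_est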

        def compute_translation(moving_points, fixed_points, rotation_matrix):
            """
            Computes the translation vector to align moving_points to fixed_points given a rotation matrix.

            Parameters:
                moving_points (list or ndarray): List of points to be translated.
                fixed_points (list or ndarray): List of reference points.
                rotation_matrix (ndarray): Rotation matrix.

            Returns:
                ndarray: Translation vector.
            """
            # Convert to numpy arrays
            moving = np.array(moving_points)
            fixed = np.array(fixed_points)

            # Compute centroids
            centroid_moving = np.mean(moving, axis=0)
            centroid_fixed = np.mean(fixed, axis=0)

            # Compute translation: t = c_fixed - R @ c_moving (consistent with applying p' = R p + t)
            translation = centroid_fixed - np.dot(rotation_matrix, centroid_moving)

            return translation
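
        # Illustrative sketch (documentation only, not invoked by run()): for a pure
        # translation (identity rotation) the translation vector is simply the
        # difference of the two centroids.
        def _example_translation():
            moving = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
            fixed = [[5.0, -2.0, 3.0], [6.0, -2.0, 3.0], [5.0, -1.0, 3.0]]
            t = compute_translation(moving, fixed, np.eye(3))
            assert np.allclose(t, [5.0, -2.0, 3.0])
            return t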

        def create_vtk_transform(rotation_matrix, translation_vector):
            """
            Creates a vtkTransform from a rotation matrix and a translation vector.
            """
            # Create a 4x4 transformation matrix
            transform_matrix = np.eye(4)  # Start with an identity matrix
            transform_matrix[:3, :3] = rotation_matrix  # Set rotation part
            transform_matrix[:3, 3] = translation_vector  # Set translation part

            # Convert to vtkMatrix4x4
            vtk_matrix = vtk.vtkMatrix4x4()
            for i in range(4):
                for j in range(4):
                    vtk_matrix.SetElement(i, j, transform_matrix[i, j])
            #print("Transform matrix:")
            #for i in range(4):
            #    print(" ".join(f"{vtk_matrix.GetElement(i, j):.6f}" for j in range(4)))

            # Create vtkTransform and set the matrix
            transform = vtk.vtkTransform()
            transform.SetMatrix(vtk_matrix)
            return transform
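
        # Illustrative sketch (documentation only, not invoked by run()): a pure
        # translation packed into a vtkTransform should move a point by exactly
        # that offset (vtkTransform applies the homogeneous 4x4 matrix to points).
        def _example_vtk_transform():
            transform = create_vtk_transform(np.eye(3), np.array([10.0, -5.0, 2.5]))
            moved = transform.TransformPoint((1.0, 2.0, 3.0))
            assert np.allclose(moved, (11.0, -3.0, 5.5))
            return moved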

        def detect_points_region_growing(volume_name, yesCbct, intensity_threshold=3000, x_min=90, x_max=380, y_min=190, y_max=380, z_min=80, z_max=140, max_distance=9, centroid_merge_threshold=5):
            volume_node = slicer.util.getNode(volume_name)
            if not volume_node:
                raise RuntimeError(f"Volume {volume_name} not found.")

            image_data = volume_node.GetImageData()
            matrix = vtk.vtkMatrix4x4()
            volume_node.GetIJKToRASMatrix(matrix)

            dimensions = image_data.GetDimensions()
            #detected_regions = []

            if yesCbct:  # is this a CBCT or a CT volume?
                valid_x_min, valid_x_max = 0, dimensions[0] - 1
                valid_y_min, valid_y_max = 0, dimensions[1] - 1
                valid_z_min, valid_z_max = 0, dimensions[2] - 1
            else:
                valid_x_min, valid_x_max = max(x_min, 0), min(x_max, dimensions[0] - 1)
                valid_y_min, valid_y_max = max(y_min, 0), min(y_max, dimensions[1] - 1)
                valid_z_min, valid_z_max = max(z_min, 0), min(z_max, dimensions[2] - 1)

            visited = set()

            def grow_region(x, y, z):
                if (x, y, z) in visited:
                    return None

                voxel_value = image_data.GetScalarComponentAsDouble(x, y, z, 0)
                if voxel_value < intensity_threshold:
                    return None

                region = region_growing(image_data, (x, y, z), intensity_threshold, max_distance=max_distance)
                if region:
                    for point in region:
                        visited.add(tuple(point))
                    return region
                return None

            regions = []
            for z in range(valid_z_min, valid_z_max + 1):
                for y in range(valid_y_min, valid_y_max + 1):
                    for x in range(valid_x_min, valid_x_max + 1):
                        region = grow_region(x, y, z)
                        if region:
                            regions.append(region)

            # Collect centroids using intensity-weighted average
            centroids = []
            for region in regions:
                points = np.array([matrix.MultiplyPoint([*point, 1])[:3] for point in region])
                intensities = np.array([image_data.GetScalarComponentAsDouble(*point, 0) for point in region])
                if intensities.sum() > 0:
                    weighted_centroid = np.average(points, axis=0, weights=intensities)
                    max_intensity = intensities.max()
                    centroids.append((np.round(weighted_centroid, 2), max_intensity))

            unique_centroids = []
            for centroid, intensity in centroids:
                if not any(np.linalg.norm(centroid - existing_centroid) < centroid_merge_threshold for existing_centroid, _ in unique_centroids):
                    unique_centroids.append((centroid, intensity))

            markups_node = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLMarkupsFiducialNode", f"Markers_{volume_name}")
            for centroid, intensity in unique_centroids:
                markups_node.AddControlPoint(*centroid)
                #print(f"Detected Centroid (RAS): {centroid}, Max Intensity: {intensity}")

            return unique_centroids

        # Global lists for the final statistics
        prostate_size_est = []
        ctcbct_distance = []

        # Get the SubjectHierarchyNode
        shNode = slicer.vtkMRMLSubjectHierarchyNode.GetSubjectHierarchyNode(slicer.mrmlScene)
        studyItems = vtk.vtkIdList()
        shNode.GetItemChildren(shNode.GetSceneItemID(), studyItems)

        for i in range(studyItems.GetNumberOfIds()):
            studyItem = studyItems.GetId(i)

            # Add the study name to the statistics
            studyName = shNode.GetItemName(studyItem)
            prostate_size_est.append({"Study": studyName})
            ctcbct_distance.append({"Study": studyName})

            # LOCAL lists, reset for every study
            cbct_list = []
            ct_list = []
            volume_points_dict = {}

            # Get child items of the study item
            volumeItems = vtk.vtkIdList()
            shNode.GetItemChildren(studyItem, volumeItems)

            # Iterate over all volumes in this study
            for j in range(volumeItems.GetNumberOfIds()):
                intermediateItem = volumeItems.GetId(j)

                # Check whether this is an actual volume group (e.g. "No study description")
                intermediateName = shNode.GetItemName(intermediateItem)
                #print(f"Checking intermediate item: {intermediateName}")

                finalVolumeItems = vtk.vtkIdList()
                shNode.GetItemChildren(intermediateItem, finalVolumeItems)  # search one level deeper

                for k in range(finalVolumeItems.GetNumberOfIds()):
                    volumeItem = finalVolumeItems.GetId(k)
                    volumeNode = shNode.GetItemDataNode(volumeItem)

                    dicomUIDs = volumeNode.GetAttribute("DICOM.instanceUIDs")
                    if not dicomUIDs:
                        print("❌ This is an NRRD volume!")
                        continue  # Skip if it is not a DICOM volume

                    if volumeNode and volumeNode.IsA("vtkMRMLScalarVolumeNode"):
                        print(f"✔️ Found volume: {volumeNode.GetName()} (ID: {volumeItem})")

                    if not volumeNode or not volumeNode.IsA("vtkMRMLScalarVolumeNode"):
                        print("volumeNode not found")
                        continue  # Skip if it is not a valid volume

                    # Check that the volume has a StorageNode (otherwise `.GetFileName()` raises an error)
                    storageNode = volumeNode.GetStorageNode()
                    if not storageNode:
                        print("storageNode not found")
                        continue  # Skip if the volume has no stored DICOM data

                    volumeName = volumeNode.GetName()
                    #print(volumeName)
                    imageItem = shNode.GetItemByDataNode(volumeNode)
                    #print(imageItem)
                    #dicomUIDsList = volumeNode.GetAttribute("DICOM.instanceUIDs").split()

                    # Check the volume modality (DICOM metadata)
                    #modality = slicer.dicomDatabase.fileValue(storageNode.GetFileName(), "0008,0060")  # empty
                    #modality = volumeNode.GetAttribute("DICOM.Modality")  # None
                    #modality = slicer.dicomDatabase.fileValue(uid, "0008,0060")  # Modality, empty
                    #modality = slicer.dicomDatabase.fileValue(dicomUIDsList[0], "0008,0060")  # empty
                    modality = shNode.GetItemAttribute(imageItem, "DICOM.Modality")  # works!
                    #print(modality)

                    dimensions = volumeNode.GetImageData().GetDimensions()
                    spacing = volumeNode.GetSpacing()
                    #print(f"Volume {volumeNode.GetName()} - Dimensions: {dimensions}, Spacing: {spacing}")

                    if modality != "CT":
                        print("Not a CT image")
                        continue  # Skip if it is not CT

                    # Check that the volume is present in the scene
                    if not slicer.mrmlScene.IsNodePresent(volumeNode):
                        print(f"Volume {volumeName} not present in the scene.")
                        continue

                    # Check the manufacturer (DICOM metadata)
                    manufacturer = shNode.GetItemAttribute(imageItem, 'DICOM.Manufacturer')
                    #manufacturer = volumeNode.GetAttribute("DICOM.Manufacturer")
                    #manufacturer = slicer.dicomDatabase.fileValue(uid, "0008,0070")
                    #print(manufacturer)

                    # Decide whether this is a CBCT or a CT volume
                    if "varian" in manufacturer.lower() or "elekta" in manufacturer.lower():
                        cbct_list.append(volumeName)
                        scan_type = "CBCT"
                        yesCbct = True
                        print("CBCT")
                    else:  # Siemens or Philips
                        ct_list.append(volumeName)
                        scan_type = "CT"
                        yesCbct = False
                        print("CT")

                    # Detect marker points in the volume
                    grouped_points = detect_points_region_growing(volumeName, yesCbct, intensity_threshold=3000)
                    #print(f"Populating volume_points_dict with key ('{scan_type}', '{volumeName}')")
                    volume_points_dict[(scan_type, volumeName)] = grouped_points
                    #print(volume_points_dict)  # Check if the key is correctly added

            # If we have both volume types (CBCT and CT) within the same study
            if cbct_list and ct_list:
                ct_volume_name = ct_list[0]  # Use the first CT as the reference
                ct_points = [centroid for centroid, _ in volume_points_dict[("CT", ct_volume_name)]]

                if len(ct_points) < 3:
                    print(f"CT volume {ct_volume_name} does not have enough points for registration.")
                else:
                    for cbct_volume_name in cbct_list:
                        cbct_points = [centroid for centroid, _ in volume_points_dict[("CBCT", cbct_volume_name)]]

                        print(f"\nProcessing CBCT Volume: {cbct_volume_name}")
                        if len(cbct_points) < 3:
                            print(f"CBCT volume '{cbct_volume_name}' does not have enough points for registration.")
                            continue

                        # Distance bookkeeping
                        distances_ct_cbct = []
                        distances_internal = {"A-B": [], "B-C": [], "C-A": []}

                        cbct_points_array = np.array(cbct_points)  # Convert to numpy array

                        ct_volume_node = slicer.util.getNode(ct_volume_name)
                        cbct_volume_node = slicer.util.getNode(cbct_volume_name)

                        ct_spacing = ct_volume_node.GetSpacing()  # (x_spacing, y_spacing, z_spacing)
                        cbct_spacing = cbct_volume_node.GetSpacing()  # (x_spacing, y_spacing, z_spacing)

                        ct_scale_factor = np.array(ct_spacing)  # CT spacing (x, y, z)
                        cbct_scale_factor = np.array(cbct_spacing)  # CBCT spacing (x, y, z)
                        print(ct_scale_factor, cbct_scale_factor)

                        # Sort points by the Z coordinate (or X/Y if you prefer another axis)
                        cbct_points_sorted = cbct_points_array[np.argsort(cbct_points_array[:, 2])]

                        # Distances between CT and CBCT points (SORTED CBCT points; assumes equal counts and matching order)
                        d_ct_cbct = np.linalg.norm(cbct_points_sorted - ct_points, axis=1)
                        distances_ct_cbct.append(d_ct_cbct)

                        # Distances between points within the SORTED cbct_points
                        d_ab = np.linalg.norm(cbct_points_sorted[0] - cbct_points_sorted[1])
                        d_bc = np.linalg.norm(cbct_points_sorted[1] - cbct_points_sorted[2])
                        d_ca = np.linalg.norm(cbct_points_sorted[2] - cbct_points_sorted[0])

                        # Sort the distances so they are always reported in the same order
                        sorted_distances = sorted([d_ab, d_bc, d_ca])
                        distances_internal["A-B"].append(sorted_distances[0])
                        distances_internal["B-C"].append(sorted_distances[1])
                        distances_internal["C-A"].append(sorted_distances[2])

                        # Save the distances into the global lists
                        prostate_size_est.append({"Study": studyName, "Distances": sorted_distances})
                        ctcbct_distance.append({"Study": studyName, "Distances": list(distances_ct_cbct[-1])})  # convert to a plain list

                        # Choose the method according to the user's selection
                        chosen_rotation_matrix = np.eye(3)
                        chosen_translation_vector = np.zeros(3)

                        if applyScaling:
                            scaling_factors = compute_optimal_scaling_per_axis(cbct_points, ct_points)
                            print("Scaling factors: ", scaling_factors)
                            cbct_points = compute_scaling(cbct_points, scaling_factors)

                        if applyRotation:
                            if selectedMethod == "Kabsch":
                                chosen_rotation_matrix = compute_Kabsch_rotation(cbct_points, ct_points)
                            elif selectedMethod == "Horn":
                                chosen_rotation_matrix = compute_Horn_rotation(cbct_points, ct_points)
                            elif selectedMethod == "Iterative Closest Point (Kabsch)":
                                _, chosen_rotation_matrix, _ = icp_algorithm(cbct_points, ct_points)
                            print("Rotation Matrix:\n", chosen_rotation_matrix)

                        if applyTranslation:
                            chosen_translation_vector = compute_translation(cbct_points, ct_points, chosen_rotation_matrix)
                            print("Translation Vector:\n", chosen_translation_vector)

                        # Create a vtkTransformNode and associate it with the CBCT volume
                        imeTransformNoda = cbct_volume_name + " Transform"
                        transform_node = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTransformNode", imeTransformNoda)

                        # Build the transform and apply it
                        vtk_transform = create_vtk_transform(chosen_rotation_matrix, chosen_translation_vector)
                        transform_node.SetAndObserveTransformToParent(vtk_transform)

                        # Get the CBCT volume and attach the transform
                        cbct_volume_node = slicer.util.getNode(cbct_volume_name)
                        cbct_volume_node.SetAndObserveTransformNodeID(transform_node.GetID())

                        # Harden the transform on the volume (apply it physically)
                        slicer.vtkSlicerTransformLogic().hardenTransform(cbct_volume_node)
                        print("Transform applied successfully to", cbct_volume_name)
            else:
                print(f"Study {studyItem} does not have suitable CBCT and CT volumes.")

        # Print the global statistics
        print("Distances between CT and CBCT volumes: ", ctcbct_distance)
        print("Distances between markers: ", prostate_size_est)

        # Define the file path for the CSV file
        file_path = os.path.join(os.path.dirname(__file__), "study_data.csv")

        # Write the lists to the CSV file
        with open(file_path, mode='w', newline='') as file:  # 'w' to overwrite, 'a' to append
            writer = csv.writer(file)
            # Write headers
            writer.writerow(["Prostate Size", "CT-CBCT Distance"])
            # Write data rows
            for i in range(len(prostate_size_est)):
                writer.writerow([prostate_size_est[i], ctcbct_distance[i]])
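

# Illustrative usage sketch (not part of the module, kept commented so importing the
# file has no side effects): from the 3D Slicer Python console the logic can be driven
# directly, mirroring what onApplyButton does. The argument values are example assumptions.
#
#   logic = MyTransformModuleLogic()
#   logic.run("Kabsch", applyRotation=True, applyTranslation=True, applyScaling=False)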