import pyvista
import torch
from diffdrr.drr import DRR
from diffdrr.visualization import drr_to_mesh, img_to_mesh
from diffpose.deepfluoro import DeepFluoroDataset
from diffpose.visualization import fiducials_to_mesh, lines_to_mesh
3D camera pose geometry
Using PyVista to visualize the 3D geometry of the projection setup
Note
3D plotting in Jupyter can be annoying to set up, not to mention getting it to work on a remote server. Be sure to follow all instructions in the PyVista documentation to resolve common bugs.
If running Jupyter remotely, run the following…
pyvista.start_xvfb()
pyvista.global_theme.trame.server_proxy_enabled = True
pyvista.global_theme.trame.server_proxy_prefix = "/proxy/"
"trame")
pyvista.set_jupyter_backend(= torch.device("cuda" if torch.cuda.is_available() else "cpu") device
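If the backend is configured correctly, a trivial scene should render inline before moving on to the heavier meshes below. This sanity check is an addition of ours, not part of the original notebook:

# Quick sanity check (not in the original notebook): if this small scene
# renders inline, the trame backend is working for the plots below.
pyvista.Sphere().plot()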
# Initialize DiffDRR for Patient 1
SUBSAMPLE = 6.410714285714286
specimen = DeepFluoroDataset(id_number=1)
height = int((1536 - 100) / SUBSAMPLE)
delx = 0.194 * SUBSAMPLE

drr = DRR(
    specimen.volume,
    specimen.spacing,
    sdr=specimen.focal_len / 2,
    height=height,
    delx=delx,
    x0=specimen.x0,
    y0=specimen.y0,
    reverse_x_axis=True,
    bone_attenuation_multiplier=2.5,
).to(device)

_, pose = specimen[69]
rotations = pose.get_rotation().to(device)
translations = pose.get_translation().to(device)
# Extract a mesh from the CT
ct = drr_to_mesh(drr, method="surface_nets", threshold=145, verbose=True)

# Make meshes for the camera and detector plane and
# convert the DRR into a texture for the detector plane
camera, detector, texture, principal_ray = img_to_mesh(
    drr, rotations, translations, "matrix"
)

# Compute the locations of 3D fiducials and projected 2D fiducials
fiducials_3d, fiducials_2d = fiducials_to_mesh(
    specimen,
    rotations,
    translations,
    detector=detector,
    parameterization="matrix",
)

# Draw lines from the camera to the 2D fiducials
lines = lines_to_mesh(camera, fiducials_2d)
Performing Labeled Surface Extraction: 100% [00:01<00:00]
Finding and Labeling Connected Regions: 100% [00:00<00:00]
Smoothing Mesh using Taubin Smoothing: 100% [00:04<00:00]
Filling Holes: 100% [00:00<00:00]
Cleaning: 100% [00:00<00:00]
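Surface extraction takes a few seconds, so caching the resulting mesh can be convenient. This snippet is an addition, not part of the original notebook:

# Optional caching (not in the original notebook): save the extracted
# surface so it can be reloaded without rerunning surface extraction.
ct.save("ct_mesh.vtk")
# ct = pyvista.read("ct_mesh.vtk")  # reload in a later session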
Rendering a single X-ray in an interactive window
Exporting a notebook to HTML does not automatically capture PyVista plots, so the interactive plot has been manually saved to HTML and embedded below.
plotter = pyvista.Plotter()

plotter.add_mesh(ct)
plotter.add_mesh(camera, show_edges=True)
plotter.add_mesh(detector, texture=texture)
plotter.add_mesh(principal_ray, color="red")
plotter.add_mesh(
    fiducials_3d,
    color="blueviolet",
    point_size=7.5,
    render_points_as_spheres=True,
)
plotter.add_mesh(
    fiducials_2d,
    color="lime",
    point_size=5,
    render_points_as_spheres=True,
)
for line in lines:
    plotter.add_mesh(line, color="lime")

plotter.add_axes()
plotter.add_bounding_box()

# plotter.show()  # If running Jupyter locally
# plotter.show(jupyter_backend="server")  # If running Jupyter remotely
plotter.export_html("render.html")
from IPython.display import IFrame
"render.html", height=500, width=749) IFrame(
Rendering multiple X-rays in a static window
# Initialize the plot with the CT and 3D fiducials (shared across all plots)
plotter = pyvista.Plotter()

plotter.add_mesh(ct)
plotter.add_mesh(
    fiducials_3d,
    color="blueviolet",
    point_size=7.5,
    render_points_as_spheres=True,
)

# Render a subset of the X-rays
for idx, color in zip([0, 2, 69, 100], ["#1b9e77", "#d95f02", "#7570b3", "#e7298a"]):
    _, pose = specimen[idx]
    rotations = pose.get_rotation().to(device)
    translations = pose.get_translation().to(device)

    camera, detector, texture, _ = img_to_mesh(
        drr, rotations, translations, parameterization="matrix"
    )
    _, fiducials_2d = fiducials_to_mesh(
        specimen, rotations, translations, detector=detector, parameterization="matrix"
    )
    lines = lines_to_mesh(camera, fiducials_2d)

    plotter.add_mesh(camera, show_edges=True, line_width=3)
    plotter.add_mesh(detector, texture=texture)
    plotter.add_mesh(
        fiducials_2d,
        color=color,
        point_size=5,
        render_points_as_spheres=True,
    )
    for line in lines:
        plotter.add_mesh(line, color=color)

plotter.add_axes()
plotter.add_bounding_box()

plotter.show(jupyter_backend="static")
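The composite scene can also be exported to interactive HTML like the single X-ray example above; note that export_html must be called before show(), which closes the plotter by default. This is an addition, not part of the original notebook:

# Assumption (not in the original notebook): call this before plotter.show()
# above, since show() closes the plotter by default.
plotter.export_html("multiview.html")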