import numpy as np
import torch
from diffdrr.drr import DRR
from diffdrr.data import load_example_ct
from diffdrr.visualization import plot_drr
# Timing versus DRR size
# Read in the volume
volume, spacing = load_example_ct()

# Get parameters for the detector: place the isocenter at the volume's center
# (half of shape * spacing, in world units along each axis)
bx, by, bz = np.array(volume.shape) * np.array(spacing) / 2
detector_kwargs = {
    "sdr": 0.1,
    "theta": np.pi,
    "phi": 0,
    "gamma": np.pi / 2,
    "bx": bx,
    "by": by,
    "bz": bz,
}

# Benchmark a 100x100 detector; fall back to CPU when no GPU is available
height = 100
drr = DRR(volume, spacing, height=height, delx=4.0).to("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): the timed call itself (e.g. `%timeit drr(**detector_kwargs)`)
# appears to have been dropped by the notebook export — confirm and restore.
del drr
# 8.73 ms ± 134 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)
# Benchmark a 200x200 detector
height = 200
drr = DRR(volume, spacing, height=height, delx=4.0).to("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): the timed call (e.g. `%timeit drr(**detector_kwargs)`) is
# missing from this export — confirm and restore.
del drr
# 28.7 ms ± 97.6 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
# Benchmark a 300x300 detector
height = 300
drr = DRR(volume, spacing, height=height, delx=4.0).to("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): the timed call (e.g. `%timeit drr(**detector_kwargs)`) is
# missing from this export — confirm and restore.
del drr
# 62.1 ms ± 81.2 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
# Benchmark a 400x400 detector
height = 400
drr = DRR(volume, spacing, height=height, delx=4.0).to("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): the timed call (e.g. `%timeit drr(**detector_kwargs)`) is
# missing from this export — confirm and restore.
del drr
# 109 ms ± 78.6 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
# Benchmark a 500x500 detector — this size exhausted GPU memory on the
# original hardware (see the OOM note below)
height = 500
drr = DRR(volume, spacing, height=height, delx=4.0).to("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): the timed call (e.g. `%timeit drr(**detector_kwargs)`) is
# missing from this export — confirm and restore.
del drr
# OutOfMemoryError: CUDA out of memory. Tried to allocate 1.22 GiB (GPU 0; 10.76 GiB total capacity;
# 4.39 GiB already allocated; 1.05 GiB free; 7.13 GiB reserved in total by PyTorch)
# If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation.
# See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF