This tutorial processes an example from our cine MRI dataset.

Several assumptions are made regarding the acquisition protocol.

  • Cartesian cine dataset.

  • GRAPPA acceleration expected.

  • Phase Partial Fourier allowed.

This data is collected using cardiac triggering, which means that different lines of k-space may have different numbers of time points (cardiac phases).

The processing pipeline:

  • Takes in raw k-space data (from a .dat file).

  • Fills in k-space using GRAPPA.

  • Completes the partial Fourier reconstruction.

  • Resizes the data so all images are the same shape (in x and y).

We then take this ‘truth’ data and perform synthetic undersampling (gridded using a radial trajectory) to form multi-coil, complex 2D+t paired training data for deep de-aliasing.

In this tutorial the .dat file has already been read and the data stored as an HDF5 array; the code to read .dat files is included but commented out.

import dataclasses
import functools
import pathlib

import h5py
import numpy as np
import tensorflow as tf
import tensorflow_mri as tfmri

import matplotlib.pyplot as plt
import matplotlib.animation

import operator

# This is necessary if reading from .dat files
from tensorflow_mri.python.io import twix_io

We need to read the raw data from the scanner (on Siemens scanners this is stored as a .dat file).

In this tutorial I have run this on a .dat file and saved the outputs in an HDF5 file to ensure confidentiality.

#This cell contains functions that are only necessary if you are reading .dat files

def _get_loop_counters_as_tuple(header, dimensions=None, ignore_segments=True):
  if ignore_segments:
    header.loop_counters.seg = 0

  if dimensions is not None:
    # If a list of dimensions was specified, get only the requested dimensions,
    # after checking that none of the ignored dimensions are non-zero.
    for name, value in dataclasses.asdict(header.loop_counters).items():
      if value != 0 and name not in dimensions:
        raise ValueError(
            f"Unexpected non-zero value for loop counter `{name}`: "
            f"{header.loop_counters}")
    return operator.attrgetter(*dimensions)(header.loop_counters)

  # Return all loop counters (reversed).
  return tuple(reversed(dataclasses.astuple(header.loop_counters)))

def _get_kspace_shape(meas, dimensions=None, ignore_segments=True):
  first_header = None
  kspace_shape = _get_loop_counters_as_tuple(
      meas.scans[0].header, dimensions=dimensions,
      ignore_segments=ignore_segments)

  for scan in meas.scans[1:]:
    if scan.header.eval_info_mask.ACQEND:
      break
    if _should_skip_scan(scan.header):
      continue
    if first_header is None:
      first_header = scan.header

    kspace_shape = np.maximum(
        kspace_shape, _get_loop_counters_as_tuple(
            scan.header, dimensions=dimensions,
            ignore_segments=ignore_segments))

  kspace_shape += 1
  kspace_shape = tuple(kspace_shape)
  kspace_shape += (first_header.used_channels, first_header.samples_in_scan)
  return kspace_shape

def _should_skip_scan(header):
  # Skip scans with these flags.
  skip_flags = ['SYNCDATA', 'RETRO_DUMMYSCAN', 'NOISEADJSCAN']
  return any(getattr(header.eval_info_mask, flag) for flag in skip_flags)

def _dataclass_diff_summary(a, b):
  # Check that object is a dataclass instance.
  # https://docs.python.org/3/library/dataclasses.html#dataclasses.is_dataclass
  if not dataclasses.is_dataclass(a) or isinstance(a, type):
    raise TypeError(f"Input must be a dataclass instance, but got: {a}")

  if a.__class__ != b.__class__:
    raise TypeError(
        f"Classes must have the same type: {a.__class__} != {b.__class__}")

  # Find differences.
  # diff = {}
  summary = []
  for field in dataclasses.fields(a):
    value_a = getattr(a, field.name)
    value_b = getattr(b, field.name)
    if value_a != value_b:
      if dataclasses.is_dataclass(field.type):
        # Recurse.
        temp = _dataclass_diff_summary(value_a, value_b)
        # Indent.
        temp = '\n'.join(map(lambda x: '  ' + x, temp.splitlines()))
        summary.append(f"{field.name}:\n{temp}")
      else:
        summary.append(f"{field.name}: {value_a} != {value_b}")

  # Create string.
  return '\n'.join(summary)

def get_kspace_array_from_twix_measurement(meas,
                                           output_dimensions=None,
                                           ignore_segments=True,
                                           create_mask=False,
                                           keep_scan_fn=None):

  kspace_shape = _get_kspace_shape(meas, dimensions=output_dimensions,
                                   ignore_segments=ignore_segments)
  kspace = np.zeros(kspace_shape, dtype=np.complex64)
  if create_mask:
    mask = np.zeros(kspace_shape[:-2], dtype=np.bool_)

  # We use this to keep track of what indices we've already filled.
  accu_counters = {}

  # Loop over the scans, skipping any that should not contribute to k-space.
  for scan in meas.scans:
    if scan.header.eval_info_mask.ACQEND:
      break
    if _should_skip_scan(scan.header):
      continue
    if keep_scan_fn is not None and not keep_scan_fn(scan.header):
      continue

    counters = _get_loop_counters_as_tuple(
          scan.header, dimensions=output_dimensions,
          ignore_segments=ignore_segments)

    if counters in accu_counters:
      raise ValueError(
          f"Duplicate index counters: {counters}. Do you need another dimension?\n\n"
          f"Current scan header:\n{scan.header}\n\n"
          f"Previous scan header:\n{accu_counters[counters]}\n\n"
          f"Diff (current, previous):\n{_dataclass_diff_summary(scan.header, accu_counters[counters])}")
    accu_counters[counters] = scan.header

    for channel_idx, channel in enumerate(scan.channels):
      kspace[counters + (channel_idx,)] = channel.data.numpy()
      if create_mask:
        mask[counters] = True

  if create_mask:
    return kspace, mask
  return kspace

def read_raw_data_file(input_file):
    # Read TWIX file.
    contents = tf.io.read_file(str(input_file))
    twix = tfmri.io.parse_twix(contents)

    meas = twix.measurements[-1]
    meas_prot = meas.protocol['Meas']

    PARTIAL_FOURIER_FACTOR = {
        0x01: 0.5,
        0x02: 5.0 / 8.0,
        0x04: 6.0 / 8.0,
        0x08: 7.0 / 8.0,
        0x10: 1.0
    }
    # The following are actually used in the computation.
    phase_pf = PARTIAL_FOURIER_FACTOR[meas_prot.MEAS.sKSpace.ucPhasePartialFourier.value]

    for scan in meas.scans:
        if _should_skip_scan(scan.header):
            continue
        kspace_centre_line = scan.header.kspace_centre_line_no
        break

    # Loop k-space data.
    OUTPUT_DIMENSIONS = ['slice', 'phase', 'line']
    kspace, kspace_mask = get_kspace_array_from_twix_measurement(
        meas, output_dimensions=OUTPUT_DIMENSIONS,
        ignore_segments=True, create_mask=True)
    # kspace: [slices, phases, lines, coils, columns]
    # kspace_mask: [slices, phases, lines]

    # Loop calibration data.
    def keep_scan_fn(header):
        flags = header.eval_info_mask
        return (flags.PATREFSCAN or flags.PATREFANDIMASCAN)
    calib, calib_mask = get_kspace_array_from_twix_measurement(
        meas, output_dimensions=OUTPUT_DIMENSIONS,
        ignore_segments=True, create_mask=True,
        keep_scan_fn=keep_scan_fn)
    # calib: [slices, phases, lines, coils, columns]
    # calib_mask: [slices, phases, lines]    


    return phase_pf, kspace_centre_line, kspace, kspace_mask, calib, calib_mask

The cell below is only needed if you are reading from a .dat file. In this tutorial the .dat file has already been read using these functions and the necessary outputs have been stored in an HDF5 file.

#This cell is only needed if reading from a .dat file

# input_file =pathlib.Path('/workspaces/Tutorials/DataFile.dat')
# input_file = pathlib.Path(input_file)
# phase_pf, kspace_centre_line, kspace, kspace_mask, calib, calib_mask = read_raw_data_file(input_file)  

# # write these results to h5 for the purposes of the tutorial
# f = h5py.File('/workspaces/Tutorials/exampleRawDataCine.hdf5', 'w')
# f.create_dataset('phase_pf', data = phase_pf)
# f.create_dataset('kspace_centre_line', data = kspace_centre_line)
# f.create_dataset('kspace', data = kspace)
# f.create_dataset('kspace_mask', data = kspace_mask)
# f.create_dataset('calib', data = calib)
# f.create_dataset('calib_mask', data = calib_mask)

# Use this cell if reading from the pre-processed h5 file

import gdown

url = 'https://drive.google.com/uc?id=1GAM91x54VsKmepImKkza0Lh_kUjQ-oOj'
output = '/tools/docs/tutorials/recon/exampleRawDataCine.hdf5.zip'
gdown.download(url, output, quiet=False)

import zipfile
with zipfile.ZipFile("/tools/docs/tutorials/recon/exampleRawDataCine.hdf5.zip","r") as zip_ref:
    zip_ref.extractall("/tools/docs/tutorials/recon/")

input_file = pathlib.Path('/tools/docs/tutorials/recon/exampleRawDataCine.hdf5')

f = h5py.File(input_file, 'r')
list(f.keys())

phase_pf            = f['phase_pf'][()]             # Partial Fourier factor in the phase direction
kspace_centre_line  = f['kspace_centre_line'][()]   # Line index representing the centre of k-space
kspace              = f['kspace']                   # k-space data in the format [slices, cardiac_phases, y, coils, x]
kspace_mask         = f['kspace_mask']              # Boolean mask indicating which lines of k-space were acquired
calib               = f['calib']                    # Calibration (ACS) lines from the GRAPPA acquisition
calib_mask          = f['calib_mask']               # Boolean mask indicating which lines of k-space the ACS lines occupy
kspace.shape
# [slices, cardiac_phases, y, coils, x]
# (1, 45, 190, 30, 544)
(1, 45, 190, 30, 544)

The cine data which we often use for training is from a Cartesian bSSFP sequence. It has GRAPPA x2 with 24 ACS lines, and partial Fourier in the phase direction of ~75%.

The data is acquired in a breath-hold and is segmented. This means that the data is collected over a number of RR intervals, with a different set of k-space lines collected in each RR interval.

However, variations in heart rate mean that some RR intervals are longer than others, so when the line data are combined to form multiple cardiac phases, some lines end up with more cardiac phases than others.
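
The cell below handles this by dropping incomplete cardiac phases at the end of the acquisition, or, if only a few lines are missing, by copying those lines from the previous phase. As a toy illustration of the thresholding rule used below (purely synthetic numbers, not part of the reconstruction):

# Synthetic mask: 1 slice, 3 cardiac phases, 10 phase-encode lines;
# the last phase only acquired 5 of the 10 expected lines.
toy_mask = np.ones((1, 3, 10), dtype=bool)
toy_mask[0, 2, 5:] = False

expected_lines = np.count_nonzero(toy_mask[0, 0])      # 10
acquired_lines = np.count_nonzero(toy_mask, axis=-1)   # [[10, 10, 5]]

threshold = 0.3
phases_to_keep = np.all(acquired_lines >= expected_lines * (1.0 - threshold), axis=0)
print(phases_to_keep)  # [ True  True False] -> the last phase would be dropped.
# With a larger threshold (e.g. 0.6) it would instead be kept and its missing
# lines filled with data from the previous cardiac phase.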

# This function is needed to deal with the varying number of cardiac phases for each line of k-space

def _fill_missing_lines_in_last_phases(kspace, mask, threshold):
  # The last few phases may have some empty lines. If the proportion of missing
  # lines is more than `threshold`, just drop the entire phase. Otherwise, fill
  # the missing lines with data from the previous phase.
  expected_lines = np.count_nonzero(mask[0, 0])
  expected_lines_mask = mask[0, 0]
  # Count number of acquired lines for each slice and phase.
  acquired_lines = np.count_nonzero(mask, axis=-1)
  # Find which phases are to be kept (1D boolean array, true if phase has
  # enough lines).
  phases_to_keep = np.all(
      acquired_lines >= expected_lines * (1.0 - threshold), axis=0)
  # Find the number of phases as the first False entry in the phases_to_keep
  # array.
  phases_to_discard_indices = np.nonzero(~phases_to_keep)[0]
  if len(phases_to_discard_indices) == 0:
    # There are no incomplete phases.
    first_discarded_phase = None
    num_phases = len(phases_to_keep)
  else:
    first_discarded_phase = phases_to_discard_indices[0]
    num_phases = first_discarded_phase
  kspace = kspace[:, :num_phases, ...]
  mask = mask[:, :num_phases, ...]
  # Now fill the missing lines with data from the previous phase.
  for slice_index, phase_index in np.ndindex(mask.shape[:2]):
    # Lines mask for this slice and phase.
    lines_mask = mask[slice_index, phase_index]

    # Fill missing lines with data from the previous phase.
    missing_lines_mask = np.logical_and(
        expected_lines_mask, np.logical_not(lines_mask))
    mask[slice_index, phase_index][missing_lines_mask] = True
    kspace[slice_index, phase_index][missing_lines_mask] = \
        kspace[slice_index, phase_index - 1][missing_lines_mask]

  return kspace, mask
THRESHOLD_MISSING_LINES_KSPACE = 0.3
THRESHOLD_MISSING_LINES_CALIB = 0.1
 
# Fill in missing lines.
kspace, kspace_mask = _fill_missing_lines_in_last_phases(
    kspace, kspace_mask, threshold=THRESHOLD_MISSING_LINES_KSPACE)

calib, calib_mask = _fill_missing_lines_in_last_phases(
    calib, calib_mask, threshold=THRESHOLD_MISSING_LINES_CALIB)

# Keep the lowest number of phases between `kspace` and `calib`.
phases = np.minimum(kspace.shape[1], calib.shape[1]) 
kspace = kspace[:, :phases, ...]
kspace_mask = kspace_mask[:, :phases, ...]
calib = calib[:, :phases, ...]
calib_mask = calib_mask[:, :phases, ...]
max_line = kspace_mask.shape[-1]

if phase_pf == 1.0:
  pe_lines = max_line
  assert kspace_centre_line == (max_line + 1) // 2
else:
  pe_lines = kspace_centre_line * 2  # Assuming an even number of lines.
  phase_pf = float(max_line / pe_lines)
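
As a quick sanity check of this bookkeeping (a worked example with a hypothetical centre line, not values read from the header): with 190 acquired phase-encode lines and the k-space centre at line 128, the full matrix would have 256 lines, giving a phase partial Fourier factor of about 0.74.

# Worked example with hypothetical numbers, for illustration only.
example_max_line = 190
example_centre_line = 128
example_pe_lines = example_centre_line * 2            # 256, assuming an even matrix
example_phase_pf = example_max_line / example_pe_lines
print(example_pe_lines, round(example_phase_pf, 3))    # 256 0.742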

The raw data is collected with both GRAPPA and partial Fourier acceleration. To use this data as our ‘truth’ we want fully sampled data, so we need to perform both GRAPPA and partial Fourier reconstruction to fill in the missing lines of k-space.

# This cell contains all of the functionality to convert the k-space (with PF and GRAPPA) to a fully sampled dataset 

def _reconstruct_slice(kspace, kspace_mask, calib, calib_mask, phase_pf=1.0):

  # Create masked kspace and calib.
  kspace_lines_mask = kspace_mask[0]
  masked_kspace = kspace[:, kspace_lines_mask]
  calib_lines_mask = calib_mask[0]
  masked_calib = calib[:, calib_lines_mask]

  # Transpose.
  masked_kspace = np.transpose(masked_kspace, (0, 2, 1, 3))  # [phases, coils, lines, columns]
  masked_calib = np.transpose(masked_calib, (0, 2, 1, 3))  # [phases, coils, lines, columns]

  # Finally, make 2D mask (lines, columns).
  mask = np.transpose(np.tile(kspace_mask[0, ...], (masked_kspace.shape[-1], 1)))

  # Although tfmri.recon.grappa supports batches, it is quite likely to run out
  # of memory. Therefore we perform GRAPPA one phase at a time.
  num_coils = masked_kspace.shape[-3]
  def _single_phase_grappa(inputs):
    single_phase_kspace, single_phase_calib = inputs

    # Example shapes and dtypes for a single phase:
    #   single_phase_kspace: (30, 117, 544), complex64
    #   mask:                (190, 544), bool
    #   single_phase_calib:  (30, 44, 544), complex64

    return tfmri.recon.grappa(single_phase_kspace, mask, single_phase_calib,
                              return_kspace=True)
  kspace = tf.map_fn(_single_phase_grappa, [masked_kspace, masked_calib],
                     fn_output_signature=tf.TensorSpec(
                         shape=tf.TensorShape([num_coils]).concatenate(mask.shape),
                         dtype=tf.complex64))

  if phase_pf != 1.0:
    # Fill the partial Fourier lines, one phase at a time, using homodyne detection.
    def _single_phase_pf(inputs):
      return tfmri.recon.partial_fourier(
          inputs, [phase_pf, 1.0], method='homodyne',
          preserve_phase=True, return_kspace=True)

    kspace = tf.map_fn(_single_phase_pf, kspace)

  return kspace

Here we call the function that performs the GRAPPA and partial Fourier reconstruction for our dataset.

# Reconstruct all slices and phases.
fully_sampled_kspace = np.vectorize(functools.partial(_reconstruct_slice,
                                        phase_pf=phase_pf),
                      otypes=['complex64'],
                      signature='(phs, lin, cha, col),(phs, lin),(phs, lin, cha, col),(phs, lin)->(phs, cha, out_lin, col)')(
    kspace, kspace_mask, calib, calib_mask)

print('Resulting fully-sampled fully_sampled_kspace.shape:', fully_sampled_kspace.shape)

# and now save fully_sampled_kspace as fully sampled dataset which can be used for training
# output_file = input_file.with_suffix('.h5')

Now we have our fully sampled k-space; however, each subject may have a different image/k-space size. So we resize (crop or pad) the data so that all training datasets are the same size.
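
Before resizing the real data, here is a toy illustration of the crop-or-pad behaviour (this assumes, as in its use below, that tfmri.resize_with_crop_or_pad acts on the trailing spatial dimensions and crops/pads symmetrically):

# Toy example: a [1, 1, 300, 200] complex image resized to [256, 256] is
# cropped along the first spatial axis and zero-padded along the second,
# while the leading (phase and coil) dimensions are left untouched.
toy_image = tf.ones([1, 1, 300, 200], dtype=tf.complex64)
print(tfmri.resize_with_crop_or_pad(toy_image, [256, 256]).shape)  # (1, 1, 256, 256)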

# In training we often want to make all the data the same size

new_base_resolution = 256
image_shape = tf.constant([new_base_resolution, new_base_resolution], dtype=tf.int32)

fully_sampled_kspace = tf.cast(np.squeeze(fully_sampled_kspace), dtype=tf.complex64)

# Crop to fixed image size.
ground_truth_image = tfmri.signal.ifft(fully_sampled_kspace, axes=[-2, -1], shift=True)
ground_truth_image = tfmri.resize_with_crop_or_pad(ground_truth_image, image_shape)

# This is the FINAL 'truth' image:
# complex multi-coil image data
print(ground_truth_image.shape)
#(21, 30, 256, 256)

# Get number of phases in this image.
num_phases = ground_truth_image.shape[0]
(21, 30, 256, 256)
# And let's visualise
plt.rcParams["animation.html"] = "jshtml"
plt.rcParams['figure.dpi'] = 150
plt.ioff()
fig, ax = plt.subplots()

cCoil = 0  # Coil element to display.
def animate(t):
    plt.imshow(np.squeeze(tf.math.abs(ground_truth_image[t, cCoil, :, :])), cmap='gray')
    plt.title('Ground truth images (shown for one coil only)')

matplotlib.animation.FuncAnimation(fig, animate, frames=num_phases)
(Animation output: ground truth cine images, shown for one coil.)

At this point we have the ground truth k-space and image data

However, often we want to create paired undersampled data when performing ML.

In this example we will create radially undersampled cine data (multi-coil, complex). Here we use the parameters from the cmplex_multicoilUnet tutorial (radial).

# There is more information about calculating trajectories and gridding in the nufftTutorial
radial_views = 13

# Compute trajectory.
trajectory = tfmri.sampling.radial_trajectory(base_resolution=new_base_resolution,
                                                views=radial_views,
                                                phases=phases,
                                                ordering='sorted_half')
# Compute density.
density = tfmri.sampling.estimate_radial_density(trajectory)

# Flatten trajectory and density.
trajectory = tfmri.sampling.flatten_trajectory(trajectory)
density = tfmri.sampling.flatten_density(density)

Final_image = tf.einsum('...tchw->...cthw', ground_truth_image)  # Reorder to [coils, phases, y, x]

print(Final_image.shape)
print(trajectory.shape)
#(30, 21, 256, 256)
#(21, 6656, 2)

# Final_image should be [coils, time, y, x]
# trajectory should be [time, points_per_view * views, 2]

radial_kspace = tfmri.signal.nufft(Final_image, trajectory,
                              transform_type='type_2',
                              fft_direction='forward')
radial_kspace /= tf.math.sqrt(
      tf.cast(tf.math.reduce_prod(image_shape), tf.complex64))
(30, 21, 256, 256)
(21, 6656, 2)
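
The number of samples per cardiac phase is consistent with 13 views of 2 × base_resolution points per spoke (a minimal check, assuming the tfmri radial trajectory samples 2 × base_resolution points along each spoke):

# 13 views x (2 * 256) samples per view = 6656 points per cardiac phase,
# matching the flattened trajectory shape (21, 6656, 2) printed above.
print(radial_views * 2 * new_base_resolution)  # 6656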

Now that we have created our undersampled multi-coil, complex radial k-space data, we need to grid it to obtain the paired images for training.

# Now lets go back into image space
# Apply density compensation.
radial_kspace /= tf.cast(density, tf.complex64)

# and do 2D+t NUFFT to create the image from the undersampled radial kspace 
zfill_image = tfmri.signal.nufft(radial_kspace, trajectory,
                           grid_shape=image_shape, # Note that this is still [256, 256]
                           transform_type='type_1',
                           fft_direction='backward')

print('multicoil_time_recon.shape: ', zfill_image.shape)
# (30, 21, 256, 256)

def normalise(image):
    scale = tf.math.reduce_max(tf.math.abs(image))
    image = image / tf.cast(scale, image.dtype)
    return image
zfill_image = normalise(zfill_image)

# For use in training we will store as stacked real/imaginary data rather than as complex data
def convert_to_real(complex_tensor):
  # Get the real and imaginary parts
  real_part = tf.math.real(complex_tensor)
  imag_part = tf.math.imag(complex_tensor)

  stacked = tf.concat([real_part, imag_part], axis=0)
  return stacked
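
# Quick illustration of convert_to_real (toy shapes, for illustration only):
# stacking the real and imaginary parts along axis 0 doubles that (coil)
# dimension, e.g. a complex [2, 3, 4] tensor becomes a real [4, 3, 4] tensor.
_demo = tf.complex(tf.ones([2, 3, 4]), tf.zeros([2, 3, 4]))
print(convert_to_real(_demo).shape)  # (4, 3, 4)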

# This is only used here for visualisation, but if you were training a magnitude-only UNet you would also need to combine the coil elements like this
zfill_cc_for_vis = np.abs(tfmri.coils.combine_coils(zfill_image, coil_axis=-4))

# This is the format in which the final image is passed to the UNet
zfill_image = convert_to_real(zfill_image)  # Use for multi-coil input

# The target image is often a coil-combined, magnitude image (even if the input is multi-coil, complex), so the following lines create the final 'ground truth' target used by the model
target_coil_combined_image = tfmri.coils.combine_coils(Final_image, coil_axis=-4)
target_coil_combined_image = np.abs(normalise(target_coil_combined_image))

# Note that both the network input and the target are normalised (to unit maximum magnitude) before being used by the network

# And let's visualise
plt.rcParams["animation.html"] = "jshtml"
plt.rcParams['figure.dpi'] = 150
plt.ioff()
fig, ax = plt.subplots()

# Show the target and the zero-filled radial input together in each frame.
InputOutputPair = np.concatenate([target_coil_combined_image, zfill_cc_for_vis], axis=1)

def animate2(t):
    plt.imshow(InputOutputPair[t, :, :], cmap='gray')
    plt.title('Image In and Image Out (shown for coil-combined images)')

matplotlib.animation.FuncAnimation(fig, animate2, frames=num_phases)
multicoil_time_recon.shape:  (30, 21, 256, 256)
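
Finally, the paired arrays created above (the stacked real/imaginary zero-filled input and the coil-combined magnitude target) can be written out for training. A minimal sketch, using a hypothetical output file name:

# Hypothetical output path, shown for illustration only.
with h5py.File('/tools/docs/tutorials/recon/pairedTrainingExampleCine.hdf5', 'w') as out:
    out.create_dataset('input_real_imag', data=zfill_image.numpy())
    out.create_dataset('target_magnitude', data=target_coil_combined_image)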