diff --git a/qsirecon/interfaces/__init__.py b/qsirecon/interfaces/__init__.py index 2bea6997..50a466bb 100644 --- a/qsirecon/interfaces/__init__.py +++ b/qsirecon/interfaces/__init__.py @@ -3,4 +3,3 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: from .bids import DerivativesDataSink from .images import ConformDwi, ValidateImage -from .reports import AboutSummary, SubjectSummary diff --git a/qsirecon/interfaces/bids.py b/qsirecon/interfaces/bids.py index a4ed9f46..be638ebf 100644 --- a/qsirecon/interfaces/bids.py +++ b/qsirecon/interfaces/bids.py @@ -118,7 +118,7 @@ class DerivativesDataSink(SimpleInterface): >>> from pathlib import Path >>> import tempfile - >>> from qsirecon.utils.bids import collect_data + >>> from qsiprep.utils.bids import collect_data >>> tmpdir = Path(tempfile.mkdtemp()) >>> tmpfile = tmpdir / 'a_temp_file.nii.gz' >>> tmpfile.open('w').close() # "touch" the file diff --git a/qsirecon/interfaces/dsi_studio.py b/qsirecon/interfaces/dsi_studio.py index 0568f8e7..f4c2ec28 100644 --- a/qsirecon/interfaces/dsi_studio.py +++ b/qsirecon/interfaces/dsi_studio.py @@ -5,7 +5,6 @@ from copy import deepcopy from glob import glob from pathlib import Path -from subprocess import PIPE, Popen import nibabel as nb import nipype.interfaces.utility as niu @@ -122,48 +121,6 @@ def _list_outputs(self): return outputs -class _DSIStudioQCOutputSpec(TraitedSpec): - qc_txt = File(exists=True, desc="Text file with QC measures") - - -class DSIStudioQC(SimpleInterface): - output_spec = _DSIStudioQCOutputSpec - - def _run_interface(self, runtime): - # DSI Studio (0.12.2) action=qc has two modes, depending on wether the - # input is a file (src.gz|nii.gz)|(fib.gz) or a directory. For - # directories, the action will be run on a number of detected files - # (which *cannot* be symbolic links for some reason). 
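# A minimal sketch, outside nipype, of the call pattern this comment
# describes and the removed interface below implements: point
# "dsi_studio --action=qc" at a real file (not a symlink) in the working
# directory and collect the qc.txt it writes there.
import os.path as op
import subprocess

def dsi_studio_qc(src_file, workdir):
    proc = subprocess.run(
        ["dsi_studio", "--action=qc", "--source=" + src_file],
        cwd=workdir, capture_output=True, text=True,
    )
    if proc.stderr:
        print(proc.stderr)
    return op.join(workdir, "qc.txt")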
- src_file = fname_presuffix(self.inputs.src_file, newpath=runtime.cwd) - cmd = ["dsi_studio", "--action=qc", "--source=" + src_file] - proc = Popen(cmd, cwd=runtime.cwd, stdout=PIPE, stderr=PIPE) - out, err = proc.communicate() - if out: - LOGGER.info(out.decode()) - if err: - LOGGER.critical(err.decode()) - self._results["qc_txt"] = op.join(runtime.cwd, "qc.txt") - return runtime - - -class _DSIStudioSrcQCInputSpec(DSIStudioCommandLineInputSpec): - src_file = File(exists=True, copyfile=False, argstr="%s", desc="DSI Studio src[.gz] file") - - -class DSIStudioSrcQC(DSIStudioQC): - input_spec = _DSIStudioSrcQCInputSpec - ext = ".src.gz" - - -class _DSIStudioFibQCInputSpec(DSIStudioCommandLineInputSpec): - src_file = File(exists=True, copyfile=False, argstr="%s", desc="DSI Studio fib[.gz] file") - - -class DSIStudioFibQC(DSIStudioQC): - input_spec = _DSIStudioFibQCInputSpec - ext = ".fib.gz" - - # Step 2 reonstruct ODFs class DSIStudioReconstructionInputSpec(DSIStudioCommandLineInputSpec): input_src_file = File( @@ -683,57 +640,6 @@ def _run_interface(self, runtime): return runtime -class _DSIStudioQCMergeInputSpec(BaseInterfaceInputSpec): - src_qc = File(exists=True, mandatory=True) - fib_qc = File(exists=True, mandatory=True) - - -class _DSIStudioQCMergeOutputSpec(TraitedSpec): - qc_file = File(exists=True) - - -class DSIStudioMergeQC(SimpleInterface): - input_spec = _DSIStudioQCMergeInputSpec - output_spec = _DSIStudioQCMergeOutputSpec - - def _run_interface(self, runtime): - output_csv = runtime.cwd + "/merged_qc.csv" - src_qc = load_src_qc_file(self.inputs.src_qc) - fib_qc = load_fib_qc_file(self.inputs.fib_qc) - src_qc.update(fib_qc) - qc_df = pd.DataFrame(src_qc) - qc_df.to_csv(output_csv, index=False) - self._results["qc_file"] = output_csv - return runtime - - -class _DSIStudioBTableInputSpec(BaseInterfaceInputSpec): - bval_file = File(exists=True, mandatory=True) - bvec_file = File(exists=True, mandatory=True) - bvec_convention = traits.Enum( - ("DIPY", "FSL"), - usedefault=True, - desc="Convention used for bvecs. 
FSL assumes LAS+ no matter image orientation", - ) - - -class _DSIStudioBTableOutputSpec(TraitedSpec): - btable_file = File(exists=True) - - -class DSIStudioBTable(SimpleInterface): - input_spec = _DSIStudioBTableInputSpec - output_spec = _DSIStudioBTableOutputSpec - - def _run_interface(self, runtime): - if self.inputs.bvec_convention != "DIPY": - raise NotImplementedError("Only DIPY Bvecs supported for now") - btab_file = op.join(runtime.cwd, "btable.txt") - btable_from_bvals_bvecs(self.inputs.bval_file, self.inputs.bvec_file, btab_file) - self._results["btable_file"] = btab_file - return runtime - - class _AutoTrackInputSpec(DSIStudioCommandLineInputSpec): fib_file = File(exists=True, mandatory=True, copyfile=False, argstr="--source=%s") map_file = File(exists=True, copyfile=False) @@ -885,53 +791,6 @@ def stat_txt_to_df(stat_txt_file, bundle_name): return bundle_stats -def load_src_qc_file(fname, prefix=""): - with open(fname, "r") as qc_file: - qc_data = qc_file.readlines() - data = qc_data[1] - parts = data.strip().split("\t") - dwi_contrast = np.nan - ndc_masked = np.nan - if len(parts) == 7: - _, dims, voxel_size, dirs, max_b, ndc, bad_slices = parts - elif len(parts) == 8: - _, dims, voxel_size, dirs, max_b, _, ndc, bad_slices = parts - elif len(parts) == 9: - _, dims, voxel_size, dirs, max_b, dwi_contrast, ndc, ndc_masked, bad_slices = parts - else: - raise Exception("Unknown QC File format") - - voxelsx, voxelsy, voxelsz = map(float, voxel_size.strip().split()) - dimx, dimy, dimz = map(float, dims.strip().split()) - n_dirs = float(dirs.split("/")[1]) - max_b = float(max_b) - dwi_corr = float(ndc) - n_bad_slices = float(bad_slices) - ndc_masked = float(ndc_masked) - dwi_contrast = float(dwi_contrast) - data = { - prefix + "dimension_x": [dimx], - prefix + "dimension_y": [dimy], - prefix + "dimension_z": [dimz], - prefix + "voxel_size_x": [voxelsx], - prefix + "voxel_size_y": [voxelsy], - prefix + "voxel_size_z": [voxelsz], - prefix + "max_b": [max_b], - prefix + "neighbor_corr": [dwi_corr], - prefix + "masked_neighbor_corr": [ndc_masked], - prefix + "dwi_contrast": [dwi_contrast], - prefix + "num_bad_slices": [n_bad_slices], - prefix + "num_directions": [n_dirs], - } - return data - - -def load_fib_qc_file(fname): - with open(fname, "r") as fibqc_f: - lines = [line.strip().split() for line in fibqc_f] - return {"coherence_index": [float(lines[1][-1])]} - - def btable_from_bvals_bvecs(bval_file, bvec_file, output_file): """Create a b-table from DIPY-style bvals/bvecs. 
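# Hedged sketch of the b-table text format this helper produces for
# DSI Studio: one "b bx by bz" row per volume, built from DIPY/FSL-style
# bvals/bvecs. The file names here are hypothetical.
import numpy as np

bvals = np.loadtxt("dwi.bval").ravel()
bvecs = np.loadtxt("dwi.bvec")
if bvecs.shape[0] == 3:      # FSL layout is 3 rows x N volumes
    bvecs = bvecs.T
np.savetxt("btable.txt", np.column_stack([bvals, bvecs]), fmt="%.6f")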
diff --git a/qsirecon/interfaces/gradients.py b/qsirecon/interfaces/gradients.py index 0b785dd3..2be923b2 100644 --- a/qsirecon/interfaces/gradients.py +++ b/qsirecon/interfaces/gradients.py @@ -1,7 +1,6 @@ """Handle merging and spliting of DSI files.""" import logging -import os import nibabel as nb import numpy as np @@ -9,18 +8,14 @@ from nipype.interfaces.base import ( BaseInterfaceInputSpec, File, - InputMultiObject, - OutputMultiObject, SimpleInterface, TraitedSpec, isdefined, traits, ) from nipype.utils.filemanip import fname_presuffix -from sklearn.metrics import r2_score LOGGER = logging.getLogger("nipype.interface") -tensor_index = {"xx": (0, 0), "xy": (0, 1), "xz": (0, 2), "yy": (1, 1), "yz": (1, 2), "zz": (2, 2)} class RemoveDuplicatesInputSpec(BaseInterfaceInputSpec): @@ -111,103 +106,6 @@ def is_unique_sample(vec): return runtime -class SliceQCInputSpec(BaseInterfaceInputSpec): - uncorrected_dwi_files = InputMultiObject(File(exists=True), desc="uncorrected dwi files") - ideal_image_files = InputMultiObject(File(exists=True), desc="model-based images") - mask_image = File(exists=True, desc="brain mask") - impute_slice_threshold = traits.Float(0.0, desc="threshold for using imputed data in a slice") - min_slice_size_percentile = traits.CFloat( - 10.0, - usedefault=True, - desc="slices bigger than " "this percentile are candidates for imputation.", - ) - - -class SliceQCOutputSpec(TraitedSpec): - imputed_images = OutputMultiObject(File(exists=True), desc="dwi files with imputed slices") - slice_stats = File(exists=True, desc="npy file with the slice-by-TR error matrix") - - -class SliceQC(SimpleInterface): - input_spec = SliceQCInputSpec - output_spec = SliceQCOutputSpec - - def _run_interface(self, runtime): - ideal_image_files = self.inputs.ideal_image_files - uncorrected_image_files = self.inputs.uncorrected_dwi_files - - self._results["imputed_images"] = self.inputs.uncorrected_dwi_files - output_npz = os.path.join(runtime.cwd, "slice_stats.npz") - mask_img = nb.load(self.inputs.mask_image) - mask = mask_img.get_fdata() > 0 - masked_slices = (mask * np.arange(mask_img.shape[2])[np.newaxis, np.newaxis, :]).astype( - int - ) - slice_nums, slice_counts = np.unique(masked_slices[mask], return_counts=True) - min_size = np.percentile(slice_counts, self.inputs.min_slice_size_percentile) - too_small = slice_nums[slice_counts < min_size] - for small_slice in too_small: - masked_slices[masked_slices == small_slice] = 0 - valid_slices = slice_nums[slice_counts > min_size] - valid_slices = valid_slices[valid_slices > 0] - slice_scores = [] - wb_xcorrs = [] - wb_r2s = [] - # If impute slice threshold==0 or hmc_model=="none" - if isdefined(ideal_image_files): - for ideal_image, input_image in zip(ideal_image_files, uncorrected_image_files): - slices, wb_xcorr, wb_r2 = _score_slices( - ideal_image, input_image, masked_slices, valid_slices - ) - slice_scores.append(slices) - wb_xcorrs.append(wb_xcorr) - wb_r2s.append(wb_r2) - else: - num_trs = len(uncorrected_image_files) - num_slices = mask_img.shape[2] - wb_xcorrs = np.zeros(num_trs) - wb_r2s = np.zeros(num_trs) - slice_scores = np.zeros((num_slices, num_trs)) - - np.savez( - output_npz, - slice_scores=slice_scores, - wb_r2s=np.array(wb_r2s), - wb_xcorrs=np.array(wb_xcorrs), - valid_slices=valid_slices, - masked_slices=masked_slices, - slice_nums=slice_nums, - slice_counts=slice_counts, - ) - self._results["slice_stats"] = output_npz - return runtime - - -def _score_slices(ideal_image, input_image, masked_slices, valid_slices): - 
"""Compute similarity metrics on a pair of images.""" - - def crosscor(vec1, vec2): - v1bar = vec1 - vec1.mean() - v2bar = vec2 - vec2.mean() - return np.inner(v1bar, v2bar) ** 2 / (np.inner(v1bar, v1bar) * np.inner(v2bar, v2bar)) - - slice_scores = np.zeros(valid_slices.shape) - ideal_data = nb.load(ideal_image).get_fdata() - input_data = nb.load(input_image).get_fdata() - for nslice, slicenum in enumerate(valid_slices): - slice_mask = masked_slices == slicenum - ideal_slice = ideal_data[slice_mask] - data_slice = input_data[slice_mask] - slice_scores[nslice] = crosscor(ideal_slice, data_slice) - - global_mask = masked_slices > 0 - wb_ideal = ideal_data[global_mask] - wb_input = input_data[global_mask] - global_xcorr = crosscor(wb_input, wb_ideal) - global_r2 = r2_score(wb_input, wb_ideal) - return slice_scores, global_xcorr, global_r2 - - class ExtractB0sInputSpec(BaseInterfaceInputSpec): b0_indices = traits.List() bval_file = File(exists=True) @@ -278,19 +176,3 @@ def concatenate_bvecs(input_files): if not stacked.shape[1] == 3: stacked = stacked.T return stacked - - -def write_concatenated_fsl_gradients(bval_files, bvec_files, out_prefix): - bvec_file = out_prefix + ".bvec" - bval_file = out_prefix + ".bval" - stacked_bvecs = concatenate_bvecs(bvec_files) - np.savetxt(bvec_file, stacked_bvecs.T, fmt="%.8f", delimiter=" ") - concatenate_bvals(bval_files, bval_file) - return bval_file, bvec_file - - -def get_vector_nii(data, affine, header): - hdr = header.copy() - hdr.set_data_dtype(np.dtype(" -\t\t
diff --git a/qsirecon/interfaces/reports.py b/qsirecon/interfaces/reports.py
-SUBJECT_TEMPLATE = """\t<ul class="elem-desc">
-\t\t<li>Subject ID: {subject_id}</li>
-\t\t<li>Structural images: {n_t1s:d} T1-weighted {t2w}</li>
-\t\t<li>Diffusion-weighted series: inputs {n_dwis:d}, outputs {n_outputs:d}</li>
-{groupings}
-\t\t<li>Resampling targets: T1wACPC</li>
-\t\t<li>FreeSurfer reconstruction: {freesurfer_status}</li>
-\t</ul>
-"""
-
-DIFFUSION_TEMPLATE = """\t\t<h3 class="elem-title">Summary</h3>
-\t\t<ul class="elem-desc">
-\t\t\t<li>Phase-encoding (PE) direction: {pedir}</li>
-\t\t\t<li>Susceptibility distortion correction: {sdc}</li>
-\t\t\t<li>Coregistration Transform: {coregistration}</li>
-\t\t\t<li>Denoising Method: {denoise_method}</li>
-\t\t\t<li>Denoising Window: {denoise_window}</li>
-\t\t\t<li>HMC Transform: {hmc_transform}</li>
-\t\t\t<li>HMC Model: {hmc_model}</li>
-\t\t\t<li>DWI series resampled to spaces: {output_spaces}</li>
-\t\t\t<li>Confounds collected: {confounds}</li>
-\t\t\t<li>Impute slice threshold: {impute_slice_threshold}</li>
-\t\t</ul>
-{validation_reports}
-"""
-
-ABOUT_TEMPLATE = """\t<ul class="elem-desc">
-\t\t<li>qsirecon version: {version}</li>
-\t\t<li>qsirecon command: <code>{command}</code></li>
-\t\t<li>Date preprocessed: {date}</li>
-\t</ul>
-"""
-
-TOPUP_TEMPLATE = """\
-\t\t<h3 class="elem-title">TOPUP inputs</h3>
-\t\t{summary}
    -""" - -GROUPING_TEMPLATE = """\t -""" - INTERACTIVE_TEMPLATE = """ @@ -121,575 +61,6 @@ class SummaryOutputSpec(TraitedSpec): out_report = File(exists=True, desc="HTML segment containing summary") -class SummaryInterface(SimpleInterface): - output_spec = SummaryOutputSpec - - def _generate_segment(self): - raise NotImplementedError() - - def _run_interface(self, runtime): - segment = self._generate_segment() - fname = os.path.join(runtime.cwd, "report.html") - with open(fname, "w") as fobj: - fobj.write(segment) - self._results["out_report"] = fname - return runtime - - -class SubjectSummaryInputSpec(BaseInterfaceInputSpec): - t1w = InputMultiPath(File(exists=True), desc="T1w structural images") - t2w = InputMultiPath(File(exists=True), desc="T2w structural images") - subjects_dir = Directory(desc="FreeSurfer subjects directory") - subject_id = Str(desc="Subject ID") - dwi_groupings = traits.Dict(desc="groupings of DWI files and their output names") - output_spaces = traits.List(desc="Target spaces") - template = traits.Enum("MNI152NLin2009cAsym", desc="Template space") - - -class SubjectSummaryOutputSpec(SummaryOutputSpec): - # This exists to ensure that the summary is run prior to the first ReconAll - # call, allowing a determination whether there is a pre-existing directory - subject_id = Str(desc="FreeSurfer subject ID") - - -class SubjectSummary(SummaryInterface): - input_spec = SubjectSummaryInputSpec - output_spec = SubjectSummaryOutputSpec - - def _run_interface(self, runtime): - if isdefined(self.inputs.subject_id): - self._results["subject_id"] = self.inputs.subject_id - return super(SubjectSummary, self)._run_interface(runtime) - - def _generate_segment(self): - if not isdefined(self.inputs.subjects_dir): - freesurfer_status = "Not run" - else: - recon = fs.ReconAll( - subjects_dir=self.inputs.subjects_dir, - subject_id=self.inputs.subject_id, - T1_files=self.inputs.t1w, - flags="-noskullstrip", - ) - if recon.cmdline.startswith("echo"): - freesurfer_status = "Pre-existing directory" - else: - freesurfer_status = "Run by qsirecon" - - t2w_seg = "" - if self.inputs.t2w: - t2w_seg = "(+ {:d} T2-weighted)".format(len(self.inputs.t2w)) - - # Add text for how the dwis are grouped - n_dwis = 0 - n_outputs = 0 - groupings = "" - if isdefined(self.inputs.dwi_groupings): - for output_fname, group_info in self.inputs.dwi_groupings.items(): - n_outputs += 1 - files_desc = [] - files_desc.append( - "\t\t\t
<li>Scan group: %s (PE Dir %s)</li>
  • ") - groupings += GROUPING_TEMPLATE.format( - output_name=output_fname, input_files="\n".join(files_desc) - ) - - return SUBJECT_TEMPLATE.format( - subject_id=self.inputs.subject_id, - n_t1s=len(self.inputs.t1w), - t2w=t2w_seg, - n_dwis=n_dwis, - n_outputs=n_outputs, - groupings=groupings, - output_spaces="T1wACPC", - freesurfer_status=freesurfer_status, - ) - - -class DiffusionSummaryInputSpec(BaseInterfaceInputSpec): - distortion_correction = traits.Str( - desc="Susceptibility distortion correction method", mandatory=True - ) - pe_direction = traits.Enum( - None, "i", "i-", "j", "j-", mandatory=True, desc="Phase-encoding direction detected" - ) - distortion_correction = traits.Str(mandatory=True, desc="Method used for SDC") - impute_slice_threshold = traits.CFloat(desc="threshold for imputing a slice") - hmc_transform = traits.Str(mandatory=True, desc="transform used during HMC") - hmc_model = traits.Str(desc="model used for hmc") - b0_to_t1w_transform = traits.Enum("Rigid", "Affine", desc="Transform type for coregistration") - denoise_method = traits.Str(desc="method used for image denoising") - dwi_denoise_window = traits.Either( - traits.Int(), traits.Str(), desc="window size for dwidenoise" - ) - output_spaces = traits.List(desc="Target spaces") - confounds_file = File(exists=True, desc="Confounds file") - validation_reports = InputMultiObject(File(exists=True)) - - -class DiffusionSummary(SummaryInterface): - input_spec = DiffusionSummaryInputSpec - - def _generate_segment(self): - if self.inputs.pe_direction is None: - pedir = "MISSING - Assuming Anterior-Posterior" - else: - pedir = {"i": "Left-Right", "j": "Anterior-Posterior"}[self.inputs.pe_direction[0]] - - if isdefined(self.inputs.confounds_file): - with open(self.inputs.confounds_file) as cfh: - conflist = cfh.readline().strip("\n").strip() - else: - conflist = "" - - validation_summaries = [] - for summary in self.inputs.validation_reports: - with open(summary, "r") as summary_f: - validation_summaries.extend(summary_f.readlines()) - validation_summary = "\n".join(validation_summaries) - - return DIFFUSION_TEMPLATE.format( - pedir=pedir, - sdc=self.inputs.distortion_correction, - coregistration=self.inputs.b0_to_t1w_transform, - hmc_transform=self.inputs.hmc_transform, - hmc_model=self.inputs.hmc_model, - denoise_method=self.inputs.denoise_method, - denoise_window=self.inputs.dwi_denoise_window, - output_spaces="T1wACPC", - confounds=re.sub(r"[\t ]+", ", ", conflist), - impute_slice_threshold=self.inputs.impute_slice_threshold, - validation_reports=validation_summary, - ) - - -class AboutSummaryInputSpec(BaseInterfaceInputSpec): - version = Str(desc="qsirecon version") - command = Str(desc="qsirecon command") - # Date not included - update timestamp only if version or command changes - - -class AboutSummary(SummaryInterface): - input_spec = AboutSummaryInputSpec - - def _generate_segment(self): - return ABOUT_TEMPLATE.format( - version=self.inputs.version, - command=self.inputs.command, - date=time.strftime("%Y-%m-%d %H:%M:%S %z"), - ) - - -class TopupSummaryInputSpec(BaseInterfaceInputSpec): - summary = Str(desc="Summary of TOPUP inputs") - - -class TopupSummary(SummaryInterface): - input_spec = TopupSummaryInputSpec - - def _generate_segment(self): - return TOPUP_TEMPLATE.format(summary=self.inputs.summary) - - -class GradientPlotInputSpec(BaseInterfaceInputSpec): - orig_bvec_files = InputMultiObject( - File(exists=True), mandatory=True, desc="bvecs from DWISplit" - ) - orig_bval_files = InputMultiObject( - 
File(exists=True), mandatory=True, desc="bvals from DWISplit" - ) - source_files = traits.List(desc="source file for each gradient") - final_bvec_file = File(exists=True, desc="bval file") - - -class GradientPlotOutputSpec(SummaryOutputSpec): - plot_file = File(exists=True) - - -class GradientPlot(SummaryInterface): - input_spec = GradientPlotInputSpec - output_spec = GradientPlotOutputSpec - - def _run_interface(self, runtime): - outfile = os.path.join(runtime.cwd, "bvec_plot.gif") - sns.set_style("whitegrid") - sns.set_context("paper", font_scale=0.8) - - orig_bvecs = concatenate_bvecs(self.inputs.orig_bvec_files) - bvals = concatenate_bvals(self.inputs.orig_bval_files, None) - if isdefined(self.inputs.source_files): - file_array = np.array(self.inputs.source_files) - _, filenums = np.unique(file_array, return_inverse=True) - else: - filenums = np.ones_like(bvals) - - # Account for the possibility that this is a PE Pair average - if len(filenums) == len(bvals) * 2: - filenums = filenums[: len(bvals)] - - # Plot the final bvecs if provided - final_bvecs = None - if isdefined(self.inputs.final_bvec_file): - final_bvecs = np.loadtxt(self.inputs.final_bvec_file).T - - plot_gradients(bvals, orig_bvecs, filenums, outfile, final_bvecs) - self._results["plot_file"] = outfile - return runtime - - -def plot_gradients(bvals, orig_bvecs, source_filenums, output_fname, final_bvecs=None, frames=60): - qrads = np.sqrt(bvals) - qvecs = qrads[:, np.newaxis] * orig_bvecs - qx, qy, qz = qvecs.T - maxvals = qvecs.max(0) - minvals = qvecs.min(0) - total_max = max(np.abs(maxvals).max(), np.abs(minvals).max()) - - def force_scaling(ax): - # trick to force equal aspect on all 3 axes - for direction in (-1, 1): - for point in np.diag(direction * total_max * np.array([1, 1, 1])): - ax.plot([point[0]], [point[1]], [point[2]], "w") - - def add_lines(ax): - labels = ["L", "P", "S"] - for axnum in range(3): - minvec = np.zeros(3) - maxvec = np.zeros(3) - minvec[axnum] = minvals[axnum] - maxvec[axnum] = maxvals[axnum] - x, y, z = np.column_stack([minvec, maxvec]) - ax.plot(x, y, z, color="k") - txt_pos = maxvec + 5 - ax.text(txt_pos[0], txt_pos[1], txt_pos[2], labels[axnum], size=8, zorder=1, color="k") - - if final_bvecs is not None: - if final_bvecs.shape[0] == 3: - final_bvecs = final_bvecs.T - fqx, fqy, fqz = (qrads[:, np.newaxis] * final_bvecs).T - fig, axes = plt.subplots( - nrows=1, ncols=2, figsize=(10, 5), subplot_kw={"projection": "3d"} - ) - orig_ax = axes[0] - final_ax = axes[1] - axes_list = [orig_ax, final_ax] - final_ax.scatter(fqx, fqy, fqz, c=source_filenums, marker="+") - orig_ax.scatter(qx, qy, qz, c=source_filenums, marker="+") - final_ax.axis("off") - add_lines(final_ax) - final_ax.set_title("After Preprocessing") - else: - fig, orig_ax = plt.subplots( - nrows=1, ncols=1, figsize=(10, 5), subplot_kw={"aspect": "equal", "projection": "3d"} - ) - axes_list = [orig_ax] - orig_ax.scatter(qx, qy, qz, c=source_filenums, marker="+") - orig_ax.axis("off") - orig_ax.set_title("Original Scheme") - add_lines(orig_ax) - force_scaling(orig_ax) - # Animate rotating the axes - rotate_amount = np.ones(frames) * 180 / frames - stay_put = np.zeros_like(rotate_amount) - rotate_azim = np.concatenate([rotate_amount, stay_put, -rotate_amount, stay_put]) - rotate_elev = np.concatenate([stay_put, rotate_amount, stay_put, -rotate_amount]) - plt.tight_layout() - - def rotate(i): - for ax in axes_list: - ax.azim += rotate_azim[i] - ax.elev += rotate_elev[i] - return tuple(axes_list) - - anim = animation.FuncAnimation(fig, 
rotate, frames=frames * 4, interval=20, blit=False) - anim.save(output_fname, writer="imagemagick", fps=32) - - plt.close(fig) - fig = None - - -def topup_selection_to_report( - selected_indices, original_files, spec_lookup, image_source="combined DWI series" -): - """Write a description of how the images were selected for TOPUP. - - >>> selected_indices = [0, 15, 30, 45] - >>> original_files = ["sub-1_dir-AP_dwi.nii.gz"] * 30 + ["sub-1_dir-PA_dwi.nii.gz"] * 30 - >>> spec_lookup = {"sub-1_dir-AP_dwi.nii.gz": "0 1 0 0.087", - ... "sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"} - >>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup)) - A total of 2 distortion groups was included in the combined dwi data. Distortion \ -group '0 1 0 0.087' was represented by images 0, 15 from sub-1_dir-AP_dwi.nii.gz. \ -Distortion group '0 -1 0 0.087' was represented by images 0, 15 from sub-1_dir-PA_dwi.nii.gz. " - - Or - - >>> selected_indices = [0, 15, 30, 45] - >>> original_files = ["sub-1_dir-AP_run-1_dwi.nii.gz"] * 15 + [ - ... "sub-1_dir-AP_run-2_dwi.nii.gz"] * 15 + [ - ... "sub-1_dir-PA_dwi.nii.gz"] * 30 - >>> spec_lookup = {"sub-1_dir-AP_run-1_dwi.nii.gz": "0 1 0 0.087", - ... "sub-1_dir-AP_run-2_dwi.nii.gz": "0 1 0 0.087", - ... "sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"} - >>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup)) - A total of 2 distortion groups was included in the combined dwi data. Distortion \ -group '0 1 0 0.087' was represented by image 0 from sub-1_dir-AP_run-1_dwi.nii.gz and \ -image 0 from sub-1_dir-AP_run-2_dwi.nii.gz. Distortion group '0 -1 0 0.087' was represented \ -by images 0, 15 from sub-1_dir-PA_dwi.nii.gz. - - >>> selected_indices = [0, 15, 30, 45, 60] - >>> original_files = ["sub-1_dir-AP_run-1_dwi.nii.gz"] * 15 + [ - ... "sub-1_dir-AP_run-2_dwi.nii.gz"] * 15 + [ - ... "sub-1_dir-AP_run-3_dwi.nii.gz"] * 15 + [ - ... "sub-1_dir-PA_dwi.nii.gz"] * 30 - >>> spec_lookup = {"sub-1_dir-AP_run-1_dwi.nii.gz": "0 1 0 0.087", - ... "sub-1_dir-AP_run-2_dwi.nii.gz": "0 1 0 0.087", - ... "sub-1_dir-AP_run-3_dwi.nii.gz": "0 1 0 0.087", - ... "sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"} - >>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup)) - A total of 2 distortion groups was included in the combined dwi data. Distortion \ -group '0 1 0 0.087' was represented by image 0 from sub-1_dir-AP_run-1_dwi.nii.gz, \ -image 0 from sub-1_dir-AP_run-2_dwi.nii.gz and image 0 from sub-1_dir-AP_run-3_dwi.nii.gz. \ -Distortion group '0 -1 0 0.087' was represented by images 0, 15 from sub-1_dir-PA_dwi.nii.gz. - - >>> selected_indices = [0, 15, 30, 45] - >>> original_files = ["sub-1_dir-PA_dwi.nii.gz"] * 60 - >>> spec_lookup = {"sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"} - >>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup)) - A total of 1 distortion group was included in the combined dwi data. \ -Distortion group '0 -1 0 0.087' was represented by images 0, 15, 30, 45 \ -from sub-1_dir-PA_dwi.nii.gz. 
- - """ - image_indices = defaultdict(list) - for imgnum, image in enumerate(original_files): - image_indices[image].append(imgnum) - - # Collect the original volume number within each source image - selected_per_image = defaultdict(list) - for b0_index in selected_indices: - b0_image = original_files[b0_index] - first_index = min(image_indices[b0_image]) - within_image_index = b0_index - first_index - selected_per_image[b0_image].append(within_image_index) - - # Collect the images and indices within each warp group - selected_per_warp_group = defaultdict(list) - for original_image, selection in selected_per_image.items(): - warp_group = spec_lookup[original_image] - selected_per_warp_group[warp_group].append((original_image, selection)) - - # Make the description - num_groups = len(selected_per_warp_group) - plural = "s" if num_groups > 1 else "" - plural2 = "were" if plural == "s" else "was" - desc = [ - "A total of {num_groups} distortion group{plural} {plural2} included in the " - "{image_source} data. ".format( - num_groups=num_groups, plural=plural, plural2=plural2, image_source=image_source - ) - ] - for distortion_group, image_list in selected_per_warp_group.items(): - group_desc = [ - "Distortion group '{spec}' was represented by ".format(spec=distortion_group) - ] - for image_name, image_indices in image_list: - formatted_indices = ", ".join(map(str, image_indices)) - plural = "s" if len(image_indices) > 1 else "" - group_desc += [ - "image{plural} {imgnums} from {img_name}".format( - plural=plural, imgnums=formatted_indices, img_name=op.split(image_name)[-1] - ), - ", ", - ] - group_desc[-1] = ". " - if len(image_list) > 1: - group_desc[-3] = " and " - desc += group_desc - - return "".join(desc) - - -class _SeriesQCInputSpec(BaseInterfaceInputSpec): - pre_qc = File(exists=True, desc="qc file from the raw data", mandatory=True) - t1_qc = File(exists=True, desc="qc file from preprocessed image in t1 space") - t1_qc_postproc = File(exists=True, desc="qc file from preprocessed image in template space") - confounds_file = File(exists=True, desc="confounds file", mandatory=True) - t1_mask_file = File(exists=True, desc="brain mask in t1 space") - t1_cnr_file = File(exists=True, desc="CNR file in t1 space") - t1_b0_series = File(exists=True, desc="time series of b=0 images") - t1_dice_score = traits.Float() - mni_dice_score = traits.Float() - output_file_name = traits.File() - - -class _SeriesQCOutputSpec(TraitedSpec): - series_qc_file = File(exists=True) - - -class SeriesQC(SimpleInterface): - input_spec = _SeriesQCInputSpec - output_spec = _SeriesQCOutputSpec - - def _run_interface(self, runtime): - image_qc = _load_qc_file(self.inputs.pre_qc, prefix="raw_") - if isdefined(self.inputs.t1_qc): - image_qc.update(_load_qc_file(self.inputs.t1_qc, prefix="t1_")) - if isdefined(self.inputs.t1_qc_postproc): - image_qc.update(_load_qc_file(self.inputs.t1_qc_postproc, prefix="t1post_")) - motion_summary = calculate_motion_summary(self.inputs.confounds_file) - image_qc.update(motion_summary) - - # Add in Dice scores if available - if isdefined(self.inputs.t1_dice_score): - image_qc["t1_dice_distance"] = [self.inputs.t1_dice_score] - if isdefined(self.inputs.mni_dice_score): - image_qc["mni_dice_distance"] = [self.inputs.mni_dice_score] - - if isdefined(self.inputs.t1_mask_file): - if isdefined(self.inputs.t1_cnr_file): - image_qc.update(get_cnr_values(self.inputs.t1_cnr_file, self.inputs.t1_mask_file)) - if isdefined(self.inputs.t1_b0_series): - # Add a function to get b=0 TSNR - pass - - # Get 
the metadata - output_file = self.inputs.output_file_name - image_qc["file_name"] = output_file - bids_info = get_bids_params(output_file) - image_qc.update(bids_info) - output = op.join(runtime.cwd, "dwi_qc.csv") - pd.DataFrame(image_qc).to_csv(output, index=False) - self._results["series_qc_file"] = output - return runtime - - -def _load_qc_file(fname, prefix=""): - qc_data = pd.read_csv(fname).to_dict(orient="records")[0] - renamed = dict([(prefix + key, value) for key, value in qc_data.items()]) - return renamed - - -def get_cnr_values(cnr_image, brain_mask): - cnr_img = nb.load(cnr_image) - mask_img = nb.load(brain_mask) - - # Determine which CNRs we's getting - num_cnrs = 1 if cnr_img.ndim == 3 else cnr_img.shape[3] - if num_cnrs == 1: - cnr_labels = ["CNR"] - else: - cnr_labels = ["CNR%d" % value for value in range(num_cnrs)] - - cnrs = {} - strategies = ["mean", "median", "standard_deviation"] - for strategy in strategies: - masker = NiftiLabelsMasker(mask_img, strategy=strategy, resampling_target="data") - cnr_values = masker.fit_transform(cnr_img).flatten() - for cnr_name, cnr_value in zip(cnr_labels, cnr_values): - cnrs[cnr_name + "_" + strategy] = cnr_value - - return cnrs - - -def motion_derivatives(translations, rotations, framewise_disp, original_files): - - def padded_diff(data): - out = np.zeros_like(data) - out[1:] = np.diff(data, axis=0) - return out - - drotations = padded_diff(rotations) - dtranslations = padded_diff(translations) - - # We don't want the relative values across the boundaries of runs. - # Determine which values should be ignored - file_labels, _ = pd.factorize(original_files) - new_files = padded_diff(file_labels) - - def file_masked(data): - masked_data = data.copy() - masked_data[new_files > 0] = 0 - return masked_data - - framewise_disp = file_masked(framewise_disp) - return { - "mean_fd": [framewise_disp.mean()], - "max_fd": [framewise_disp.max()], - "max_rotation": [file_masked(np.abs(rotations)).max()], - "max_translation": [file_masked(np.abs(translations)).max()], - "max_rel_rotation": [file_masked(np.abs(drotations)).max()], - "max_rel_translation": [file_masked(np.abs(dtranslations)).max()], - } - - -def calculate_motion_summary(confounds_tsv): - if not isdefined(confounds_tsv) or confounds_tsv is None: - return { - "mean_fd": [np.nan], - "max_fd": [np.nan], - "max_rotation": [np.nan], - "max_translation": [np.nan], - "max_rel_rotation": [np.nan], - "max_rel_translation": [np.nan], - } - df = pd.read_csv(confounds_tsv, delimiter="\t") - - # the default case where each output image comes from one input image - if "trans_x" in df.columns: - translations = df[["trans_x", "trans_y", "trans_z"]].values - rotations = df[["rot_x", "rot_y", "rot_z"]].values - return motion_derivatives( - translations, rotations, df["framewise_displacement"], df["original_file"] - ) - - # If there was a PE Pair averaging, get motion from both - motion1 = motion_derivatives( - df[["trans_x_1", "trans_y_1", "trans_z_1"]].values, - df[["rot_x_1", "rot_y_1", "rot_z_1"]].values, - df["framewise_displacement_1"], - df["original_file_1"], - ) - - motion2 = motion_derivatives( - df[["trans_x_2", "trans_y_2", "trans_z_2"]].values, - df[["rot_x_2", "rot_y_2", "rot_z_2"]].values, - df["framewise_displacement_2"], - df["original_file_2"], - ) - - # Combine the FDs from both PE directions - # both_fd = np.column_stack([m1, m2]) - # framewise_disp = both_fd[np.nanargmax(np.abs(both_fd), axis=1)] - def compare_series(key_name, comparator): - m1 = motion1[key_name][0] - m2 = 
motion2[key_name][0] - return [comparator(m1, m2)] - - return { - "mean_fd": compare_series("mean_fd", lambda a, b: (a + b) / 2), - "max_fd": compare_series("max_fd", max), - "max_rotation": compare_series("max_rotation", max), - "max_translation": compare_series("max_translation", max), - "max_rel_rotation": compare_series("max_rel_rotation", max), - "max_rel_translation": compare_series("max_rel_translation", max), - } - - class _InteractiveReportInputSpec(TraitedSpec): raw_dwi_file = File(exists=True, mandatory=True) processed_dwi_file = File(exists=True, mandatory=True) diff --git a/qsirecon/interfaces/tortoise.py b/qsirecon/interfaces/tortoise.py index dcab248f..0c48b5e2 100644 --- a/qsirecon/interfaces/tortoise.py +++ b/qsirecon/interfaces/tortoise.py @@ -8,36 +8,19 @@ import os.path as op import subprocess -import nibabel as nb import nilearn.image as nim -import numpy as np -import pandas as pd -from nipype.interfaces import ants from nipype.interfaces.base import ( BaseInterfaceInputSpec, CommandLine, CommandLineInputSpec, File, InputMultiObject, - OutputMultiObject, SimpleInterface, TraitedSpec, isdefined, traits, ) from nipype.utils.filemanip import fname_presuffix -from niworkflows.viz.utils import compose_view, cuts_from_bbox - -from ..viz.utils import plot_denoise -from .denoise import ( - SeriesPreprocReport, - SeriesPreprocReportInputSpec, - SeriesPreprocReportOutputSpec, -) -from .epi_fmap import get_best_b0_topup_inputs_from, safe_get_3d_image -from .fmap import get_distortion_grouping -from .gradients import write_concatenated_fsl_gradients -from .images import split_bvals_bvecs, to_lps LOGGER = logging.getLogger("nipype.interface") @@ -83,127 +66,6 @@ def run(self, **inputs): return super(TORTOISECommandLine, self).run(**inputs) -class _GatherDRBUDDIInputsInputSpec(TORTOISEInputSpec): - dwi_files = InputMultiObject(File(exists=True)) - original_files = InputMultiObject(File(exists=True)) - bval_files = traits.Either(InputMultiObject(File(exists=True)), File(exists=True)) - bvec_files = traits.Either(InputMultiObject(File(exists=True)), File(exists=True)) - original_files = InputMultiObject(File(exists=True)) - b0_threshold = traits.CInt(100, usedefault=True) - epi_fmaps = InputMultiObject( - File(exists=True), desc="files from fmaps/ for distortion correction" - ) - raw_image_sdc = traits.Bool(True, usedefault=True) - fieldmap_type = traits.Enum("epi", "rpe_series", mandatory=True) - dwi_series_pedir = traits.Enum("i", "i-", "j", "j-", "k", "k-", mandatory=True) - - -class _GatherDRBUDDIInputsOutputSpec(TraitedSpec): - blip_up_image = File(exists=True) - blip_up_bmat = File(exists=True) - blip_up_json = File(exists=True) - blip_down_image = File(exists=True) - blip_down_bmat = File(exists=True) - blip_assignments = traits.List() - report = traits.Str() - - -class GatherDRBUDDIInputs(SimpleInterface): - input_spec = _GatherDRBUDDIInputsInputSpec - output_spec = _GatherDRBUDDIInputsOutputSpec - - def _run_interface(self, runtime): - - # Write the metadata - up_json = op.join(runtime.cwd, "blip_up.json") - with open(up_json, "w") as up_jsonf: - up_jsonf.write('{"PhaseEncodingDirection": "%s"}\n' % self.inputs.dwi_series_pedir) - self._results["blip_up_json"] = up_json - - # Coerce the bvals and bvecs into lists of files - if isinstance(self.inputs.bval_files, list) and len(self.inputs.bval_files) == 1: - bval_files, bvec_files = split_bvals_bvecs( - self.inputs.bval_files[0], - self.inputs.bvec_files[0], - deoblique=False, - img_files=self.inputs.dwi_files, - 
working_dir=runtime.cwd, - ) - else: - bval_files, bvec_files = self.inputs.bval_files, self.inputs.bvec_files - - if self.inputs.fieldmap_type == "rpe_series": - ( - self._results["blip_assignments"], - self._results["blip_up_image"], - self._results["blip_up_bmat"], - self._results["blip_down_image"], - self._results["blip_down_bmat"], - ) = split_into_up_and_down_niis( - dwi_files=self.inputs.dwi_files, - bval_files=bval_files, - bvec_files=bvec_files, - original_images=self.inputs.original_files, - prefix=op.join(runtime.cwd, "drbuddi"), - make_bmat=True, - ) - - elif self.inputs.fieldmap_type == "epi": - # Use the same function that was used to get images for TOPUP, but get the images - # directly from the CSV - _, _, _, b0_csv, _, _ = get_best_b0_topup_inputs_from( - dwi_file=self.inputs.dwi_files, - bval_file=bval_files, - b0_threshold=self.inputs.b0_threshold, - cwd=runtime.cwd, - bids_origin_files=self.inputs.original_files, - epi_fmaps=self.inputs.epi_fmaps, - max_per_spec=True, - raw_image_sdc=self.inputs.raw_image_sdc, - ) - - b0s_df = pd.read_csv(b0_csv) - selected_images = b0s_df[b0s_df.selected_for_sdc].reset_index(drop=True) - up_row = selected_images.loc[0] - down_row = selected_images.loc[1] - up_img = to_lps(safe_get_3d_image(up_row.bids_origin_file, up_row.original_volume)) - up_img.set_data_dtype("float32") - down_img = to_lps( - safe_get_3d_image(down_row.bids_origin_file, down_row.original_volume) - ) - down_img.set_data_dtype("float32") - - # Save the images - blip_up_nii = op.join(runtime.cwd, "blip_up_b0.nii") - blip_down_nii = op.join(runtime.cwd, "blip_down_b0.nii") - up_img.to_filename(blip_up_nii) - down_img.to_filename(blip_down_nii) - self._results["blip_up_image"] = blip_up_nii - self._results["blip_down_image"] = blip_down_nii - self._results["blip_assignments"] = split_into_up_and_down_niis( - dwi_files=self.inputs.dwi_files, - bval_files=bval_files, - bvec_files=bvec_files, - original_images=self.inputs.original_files, - prefix=op.join(runtime.cwd, "drbuddi"), - make_bmat=False, - assignments_only=True, - ) - self._results["blip_up_bmat"] = write_dummy_bmtxt(blip_up_nii) - self._results["blip_down_bmat"] = write_dummy_bmtxt(blip_down_nii) - - return runtime - - -def write_dummy_bmtxt(nii_file): - new_fname = fname_presuffix(nii_file, suffix=".bmtxt", use_ext=False) - img = nim.load_img(nii_file) - nvols = 1 if img.ndim < 4 else img.ndim.shape[3] - with open(new_fname, "w") as bmtxt_f: - bmtxt_f.write("\n".join(["0 0 0 0 0 0"] * nvols) + "\n") - return new_fname - - class _DRBUDDIInputSpec(TORTOISEInputSpec): num_threads = traits.Int( desc="number of OMP threads", @@ -349,234 +211,6 @@ def _list_outputs(self): return outputs -class _DRBUDDIAggregateOutputsInputSpec(TORTOISEInputSpec): - blip_assignments = traits.List() - undistorted_reference = File(exists=True) - bdown_to_bup_rigid_trans_h5 = File(exists=True) - undistorted_reference = File(exists=True) - blip_down_b0 = File(exists=True) - blip_down_b0_corrected = File(exists=True) - blip_down_b0_corrected_jac = File(exists=True) - blip_down_b0_quad = File(exists=True) - blip_up_b0 = File(exists=True) - blip_up_b0_corrected = File(exists=True) - blip_up_b0_corrected_jac = File(exists=True) - blip_up_b0_quad = File(exists=True) - deformation_finv = File(exists=True, desc="blip up to b0_corrected") - deformation_minv = File(exists=True) - blip_up_FA = File(exists=True) - blip_down_FA = File(exists=True) - fieldmap_type = traits.Enum("epi", "rpe_series", mandatory=True) - structural_image = 
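# Sketch of the dummy b-matrix convention used by write_dummy_bmtxt above:
# one all-zero six-element row per volume. Note the volume count here uses
# img.shape[3]; the removed helper's img.ndim.shape[3] would raise.
import nilearn.image as nim

def dummy_bmtxt_rows(nii_file):
    img = nim.load_img(nii_file)
    nvols = 1 if img.ndim < 4 else img.shape[3]
    return "\n".join(["0 0 0 0 0 0"] * nvols) + "\n"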
File(exists=True) - wm_seg = File(exists=True, desc="White matter segmentation image") - - -class _DRBUDDIAggregateOutputsOutputSpec(TraitedSpec): - # Aggregated outputs for convenience - sdc_warps = OutputMultiObject(File(exists=True)) - sdc_scaling_images = OutputMultiObject(File(exists=True)) - # Fieldmap outputs for the reports - up_fa_corrected_image = File(exists=True) - down_fa_corrected_image = File(exists=True) - # The best image for coregistration to the corrected DWI - b0_ref = File(exists=True) - - -class DRBUDDIAggregateOutputs(SimpleInterface): - input_spec = _DRBUDDIAggregateOutputsInputSpec - output_spec = _DRBUDDIAggregateOutputsOutputSpec - - def _run_interface(self, runtime): - - # If the structural image has been used, return that as the b0ref, otherwise - # it's the b0_corrected_final - self._results["b0_ref"] = ( - self.inputs.structural_image - if isdefined(self.inputs.structural_image) - else self.inputs.undistorted_reference - ) - - # there may be 2 transforms for the blip down data. If so, compose them - if isdefined(self.inputs.bdown_to_bup_rigid_trans_h5): - # combine the rigid with displacement - down_warp = op.join(runtime.cwd, "blip_down_composite.nii.gz") - xfm = ants.ApplyTransforms( - # input_image is ignored because print_out_composite_warp_file is True - input_image=self.inputs.blip_down_b0, - transforms=[self.inputs.deformation_minv, self.inputs.bdown_to_bup_rigid_trans_h5], - reference_image=self.inputs.undistorted_reference, - output_image=down_warp, - print_out_composite_warp_file=True, - interpolation="LanczosWindowedSinc", - ) - xfm.terminal_output = "allatonce" - xfm.resource_monitor = False - _ = xfm.run() - else: - down_warp = self.inputs.deformation_minv - - # Calculate the scaling images - scaling_blip_up_file = op.join(runtime.cwd, "blip_up_scale.nii.gz") - scaling_blip_down_file = op.join(runtime.cwd, "blip_down_scale.nii.gz") - scaling_blip_up_img = nim.math_img( - "a/b", a=self.inputs.undistorted_reference, b=self.inputs.blip_up_b0_corrected - ) - scaling_blip_up_img.to_filename(scaling_blip_up_file) - scaling_blip_down_img = nim.math_img( - "a/b", a=self.inputs.undistorted_reference, b=self.inputs.blip_down_b0_corrected - ) - scaling_blip_down_img.to_filename(scaling_blip_down_file) - - self._results["sdc_warps"] = [ - self.inputs.deformation_finv if blip_dir == "up" else down_warp - for blip_dir in self.inputs.blip_assignments - ] - self._results["sdc_scaling_images"] = [ - scaling_blip_up_file if blip_dir == "up" else scaling_blip_down_file - for blip_dir in self.inputs.blip_assignments - ] - - if self.inputs.fieldmap_type == "rpe_series": - fa_up_warped = fname_presuffix( - self.inputs.blip_up_FA, newpath=runtime.cwd, suffix="_corrected" - ) - xfm_fa_up = ants.ApplyTransforms( - # input_image is ignored because print_out_composite_warp_file is True - input_image=self.inputs.blip_up_FA, - transforms=[self.inputs.deformation_finv], - reference_image=self.inputs.undistorted_reference, - output_image=fa_up_warped, - interpolation="NearestNeighbor", - ) - xfm_fa_up.terminal_output = "allatonce" - xfm_fa_up.resource_monitor = False - xfm_fa_up.run() - - fa_down_warped = fname_presuffix( - self.inputs.blip_down_FA, newpath=runtime.cwd, suffix="_corrected" - ) - xfm_fa_down = ants.ApplyTransforms( - # input_image is ignored because print_out_composite_warp_file is True - input_image=self.inputs.blip_down_FA, - transforms=[self.inputs.deformation_minv, self.inputs.bdown_to_bup_rigid_trans_h5], - 
reference_image=self.inputs.undistorted_reference, - output_image=fa_down_warped, - interpolation="NearestNeighbor", - ) - xfm_fa_down.terminal_output = "allatonce" - xfm_fa_down.resource_monitor = False - xfm_fa_down.run() - self._results["up_fa_corrected_image"] = fa_up_warped - self._results["down_fa_corrected_image"] = fa_down_warped - - return runtime - - -class _GibbsInputSpec(TORTOISEInputSpec, SeriesPreprocReportInputSpec): - """Gibbs input_nifti output_nifti kspace_coverage(1,0.875,0.75) - phase_encoding_dir nsh minW(optional) maxW(optional)""" - - in_file = traits.File(exists=True, mandatory=True, position=0, argstr="%s") - out_file = traits.File( - argstr="%s", - position=1, - name_source="in_file", - name_template="%s_unrung.nii", - use_extension=False, - ) - kspace_coverage = traits.Float(mandatory=True, position=2, argstr="%.4f") - phase_encoding_dir = traits.Enum( - 0, 1, mandatory=True, argstr="%d", position=3, desc="0: horizontal, 1:vertical" - ) - nsh = traits.Int(argstr="%d", position=4) - min_w = traits.Int() - mask = File() - num_threads = traits.Int(1, usedefault=True, nohash=True) - - -class _GibbsOutputSpec(SeriesPreprocReportOutputSpec): - out_file = File(exists=True) - - -class Gibbs(SeriesPreprocReport, TORTOISECommandLine): - input_spec = _GibbsInputSpec - output_spec = _GibbsOutputSpec - _cmd = "Gibbs" - - def _get_plotting_images(self): - input_dwi = nim.load_img(self.inputs.in_file) - outputs = self._list_outputs() - ref_name = outputs.get("out_file") - denoised_nii = nim.load_img(ref_name) - return input_dwi, denoised_nii, None - - def _generate_report(self): - """Generate a reportlet.""" - LOGGER.info("Generating denoising visual report") - - input_dwi, denoised_nii, _ = self._get_plotting_images() - - # find an image to use as the background - image_data = input_dwi.get_fdata() - image_intensities = np.array([img.mean() for img in image_data.T]) - lowb_index = int(np.argmax(image_intensities)) - highb_index = int(np.argmin(image_intensities)) - - # Original images - orig_lowb_nii = input_dwi.slicer[..., lowb_index] - orig_highb_nii = input_dwi.slicer[..., highb_index] - - # Denoised images - denoised_lowb_nii = denoised_nii.slicer[..., lowb_index] - denoised_highb_nii = denoised_nii.slicer[..., highb_index] - - # Find spatial extent of the image - contour_nii = mask_nii = None - if isdefined(self.inputs.mask): - contour_nii = nim.load_img(self.inputs.mask) - else: - mask_nii = nim.threshold_img(denoised_lowb_nii, 50) - cuts = cuts_from_bbox(contour_nii or mask_nii, cuts=self._n_cuts) - - diff_lowb_nii = nb.Nifti1Image( - orig_lowb_nii.get_fdata() - denoised_lowb_nii.get_fdata(), - affine=denoised_lowb_nii.affine, - ) - diff_highb_nii = nb.Nifti1Image( - orig_highb_nii.get_fdata() - denoised_highb_nii.get_fdata(), - affine=denoised_highb_nii.affine, - ) - - # Call composer - compose_view( - plot_denoise( - denoised_lowb_nii, - denoised_highb_nii, - "moving-image", - estimate_brightness=True, - cuts=cuts, - label="De-Gibbs", - lowb_contour=None, - highb_contour=None, - compress=False, - ), - plot_denoise( - diff_lowb_nii, - diff_highb_nii, - "fixed-image", - estimate_brightness=True, - cuts=cuts, - label="Estimated Ringing", - lowb_contour=None, - highb_contour=None, - compress=False, - ), - out_file=self._out_report, - ) - - self._calculate_nmse(input_dwi, denoised_nii) - - class _TORTOISEConvertInputSpec(BaseInterfaceInputSpec): bval_file = File(exists=True, mandatory=True, copyfile=True) bvec_file = File(exists=True, mandatory=True, copyfile=True) @@ 
-861,130 +495,7 @@ class ComputeMAPMRI_NG(TORTOISEReconCommandLine): _suffix_map = {"ng_file": "_NG", "ngpar_file": "_NGpar", "ngperp_file": "_NGperp"} -def split_into_up_and_down_niis( - dwi_files, - bval_files, - bvec_files, - original_images, - prefix, - make_bmat=True, - assignments_only=False, -): - """Takes the concatenated output from pre_hmc_wf and split it into "up" and "down" - decompressed nifti files with float32 datatypes.""" - group_names, group_assignments = get_distortion_grouping(original_images) - - if not len(set(group_names)) == 2 and not assignments_only: - raise Exception("DRBUDDI requires exactly one blip up and one blip down") - - up_images = [] - up_bvals = [] - up_bvecs = [] - up_prefix = prefix + "_up_dwi" - up_dwi_file = up_prefix + ".nii" - up_bmat_file = up_prefix + ".bmtxt" - down_images = [] - down_bvals = [] - down_bvecs = [] - down_prefix = prefix + "_down_dwi" - down_dwi_file = down_prefix + ".nii" - down_bmat_file = down_prefix + ".bmtxt" - - # We know up is first because we concatenated them ourselves - up_group_name = group_assignments[0] - blip_assignments = [] - for dwi_file, bval_file, bvec_file, distortion_group in zip( - dwi_files, bval_files, bvec_files, group_assignments - ): - - if distortion_group == up_group_name: - up_images.append(dwi_file) - up_bvals.append(bval_file) - up_bvecs.append(bvec_file) - blip_assignments.append("up") - else: - down_images.append(dwi_file) - down_bvals.append(bval_file) - down_bvecs.append(bvec_file) - blip_assignments.append("down") - - if assignments_only: - return blip_assignments - - # Write the 4d up image - up_4d = nim.concat_imgs(up_images, dtype="float32", auto_resample=False) - up_4d.set_data_dtype("float32") - up_4d.to_filename(up_dwi_file) - up_bval_file, up_bvec_file = write_concatenated_fsl_gradients(up_bvals, up_bvecs, up_prefix) - - # Write the 4d down image - down_4d = nim.concat_imgs(down_images, dtype="float32", auto_resample=False) - down_4d.set_data_dtype("float32") - down_4d.to_filename(down_dwi_file) - down_bval_file, down_bvec_file = write_concatenated_fsl_gradients( - down_bvals, down_bvecs, down_prefix - ) - - # Send back FSL-style gradients - if not make_bmat: - return ( - blip_assignments, - up_dwi_file, - up_bval_file, - up_bvec_file, - down_dwi_file, - down_bval_file, - down_bvec_file, - ) - - # Convert to bmatrix text file - make_bmat_file(up_bval_file, up_bvec_file) - make_bmat_file(down_bval_file, down_bvec_file) - - return blip_assignments, up_dwi_file, up_bmat_file, down_dwi_file, down_bmat_file - - def make_bmat_file(bvals, bvecs): pout = subprocess.run(["FSLBVecsToTORTOISEBmatrix", op.abspath(bvals), op.abspath(bvecs)]) print(pout) return bvals.replace("bval", "bmtxt") - - -def generate_drbuddi_boilerplate(fieldmap_type, t2w_sdc, with_topup=False): - """Generate boilerplate that describes how DRBUDDI is being used.""" - - desc = ["\n\nDRBUDDI [@drbuddi], part of the TORTOISE [@tortoisev3] software package,"] - if not with_topup: - # Until now there will have been no description of the SDC procedure. - # Add extra details about the input data. - desc.append( - "was used to perform susceptibility distortion correction. " - "Data was collected with reversed phase-encode blips, resulting " - "in pairs of images with distortions going in opposite directions." 
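# Hypothetical call to the make_bmat_file helper kept above: convert an
# FSL-style bval/bvec pair into TORTOISE's .bmtxt format via
# FSLBVecsToTORTOISEBmatrix; per the helper, the output path is the bval
# path with "bval" swapped for "bmtxt".
bmtxt_file = make_bmat_file("/work/merged.bval", "/work/merged.bvec")
# bmtxt_file == "/work/merged.bmtxt"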
- ) - else: - desc += ["was used to perform a second stage of distortion correction."] - - # Describe what's going on - if fieldmap_type == "epi": - desc.append( - "DRBUDDI used b=0 reference images with reversed " - "phase encoding directions to estimate" - ) - else: - desc.append( - "DRBUDDI used multiple motion-corrected DWI series acquired " - "with opposite phase encoding " - "directions. A b=0 image **and** the Fractional Anisotropy " - "images from both phase encoding diesctions were used together in " - "a multi-modal registration to estimate" - ) - desc.append("the susceptibility-induced off-resonance field.") - - if t2w_sdc: - desc.append("A T2-weighted image was included in the multimodal registration.") - desc.append( - "Signal intensity was adjusted " - "in the final interpolated images using a method similar to LSR.\n\n" - ) - return " ".join(desc) diff --git a/qsirecon/utils/bids.py b/qsirecon/utils/bids.py index 319ddb3f..6cf27819 100644 --- a/qsirecon/utils/bids.py +++ b/qsirecon/utils/bids.py @@ -40,37 +40,8 @@ import warnings from pathlib import Path -import nibabel as nb -import numpy as np from bids import BIDSLayout -IMPORTANT_DWI_FIELDS = [ - # From image headers: - "Obliquity", - "ImageOrientation", - "NumVolumes", - "Dim1Size", - "Dim2Size", - "Dim3Size", - "VoxelSizeDim1", - "VoxelSizeDim2", - "VoxelSizeDim3", - # From sidecars: - "ParallelReductionFactorInPlane", - "ParallelAcquisitionTechnique", - "ParallelAcquisitionTechnique", - "PartialFourier", - "PhaseEncodingDirection", - "EffectiveEchoSpacing", - "TotalReadoutTime", - "EchoTime", - "SliceEncodingDirection", - "DwellTime", - "FlipAngle", - "MultibandAccelerationFactor", - "RepetitionTime", -] - class BIDSError(ValueError): def __init__(self, message, bids_root): @@ -177,41 +148,6 @@ def collect_participants(bids_dir, participant_label=None, strict=False, bids_va return found_label -def collect_data(bids_dir, participant_label, filters=None, bids_validate=True): - """Use pybids to retrieve the input data for a given participant.""" - if isinstance(bids_dir, BIDSLayout): - layout = bids_dir - else: - layout = BIDSLayout(str(bids_dir), validate=bids_validate) - - queries = { - "fmap": {"datatype": "fmap"}, - "sbref": {"datatype": "func", "suffix": "sbref"}, - "flair": {"datatype": "anat", "suffix": "FLAIR"}, - "t2w": {"datatype": "anat", "suffix": "T2w"}, - "t1w": {"datatype": "anat", "suffix": "T1w"}, - "roi": {"datatype": "anat", "suffix": "roi"}, - "dwi": {"datatype": "dwi", "part": ["mag", None], "suffix": "dwi"}, - } - bids_filters = filters or {} - for acq, entities in bids_filters.items(): - queries[acq].update(entities) - - subj_data = { - dtype: sorted( - layout.get( - return_type="file", - subject=participant_label, - extension=["nii", "nii.gz"], - **query, - ) - ) - for dtype, query in queries.items() - } - - return subj_data, layout - - def write_derivative_description(bids_dir, deriv_dir): from qsirecon import __version__ @@ -372,30 +308,3 @@ def validate_input_dir(exec_env, bids_dir, participant_label): def _get_shub_version(singularity_url): raise ValueError("Not yet implemented") - - -def update_metadata_from_nifti_header(metadata, nifti_file): - """Update a BIDS metadata dictionary with info from a NIfTI header. - - Code borrowed from CuBIDS. 
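# Hedged sketch of the pybids query pattern inside the removed
# collect_data: one layout.get() call per datatype/suffix query, returning
# sorted file paths. The BIDS root here is hypothetical.
from bids import BIDSLayout

layout = BIDSLayout("/data/bids", validate=False)
dwi_files = sorted(
    layout.get(
        return_type="file",
        subject="01",
        datatype="dwi",
        suffix="dwi",
        extension=["nii", "nii.gz"],
    )
)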
- """ - img = nb.load(nifti_file) - # get important info from niftis - obliquity = np.any(nb.affines.obliquity(img.affine) > 1e-4) - voxel_sizes = img.header.get_zooms() - matrix_dims = img.shape - # add nifti info to corresponding sidecars​ - - metadata["Obliquity"] = str(obliquity) - metadata["VoxelSizeDim1"] = float(voxel_sizes[0]) - metadata["VoxelSizeDim2"] = float(voxel_sizes[1]) - metadata["VoxelSizeDim3"] = float(voxel_sizes[2]) - metadata["Dim1Size"] = matrix_dims[0] - metadata["Dim2Size"] = matrix_dims[1] - metadata["Dim3Size"] = matrix_dims[2] - if img.ndim == 4: - metadata["NumVolumes"] = matrix_dims[3] - elif img.ndim == 3: - metadata["NumVolumes"] = 1.0 - orient = nb.orientations.aff2axcodes(img.affine) - metadata["ImageOrientation"] = "".join(orient) + "+" diff --git a/qsirecon/utils/bspline.py b/qsirecon/utils/bspline.py deleted file mode 100644 index bb59c2e8..00000000 --- a/qsirecon/utils/bspline.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -import numpy as np -from nipype import logging - -LOGGER = logging.getLogger("nipype.interfaces") - - -def get_ijk(data, offset=0): - """ - Calculates voxel coordinates from data - """ - from numpy import mgrid - - if not isinstance(offset, (list, tuple)): - offset = [offset] * 3 - - grid = mgrid[ - offset[0] : (offset[0] + data.shape[0]), - offset[1] : (offset[1] + data.shape[1]), - offset[2] : (offset[2] + data.shape[2]), - ] - return grid.reshape(3, -1).T - - -def compute_affine(data, zooms): - """ - Compose a RAS affine mat, since the affine of the image might not be RAS - """ - aff = np.eye(4) * (list(zooms) + [1]) - aff[:3, 3] -= aff[:3, :3].dot(np.array(data.shape[:3], dtype=float) - 1.0) * 0.5 - return aff - - -def tbspl_eval(points, knots, zooms, njobs=None): - """ - Evaluate tensor product BSpline - """ - raise Exception("Removed BSpline") - - -def _evalp(args): - import numpy as np - from scipy.sparse import csr_matrix - - point, knots, vbspl, zooms = args - u_vec = (knots - point[np.newaxis, ...]) / zooms[np.newaxis, ...] - c = vbspl(u_vec.reshape(-1)).reshape((knots.shape[0], 3)).prod(axis=1) - return csr_matrix(c) diff --git a/qsirecon/utils/grouping.py b/qsirecon/utils/grouping.py deleted file mode 100644 index 604009c6..00000000 --- a/qsirecon/utils/grouping.py +++ /dev/null @@ -1,1220 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Utilities to group scans based on their acquisition parameters -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Download many variations of fieldmaps and dwi data - -Examples --------- -Set up tests ->>> import os ->>> from qsirecon.utils.testing import get_grouping_test_data ->>> data_root = get_grouping_test_data() ->>> os.chdir(data_root) -""" -import logging -from collections import defaultdict - -from nipype.utils.filemanip import split_filename - -from .. import config -from ..interfaces.bids import get_bids_params - -LOGGER = logging.getLogger("nipype.workflow") - - -def group_dwi_scans( - subject_data, - using_fsl=False, - combine_scans=True, - ignore_fieldmaps=False, - concatenate_distortion_groups=False, -): - """Determine which scans can be concatenated based on their acquisition parameters. 
- - Parameters - ---------- - bids_layout : :obj:`pybids.BIDSLayout` - A PyBIDS layout - group_for_eddy : :obj:`bool` - Should a plus and minus series be grouped together for TOPUP/eddy? - combine_scans : :obj:`bool` - Should scan concatention happen? - concatenate_distortion_groups : :obj:`bool` - Will distortion groups get merged at the end of the pipeline? - - Returns - ------- - scan_groups : :obj:`list` of :obj:`dict` - A dict where the keys are the BIDS derivatives name of the output file after - concatenation. The values are lists of dwi files in that group. - """ - # Handle the grouping of multiple dwi files within a session - dwi_session_groups = get_session_groups(config.execution.layout, subject_data, combine_scans) - - # Group them by their warp group - dwi_fmap_groups = [] - for dwi_session_group in dwi_session_groups: - dwi_fmap_groups.extend( - group_by_warpspace(dwi_session_group, config.execution.layout, ignore_fieldmaps) - ) - - if using_fsl: - return group_for_eddy(dwi_fmap_groups) - - if concatenate_distortion_groups: - return dwi_fmap_groups, group_for_concatenation(dwi_fmap_groups) - - return dwi_fmap_groups, {} - - -def get_session_groups(layout, subject_data, combine_all_dwis): - """Handle the grouping of multiple dwi files within a session. - - Parameters - ---------- - layout : :obj:`pybids.BIDSLayout` - A PyBIDS layout - subject_data : :obj:`dict` - A dictionary of BIDS data for a single subject - combine_all_dwis : :obj:`bool` - If True, combine all dwi files within a session into a single group - - Returns - ------- - dwi_session_groups : :obj:`list` of :obj:`list` - A list of lists of dwi files. Each list of dwi files is a group of scans - that can be concatenated together. - """ - sessions = layout.get_sessions() if layout is not None else [] - all_dwis = subject_data["dwi"] - dwi_session_groups = [] - if not combine_all_dwis: - dwi_session_groups = [[dwi] for dwi in all_dwis] - - else: - if sessions: - LOGGER.info("Combining all dwi files within each available session:") - for session in sessions: - session_files = [img for img in all_dwis if "ses-" + session in img] - LOGGER.info("\t- %d scans in session %s", len(session_files), session) - dwi_session_groups.append(session_files) - else: - LOGGER.info("Combining all %d dwis within the single available session", len(all_dwis)) - dwi_session_groups = [all_dwis] - - return dwi_session_groups - - -FMAP_PRIORITY = { - "dwi": 0, - "epi": 1, - "fieldmap": 2, - "phasediff": 3, - "phase1": 4, - "phase": 4, - "syn": 5, -} - - -def get_highest_priority_fieldmap(fmap_infos): - """Return a dictionary describing the highest priority fieldmap. - - Parameters - ---------- - fmap_infos : :obj:`list` of :obj:`dict` - A list of dictionaries describing fieldmaps. Each dictionary must have a - ``suffix`` key and may have an ``epi`` key. - - Returns - ------- - selected_fmap_info : :obj:`dict` - The dictionary describing the highest priority fieldmap. - This will be the entry from ``fmap_infos`` with the highest priority value. - If no fieldmaps are found, the dictionary will have a ``suffix`` key with a - value of ``None``. 
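# Minimal sketch of the session rule in get_session_groups above: with
# combine_scans=True, dwi files are pooled within each ses- entity;
# otherwise every file becomes its own group. Paths are invented.
all_dwis = [
    "/data/sub-1/ses-1/dwi/sub-1_ses-1_run-1_dwi.nii.gz",
    "/data/sub-1/ses-1/dwi/sub-1_ses-1_run-2_dwi.nii.gz",
    "/data/sub-1/ses-2/dwi/sub-1_ses-2_dwi.nii.gz",
]
dwi_session_groups = []
for session in ("1", "2"):
    dwi_session_groups.append([img for img in all_dwis if "ses-" + session in img])
# dwi_session_groups == [[both ses-1 runs], [the ses-2 scan]]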
- - Examples - -------- - Invent some potential fieldmaps - >>> epi_fmap1 = {"epi": "/data/sub-1/fmap/sub-1_dir-AP_run-1_epi.nii.gz", "suffix": "epi"} - >>> epi_fmap2 = {"epi": "/data/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz", "suffix": "epi"} - >>> epi_fmap3 = {"epi": "/data/sub-1/fmap/sub-1_dir-PA_epi.nii.gz", "suffix": "epi"} - >>> - >>> phasediff_fmap = {"phasediff": "/data/sub-1/fmap/sub-1_phasediff.nii.gz", - ... "suffix": "phasediff"} - >>> phases_fmap = {"phase1": "/data/sub-1/fmap/sub-1_phase1.nii.gz", - ... "suffix": "phase1"} - >>> - >>> dwi_fmap1 = {"dwi": "/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz", "suffix": "dwi"} - >>> dwi_fmap2 = {'suffix': 'dwi', - ... 'dwi': ['/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz']} - - When there are no fieldmaps in ``fmaps/``, but a reverse PE DWI series - >>> get_highest_priority_fieldmap([dwi_fmap1]) - {'dwi': '/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz', 'suffix': 'dwi'} - - There is both an epi fieldmap and a phase1/phase2 GRE fieldmap - >>> get_highest_priority_fieldmap([epi_fmap1, phases_fmap]) - {'suffix': 'epi', 'epi': ['/data/sub-1/fmap/sub-1_dir-AP_run-1_epi.nii.gz']} - - Multiple EPI fieldmaps - >>> get_highest_priority_fieldmap( - ... [epi_fmap1, epi_fmap2, epi_fmap3]) # doctest: +NORMALIZE_WHITESPACE - {'suffix': 'epi', - 'epi': ['/data/sub-1/fmap/sub-1_dir-AP_run-1_epi.nii.gz', - '/data/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz', - '/data/sub-1/fmap/sub-1_dir-PA_epi.nii.gz']} - - An EPI fieldmap from ``fmap/`` should be chosen over a reverse PE DWI series - >>> get_highest_priority_fieldmap([epi_fmap1, dwi_fmap2]) - {'suffix': 'epi', 'epi': ['/data/sub-1/fmap/sub-1_dir-AP_run-1_epi.nii.gz']} - - """ - # Find fieldmaps - default_priority = max(FMAP_PRIORITY.values()) + 1 - priority = default_priority - selected_fmap_info = {"suffix": None} - - # collapse multiple EPI fieldmaps into one entry - epi_fmaps = sorted( - [fmap_info["epi"] for fmap_info in fmap_infos if fmap_info.get("suffix") == "epi"] - ) - if epi_fmaps: - epi_info = {"suffix": "epi", "epi": epi_fmaps} - fmap_infos = [ - fmap_info for fmap_info in fmap_infos if fmap_info.get("suffix") != "epi" - ] + [epi_info] - - # Select the highest priority fieldmap - for fmap_info in fmap_infos: - if fmap_info.get("suffix") == "phase": - fmap_info["suffix"] = "phase1" - - fmap_type = fmap_info.get("suffix") - if fmap_type not in FMAP_PRIORITY: - continue - - this_priority = FMAP_PRIORITY[fmap_type] - if this_priority < priority: - priority = this_priority - selected_fmap_info = fmap_info - - return selected_fmap_info - - -def find_fieldmaps_from_other_dwis(dwi_files, dwi_file_metadatas): - """Find a list of files in the dwi/ directory that can be used for distortion correction. - - It is common to acquire DWI scans with opposite phase encoding directions so they can be - used to correct each other's EPI distortion. There is currently no mechanism in BIDS to - specify whether b=0 scans in dwi/ can be used as fieldmaps for one another. - - Parameters - ---------- - dwi_files : :obj:`list` of :obj:`str` - A list of full paths to dwi nifti files in a BIDS tree. - dwi_file_metadatas : :obj:`list` of :obj:`dict` - A list of dictionaries containing metadata for each dwi file. - Each dictionary should have a ``PhaseEncodingDirection`` key. 
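The pairing in ``find_fieldmaps_from_other_dwis`` hinges on flipping a BIDS phase-encoding code to find reverse-PE partners; isolated as a sketch (assuming single-character axis codes, as the original does):

    def flip_pe(pe_dir: str) -> str:
        """Return the opposite phase-encoding code: 'j' <-> 'j-'."""
        return pe_dir[0] if pe_dir.endswith("-") else pe_dir + "-"

    assert flip_pe("j") == "j-"
    assert flip_pe("i-") == "i"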
- - Returns - ------- - dwi_series_fieldmaps : :obj:`dict` - A dictionary where the keys are the full paths to dwi files and the values are - dictionaries describing the fieldmap. If no fieldmap is found, the dictionary - will be empty. - - Examples - -------- - - A single scan with no opportunities to SDC with a DWI scan - >>> from qsirecon.utils.grouping import find_fieldmaps_from_other_dwis - >>> single_dwi_file = ["/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz"] - >>> single_dwi_file_metadatas = [{"PhaseEncodingDirection": "j"}] - >>> find_fieldmaps_from_other_dwis(single_dwi_file, single_dwi_file_metadatas) - {'/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz': {}} - - Two scans with the same PE direction: again no opportunities to SDC - >>> repeat_dwi_files = ["/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz"] - >>> repeat_dwi_file_metadatas = [{"PhaseEncodingDirection": "j"}, - ... {"PhaseEncodingDirection": "j"}] - >>> find_fieldmaps_from_other_dwis(repeat_dwi_files, - ... repeat_dwi_file_metadatas) # doctest: +NORMALIZE_WHITESPACE - {'/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz': {}, - '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz': {}} - - Paired scans, each in opposite PE directions - >>> paired_dwi_files = [ - ... "/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz"] - >>> paired_dwi_file_metadatas = [ - ... {"PhaseEncodingDirection": "j"}, - ... {"PhaseEncodingDirection": "j-"}] - >>> find_fieldmaps_from_other_dwis(paired_dwi_files, - ... paired_dwi_file_metadatas) # doctest: +NORMALIZE_WHITESPACE - {'/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz']}, - '/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz']}} - - Multiple scans in multiple PE directions - >>> multi_dwi_files = [ - ... "/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz"] - >>> multi_dwi_file_metadatas = [ - ... {"PhaseEncodingDirection": "j"}, - ... {"PhaseEncodingDirection": "j"}, - ... {"PhaseEncodingDirection": "j"}, - ... {"PhaseEncodingDirection": "j-"}, - ... {"PhaseEncodingDirection": "j-"}] - >>> find_fieldmaps_from_other_dwis(multi_dwi_files, - ... 
multi_dwi_file_metadatas) # doctest: +NORMALIZE_WHITESPACE - {'/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - '/data/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz']}, - '/data/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz']}} - - No information available - >>> empty_dwi_files = [ - ... "/data/sub-1/dwi/sub-1_run-1_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_run-2_dwi.nii.gz"] - >>> empty_dwi_file_metadatas = [ - ... {}, - ... {}] - >>> find_fieldmaps_from_other_dwis(empty_dwi_files, - ... empty_dwi_file_metadatas) # doctest: +NORMALIZE_WHITESPACE - {'/data/sub-1/dwi/sub-1_run-1_dwi.nii.gz': {}, - '/data/sub-1/dwi/sub-1_run-2_dwi.nii.gz': {}} - """ - - scans_to_pe_dirs = { - fname: meta.get("PhaseEncodingDirection") - for fname, meta in zip(dwi_files, dwi_file_metadatas) - } - pe_dirs_to_scans = defaultdict(list) - for scan_name, scan_dir in scans_to_pe_dirs.items(): - pe_dirs_to_scans[scan_dir].append(scan_name) - - dwi_series_fieldmaps = {} - for dwi_file in dwi_files: - dwi_series_fieldmaps[dwi_file] = {} - pe_dir = scans_to_pe_dirs[dwi_file] - # if there is no information, don't assume it's ok to combine - if pe_dir is None: - continue - - opposite_pe = pe_dir[0] if pe_dir.endswith("-") else pe_dir + "-" - rpe_dwis = pe_dirs_to_scans[opposite_pe] - - if rpe_dwis: - dwi_series_fieldmaps[dwi_file] = {"suffix": "dwi", "dwi": sorted(rpe_dwis)} - - return dwi_series_fieldmaps - - -def split_by_phase_encoding_direction(dwi_files, metadatas): - """If no fieldmaps have been found for a group of dwi files, split them by PE direction. - - Parameters - ---------- - dwi_files : :obj:`list` of :obj:`str` - A list of full paths to dwi nifti files in a BIDS tree. - metadatas : :obj:`list` of :obj:`dict` - A list of dictionaries containing metadata for each dwi file. - The only field that is used is "PhaseEncodingDirection". - - Returns - ------- - dwi_groups : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. Each dictionary - has the following keys: - - - ``dwi_series``: A list of full paths to dwi nifti files in a BIDS tree. - - ``fieldmap_info``: A dictionary describing the fieldmap. - If no fieldmap is found, the dictionary will be empty. - - ``dwi_series_pedir``: The phase encoding direction of the dwi series. - If no information is available, the value will be an empty string. - - ``concatenated_bids_name``: The BIDS name of the concatenated dwi series. - If no information is available, the value will be an empty string. - - Examples - -------- - - One of each direction (Not likely to see in the wild) - >>> dwi_files = [ - ... '/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz', - ... 
'/data/sub-1/dwi/sub-1_dir-RL_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-LR_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-IS_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-SI_dwi.nii.gz' - ... ] - >>> metadatas = [ - ... {'PhaseEncodingDirection': 'j'}, - ... {'PhaseEncodingDirection': 'j-'}, - ... {'PhaseEncodingDirection': 'i'}, - ... {'PhaseEncodingDirection': 'i-'}, - ... {'PhaseEncodingDirection': 'k'}, - ... {'PhaseEncodingDirection': 'k-'} - ... ] - >>> split_by_phase_encoding_direction(dwi_files, metadatas) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['/data/sub-1/dwi/sub-1_dir-RL_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'i', - 'concatenated_bids_name': 'sub-1_dir-RL'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-LR_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'i-', - 'concatenated_bids_name': 'sub-1_dir-LR'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-IS_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'k', - 'concatenated_bids_name': 'sub-1_dir-IS'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-SI_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'k-', - 'concatenated_bids_name': 'sub-1_dir-SI'}] - - Repeats of some: - >>> dwi_files = [ - ... '/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-RL_dwi.nii.gz' - ... ] - >>> metadatas = [ - ... {'PhaseEncodingDirection': 'j'}, - ... {'PhaseEncodingDirection': 'j'}, - ... {'PhaseEncodingDirection': 'j'}, - ... {'PhaseEncodingDirection': 'j-'}, - ... {'PhaseEncodingDirection': 'i'} - ... ] - >>> split_by_phase_encoding_direction(dwi_files, metadatas) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['/data/sub-1/dwi/sub-1_dir-RL_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'i', - 'concatenated_bids_name': 'sub-1_dir-RL'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA'}] - - Some missing metadata - >>> dwi_files = [ - ... '/data/sub-1/dwi/sub-1_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_run-2_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_run-3_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_run-4_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_run-5_dwi.nii.gz' - ... ] - >>> metadatas = [ - ... {'PhaseEncodingDirection': 'j'}, - ... {'PhaseEncodingDirection': 'j'}, - ... {'PhaseEncodingDirection': 'j-'}, - ... {}, - ... {} - ... 
] - >>> split_by_phase_encoding_direction(dwi_files, metadatas) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['/data/sub-1/dwi/sub-1_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_run-3_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_run-3'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_run-4_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': '', - 'concatenated_bids_name': 'sub-1_run-4'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_run-5_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': '', - 'concatenated_bids_name': 'sub-1_run-5'}] - """ - pe_dir_groups = defaultdict(list) - unknowns = [] - for dwi_file, meta in zip(dwi_files, metadatas): - pe_dir = meta.get("PhaseEncodingDirection") - if pe_dir: - pe_dir_groups[pe_dir].append(dwi_file) - else: - unknowns.append(dwi_file) - - dwi_groups = [] - for pe_dir, dwi_group in sorted(pe_dir_groups.items()): - dwi_groups.append( - { - "dwi_series": dwi_group, - "fieldmap_info": {"suffix": None}, - "dwi_series_pedir": pe_dir, - "concatenated_bids_name": get_concatenated_bids_name(dwi_group), - } - ) - for unknown in unknowns: - dwi_groups.append( - { - "dwi_series": [unknown], - "fieldmap_info": {"suffix": None}, - "dwi_series_pedir": "", - "concatenated_bids_name": get_concatenated_bids_name([unknown]), - } - ) - - return dwi_groups - - -def group_by_warpspace(dwi_files, layout, ignore_fieldmaps): - """Groups a session's DWI files by their acquisition parameters. - - DWIs are grouped by their **warped space**. Two DWI series that are - listed in the IntendedFor field of a fieldmap are assumed to have the same - susceptibility distortions and therefore be in the same warped space. The goal - of this function is to gather DWI series that share a warped space into groups, - where each group is a list of scans that can be combined after unwarping. - - Parameters - ---------- - dwi_files : :obj:`list` of :obj:`str` - A list of full paths to dwi nifti files in a BIDS tree - layout : :obj:`pybids.BIDSLayout` - A representation of the BIDS tree - ignore_fieldmaps : :obj:`bool` - If True, ignore any fieldmaps in the ``fmap/`` directory. Images in - ``dwi/`` will still be considered for SDC. - - Returns - ------- - dwi_groups : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. Each dictionary - has the following keys: - - - ``dwi_series``: A list of full paths to dwi nifti files in a BIDS tree. - - ``fieldmap_info``: A dictionary describing the fieldmap. - If no fieldmap is found, the dictionary will be empty. - - ``dwi_series_pedir``: The phase encoding direction of the dwi series. - If no information is available, the value will be an empty string. - - ``concatenated_bids_name``: The BIDS name of the concatenated dwi series. - If no information is available, the value will be an empty string. - - Examples - -------- - - Set up tests - >>> from qsirecon.utils.bids import collect_data - >>> SUBJECT_ID = "1" - - No fieldmap data, a single DWI series - >>> subject_data, layout = collect_data("easy", SUBJECT_ID) - >>> group_by_warpspace( - ... 
subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['...sub-1_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1'}] - - Two DWIs with the same PE direction, to be concatenated - >>> subject_data, layout = collect_data("concat1", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../concat1/sub-1/dwi/sub-1_run-01_dwi.nii.gz', - '.../concat1/sub-1/dwi/sub-1_run-02_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1'}] - - Two DWI series intended to SDC each other - >>> subject_data, layout = collect_data("opposite", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}, - {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz']}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA'}] - - Multiple DWI series in two different PE directions - >>> subject_data, layout = collect_data("opposite_concat", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}, - {'dwi_series': ['.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz']}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA'}] - - A phasediff fieldmap defines the warped group - >>> subject_data, layout = collect_data("phasediff", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../phasediff/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../phasediff/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'phasediff': '.../phasediff/sub-1/fmap/sub-1_phasediff.nii.gz', - 'magnitude1': '.../magnitude1/sub-1/fmap/sub-1_magnitude1.nii.gz', - 'suffix': 'phasediff'}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}] - - Two DWI series, each with its own fieldmap/warped space - >>> subject_data, layout = collect_data("separate_fmaps", SUBJECT_ID) - >>> group_by_warpspace( - ... 
subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../separate_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'epi', - 'epi': ['.../separate_fmaps/sub-1/fmap/sub-1_dir-PA_run-1_epi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}, - {'dwi_series': ['.../separate_fmaps/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'epi', - 'epi': ['.../separate_fmaps/sub-1/fmap/sub-1_dir-PA_run-2_epi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP_run-2'}] - - Same as above but ignoring fieldmaps. Data gets concatenated - >>> subject_data, layout = collect_data("separate_fmaps", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, True) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../separate_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../separate_fmaps/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}] - - Two DWI series, opposite PE directions, dedicated EPI fieldmap for each - >>> subject_data, layout = collect_data("mixed_fmaps", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'epi', - 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-PA_run-1_epi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}, - {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'epi', - 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz']}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA_run-2'}] - - Same as last one, but ignore fieldmaps. The DWI series will be used for SDC instead - >>> subject_data, layout = collect_data("mixed_fmaps", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, True) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}, - {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz']}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA_run-2'}] - - - There is no metadata related to epi distortion: don't concatenate anything - >>> subject_data, layout = collect_data("missing_info", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../missing_info/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': '', - 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}, - {'dwi_series': ['.../missing_info/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': '', - 'concatenated_bids_name': 'sub-1_dir-PA_run-2'}] - - A bizarre mix of PE directions and some missing data - >>> subject_data, layout = collect_data("wtf", SUBJECT_ID) - >>> group_by_warpspace( - ... 
subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../wtf/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../wtf/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../wtf/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '.../wtf/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}, - {'dwi_series': ['.../wtf/sub-1/dwi/sub-1_dir-IS_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'k-', - 'concatenated_bids_name': 'sub-1_dir-IS'}, - {'dwi_series': ['.../wtf/sub-1/dwi/sub-1_run-1_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': '', - 'concatenated_bids_name': 'sub-1_run-1'}, - {'dwi_series': ['.../wtf/sub-1/dwi/sub-1_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': '', - 'concatenated_bids_name': 'sub-1_run-2'}, - {'dwi_series': ['.../wtf/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '.../wtf/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../wtf/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../wtf/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz']}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA'}] - """ - # For doc-building - if layout is None: - LOGGER.warning("Assuming we're building docs") - return [ - { - "dwi_series": dwi_files, - "fieldmap_info": {"suffix": None}, - "dwi_series_pedir": "j", - "concatenated_bids_name": "sub-1", - } - ] - - # Get the metadata from every dwi file - dwi_metadatas = [layout.get_metadata(dwi_file) for dwi_file in dwi_files] - # Check for any data in dwi/ that could be used for distortion correction - dwi_series_fieldmaps = find_fieldmaps_from_other_dwis(dwi_files, dwi_metadatas) - - # Find the best fieldmap for each file. - best_fieldmap = {} - grouped_by_fmap = defaultdict(list) - for dwi_file in dwi_files: - all_fmaps = [dwi_series_fieldmaps[dwi_file]] - if not ignore_fieldmaps: - fmap_fmaps = layout.get_fieldmap(dwi_file, return_list=True) - all_fmaps += fmap_fmaps - - # Find the highest priority fieldmap for this dwi file - best_fmap = get_highest_priority_fieldmap(all_fmaps) - best_fieldmap[dwi_file] = best_fmap - - # Add the dwi file to a list of those corrected by this fieldmap - fmap_key = tuple(best_fmap[best_fmap["suffix"]]) if best_fmap["suffix"] else "None" - grouped_by_fmap[fmap_key].append(dwi_file) - - # Create the final groups - dwi_groups = [] - for fmap_key, dwi_group in grouped_by_fmap.items(): - if fmap_key == "None": - dwi_groups.extend( - split_by_phase_encoding_direction( - dwi_group, [layout.get_metadata(dwi_file) for dwi_file in dwi_group] - ) - ) - else: - example_dwi_file = dwi_group[0] - pe_direction = layout.get_metadata(example_dwi_file).get("PhaseEncodingDirection") - dwi_groups.append( - { - "dwi_series": dwi_group, - "fieldmap_info": best_fieldmap[example_dwi_file], - "dwi_series_pedir": pe_direction, - "concatenated_bids_name": get_concatenated_bids_name(dwi_group), - } - ) - - return dwi_groups - - -def merge_dwi_groups(dwi_groups_plus, dwi_groups_minus): - """Convert two dwi groups into a single group that will be concatenated for FSL. - - Parameters - ---------- - dwi_groups_plus : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. - Each dictionary has the following keys: - - - ``dwi_series``: A list of full paths to dwi nifti files in a BIDS tree. - - ``fieldmap_info``: A dictionary describing the fieldmap. 
- If no fieldmap is found, the dictionary will be empty. - - ``dwi_series_pedir``: The phase encoding direction of the dwi series. - If no information is available, the value will be an empty string. - - ``concatenated_bids_name``: The BIDS name of the concatenated dwi series. - If no information is available, the value will be an empty string. - - dwi_groups_minus : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. - Each dictionary has the same keys as ``dwi_groups_plus``. - - Returns - ------- - merged_group : :obj:`dict` - A dictionary describing the merged group of dwi files. - The dictionary has the same keys as ``dwi_groups_plus``. - - Examples - -------- - - Set up tests - >>> SUBJECT_ID = "1" - - AP/PA fieldmaps and paired DWI series - >>> plus_groups = [ - ... {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'epi', - ... 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-PA_run-1_epi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}] - >>> minus_groups = [ - ... {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'epi', - ... 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 'concatenated_bids_name': 'sub-1_dir-PA_run-2'}] - >>> merge_dwi_groups(plus_groups, minus_groups) # doctest: +NORMALIZE_WHITESPACE - {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz', - '.../mixed_fmaps/sub-1/fmap/sub-1_dir-PA_run-1_epi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'} - - Two series SDC each other - >>> plus_groups = [ - ... {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_dir-AP'}] - >>> minus_groups = [ - ... {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 'concatenated_bids_name': 'sub-1_dir-PA'}] - >>> merge_dwi_groups(plus_groups, minus_groups) # doctest: +NORMALIZE_WHITESPACE - {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'} - - An odd case: one has an EPI - >>> plus_groups = [ - ... {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_dir-AP'}] - >>> minus_groups = [ - ... {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'epi', - ... 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 
'concatenated_bids_name': 'sub-1_dir-PA'}] - >>> merge_dwi_groups(plus_groups, minus_groups) # doctest: +NORMALIZE_WHITESPACE - {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz'], - 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'} - - """ - dwi_files = [] - rpe_files = [] - fmap_files = [] - - for dwi_group in dwi_groups_plus: - dwi_files += dwi_group["dwi_series"] - fmap_type = dwi_group["fieldmap_info"].get("suffix") - if fmap_type == "dwi": - rpe_files += dwi_group["fieldmap_info"]["dwi"] - elif fmap_type == "epi": - fmap_files += dwi_group["fieldmap_info"]["epi"] - pe_dir = dwi_group["dwi_series_pedir"] - - for dwi_group in dwi_groups_minus: - rpe_files += dwi_group["dwi_series"] - fmap_type = dwi_group["fieldmap_info"].get("suffix") - if fmap_type == "dwi": - dwi_files += dwi_group["fieldmap_info"]["dwi"] - elif fmap_type == "epi": - fmap_files += dwi_group["fieldmap_info"]["epi"] - - dwi_files = sorted(set(dwi_files)) - rpe_files = sorted(set(rpe_files)) - fmap_files = sorted(set(fmap_files)) - fieldmap_info = {"suffix": "rpe_series", "rpe_series": rpe_files} - if fmap_files: - fieldmap_info["epi"] = fmap_files - - merged_group = { - "dwi_series": dwi_files, - "dwi_series_pedir": pe_dir, - "fieldmap_info": fieldmap_info, - "concatenated_bids_name": get_concatenated_bids_name(dwi_files + rpe_files), - } - return merged_group - - -def group_for_eddy(all_dwi_fmap_groups): - """Find matched pairs of phase encoding directions that can be combined for TOPUP/eddy. - - Any groups that don't have a phase encoding direction won't be correctable by eddy/TOPUP. - - Parameters - ---------- - all_dwi_fmap_groups : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. - - Returns - ------- - eddy_groups : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. - Each dictionary has the following keys: - - - ``dwi_series``: A list of full paths to dwi nifti files in a BIDS tree. - - ``fieldmap_info``: A dictionary describing the fieldmap. - If no fieldmap is found, the dictionary will be empty. - - ``dwi_series_pedir``: The phase encoding direction of the dwi series. - If no information is available, the value will be an empty string. - - ``concatenated_bids_name``: The BIDS name of the concatenated dwi series. - If no information is available, the value will be an empty string. - - Examples - -------- - - Paired DWI series to correct each other: - >>> dwi_groups = [ - ... {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}, - ... {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 
'concatenated_bids_name': 'sub-1_dir-PA_run-2'}] - >>> group_for_eddy(dwi_groups) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'}] - - AP/PA EPI fieldmaps - >>> dwi_groups = [ - ... {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'epi', - ... 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-PA_run-1_epi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}, - ... {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'epi', - ... 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 'concatenated_bids_name': 'sub-1_dir-PA_run-2'}] - >>> group_for_eddy(dwi_groups) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz', - '.../mixed_fmaps/sub-1/fmap/sub-1_dir-PA_run-1_epi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'}] - - Repeated scans per PE direction - >>> dwi_groups = [ - ... {'dwi_series': ['.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_dir-AP'}, - ... {'dwi_series': ['.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 'concatenated_bids_name': 'sub-1_dir-PA'}] - >>> group_for_eddy(dwi_groups) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'}] - - A phasediff fieldmap (Not used by eddy) - >>> dwi_groups = [ - ... {'dwi_series': ['.../phasediff/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '.../phasediff/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'phasediff': '.../phasediff/sub-1/fmap/sub-1_phasediff.nii.gz', - ... 'magnitude1': '.../magnitude1/sub-1/fmap/sub-1_magnitude1.nii.gz', - ... 'suffix': 'phasediff'}, - ... 'dwi_series_pedir': 'j', - ... 
'concatenated_bids_name': 'sub-1_dir-AP'}] - >>> group_for_eddy(dwi_groups) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['.../phasediff/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../phasediff/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'phasediff': '.../phasediff/sub-1/fmap/sub-1_phasediff.nii.gz', - 'magnitude1': '.../magnitude1/sub-1/fmap/sub-1_magnitude1.nii.gz', - 'suffix': 'phasediff'}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}] - - """ - eddy_dwi_groups = [] - eddy_compatible_suffixes = ("dwi", "epi") - session_groups = _group_by_sessions(all_dwi_fmap_groups) - for _, dwi_fmap_groups in session_groups.items(): - for pe_dir in "ijk": - plus_series = [ - dwi_group - for dwi_group in dwi_fmap_groups - if dwi_group.get("dwi_series_pedir") == pe_dir - and dwi_group["fieldmap_info"].get("suffix") in eddy_compatible_suffixes - ] - minus_series = [ - dwi_group - for dwi_group in dwi_fmap_groups - if dwi_group.get("dwi_series_pedir") == pe_dir + "-" - and dwi_group["fieldmap_info"].get("suffix") in eddy_compatible_suffixes - ] - - # Can these be grouped? - if plus_series and minus_series: - eddy_dwi_groups.append(merge_dwi_groups(plus_series, minus_series)) - else: - eddy_dwi_groups.extend(plus_series + minus_series) - - # Add separate groups for non-compatible fieldmaps - for dwi_group in dwi_fmap_groups: - if dwi_group["fieldmap_info"].get("suffix") not in eddy_compatible_suffixes: - eddy_dwi_groups.append(dwi_group) - - return eddy_dwi_groups, { - group["concatenated_bids_name"]: group["concatenated_bids_name"] - for group in eddy_dwi_groups - } - - -def group_for_concatenation(all_dwi_fmap_groups): - """Find matched pairs of phase encoding directions that can be combined after SHORELine. - - Any groups that don't have a phase encoding direction won't be correctable by SHORELine. - - Parameters - ---------- - all_dwi_fmap_groups : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. - - Returns - ------- - concatenation_grouping : :obj:`dict` - A dictionary mapping the concatenated BIDS name of each group to the name of the - group that it should be concatenated with. - """ - concatenation_grouping = {} - session_groups = _group_by_sessions(all_dwi_fmap_groups) - for _, dwi_fmap_groups in session_groups.items(): - all_images = [] - for group in dwi_fmap_groups: - all_images.extend(group["dwi_series"]) - group_name = get_concatenated_bids_name(all_images) - # Add separate groups for non-compatible fieldmaps - for group in dwi_fmap_groups: - concatenation_grouping[group["concatenated_bids_name"]] = group_name - - return concatenation_grouping - - -def get_concatenated_bids_name(dwi_group): - """Derive the output file name for a group of dwi files. - - Strip away non-shared key/values from the input list of files. This function - assumes you have already split the dwi group into something meaningful and - really want to combine all the inputs. - - Parameters - ---------- - dwi_group : :obj:`list` of :obj:`str` - A list of full paths to dwi nifti files in a BIDS tree. - - Returns - ------- - fname : :obj:`str` - The BIDS name of the concatenated dwi series. - - Examples - -------- - >>> get_concatenated_bids_name([ - ... '/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-4_dwi.nii.gz' - ... ]) - 'sub-1_dir-AP' - - >>> get_concatenated_bids_name([ - ... 
'/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz' - ... ]) - 'sub-1' - - - >>> get_concatenated_bids_name([ - ... '/data/sub-1/dwi/sub-1_acq-HCP_dir-AP_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_acq-HCP_dir-AP_run-2_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_acq-HCP_dir-PA_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_acq-HCP_dir-PA_run-2_dwi.nii.gz' - ... ]) - 'sub-1_acq-HCP' - - """ - # If a single file, use its name, otherwise use the common prefix - if len(dwi_group) > 1: - fname = _get_common_bids_fields(dwi_group) - parts = fname.split("_") - full_parts = [part for part in parts if not part.endswith("-")] - fname = "_".join(full_parts) - else: - input_fname = dwi_group[0] - fname = split_filename(input_fname)[1] - - if fname.endswith("_dwi"): - fname = fname[:-4] - - return fname.replace(".", "").replace(" ", "") - - -def _get_common_bids_fields(fnames): - """Find the common fields in a list of BIDS filenames. - - Parameters - ---------- - fnames : :obj:`list` of :obj:`str` - A list of full paths to dwi nifti files in a BIDS tree. - - Returns - ------- - fname : :obj:`str` - The common fields in the filenames. - """ - bids_keys = defaultdict(set) - for fname in fnames: - basename = split_filename(fname)[1] - for token in basename.split("_"): - parts = token.split("-") - if len(parts) == 2: - key, value = parts - bids_keys[key].update((value,)) - - # Find all the keys with a single unique value - common_bids = [] - for key in ["sub", "ses", "acq", "dir", "run"]: - if len(bids_keys[key]) == 1: - common_bids.append(key + "-" + bids_keys[key].pop()) - - return "_".join(common_bids) - - 
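Concretely, the name-derivation helpers above behave like this (made-up paths; ``run`` differs across the inputs, so it drops out of the shared name):

    files = [
        "/data/sub-1/dwi/sub-1_acq-HCP_dir-AP_run-1_dwi.nii.gz",
        "/data/sub-1/dwi/sub-1_acq-HCP_dir-AP_run-2_dwi.nii.gz",
    ]
    _get_common_bids_fields(files)     # -> 'sub-1_acq-HCP_dir-AP'
    get_concatenated_bids_name(files)  # -> 'sub-1_acq-HCP_dir-AP'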
-def _group_by_sessions(dwi_fmap_groups): - """Create a lookup of distortion groups by session - - Parameters - ---------- - dwi_fmap_groups : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. - - Returns - ------- - ses_lookup : :obj:`dict` - A dictionary mapping session ids to lists of dwi_fmap_groups. - - Examples - -------- - Paired DWI series to correct each other: - >>> dwi_groups = [ - ... {'dwi_series': ['.../mixed_fmaps/sub-1/ses-1/dwi/sub-1_ses-1_dir-AP_run-1_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../mixed_fmaps/sub-1/ses-1/dwi/sub-1_ses-1_dir-PA_run-2_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_ses-1_dir-AP_run-1'}, - ... {'dwi_series': ['.../mixed_fmaps/sub-1/ses-1/dwi/sub-1_ses-1_dir-PA_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../mixed_fmaps/sub-1/ses-1/dwi/sub-1_ses-1_dir-AP_run-1_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 'concatenated_bids_name': 'sub-1_ses-1_dir-PA_run-2'}, - ... {'dwi_series': [ - ... '.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-AP_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-AP_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-PA_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-PA_run-2_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_ses-2_dir-AP'}, - ... {'dwi_series': [ - ... '.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-PA_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-PA_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-AP_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-AP_run-2_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 'concatenated_bids_name': 'sub-1_ses-2_dir-PA'}] - """ - ses_lookup = defaultdict(list) - for group in dwi_fmap_groups: - bids_info = get_bids_params(group["concatenated_bids_name"]) - ses_lookup[bids_info["session_id"]].append(group) - - return ses_lookup diff --git a/qsirecon/utils/ingress.py b/qsirecon/utils/ingress.py index 2bf58c6a..0a1ba58e 100644 --- a/qsirecon/utils/ingress.py +++ b/qsirecon/utils/ingress.py @@ -42,59 +42,6 @@ def missing_from_ukb_directory(ukb_subject_dir): return [str(fpath) for fpath in required_files if not fpath.exists()] - -def find_ukb_directory(ukb_directory_list, subject_id): - """Find a UKB directory for a given subject ID. - - Parameters - ---------- - ukb_directory_list : :obj:`list` of :obj:`pathlib.Path` - A list of ukb directories to search. - subject_id : :obj:`str` - The subject ID to search for. - - Returns - ------- - ukb_directory : :obj:`pathlib.Path` - The path to the ukb directory. - """ - potential_directories = [ - subdir for subdir in ukb_directory_list if subdir.name.startswith(subject_id) - ] - - # If nothing starts with the subject id, then we're out of luck - if not potential_directories: - raise Exception(f"No UKB directory available for {subject_id}") - - complete_dirs = [] - for potential_directory in potential_directories: - missing_files = missing_from_ukb_directory(potential_directory) - if not missing_files: - complete_dirs.append(potential_directory) - - # Too many complete matches: ambiguous subject ID - if len(complete_dirs) > 1: - raise Exception( - "Provide a more specific subject filter: more than one directory matches " - + subject_id - + "\n" - + "\n".join(map(str, complete_dirs)) - ) - - # There were potential directories, but none were complete - if not complete_dirs: - error_report = "\n".join( - [ - str(pdir.absolute()) - + " missing:\n " - + "\n ".join(missing_from_ukb_directory(pdir)) - for pdir in potential_directories - ] - ) - raise Exception(f"No complete directories found for {subject_id}:\n{error_report}") - - return complete_dirs[0] - - def create_ukb_layout(ukb_dir, participant_label=None): """Find all valid ukb directories under ukb_dir. 
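``find_ukb_directory`` insists on exactly one complete match among the candidates; a usage sketch against the pre-removal code (the ``/ukb`` root and subject ID are invented):

    from pathlib import Path

    candidates = sorted(p for p in Path("/ukb").iterdir() if p.is_dir())
    # raises if nothing starts with the ID, if several complete matches exist,
    # or if every candidate is missing required files
    subject_dir = find_ukb_directory(candidates, "1234567")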
diff --git a/qsirecon/utils/misc.py b/qsirecon/utils/misc.py index 72ef65b9..5781bd78 100644 --- a/qsirecon/utils/misc.py +++ b/qsirecon/utils/misc.py @@ -13,60 +13,3 @@ def check_deps(workflow): for node in workflow._get_all_nodes() if (hasattr(node.interface, "_cmd") and which(node.interface._cmd.split()[0]) is None) ) - - -def fix_multi_T1w_source_name(in_files): - """Make up a generic source name when there are multiple T1s. - - >>> fix_multi_T1w_source_name([ - ... '/path/to/sub-045_ses-test_T1w.nii.gz', - ... '/path/to/sub-045_ses-retest_T1w.nii.gz']) - '/path/to/sub-045_T1w.nii.gz' - """ - import os - - from nipype.utils.filemanip import filename_to_list - - base, in_file = os.path.split(filename_to_list(in_files)[0]) - subject_label = in_file.split("_", 1)[0].split("-")[1] - return os.path.join(base, f"sub-{subject_label}_T1w.nii.gz") - - -def fix_multi_source_name(in_files, dwi_only, anatomical_contrast="T1w"): - """Make up a generic source name when there are multiple source files. - - >>> fix_multi_source_name([ - ... '/path/to/sub-045_ses-test_T1w.nii.gz', - ... '/path/to/sub-045_ses-retest_T1w.nii.gz']) - '/path/to/sub-045_T1w.nii.gz' - """ - import os - - from nipype.utils.filemanip import filename_to_list - - base, in_file = os.path.split(filename_to_list(in_files)[0]) - subject_label = in_file.split("_", 1)[0].split("-")[1] - if dwi_only: - anatomical_contrast = "dwi" - base = base.replace("/dwi", "/anat") - - return os.path.join(base, f"sub-{subject_label}_{anatomical_contrast}.nii.gz") - - -def add_suffix(in_files, suffix): - """Wrap nipype's fname_presuffix to conveniently just add a suffix. - - >>> add_suffix([ - ... '/path/to/sub-045_ses-test_T1w.nii.gz', - ... '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test') - 'sub-045_ses-test_T1w_test.nii.gz' - """ - import os.path as op - - from nipype.utils.filemanip import filename_to_list, fname_presuffix - - return op.basename(fname_presuffix(filename_to_list(in_files)[0], suffix=suffix)) - - -if __name__ == "__main__": - pass diff --git a/qsirecon/utils/testing.py b/qsirecon/utils/testing.py index ec745654..b59c3b35 100644 --- a/qsirecon/utils/testing.py +++ b/qsirecon/utils/testing.py @@ -8,11 +8,8 @@ """ -import json import logging -import tempfile import unittest -from pathlib import Path from networkx.exception import NetworkXUnfeasible from nipype.interfaces import utility as niu @@ -106,205 +103,3 @@ def assert_inputs_set(self, workflow, additional_inputs={}): with self.assertRaises(Exception): # throws an error if the input is already connected workflow.connect([(dummy_node, node, [("dummy", field)])]) - - -def get_grouping_test_data(): - """Write a number of grouping test datasets to a temporary directory.""" - - dataset_description = { - "Acknowledgements": "", - "Authors": [], - "BIDSVersion": "1.0.2", - "DatasetDOI": "", - "Funding": "", - "HowToAcknowledge": "", - "License": "", - "Name": "test_data", - "ReferencesAndLinks": [], - "template": "project", - } - - base_dir = tempfile.mkdtemp() - empty_bids_dir = Path(base_dir) / "empty_bids" - empty_bids_dir.mkdir(parents=True, exist_ok=True) - - def write_json(pth, content): - with pth.open("w") as f: - json.dump(content, f) - - def make_empty_bids(root, project_name): - project_root = root / project_name - project_root.mkdir(parents=True, exist_ok=True) - (project_root / "README").touch() - write_json(project_root / "dataset_description.json", dataset_description) - (project_root / "sub-1" / "dwi").mkdir(parents=True, exist_ok=True) - (project_root / "sub-1" / "fmap").mkdir(parents=True, exist_ok=True) - (project_root / "sub-1" / "anat").mkdir(parents=True, exist_ok=True) - return project_root / "sub-1" - - def write_test_bids(name, files_and_metas): - test_bids = make_empty_bids(empty_bids_dir, name) - for fname, meta in files_and_metas: - _nifti = fname + ".nii.gz" - _json = fname + ".json" - (test_bids / _nifti).touch() - write_json(test_bids / _json, meta) - return test_bids.parent - - # One dwi, no fmaps - write_test_bids("easy", [("dwi/sub-1_dwi", {"PhaseEncodingDirection": "j"})]) - - write_test_bids( - "concat1", - [ - ("dwi/sub-1_run-01_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_run-02_dwi", {"PhaseEncodingDirection": "j"}), - ], - ) - - write_test_bids( - "opposite", - [ - ("dwi/sub-1_dir-AP_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-PA_dwi", {"PhaseEncodingDirection": "j-"}), - ], - ) - - write_test_bids( - "opposite_concat", - [ - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-AP_run-2_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-PA_run-1_dwi", 
{"PhaseEncodingDirection": "j-"}), - ("dwi/sub-1_dir-PA_run-2_dwi", {"PhaseEncodingDirection": "j-"}), - ], - ) - - write_test_bids( - "phasediff", - [ - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-AP_run-2_dwi", {"PhaseEncodingDirection": "j"}), - ("fmap/sub-1_magnitude1", {"PhaseEncodingDirection": "j"}), - ("fmap/sub-1_magnitude2", {"PhaseEncodingDirection": "j"}), - ( - "fmap/sub-1_phasediff", - { - "PhaseEncodingDirection": "j", - "IntendedFor": [ - "dwi/sub-1_dir-AP_run-1_dwi.nii.gz", - "dwi/sub-1_dir-AP_run-2_dwi.nii.gz", - ], - }, - ), - ], - ) - - write_test_bids( - "epi", - [ - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-AP_run-2_dwi", {"PhaseEncodingDirection": "j"}), - ( - "fmap/sub-1_dir-PA_epi", - { - "PhaseEncodingDirection": "j-", - "IntendedFor": [ - "dwi/sub-1_dir-AP_run-1_dwi.nii.gz", - "dwi/sub-1_dir-AP_run-2_dwi.nii.gz", - ], - }, - ), - ], - ) - - write_test_bids( - "separate_fmaps", - [ - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-AP_run-2_dwi", {"PhaseEncodingDirection": "j"}), - ( - "fmap/sub-1_dir-PA_run-1_epi", - { - "PhaseEncodingDirection": "j-", - "IntendedFor": ["dwi/sub-1_dir-AP_run-1_dwi.nii.gz"], - }, - ), - ( - "fmap/sub-1_dir-PA_run-2_epi", - { - "PhaseEncodingDirection": "j-", - "IntendedFor": ["dwi/sub-1_dir-AP_run-2_dwi.nii.gz"], - }, - ), - ], - ) - - write_test_bids( - "mixed_fmaps", - [ - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-PA_run-2_dwi", {"PhaseEncodingDirection": "j-"}), - ( - "fmap/sub-1_dir-PA_run-1_epi", - { - "PhaseEncodingDirection": "j-", - "IntendedFor": ["dwi/sub-1_dir-AP_run-1_dwi.nii.gz"], - }, - ), - ( - "fmap/sub-1_dir-AP_run-2_epi", - { - "PhaseEncodingDirection": "j", - "IntendedFor": ["dwi/sub-1_dir-PA_run-2_dwi.nii.gz"], - }, - ), - ], - ) - - write_test_bids( - "missing_info", [("dwi/sub-1_dir-AP_run-1_dwi", {}), ("dwi/sub-1_dir-PA_run-2_dwi", {})] - ) - - write_test_bids( - "wtf", - [ - ("dwi/sub-1_run-1_dwi", {}), - ("dwi/sub-1_run-2_dwi", {}), - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-AP_run-2_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-PA_run-1_dwi", {"PhaseEncodingDirection": "j-"}), - ("dwi/sub-1_dir-PA_run-2_dwi", {"PhaseEncodingDirection": "j-"}), - ("dwi/sub-1_dir-IS_dwi", {"PhaseEncodingDirection": "k-"}), - ], - ) - - write_test_bids( - "appa_fmaps", - [ - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-AP_run-2_dwi", {"PhaseEncodingDirection": "j"}), - ( - "fmap/sub-1_dir-PA_run-1_epi", - { - "PhaseEncodingDirection": "j-", - "IntendedFor": [ - "dwi/sub-1_dir-AP_run-1_dwi.nii.gz", - "dwi/sub-1_dir-AP_run-2_dwi.nii.gz", - ], - }, - ), - ( - "fmap/sub-1_dir-AP_run-2_epi", - { - "PhaseEncodingDirection": "j", - "IntendedFor": [ - "dwi/sub-1_dir-AP_run-1_dwi.nii.gz", - "dwi/sub-1_dir-AP_run-2_dwi.nii.gz", - ], - }, - ), - ], - ) - - return empty_bids_dir diff --git a/qsirecon/viz/utils.py b/qsirecon/viz/utils.py index a312f2c2..7e5972e1 100644 --- a/qsirecon/viz/utils.py +++ b/qsirecon/viz/utils.py @@ -103,57 +103,6 @@ def plot_denoise( return out_files -def plot_acpc( - acpc_registered_img, - div_id, - plot_params=None, - order=("z", "x", "y"), - cuts=None, - estimate_brightness=False, - label=None, - compress="auto", -): - """ - Plot the results of an AC-PC transformation. 
- """ - plot_params = plot_params or {} - - # Do the low-b image first - out_files = [] - if estimate_brightness: - plot_params = robust_set_limits( - acpc_registered_img.get_fdata(dtype="float32").reshape(-1), plot_params - ) - - # Plot each cut axis for low-b - for i, mode in enumerate(list(order)): - plot_params["display_mode"] = mode - plot_params["cut_coords"] = [-20.0, 0.0, 20.0] - if i == 0: - plot_params["title"] = label - else: - plot_params["title"] = None - - # Generate nilearn figure - display = plot_anat(acpc_registered_img, **plot_params) - for coord, axis in display.axes.items(): - axis.ax.axvline(0, lw=1) - axis.ax.axhline(0, lw=1) - svg = extract_svg(display, compress=compress) - display.close() - - # Find and replace the figure_1 id. - xml_data = etree.fromstring(svg) - find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS) - find_text(xml_data)[0].set("id", "%s-%s-%s" % (div_id, mode, uuid4())) - - svg_fig = SVGFigure() - svg_fig.root = xml_data - out_files.append(svg_fig) - - return out_files - - def slices_from_bbox(mask_data, cuts=3, padding=0): """Finds equi-spaced cuts for presenting images""" B = np.argwhere(mask_data > 0) diff --git a/qsirecon/workflows/reports.py b/qsirecon/workflows/reports.py deleted file mode 100644 index 02fbf534..00000000 --- a/qsirecon/workflows/reports.py +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -qsirecon interactive report workflow -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. autofunction:: init_qsirecon_wf - -""" -import logging -from copy import deepcopy - -from nipype.interfaces import utility as niu -from nipype.pipeline import engine as pe - -from .. import config -from ..engine import Workflow -from ..interfaces import DerivativesDataSink -from ..interfaces.ingress import QsiReconDWIIngress -from ..interfaces.interchange import qsirecon_output_names, recon_workflow_input_fields -from ..interfaces.reports import InteractiveReport -from ..utils.bids import collect_data - -LOGGER = logging.getLogger("nipype.workflow") - - -def init_json_preproc_report_wf(subject_list): - """ - This workflow creates a json report for the dmriprep-viewer. - - .. workflow:: - :graph2use: orig - :simple_form: yes - - import os - from qsirecon.workflows.reports import init_json_preproc_report_wf - wf = init_json_preproc_report_wf( - subject_list=['qsirecontest'], - work_dir='.', - output_dir='.') - - - Parameters: - - subject_list : list - List of subject labels - work_dir : str - Directory in which to store workflow execution state and temporary - files - output_dir : str - Directory in which to save derivatives - - """ - work_dir = config.execution.work_dir - output_dir = config.execution.output_dir - - qsirecon_wf = Workflow(name="json_reports_wf") - qsirecon_wf.base_dir = work_dir - - for subject_id in subject_list: - single_subject_wf = init_single_subject_json_report_wf( - subject_id=subject_id, - name="single_subject_" + subject_id + "json_report_wf", - output_dir=output_dir, - ) - - for node in single_subject_wf._get_all_nodes(): - node.config = deepcopy(single_subject_wf.config) - qsirecon_wf.add_nodes([single_subject_wf]) - - return qsirecon_wf - - -def init_single_subject_json_report_wf(subject_id, name): - """ - This workflow examines the output of a qsirecon run and creates a json report for - dmriprep-viewer. These are very useful for batch QC-ing QSIRecon runs. - - .. 
-        :graph2use: orig
-        :simple_form: yes
-
-        from qsirecon.workflows.reports import init_single_subject_json_report_wf
-
-        wf = init_single_subject_json_report_wf(
-            subject_id='test',
-            name='single_subject_qsirecontest_wf')
-
-    Parameters
-
-    subject_id : str
-        Subject label
-    name : str
-        Name of workflow
-
-    The output directory, from which derivatives are read and to which the
-    report is written, is taken from ``config.execution``.
-
-    """
-    output_dir = config.execution.output_dir
-    if name in ("single_subject_wf", "single_subject_qsirecontest_wf"):
-        # for documentation purposes
-        subject_data = {
-            "t1w": ["/completely/made/up/path/sub-01_T1w.nii.gz"],
-            "dwi": ["/completely/made/up/path/sub-01_dwi.nii.gz"],
-        }
-        layout = None
-        LOGGER.warning("Building a test workflow")
-    else:
-        subject_data, layout = collect_data(output_dir, subject_id, bids_validate=False)
-    dwi_files = subject_data["dwi"]
-    workflow = Workflow(name=name)
-    scans_iter = pe.Node(niu.IdentityInterface(fields=["dwi_file"]), name="scans_iter")
-    scans_iter.iterables = ("dwi_file", dwi_files)
-    inputnode = pe.Node(
-        niu.IdentityInterface(fields=recon_workflow_input_fields), name="inputnode"
-    )
-    qsirecon_preprocessed_dwi_data = pe.Node(
-        QsiReconDWIIngress(), name="qsirecon_preprocessed_dwi_data"
-    )
-
-    # For doctests
-    if name != "fake":
-        scans_iter.inputs.dwi_file = dwi_files
-
-    interactive_report = pe.Node(InteractiveReport(), name="interactive_report")
-
-    ds_report_json = pe.Node(
-        DerivativesDataSink(base_directory=output_dir, suffix="viewer"),
-        name="ds_report_json",
-        run_without_submitting=True,
-    )
-
-    # Connect the collected diffusion data (gradients, etc) to the inputnode
-    workflow.connect([
-        (scans_iter, qsirecon_preprocessed_dwi_data, [('dwi_file', 'dwi_file')]),
-        (qsirecon_preprocessed_dwi_data, inputnode, [
-            (trait, trait) for trait in qsirecon_output_names]),
-        (inputnode, interactive_report, [
-            ('dwi_file', 'processed_dwi_file'),
-            ('confounds_file', 'confounds_file'),
-            ('qc_file', 'qc_file'),
-            ('mask_file', 'mask_file')]),
-        (interactive_report, ds_report_json, [('out_report', 'in_file')])
-    ])  # fmt:skip
-
-    return workflow
diff --git a/tests/grouping_tests.py b/tests/grouping_tests.py
deleted file mode 100644
index 9fb22bfb..00000000
--- a/tests/grouping_tests.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""
-Test that scans are grouped by fieldmap correctly.
-"""
-import pytest
-from get_data import bids_data, BIDS_DIR
-
-
-test_grouping_conditions = [
-    # Columns: combine_all_dwis, ignore, use_syn, prefer_dedicated_fmaps,
-    # using_eddy, output_groups, fieldmap_types
-    (True, [], False, False, True, ('sub-tester_acq-HASC55',), ('rpe_series',)),
-    (True, [], False, False, False, ('sub-tester_acq-HASC55',), ('rpe_series',)),
-
-    # Don't combine images, but do use fieldmaps
-    (False, [], False, False, True, ('sub-tester_acq-HASC55AP', 'sub-tester_acq-HASC55PA'),
-     ('epi', 'epi')),
-    (False, [], False, False, False, ('sub-tester_acq-HASC55AP', 'sub-tester_acq-HASC55PA'),
-     ('epi', 'epi')),
-    # Requests that fieldmaps be ignored but scans combined; should print an
-    # error. For now these cases are run separately.
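-    # Expected fieldmap types for the cases below: (None, None) when fieldmaps
-    # are ignored without SyN, ('epi', 'epi') when SyN is requested but
-    # dedicated epi fieldmaps exist, and ('syn', 'syn') when fieldmaps are
-    # ignored and SyN is enabled.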
-    (True, ['fieldmaps'], False, True, True,
-     ('sub-tester_acq-HASC55AP', 'sub-tester_acq-HASC55PA'), (None, None)),
-    (True, [], True, True, True,
-     ('sub-tester_acq-HASC55AP', 'sub-tester_acq-HASC55PA'), ('epi', 'epi')),
-    (True, ['fieldmaps'], True, True, True,
-     ('sub-tester_acq-HASC55AP', 'sub-tester_acq-HASC55PA'), ('syn', 'syn')),
-]
-
-
-@pytest.mark.parametrize(
-    "combine_all_dwis,ignore,use_syn,prefer_dedicated_fmaps,using_eddy,"
-    "output_groups,fieldmap_types",
-    test_grouping_conditions)
-def test_grouping_options(
-        combine_all_dwis, ignore, use_syn, prefer_dedicated_fmaps, using_eddy,
-        output_groups, fieldmap_types, bids_data):
-    from qsirecon.utils.bids import collect_data
-    from qsirecon.workflows.base import group_by_warpspace, get_session_groups, _get_output_fname
-    subject_data, layout = collect_data(BIDS_DIR, "tester")
-
-    # Get session groups
-    dwi_session_groups = get_session_groups(layout, subject_data, combine_all_dwis)
-    dwi_fmap_groups = []
-    for dwi_session_group in dwi_session_groups:
-        dwi_fmap_groups.extend(
-            group_by_warpspace(dwi_session_group,
-                               layout,
-                               prefer_dedicated_fmaps,
-                               using_eddy,
-                               "fieldmaps" in ignore,
-                               combine_all_dwis,
-                               use_syn))
-    outputs_to_files = dict([
-        (_get_output_fname(dwi_group), dwi_group) for dwi_group in dwi_fmap_groups
-    ])
-    output_keys = tuple(sorted(outputs_to_files.keys()))
-    assert output_keys == output_groups
-    for key, fmap_type in zip(output_groups, fieldmap_types):
-        assert outputs_to_files[key]['fieldmap_info']['type'] == fmap_type
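
Note on the deleted fixtures above: the test datasets are generated by a
write_test_bids helper defined earlier in the fixture module. A minimal sketch
of how such a helper could be shaped is below; the signature mirrors the calls
above, but the directory layout, the stub volumes, and the root argument are
assumptions for illustration, not the removed implementation.

    import json
    from pathlib import Path

    import nibabel as nb
    import numpy as np


    def write_test_bids(name, image_specs, root=Path(".")):
        """Write a stub BIDS dataset ``name`` from (path, metadata) pairs."""
        dataset = root / name
        dataset.mkdir(parents=True, exist_ok=True)
        for relpath, metadata in image_specs:
            nii_path = dataset / "sub-1" / (relpath + ".nii.gz")
            nii_path.parent.mkdir(parents=True, exist_ok=True)
            # A tiny placeholder volume is enough to exercise grouping logic.
            nb.Nifti1Image(np.zeros((2, 2, 2)), np.eye(4)).to_filename(str(nii_path))
            json_path = nii_path.with_name(nii_path.name.replace(".nii.gz", ".json"))
            json_path.write_text(json.dumps(metadata))
        (dataset / "dataset_description.json").write_text(
            json.dumps({"Name": name, "BIDSVersion": "1.8.0"})
        )
        return dataset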