
Commit

Do more.
tsalo committed Aug 9, 2024
1 parent a31ab18 commit b2660c4
Showing 16 changed files with 1 addition and 3,344 deletions.
1 change: 0 additions & 1 deletion qsirecon/interfaces/__init__.py
@@ -3,4 +3,3 @@
# vi: set ft=python sts=4 ts=4 sw=4 et:
from .bids import DerivativesDataSink
from .images import ConformDwi, ValidateImage
from .reports import AboutSummary, SubjectSummary
2 changes: 1 addition & 1 deletion qsirecon/interfaces/bids.py
@@ -118,7 +118,7 @@ class DerivativesDataSink(SimpleInterface):
>>> from pathlib import Path
>>> import tempfile
>>> from qsirecon.utils.bids import collect_data
>>> from qsiprep.utils.bids import collect_data
>>> tmpdir = Path(tempfile.mkdtemp())
>>> tmpfile = tmpdir / 'a_temp_file.nii.gz'
>>> tmpfile.open('w').close() # "touch" the file
141 changes: 0 additions & 141 deletions qsirecon/interfaces/dsi_studio.py
@@ -5,7 +5,6 @@
from copy import deepcopy
from glob import glob
from pathlib import Path
from subprocess import PIPE, Popen

import nibabel as nb
import nipype.interfaces.utility as niu
@@ -122,48 +121,6 @@ def _list_outputs(self):
return outputs


class _DSIStudioQCOutputSpec(TraitedSpec):
qc_txt = File(exists=True, desc="Text file with QC measures")


class DSIStudioQC(SimpleInterface):
output_spec = _DSIStudioQCOutputSpec

def _run_interface(self, runtime):
# DSI Studio (0.12.2) action=qc has two modes, depending on whether the
# input is a file (src.gz|nii.gz)|(fib.gz) or a directory. For
# directories, the action will be run on a number of detected files
# (which *cannot* be symbolic links for some reason).
src_file = fname_presuffix(self.inputs.src_file, newpath=runtime.cwd)
cmd = ["dsi_studio", "--action=qc", "--source=" + src_file]
proc = Popen(cmd, cwd=runtime.cwd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
if out:
LOGGER.info(out.decode())
if err:
LOGGER.critical(err.decode())
self._results["qc_txt"] = op.join(runtime.cwd, "qc.txt")
return runtime
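
For reference, a minimal standalone sketch of what this removed interface wraps: running DSI Studio's QC action on a src/fib file and picking up the qc.txt written into the working directory. The function name and paths are illustrative assumptions, not part of the removed code.

import subprocess
from pathlib import Path

def run_dsi_studio_qc(src_file, workdir):
    """Hedged sketch: run ``dsi_studio --action=qc`` and return the qc.txt path."""
    subprocess.run(
        ["dsi_studio", "--action=qc", f"--source={src_file}"],
        cwd=workdir,
        check=True,            # raise if DSI Studio exits nonzero
        capture_output=True,   # keep stdout/stderr for optional logging
    )
    return Path(workdir) / "qc.txt"
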


class _DSIStudioSrcQCInputSpec(DSIStudioCommandLineInputSpec):
src_file = File(exists=True, copyfile=False, argstr="%s", desc="DSI Studio src[.gz] file")


class DSIStudioSrcQC(DSIStudioQC):
input_spec = _DSIStudioSrcQCInputSpec
ext = ".src.gz"


class _DSIStudioFibQCInputSpec(DSIStudioCommandLineInputSpec):
src_file = File(exists=True, copyfile=False, argstr="%s", desc="DSI Studio fib[.gz] file")


class DSIStudioFibQC(DSIStudioQC):
input_spec = _DSIStudioFibQCInputSpec
ext = ".fib.gz"


# Step 2: reconstruct ODFs
class DSIStudioReconstructionInputSpec(DSIStudioCommandLineInputSpec):
input_src_file = File(
@@ -683,57 +640,6 @@ def _run_interface(self, runtime):
return runtime


class _DSIStudioQCMergeInputSpec(BaseInterfaceInputSpec):
src_qc = File(exists=True, mandatory=True)
fib_qc = File(exists=True, mandatory=True)


class _DSIStudioQCMergeOutputSpec(TraitedSpec):
qc_file = File(exists=True)


class DSIStudioMergeQC(SimpleInterface):
input_spec = _DSIStudioQCMergeInputSpec
output_spec = _DSIStudioQCMergeOutputSpec

def _run_interface(self, runtime):
output_csv = runtime.cwd + "/merged_qc.csv"
src_qc = load_src_qc_file(self.inputs.src_qc)
fib_qc = load_fib_qc_file(self.inputs.fib_qc)
src_qc.update(fib_qc)
qc_df = pd.DataFrame(src_qc)
qc_df.to_csv(output_csv, index=False)
self._results["qc_file"] = output_csv
return runtime


class _DSIStudioBTableInputSpec(BaseInterfaceInputSpec):
bval_file = File(exists=True, mandatory=True)
bvec_file = File(exists=True, mandatory=True)
bvec_convention = traits.Enum(
("DIPY", "FSL"),
usedefault=True,
desc="Convention used for bvecs. FSL assumes LAS+ no matter image orientation",
)


class _DSIStudioBTableOutputSpec(TraitedSpec):
btable_file = File(exists=True)


class DSIStudioBTable(SimpleInterface):
input_spec = _DSIStudioBTableInputSpec
output_spec = _DSIStudioBTableOutputSpec

def _run_interface(self, runtime):
if self.inputs.bvec_convention != "DIPY":
raise NotImplementedError("Only DIPY Bvecs supported for now")
btab_file = op.join(runtime.cwd, "btable.txt")
btable_from_bvals_bvecs(self.inputs.bval_file, self.inputs.bvec_file, btab_file)
self._results["btable_file"] = btab_file
return runtime


class _AutoTrackInputSpec(DSIStudioCommandLineInputSpec):
fib_file = File(exists=True, mandatory=True, copyfile=False, argstr="--source=%s")
map_file = File(exists=True, copyfile=False)
@@ -885,53 +791,6 @@ def stat_txt_to_df(stat_txt_file, bundle_name):
return bundle_stats


def load_src_qc_file(fname, prefix=""):
with open(fname, "r") as qc_file:
qc_data = qc_file.readlines()
data = qc_data[1]
parts = data.strip().split("\t")
dwi_contrast = np.nan
ndc_masked = np.nan
if len(parts) == 7:
_, dims, voxel_size, dirs, max_b, ndc, bad_slices = parts
elif len(parts) == 8:
_, dims, voxel_size, dirs, max_b, _, ndc, bad_slices = parts
elif len(parts) == 9:
_, dims, voxel_size, dirs, max_b, dwi_contrast, ndc, ndc_masked, bad_slices = parts
else:
raise Exception("Unknown QC File format")

voxelsx, voxelsy, voxelsz = map(float, voxel_size.strip().split())
dimx, dimy, dimz = map(float, dims.strip().split())
n_dirs = float(dirs.split("/")[1])
max_b = float(max_b)
dwi_corr = float(ndc)
n_bad_slices = float(bad_slices)
ndc_masked = float(ndc_masked)
dwi_contrast = float(dwi_contrast)
data = {
prefix + "dimension_x": [dimx],
prefix + "dimension_y": [dimy],
prefix + "dimension_z": [dimz],
prefix + "voxel_size_x": [voxelsx],
prefix + "voxel_size_y": [voxelsy],
prefix + "voxel_size_z": [voxelsz],
prefix + "max_b": [max_b],
prefix + "neighbor_corr": [dwi_corr],
prefix + "masked_neighbor_corr": [ndc_masked],
prefix + "dwi_contrast": [dwi_contrast],
prefix + "num_bad_slices": [n_bad_slices],
prefix + "num_directions": [n_dirs],
}
return data


def load_fib_qc_file(fname):
with open(fname, "r") as fibqc_f:
lines = [line.strip().split() for line in fibqc_f]
return {"coherence_index": [float(lines[1][-1])]}


def btable_from_bvals_bvecs(bval_file, bvec_file, output_file):
"""Create a b-table from DIPY-style bvals/bvecs.
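The body of btable_from_bvals_bvecs is collapsed in this diff. As a rough sketch of what such a helper typically produces, a DSI Studio-style b-table with one "bval bx by bz" row per volume; the FSL-orientation transpose and any sign handling here are assumptions, not taken from the hidden body.

import numpy as np

def btable_from_bvals_bvecs_sketch(bval_file, bvec_file, output_file):
    """Hedged sketch: write one 'bval bx by bz' row per volume."""
    bvals = np.loadtxt(bval_file).ravel()
    bvecs = np.loadtxt(bvec_file)
    if bvecs.shape[0] == 3:  # transpose FSL-style 3 x N gradients to N x 3
        bvecs = bvecs.T
    np.savetxt(output_file, np.column_stack([bvals, bvecs]), fmt="%g")
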
118 changes: 0 additions & 118 deletions qsirecon/interfaces/gradients.py
@@ -1,26 +1,21 @@
"""Handle merging and spliting of DSI files."""

import logging
import os

import nibabel as nb
import numpy as np
from nilearn import image as nim
from nipype.interfaces.base import (
BaseInterfaceInputSpec,
File,
InputMultiObject,
OutputMultiObject,
SimpleInterface,
TraitedSpec,
isdefined,
traits,
)
from nipype.utils.filemanip import fname_presuffix
from sklearn.metrics import r2_score

LOGGER = logging.getLogger("nipype.interface")
tensor_index = {"xx": (0, 0), "xy": (0, 1), "xz": (0, 2), "yy": (1, 1), "yz": (1, 2), "zz": (2, 2)}


class RemoveDuplicatesInputSpec(BaseInterfaceInputSpec):
@@ -111,103 +106,6 @@ def is_unique_sample(vec):
return runtime


class SliceQCInputSpec(BaseInterfaceInputSpec):
uncorrected_dwi_files = InputMultiObject(File(exists=True), desc="uncorrected dwi files")
ideal_image_files = InputMultiObject(File(exists=True), desc="model-based images")
mask_image = File(exists=True, desc="brain mask")
impute_slice_threshold = traits.Float(0.0, desc="threshold for using imputed data in a slice")
min_slice_size_percentile = traits.CFloat(
10.0,
usedefault=True,
desc="slices bigger than " "this percentile are candidates for imputation.",
)


class SliceQCOutputSpec(TraitedSpec):
imputed_images = OutputMultiObject(File(exists=True), desc="dwi files with imputed slices")
slice_stats = File(exists=True, desc="npy file with the slice-by-TR error matrix")


class SliceQC(SimpleInterface):
input_spec = SliceQCInputSpec
output_spec = SliceQCOutputSpec

def _run_interface(self, runtime):
ideal_image_files = self.inputs.ideal_image_files
uncorrected_image_files = self.inputs.uncorrected_dwi_files

self._results["imputed_images"] = self.inputs.uncorrected_dwi_files
output_npz = os.path.join(runtime.cwd, "slice_stats.npz")
mask_img = nb.load(self.inputs.mask_image)
mask = mask_img.get_fdata() > 0
masked_slices = (mask * np.arange(mask_img.shape[2])[np.newaxis, np.newaxis, :]).astype(
int
)
slice_nums, slice_counts = np.unique(masked_slices[mask], return_counts=True)
min_size = np.percentile(slice_counts, self.inputs.min_slice_size_percentile)
too_small = slice_nums[slice_counts < min_size]
for small_slice in too_small:
masked_slices[masked_slices == small_slice] = 0
valid_slices = slice_nums[slice_counts > min_size]
valid_slices = valid_slices[valid_slices > 0]
slice_scores = []
wb_xcorrs = []
wb_r2s = []
# If impute slice threshold==0 or hmc_model=="none"
if isdefined(ideal_image_files):
for ideal_image, input_image in zip(ideal_image_files, uncorrected_image_files):
slices, wb_xcorr, wb_r2 = _score_slices(
ideal_image, input_image, masked_slices, valid_slices
)
slice_scores.append(slices)
wb_xcorrs.append(wb_xcorr)
wb_r2s.append(wb_r2)
else:
num_trs = len(uncorrected_image_files)
num_slices = mask_img.shape[2]
wb_xcorrs = np.zeros(num_trs)
wb_r2s = np.zeros(num_trs)
slice_scores = np.zeros((num_slices, num_trs))

np.savez(
output_npz,
slice_scores=slice_scores,
wb_r2s=np.array(wb_r2s),
wb_xcorrs=np.array(wb_xcorrs),
valid_slices=valid_slices,
masked_slices=masked_slices,
slice_nums=slice_nums,
slice_counts=slice_counts,
)
self._results["slice_stats"] = output_npz
return runtime


def _score_slices(ideal_image, input_image, masked_slices, valid_slices):
"""Compute similarity metrics on a pair of images."""

def crosscor(vec1, vec2):
v1bar = vec1 - vec1.mean()
v2bar = vec2 - vec2.mean()
return np.inner(v1bar, v2bar) ** 2 / (np.inner(v1bar, v1bar) * np.inner(v2bar, v2bar))

slice_scores = np.zeros(valid_slices.shape)
ideal_data = nb.load(ideal_image).get_fdata()
input_data = nb.load(input_image).get_fdata()
for nslice, slicenum in enumerate(valid_slices):
slice_mask = masked_slices == slicenum
ideal_slice = ideal_data[slice_mask]
data_slice = input_data[slice_mask]
slice_scores[nslice] = crosscor(ideal_slice, data_slice)

global_mask = masked_slices > 0
wb_ideal = ideal_data[global_mask]
wb_input = input_data[global_mask]
global_xcorr = crosscor(wb_input, wb_ideal)
global_r2 = r2_score(wb_input, wb_ideal)
return slice_scores, global_xcorr, global_r2
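
As a quick, hedged sanity check of the crosscor metric above (it is the squared Pearson correlation between the two slices); the arrays are invented:

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
b = 2.0 * a + 1.0  # perfectly linearly related to a
abar, bbar = a - a.mean(), b - b.mean()
xcorr = np.inner(abar, bbar) ** 2 / (np.inner(abar, abar) * np.inner(bbar, bbar))
# xcorr == 1.0: any linear relationship with nonzero slope scores 1,
# while unrelated slices score near 0.
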


class ExtractB0sInputSpec(BaseInterfaceInputSpec):
b0_indices = traits.List()
bval_file = File(exists=True)
@@ -278,19 +176,3 @@ def concatenate_bvecs(input_files):
if not stacked.shape[1] == 3:
stacked = stacked.T
return stacked


def write_concatenated_fsl_gradients(bval_files, bvec_files, out_prefix):
bvec_file = out_prefix + ".bvec"
bval_file = out_prefix + ".bval"
stacked_bvecs = concatenate_bvecs(bvec_files)
np.savetxt(bvec_file, stacked_bvecs.T, fmt="%.8f", delimiter=" ")
concatenate_bvals(bval_files, bval_file)
return bval_file, bvec_file
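
A hedged usage sketch for this removed helper; the run names and output prefix are invented:

bval_file, bvec_file = write_concatenated_fsl_gradients(
    ["run-1.bval", "run-2.bval"],
    ["run-1.bvec", "run-2.bvec"],
    "sub-01_concat",
)
# -> "sub-01_concat.bval" and "sub-01_concat.bvec", with the bvecs
#    written as 3 rows x N columns, the layout FSL tools expect.
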


def get_vector_nii(data, affine, header):
hdr = header.copy()
hdr.set_data_dtype(np.dtype("<f4"))
hdr.set_intent("vector", (), "")
return nb.Nifti1Image(data[:, :, :, np.newaxis, :].astype(np.dtype("<f4")), affine, hdr)
30 changes: 0 additions & 30 deletions qsirecon/interfaces/images.py
@@ -15,7 +15,6 @@

import nibabel as nb
import numpy as np
from dipy.io import read_bvals_bvecs
from nipype import logging
from nipype.interfaces.base import (
BaseInterfaceInputSpec,
@@ -276,35 +275,6 @@ def bvec_to_rasb(bval_file, bvec_file, img_file, workdir):
return np.fromstring(out, dtype=float, sep=" ")[:3]


def split_bvals_bvecs(bval_file, bvec_file, img_files, deoblique, working_dir):
"""Split bvals and bvecs into one text file per image."""
if deoblique:
LOGGER.info("Converting oblique-image bvecs to world coordinate reference frame")
bvals, bvecs = read_bvals_bvecs(bval_file, bvec_file)
split_bval_files = []
split_bvec_files = []
for nsample, (bval, bvec, img_file) in enumerate(zip(bvals[:, None], bvecs, img_files)):
bval_fname = fname_presuffix(bval_file, suffix="_%04d" % nsample, newpath=working_dir)
bvec_suffix = "_ortho_%04d" % nsample if not deoblique else "_%04d" % nsample
bvec_fname = fname_presuffix(bvec_file, bvec_suffix, newpath=working_dir)
np.savetxt(bval_fname, bval)
np.savetxt(bvec_fname, bvec)

# re-write the bvec deobliqued, if requested
if deoblique:
rasb = bvec_to_rasb(bval_fname, bvec_fname, img_file, working_dir)
# Convert to image axis orientation
ornt = nb.aff2axcodes(nb.load(img_file).affine)
flippage = np.array([1 if ornt[n] == "RAS"[n] else -1 for n in [0, 1, 2]])
deobliqued_bvec = rasb * flippage
np.savetxt(bvec_fname, deobliqued_bvec)

split_bval_files.append(bval_fname)
split_bvec_files.append(bvec_fname)

return split_bval_files, split_bvec_files
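
A hedged usage sketch for this removed helper; the file names and working directory are invented:

bval_files, bvec_files = split_bvals_bvecs(
    "sub-01_dwi.bval",
    "sub-01_dwi.bvec",
    img_files=["vol-0000.nii.gz", "vol-0001.nii.gz"],
    deoblique=False,
    working_dir="/tmp/split_gradients",
)
# -> per-volume bval files suffixed _0000, _0001, ... plus matching bvec
#    files (suffixed _ortho_0000, ... when deoblique is False).
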


def to_lps(input_img, new_axcodes=("L", "P", "S")):
if isinstance(input_img, str):
input_img = nb.load(input_img)