From d071473252639542ec98bf1b5f8fa13d2d8fff2e Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Fri, 9 Aug 2024 11:07:11 -0400 Subject: [PATCH] Start removing QSIPrep-specific code and documentation (#4) * Move qsirecon workflow. * Fix imports. * Update CHANGES and README. * Drop exec_mode. * Change some preprocessing to reconstruction. --- .zenodo.json | 2 +- CHANGES.rst | 497 ----------------- Dockerfile | 2 +- README.rst | 22 +- docs/api.rst | 12 +- docs/citing.rst | 2 +- docs/comparisons.md | 2 +- docs/index.rst | 1 - docs/preprocessing.rst | 902 ------------------------------- docs/usage.rst | 7 +- long_description.rst | 4 +- pyproject.toml | 2 +- qsirecon/cli/parser.py | 4 +- qsirecon/cli/run.py | 14 +- qsirecon/cli/workflow.py | 40 +- qsirecon/config.py | 4 +- qsirecon/tests/test_cli.py | 2 +- qsirecon/workflows/base.py | 638 +++++++++------------- qsirecon/workflows/recon/base.py | 344 ------------ 19 files changed, 290 insertions(+), 2211 deletions(-) delete mode 100644 docs/preprocessing.rst delete mode 100644 qsirecon/workflows/recon/base.py diff --git a/.zenodo.json b/.zenodo.json index a1c9caa7..1b40a96f 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -74,7 +74,7 @@ "dMRI", "BIDS" ], - "title": "QSIRecon: An integrative platform for preprocessing and reconstructing diffusion MRI", + "title": "QSIRecon: An integrative platform for reconstructing diffusion MRI", "license": "BSD-3-Clause", "upload_type": "software" } \ No newline at end of file diff --git a/CHANGES.rst b/CHANGES.rst index 8d0ed53b..e69de29b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,497 +0,0 @@ -0.22.0 (July 19, 2024) -====================== - -Continuing the update to current NiPreps best practices, an update to PyAFQ and a few bugfixes. 
- - * [ENH] Compress recon derivatives (#787) - * [FIX] Make hsvs workflow work (#784) - * [FIX] Use new bundle names (#783) - * [RF] Drop functions to read sidecar JSONs (#760) - * [FIX] FUGUE bug (#769) - * [ENH] Save SIFT2 mu parameter (#774) - * [FIX] Fix the mif2fib workflow (#778) - * [FIX] CI test file checking - * [CI] Update expected AFQ outputs again - * [CI] add optional bundles (#771) - * [ENH] Update dependencies in docker image (#768) - * [ENH] Upgrade pyAFQ to 1.3.2 (#764) - * [RF] Run integration tests with pytest (#763) - * [FIX] API documentation (#748) - * [ENH] Update to NiPreps-style config file (#745) - - -0.21.4 (May 4, 2024) -==================== - - -## What's Changed -The effort to update to current NiPreps standards has begun, plus another bugfix in the TOPUP workflow. - -### Other Changes -* Remove copy of Niworkflows in favor of dependency by @tsalo in https://github.com/PennLINC/qsirecon/pull/709 -* [FIX] stop bizarre argsort behavior in best b=0 by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/744 - - - -0.21.3 (May 2, 2024) -==================== - -## What's Changed - -This update addresses an important coregistration bug (#740), makes CPU Eddy much more usable and fixes a bug in the PyAFQ workflow. - -### Other Changes -* [FIX] PyAFQ was not being written to derivatives by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/738 -* [FIX] use b0 ref from eddy-processed data by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/739 -* [MAINT] remove pypi by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/742 -* [ENH] add parallel argument for topup by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/741 -* [ENH] Allow multithreading in eddy by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/743 - - - -0.21.1 (Apr 25, 2024) -===================== - -There are important software updates in this release, along with a lot of infrastructure improvements. 
- -## Important Changes - * FSL version 6.0.7.8 is now in qsirecon. This contains 2 [serious bugfixes](https://fsl.fmrib.ox.ac.uk/fsl/docs/#/development/history/changelog-6.0.7.8.md). One has to do with susceptibility-by-volume correction and the other resulted in incorrect CNR values being calculated. - * DSI Studio has been updated to fix a bug in [Neighboring DWI Correlation](https://github.com/frankyeh/DSI-Studio/issues/84). - * You can use `"mporder"` in your eddy config json file and the slice timings will automatically be created and passed to eddy (even if you're concatenating runs) - * Recon workflows can now include a "bundle mapping" and "scalar mapping", where scalars created in individual workflows can be mapped to a template or summarized inside autotrack bundles. This does not do tract profiles or surface mapping - yet. See `qsirecon/data/pipelines/hbcd_scalar_maps.json` for an example. - * The recon derivatives are written approximately according to BEP-016 - -## New Features - -* [ENH] Add workflow to resample recon scalars to template space by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/688 -* Support complex-valued dwidenoising by @tsalo in https://github.com/PennLINC/qsirecon/pull/679 -* [ENH] Add ReconScalars node to recon workflows by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/683 -* Pin current version of pyAFQ (1.3.1) by @arokem in https://github.com/PennLINC/qsirecon/pull/729 -* [ENH] Add TORTOISE Estimator recon workflows by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/674 -* [ENH] Make Eddy's slice2vol much easier to use by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/710 - -## Other Changes -* Apply stylistic changes to `workflows/base.py` by @tsalo in https://github.com/PennLINC/qsirecon/pull/678 -* Pin Nilearn version by @tsalo in https://github.com/PennLINC/qsirecon/pull/687 -* Add a series of infrastructure files from ASLPrep/XCP-D by @tsalo in 
https://github.com/PennLINC/qsirecon/pull/684 -* Run isort and remove unused imports by @tsalo in https://github.com/PennLINC/qsirecon/pull/690 -* Apply stylistic changes to `workflows/utils.py` by @tsalo in https://github.com/PennLINC/qsirecon/pull/680 -* Remove unused classes flagged by vulture by @tsalo in https://github.com/PennLINC/qsirecon/pull/693 -* [RF] Use Black to reformat package by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/701 - -## Bugfixes -* [FIX] Prevent undecodable terminal output in calc_connectivity by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/711 -* [FIX] save recon anat files by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/702 -* [FIX] revert citeproc for old pandoc by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/736 -* [FIX] recon workflows when --anat-modality T2w was used by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/734 -* [FIX] remove old nodes from csdsi_3dshore.json by @kjamison in https://github.com/PennLINC/qsirecon/pull/733 -* [ENH] Update TORTOISE for lower cuda memory use by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/730 - - - -0.20.0 (Jan 12, 2024) -====================== -Please note there is no corresponding release on PyPI for this version - -## New features -* [ENH] allow topup+drbuddi for hbcd by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/667 - * Adds a 2-stage distortion correction option `--pepolar-method TOPUP+DRBUDDI`, which will run TOPUP -> Eddy -> DRBUDDI -* [ENH] Use UKB processed data as input for recon workflows by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/651 - * This adds the --recon-input-pipeline, which lets you run recon workflows on UKB data -* [ENH] Update to python 3.10 by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/670 - * Hopefully this will address the hang-after-crashing problem in the recent releases - -## Bugfixes/Docs -* DOC: Add SMeisler and JHLegarreta to contributors 
list by @jhlegarreta in https://github.com/PennLINC/qsirecon/pull/642 -* Fixes typos on FreeSurfer requirements for ss3t hsvs recon by @pcamach2 in https://github.com/PennLINC/qsirecon/pull/414 -* Fix RTD build by @tsalo in https://github.com/PennLINC/qsirecon/pull/652 -* ENH: conform bvals to shells separated by b0_threshold by @cookpa in https://github.com/PennLINC/qsirecon/pull/660 -* [FIX] remove unneeded "method" from tracking by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/641 -* FIX: allow finding of lesion rois by @psadil in https://github.com/PennLINC/qsirecon/pull/659 -* MISC: Remove outdated dsi_studio tracking parameters by @cookpa in https://github.com/PennLINC/qsirecon/pull/668 -* [DOC] Add documentation for dsi_studio_autotrack reconstruction workflow by @valeriejill in https://github.com/PennLINC/qsirecon/pull/669 -* [ENH] Update BIDS validator to 1.8.4 by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/671 - - -0.19.0 (August 10, 2023) -======================== - -Addresses stability issues in the 0.18 releases. Huge improvements to AutoTrack recon workflow -CPU use and improvements in memory use for synthseg and synthstrip - - * [ENH] limit the synths to 1 thread (#608) - * [DOC] fix typo in docs (#606) - * [ENH] Stabilize autotrack performance (#604) - * [CI] Add test for tensor-based head motion correction (#605) - * [FIX] fixes steinhardt computation (#603) - - -0.18.1 (June 26, 2023) -====================== - -Bugfixes since 0.18.0 - -Bugfix: - * [FIX] add btable to merge when averaging outputs (#594) - -0.18.0 (June 9, 2023) -===================== - -No technical changes to the pipeline here, but citations and methods boilerplate have been updated to -reflect the changes in 0.18.0alpha0. - - - -0.18.0alpha0 (May 26, 2023) -=========================== - -First release moving towards 1.0! Please open bug reports if anything suspicious comes up. 
This release -changes the anatomical workflow significantly, synthstrip and synthseg are used. The recon workflow -"dsi_studio_autotrack" has also been added. - -## What's Changed -* Bump sentry-sdk from 0.13.1 to 1.14.0 by @dependabot in https://github.com/PennLINC/qsirecon/pull/539 -* [ENH] Update FreeSurfer to 7.3.1, dmri-amico to 1.5.4 by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/537 -* WIP: ENH: Make pyAFQ tests faster, add export all by @36000 in https://github.com/PennLINC/qsirecon/pull/534 -* [ENH] move biascorrect so it runs on resampled data by default by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/527 -* [Fix] Fix threading on DRBUDDI interface by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/540 -* [ENH] add CNR to the imageqc.csv by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/541 -* [FIX] pin pandas version to < 2.0.0 by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/543 -* ENH: Replace avscale with non-fsl tools by @jbh1091 in https://github.com/PennLINC/qsirecon/pull/542 -* ENH: Replace fsl applymask by @jbh1091 in https://github.com/PennLINC/qsirecon/pull/544 -* Replace fsl split by @jbh1091 in https://github.com/PennLINC/qsirecon/pull/548 -* [FIX] Update distortion_group_merge.py by @smeisler in https://github.com/PennLINC/qsirecon/pull/555 -* [ENH] Redo anatomical workflow by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/553 -* [FIX] remove pre bids-filter acq type argument by @octomike in https://github.com/PennLINC/qsirecon/pull/557 -* FIX: Replace deprecated `np.int` instances by @smeisler in https://github.com/PennLINC/qsirecon/pull/558 -* [WIP] ENH: 482 remove fsl dependency by @jbh1091 in https://github.com/PennLINC/qsirecon/pull/498 -* [ENH] Update TORTOISE for improved T2w registration by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/564 -* [FIX] T2w anat-modality issues by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/565 -* [FIX] update 
boost in tortoise by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/569 -* [FIX] connections on multi-anat workflow by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/572 -* [ENH] Update DSI Studio to the latest commit by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/573 -* [ENH] Add DSI Studio AutoTrack recon workflow by @mattcieslak in https://github.com/PennLINC/qsirecon/pull/576 - -## New Contributors -* @dependabot made their first contribution in https://github.com/PennLINC/qsirecon/pull/539 -* @jbh1091 made their first contribution in https://github.com/PennLINC/qsirecon/pull/542 -* @smeisler made their first contribution in https://github.com/PennLINC/qsirecon/pull/555 - -**Full Changelog**: https://github.com/PennLINC/qsirecon/compare/0.17.0...0.18.0alpha0 - - -0.16.1 (October 10, 2022) -========================= - -Adds a critical fix for ABCD-style acquisitions (described in #449). This change forces -TOPUP to use the raw, unprocessed b=0 images from the DWI series and the epi fieldmaps to -estimate distortion. Previously, the most-denoised version of each image was used in -TOPUP. To disable this change and return to the previous behavior, use the -`--denoised-image-sdc` flag. - -Note, **this is a change in the default behavior of QSIRecon!!** - -*Upgrades* - - * Update ITK to 5.3, update ANTs #449 - * Add `--denoised-image-sdc` #465 - - -*Bug fixes* - - * Use safe_load instead of load for yaml #443 - * Add fugue and prelude back to the qsirecon image #463 - - -0.16.0RC2 (June 1, 2022) -======================== - - * Adds multithreading to connectome2tck #429 - -0.16.0RC2 (June 1, 2022) -======================== - -Fixes a naming error in the schaefer 400 atlas #428 - -0.16.0RC1 (May 30, 2022) -======================== - -Major additions to the reconstruction workflows! Most notably PyAFQ is available -as a reconstruction workflow. 
The default atlases included in QSIRecon have been -updated to include subcortical regions if they weren't already present in the -original atlas. - - * Add PyAFQ reconstruction workflows #398 Credit: @36000 - * Make sure all recon workflows respect omp_nthreads #368 - * Add DKI derivatives #371 - * Properly transform 4D CNR images from Eddy #393 - * Update amico to version 22.4.1 #394 - * Fix concatenation bug #403 credit: @cookpa - * Prevent divide by zero error #405 credit: @cookpa - * Critical Fix, use correct transform to get atlases into T1w space #417 - * Add resampled atlases back into derivatives #418 - * Add connectome2tck exemplar streamlines for mrtrix connectivity workflows #420 - * Update the atlases to include subcortical regions #426 [details here](https://github.com/PennLINC/qsirecon-atlas/blob/main/QSIRecon%20atlases.ipynb) - -0.15.2 (March 3, 2022) DEPRECATED -================================== - -**WARNING** There is an bug in the connectome pipelines that makes the connectivity -matrices unreliable. Do not use this version for connectome estimation. - -Due to persistent difficulties with crashing ODF plots in the reconstruction workflows, -there is now a `--skip-odf-reports` option that will disable the ODF and peak plots -in the html reports. This should only be used once you've run some test workflows -with the reports still enabled, so you know that your ODFs are correctly oriented. - - * Make ODF Plots optional (#364) - * Bugfix: ABCD gradient data for extrapolation (#363) - * Adds `dipy_dki` reconstruction workflow (#366) - - -0.15.1 (February 28, 2022) DEPRECATED -====================================== - -**WARNING** There is an bug in the connectome pipelines that makes the connectivity -matrices unreliable. Do not use this version for connectome estimation. - -A lot of changes in QSIRecon. The big-picture changes are - - 1. 
The build system was redone so a multistage build is used in a - different repository (https://github.com/PennLINC/qsirecon_build). - The container should be about half as big as the last release. - 2. The way anatomical masks are handled in reconstruction workflows - has been changed so that FreeSurfer data can be incorporated. - 3. FAST-based anatomically-constrained tractography is now deprecated in - QSIRecon. If you're going to use anatomical constraints, they should be - very accurate. The hybrid surface-volume segmentation (HSVS) is - *amazing* and should be considered the default way to use the - MRtrix3/3Tissue workflows. The - [documentation](https://qsirecon.readthedocs.io/en/latest/reconstruction.html) - describes the new built-in workflow names. - 4. The reconstruction workflows have been totally refactored. This won't - affect the outputs of the reconstruction workflows, but will affect - anyone who is using intermediate files from the working directory. - The working directories no longer have those unfortunate `..`'s in - their names. - 5. FSL is updated to 6.0.5.1! - -Since these are a lot of changes, please be vigilant and check your results! -The QSIRecon preprocessing workflows have not changed with this release, but -the dependencies have been upgraded for almost everything. 
- - * Update FSL to 6.0.5.1 (#334) - * Move ODF plotting to a cli tool so xvfb is handled more robustly (#357) - * Better FreeSurfer license documentation (#355) - * Edited libQt5Core.so.5 so it's loadable in singularity on CentOS (#336) - * Fixed typo in patch2self (#338) - * Inaccurate bids-validator errors were removed (#340) - * Bug in `--recon-input` fixed #286 - * Correct streamline count is reported in the mrtrix connectivity matrices (#330) - * Add option to ingress freesurfer data (#287) - * Add Nature Methods citation to dataset_description.json - * Refactor build system (#341) - * SHORELine bugfixes (#301) - * Bugfix: handle cases where there is only one b=0 (#279) - -0.14.3 (September 16, 2021) -=========================== -Change in behavior in Patch2Self: - - * Updates Patch2Self with optimal parameters (use OLS instead of ridge) - -0.14.2 (July 11, 2021) -====================== -Bugfixes and documentation - - * Updates documentation for containers (#270) - * Fixes a bug when reading fieldmap metadata from datalad inputs (#271) - * Change incorrect option in the documentation (#272) - -0.14.0 (July 2, 2021) -===================== -Adds a new reconstruction workflow for the NODDI model. - - * Adds NODDI reconstruction workflow (#257). Thanks @cookpa! 
- * Fixes issue with unequal aspect ratios in q-space plots (#266) - -0.13.1 (June 14, 2021) -====================== - - * Adds a flag for a BIDS filter file #256 - * Fixes a bug where --dwi-only is selected along with --intramodal-template - -0.13.0 (May 5, 2021) -==================== -Many bugfixes - - * Fix bug that produced flipped scalar images (#251) - * Added a default working directory to prevent uninterpretable error message (#250) - * Fix a bug in the `dipy_3dshore` reconstruction workflow (#249) - * Remove hardlinking from DSI Studio interfaces (#214) - * Add an option to use a BIDS database directory (#247) - * Fix bug in interactive reports for HCP-style acquisitions (#238) - * Update defaults for `Patch2Self` (#230, #239) - * Remove cmake installer from docker image after compiling ANTS (#229) - -0.13.0RC1 (January 19, 2021) -============================ -This version introduces major changes to the TOPUP/eddy workflow. Feedback would be greatly -appreciated! - - * Added new algorithm for selecting b=0 images for distortion corretion (#202) - * Added the Patch2Self denoising method (#203, credit to @ShreyasFadnavis) - * Documentation has been expanded significantly (#212) - * Boilerplate for DWI preprocessing is greatly expanded (#200) - - -0.12.2 (November 7, 2020) -========================= -Adds options for processing infant dMRI data. Also enables running without a T1w -image. - - * Adds ``--dwi-only`` and ``--infant`` options to QSIRecon. (#177) - - -0.11.0 (August 12, 2020) -======================== -NEW: Workflow defaults have changed. T1w-based spatial normalization is done by -default (disabled by ``--skip-t1-based-spatial-normalization``) and dwi scans -are merged before motion correction by default (disabled by ``--separate-all-dwis``). 
- - * Deprecate some commandline arguments, change defaults (#168) - * Fix typo in workflow names (#162) - * Fix bug from 0.10.0 where ODFs were not appearing in plots (#160) - - -0.10.0 (August 4, 2020) -======================= - - * Adds support for oblique acquisitions (#156) - - -0.9.0beta1 (June 17, 2020) -========================== - - * Adds support for HCP lifespan sequences - * Introduces --distortion-group-merge option for combining paired scans - -0.8.0 (February 12, 2020) -========================= - - * Removes oblique angles from T1w headers to fix N4 bug (#103) - -0.7.2 (February 4, 2020) -======================== - - * Fixed a bug in b=0 masking when images have high signal intensity in ventricles (#99) - -0.7.1 (January 29, 2020) -======================== - - * Image QC summary data is produced for each output (#95) - * Update DSI Studio (#88) - * Update ANTs (#80) - * Include workflows for ss3t (#82) - * Add some boilerplate to the FSL workflow (#38) - * Reduce the number of calls to N4 (#74, #89) - * Add CUDA capability in the containers (#75) - * Add mrdegibbs and accompanying reports (#58) - * Fix reports graphics (#64) - * Rework the DWI grouping algorithm (#92) - -0.6.7 (January 9 2020) -====================== -This release adds some rather big updates to QSIRecon. - * FSL is updated to version 6.0.3 - * CUDA v9.1 support is added to the image (works with GPUS in Docker and Singularity) - * A new robust b=0 masking algorith is introduced. 
- -0.6.5 (Nov 21, 2019) -==================== - * Improved handling of Freesurfer path (#50) - * Better logic in commandline argument checking (#50, #62) - * More robust brain masking for b=0 reference images (#73) - * Bugfix for reverse phase encoding directon dwi series (#68) - * Bugfix for warping eddy's CNR output (#72) - -0.6.4, 0.6.4-1 (Nov 11, 2019) -============================== - * IMPORTANT: commandline call changed to use official BIDS App - * eddy will use multiple cores if available - * Fixed bug in sentry interaction - - -0.6.2, 0.6.3RC1, 0.6.3RC2 (October 27, 2019) -============================================ - - * Bugfix: masking was not working on eddy. - * Bugfix: static versioning was not workign in the container. - * New graphics in the documentation. - * Use BSpline Interpolation if --output-resolution is higher than the input resolution. - - -0.6.0RC1, 0.6.2 (October 13, 2019) -================================== - -An issue was discovered in how voxel orientation interacts with TOPUP/eddy and outside -fieldmaps. Unless everything is in LAS+ prior to going into TOPUP/eddy, the warps are -incorrectly applied at the end of eddy. This resulted in fieldmap unwarping reports that -looked good but a final output that is bizarrely warped. Additionally, GRE fieldmaps would -result in outputs being under-unwarped. To fix all of these, TOPUP (if PEPOLAR fieldmaps are -being used) and eddy occur in LAS+, then their outputs are converted to LPS+ for GRE fieldmaps, -SyN. The rest of the pipeline happens in LPS+, like the SHORELine version. 
- - * Update installation method to match fMRIPrep - * Add CI tests for reconstruction workflows - * Make the ``--sloppy`` option affect the reconstruction workflows - * Fixes bug in 3dSHORE reconstruction (incorrect scaling) - * CRITICAL bug fix: convert everything to LAS+ if eddy is going to be used - * Added built-in reconstruction workflows - * Added Brainnetome, AICHA and the remaining Schaefer atlases - - -0.5.1, 0.5.1a, 0.5.2 (September 10, 2019) -========================================== - - * Address issues in Nipype causing random crashes - - -0.5.0 (August 11, 2019) -======================= - - * Use antsMultiVariateTemplateConstruction2.sh to make a b=0 template across scan groups - * Control the number of template iterations and deformation model with - ``--intramodal_template_iters`` and ``--intramodal_template_transform``. - -0.4.6 (July 23, 2019) -===================== - - * More documentation updates - * MSD calculated for MAPMRI - -0.4.5 (July 22, 2019) -===================== - - * Scalar outputs from MAPMRI - -0.4.4 (July 19, 2019) -====================== - - * Default eddy configuation changed to not use CUDA by default. - * Valerie added content to documentation - -0.4.3 (July 18, 2019) -===================== - -FSL tools are used to match SHORELine motion parameters to those from eddy. - - * Fieldcoefs are calculated from PEPOLAR and GRE fieldmaps and sent to TOPUP - * Motion estimates from SHORELine match eddy - -0.4.0 (June 7, 2019) -==================== - -Add workflows for eddy and TOPUP. - - * Adds eddy tests on CircleCI. 
diff --git a/Dockerfile b/Dockerfile index 61ce3066..4a131961 100644 --- a/Dockerfile +++ b/Dockerfile @@ -26,7 +26,7 @@ ARG VCS_REF ARG VERSION LABEL org.label-schema.build-date=$BUILD_DATE \ org.label-schema.name="qsirecon" \ - org.label-schema.description="qsirecon - q Space Images preprocessing tool" \ + org.label-schema.description="qsirecon - q Space Images postprocessing tool" \ org.label-schema.url="http://qsirecon.readthedocs.io" \ org.label-schema.vcs-ref=$VCS_REF \ org.label-schema.vcs-url="https://github.com/pennlinc/qsirecon" \ diff --git a/README.rst b/README.rst index d43d23b7..f30c4582 100644 --- a/README.rst +++ b/README.rst @@ -1,6 +1,6 @@ .. include:: links.rst -QSIRecon: Preprocessing and analysis of q-space images +QSIRecon: Reconstruction of preprocessed q-space images ======================================================= .. image:: https://img.shields.io/badge/Source%20Code-pennlinc%2Fqsirecon-purple @@ -36,34 +36,18 @@ About ``qsirecon`` configures pipelines for processing diffusion-weighted MRI (dMRI) data. The main features of this software are - 1. A BIDS-app approach to preprocessing nearly all kinds of modern diffusion MRI data. - 2. Automatically generated preprocessing pipelines that correctly group, distortion correct, - motion correct, denoise, coregister and resample your scans, producing visual reports and - QC metrics. - 3. A system for running state-of-the-art reconstruction pipelines that include algorithms + 1. A system for running state-of-the-art reconstruction pipelines that include algorithms from Dipy_, MRTrix_, `DSI Studio`_ and others. - 4. A novel motion correction algorithm that works on DSI and random q-space sampling schemes .. image:: https://github.com/PennLINC/qsirecon/raw/main/docs/_static/workflow_full.png -.. _preprocessing_def: - -Preprocessing -~~~~~~~~~~~~~~~ - -The preprocessing pipelines are built based on the available BIDS inputs, ensuring that fieldmaps -are handled correctly. 
The preprocessing workflow performs head motion correction, susceptibility -distortion correction, MP-PCA denoising, coregistration to T1w images, spatial normalization -using ANTs_ and tissue segmentation. - - .. _reconstruction_def: Reconstruction ~~~~~~~~~~~~~~~~ -The outputs from the :ref:`preprocessing_def` pipelines can be reconstructed in many other +The outputs from BIDS-compliant preprocessing pipelines can be reconstructed in many other software packages. We provide a curated set of :ref:`recon_workflows` in ``qsirecon`` that can run ODF/FOD reconstruction, tractography, Fixel estimation and regional connectivity. diff --git a/docs/api.rst b/docs/api.rst index 788cac95..83c5c692 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -16,17 +16,6 @@ Internal configuration system Library API *********** -Preprocessing Workflows ------------------------ - -.. toctree:: - :glob: - - api/qsirecon.workflows.base - api/qsirecon.workflows.anatomical - api/qsirecon.workflows.dwi - api/qsirecon.workflows.fieldmap - Reconstruction Workflows ------------------------ @@ -34,6 +23,7 @@ Reconstruction Workflows .. toctree:: :glob: + api/qsirecon.workflows.base api/qsirecon.workflows.recon diff --git a/docs/citing.rst b/docs/citing.rst index acf4f62e..e188bf95 100644 --- a/docs/citing.rst +++ b/docs/citing.rst @@ -5,7 +5,7 @@ =============== Citing qsirecon =============== -.. [Cieslak2022] Cieslak, M., Cook, P.A., He, X. et al. (2021) QSIRecon: an integrative platform for preprocessing and reconstructing diffusion MRI data. Nat Methods 18 (775–778) doi: https://doi.org/10.1038/s41592-021-01185-5 +.. [Cieslak2022] Cieslak, M., Cook, P.A., He, X. et al. (2021) QSIPrep: an integrative platform for preprocessing and reconstructing diffusion MRI data. Nat Methods 18 (775–778) doi: https://doi.org/10.1038/s41592-021-01185-5 .. 
[Fonov2011] Fonov VS, Evans AC, Botteron K, Almli CR, McKinstry RC, Collins DL and BDCG, Unbiased average age-appropriate atlases for pediatric studies, NeuroImage 54(1), 2011 diff --git a/docs/comparisons.md b/docs/comparisons.md index 0f2ee5c2..d63706e8 100644 --- a/docs/comparisons.md +++ b/docs/comparisons.md @@ -21,7 +21,7 @@ their current feature sets. These other ## Preprocessing -| | QSIRecon | Tractoflow | PreQual | MRtrix3_connectome | dMRIPrep | +| | QSIPrep | Tractoflow | PreQual | MRtrix3_connectome | dMRIPrep | | --------------------------------------------------- | :----------------------: | :--------------------: | :---------------------: | :----------------: | :--------------: | | BIDS App | ✔ | ✔ | ✘ | ✔ | ✔ | | Gradient direction sanity check | Q-form matching | ✘ | `dwigradcheck` | ✘ | ✘ | diff --git a/docs/index.rst b/docs/index.rst index f2738476..88fe6fe3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -15,7 +15,6 @@ Contents installation quickstart usage - preprocessing reconstruction contributors citing diff --git a/docs/preprocessing.rst b/docs/preprocessing.rst deleted file mode 100644 index 5c61e547..00000000 --- a/docs/preprocessing.rst +++ /dev/null @@ -1,902 +0,0 @@ -.. include:: links.rst - -.. _preprocessing: - -Preprocessing -============= - -Building a pipeline --------------------- - -QSIRecon builds a pipeline based on your BIDS inputs. In general the pipeline will incorporate -all the data it knows how to handle (i.e. fieldmaps, dMRI and anatomical data) automatically. -There may be cases where you want to change the default behavior, particularly in regard to - - 1. :ref:`merging` - 2. :ref:`merge_denoise` - 3. Head motion correction, either - - a. :ref:`fsl_wf` - b. :ref:`dwi_hmc` - - 4. :ref:`dwi_only` - - -.. _merging: - -Merging multiple scans from a session -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -For q-space imaging sequences it is common to have multiple separate scans to -acquire the entire sampling scheme. 
These scans get aligned and merged into -a single DWI series before reconstruction. It is also common to collect -a DWI scan (or scans) in the reverse phase encoding direction to use for -susceptibility distortion correction (SDC). - -This creates a number of possible scenarios for preprocessing your DWIs. These -scenarios can be controlled by the ``--separate_all_dwis`` argument. If your study -has multiple sessions, DWI scans will *never* be combined across sessions. -Merging only occurs within a session. - -If ``--separate-all-dwis`` is present in the commandline call, each dwi -scan in the ``dwi`` directories will be processed independently. You will have one -preprocessed output per each DWI file in your input. - -Otherwise (default) the DWI scans will be merged (i.e. their images will be concatenated). -The merging affects the pipeline at different stages. If all DWIs in a session -are in the same PE direction, they will be merged into a single series. If there are -two PE directions detected in the DWI scans and ``'fieldmaps'`` is not in ``ignore``, -images are combined according to their PE direction, and their b0 reference images are used to -perform SDC. Further complicating this is the FSL workflow, which combines distortion correction -with eddy/motion correction and will merge scans with different PE directions. - -If you have some scans you want to combine and others you want to preprocess separately, -you can call qsirecon more than once with BIDS filters to process the different scans. - -.. _bids_filters: - -Using BIDS filters -^^^^^^^^^^^^^^^^^^^ - -BIDS filters allow users to filter the set of images available to QSIRecon at run -time. BIDS filters should be stored in a json file and passed to QSIRecon with -the ``--bids-filter-file`` option. -Filters modify "queries", which are used to find data for each data type. 
-NOTE: this is illustrating how modalities are queried in general, and is not the format -of the file you will send to ``--bids-filter-file``. The queries in QSIRecon are:: - - { - "fmap": {"datatype": "fmap"}, - "sbref": {"datatype": "func", "suffix": "sbref"}, - "flair": {"datatype": "anat", "suffix": "FLAIR"}, - "t2w": {"datatype": "anat", "suffix": "T2w"}, - "t1w": {"datatype": "anat", "suffix": "T1w"}, - "roi": {"datatype": "anat", "suffix": "roi"}, - "dwi": {"datatype": "dwi", "suffix": "dwi"} - } - -Each query has several "entities", which can be modified by filters. The list of -supported entities is `here -`__. -To filter data, modify the queries by changing one or more of the supported -entities in the BIDS filter file. The general format of the filter file is:: - - { - "query": { "entity": "value" } - } - -The entities specified in the filter file are added to the queries, so you only -need to include entities you want to use for filtering. For example, this could -be the contents of a valid BIDS filter file:: - - { - "t1w": { "session": "MR1" }, - "dwi": { "session": "MR1", "run": "1" } - } - -this modifies the "t1w" and "dwi" queries, and filters both T1w and DWI scans to -select session "MR1". It also filters on the run number for DWI scans only. - -Multiple runs can be selected by passing arrays. For example:: - - { - "dwi": { "run": [2,3] } - } - -filters the "dwi" query for runs 2 and 3. - -You can enable regular expressions for more detailed filtering, for example:: - - { - "t1w": { "acquisition": "(?i)mprage", "regex_search": "true" }, - } - -will do a case-insensitive match of "mprage" within the "t1w" query. - -.. _merge_denoise: - -Denoising and Merging Images -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The user can decide whether to do certain preprocessing steps and, if so, -whether they are performed *before* or *after* the DWI series are -concatenated. 
Specifically, image denoising (using ``dwidenoise`` or -``patch2self``) can be disabled with ``--denoise-method none``. Gibbs -unringing (using ``mrdegibbs``) is disabled by default but can be enabled -with ``--unringing-method mrdegibbs``. B1 bias field correction is applied by -default (using ``dwibiascorrect``) and can be disabled with the -``--dwi-no-biascorr`` option. The intensity of b=0 images is harmonized -across scans (i.e. scaled to an average value) by default, but this can be -turned off using ``--dwi-no-b0-harmonization``. - -Together, denoising (MP-PCA or patch2self), Gibbs unringing B1 bias field -correction and b=0 intensity normalization are referred to as *denoising* in -QSIRecon. Each of these image processing operations has assumptions about its -inputs and changes the distribution of noise in its outputs. Although the -inclusion of each operation can be decided by the user, the order in which -they are applied relative to one another is fixed. MP-PCA or patch2self are -applied directly to the BIDS inputs, which should be uninterpolated and as -"raw" as possible. Although Gibbs unringing should be performed on "raw" -data, it is recommended in the MRtrix3 documentation to apply MP-PCA before -Gibbs unringing. B1 bias field correction and b=0 intensity harmonization -do not have as specific requirements about their inputs so are run last. - -The last, and potentially very important decision, is whether the denoising -operations are applied to each input DWI series individually or whether the -denoising operations are applied to the concatenated input DWI files. At -present, there is little data to guide this choice. The more volumes -available, the more data MP-PCA/patch2self have to work with. However, if -there if the head is in a vastly different location in different scans, -denoising might be impacted in unpredictable ways. - -Consider MP-PCA. 
If a voxel contains CSF in one DWI series and the subject -repositions their head between scans so that the voxel contains corpus -callosum in the next DWI series, the non-noise signal will be very different -in the two series. Similarly, if the head is repositioned different areas -will be closer to the head coil and therefore be inconsistently affected by -B1 bias field. Similar problems can also occur *within* a DWI series due to -subject head motion, but these methods have been shown to work well even in -the presence of within-scan head movement. If the head position changes -across scans is of a similar magnitude to that of within-scan head motion, it -is likely fine to use the ``--denoise-after-combining`` option. To gauge how -much between-scan motion occurred, users can inspect the :ref:`qc_data` to see -whether Framewise Displacement is large where a new series begins. - -By default, the scans in the same warped space are individually denoised before -they are concatenated. When warped groups are concatenated an additional b=0 -image intensity normalization is performed. - - - - - -Preprocessing HCP-style -^^^^^^^^^^^^^^^^^^^^^^^ - -QSIRecon can be configured to produce a very similar pipeline to the HCP dMRI pipelines. -HCP and HCP-Lifespan scans acquire complete multi-shell sequences in opposing phase -encoding directions, making them a special case where :ref:`sdc_pepolar` are used -and the corrected images from both PE directions are averaged at the end. To produce -output from ``qsirecon`` that is directly comparable to the HCP dMRI pipeline you -will want to include:: - - --distortion-group-merge average \ - --combine-all-dwis \ - -If you want to disable the image pair averaging and get a result with twice as -many images, you can substitute ``average`` with ``concat``. - - -.. _outputs: - -Outputs of qsirecon -------------------- - -qsirecon generates three broad classes of outcomes: - - 1. 
**Visual QA (quality assessment) reports**: - one :abbr:`HTML (hypertext markup language)` per subject, - depicting images that provide a sanity check for each step of the pipeline. - - 2. **Pre-processed imaging data** such as anatomical segmentations, realigned and resampled - diffusion weighted images and the corresponding corrected gradient files in FSL and MRTrix - format. - - 3. **Additional data for subsequent analysis**, for instance the transformations - between different spaces or the estimated head motion and model fit quality calculated - during model-based head motion correction. - - 4. **Quantitative QA**: - A single-line csv file per subject summarizing subject motion, coregistration quality and - image quality. - - -Visual Reports -^^^^^^^^^^^^^^^ - -qsirecon outputs summary reports, written to ``/qsirecon/sub-.html``. These reports provide a quick way to -make visual inspection of the results easy. One useful graphic is the -animation of the q-space sampling scheme before and after the pipeline. Here -is a sampling scheme from a DSI scan: - -.. figure:: _static/sampling_scheme.gif - :scale: 75% - - A Q5 DSI sampling scheme before (left) and after (right) preprocessing. - This is useful to confirm that the gradients have indeed been rotated and - that head motion correction has not disrupted the scheme extensively. - - -Preprocessed data (qsirecon *derivatives*) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are additional files, called "Derivatives", written to -``/qsirecon/sub-/``. - -Derivatives related to T1w files are nearly identical to those produced by ``FMRIPREP`` and -can be found in the ``anat`` subfolder: - -- ``*T1w_brainmask.nii.gz`` Brain mask derived using ANTs' ``antsBrainExtraction.sh``. -- ``*T1w_class-CSF_probtissue.nii.gz`` -- ``*T1w_class-GM_probtissue.nii.gz`` -- ``*T1w_class-WM_probtissue.nii.gz`` tissue-probability maps. -- ``*T1w_dtissue.nii.gz`` Tissue class map derived using FAST. 
-- ``*T1w_preproc.nii.gz`` Bias field corrected T1w file, using ANTS' N4BiasFieldCorrection -- ``*T1w_space-MNI152NLin2009cAsym_brainmask.nii.gz`` Same as ``_brainmask`` above, but in MNI space. -- ``*T1w_space-MNI152NLin2009cAsym_class-CSF_probtissue.nii.gz`` -- ``*T1w_space-MNI152NLin2009cAsym_class-GM_probtissue.nii.gz`` -- ``*T1w_space-MNI152NLin2009cAsym_class-WM_probtissue.nii.gz`` Probability tissue maps, transformed into MNI space -- ``*T1w_space-MNI152NLin2009cAsym_dtissue.nii.gz`` Same as ``_dtissue`` above, but in MNI space -- ``*T1w_space-MNI152NLin2009cAsym_preproc.nii.gz`` Same as ``_preproc`` above, but in MNI space -- ``*T1w_space-MNI152NLin2009cAsym_target-T1w_warp.h5`` Composite (warp and affine) transform to map from MNI to T1 space -- ``*T1w_target-MNI152NLin2009cAsym_warp.h5`` Composite (warp and affine) transform to transform T1w into MNI space - -.. Note: - These are in LPS+ orientation, so are not identical to FMRIPREP's anatomical outputs - -Derivatives related to diffusion images are in the ``dwi`` subfolder. - -- ``*_confounds.tsv`` A tab-separated value file with one column per calculated confound and one row per timepoint/volume - -Volumetric output spaces include ``T1w`` (default) and ``MNI152NLin2009cAsym``. - -- ``*dwiref.nii.gz`` The b0 template -- ``*desc-brain_mask.nii.gz`` The generous brain mask that should be reduced probably -- ``*desc-preproc_dwi.nii.gz`` Resampled DWI series including all b0 images. -- ``*desc-preproc_dwi.bval``, ``*desc-preproc_dwi.bvec`` FSL-style bvals and bvecs files. - *These will be incorrectly interpreted by MRTrix, but will work with DSI Studio and Dipy.* - Use the ``.b`` file for MRTrix. -- ``desc-preproc_dwi.b`` The gradient table to import data into MRTrix. This and the - ``_dwi.nii.gz`` can be converted directly to a ``.mif`` file using the ``mrconvert -grad _dwi.b`` - command. 
-- ``*bvecs.nii.gz`` Each voxel contains a gradient table that has been adjusted for local - rotations introduced by spatial warping. -- ``*cnr.nii.gz`` Each voxel contains a contrast-to-noise model defined as the variance of the - signal model divided by the variance of the error of the signal model. - - -.. _dwi_confounds: - -Confounds -^^^^^^^^^^^^ - -See implementation on :func:`~qsirecon.workflows.dwi.confounds.init_dwi_confs_wf`. - - -For each DWI processed by qsirecon, a -``/qsirecon/sub-/func/sub-_task-_run-_confounds.tsv`` -file will be generated. These are :abbr:`TSV (tab-separated values)` tables, -which look like the example below:: - - framewise_displacement trans_x trans_y trans_z rot_x rot_y rot_z hmc_r2 hmc_xcorr original_file grad_x grad_y grad_z bval - - n/a -0.705 -0.002 0.133 0.119 0.350 0.711 0.941 0.943 sub-abcd_dwi.nii.gz 0.000 0.000 0.000 0.000 - 16.343 -0.711 -0.075 0.220 0.067 0.405 0.495 0.945 0.946 sub-abcd_dwi.nii.gz 0.000 0.000 0.000 0.000 - 35.173 -0.672 -0.415 0.725 0.004 0.468 1.055 0.756 0.766 sub-abcd_dwi.nii.gz -0.356 0.656 0.665 3000.000 - 45.131 0.021 -0.498 1.046 0.403 0.331 1.400 0.771 0.778 sub-abcd_dwi.nii.gz -0.935 0.272 0.229 3000.000 - 37.506 -0.184 0.117 0.723 0.305 0.138 0.964 0.895 0.896 sub-abcd_dwi.nii.gz -0.187 -0.957 -0.223 2000.000 - 16.388 -0.447 0.020 0.847 0.217 0.129 0.743 0.792 0.800 sub-abcd_dwi.nii.gz -0.111 -0.119 0.987 3000.000 - -The motion parameters come from the model-based head motion estimation -workflow. The ``hmc_r2`` and ``hmc_xcorr`` are whole-brain r^2 values and -cross correlation scores (using the ANTs definition) between the -model-generated target image and the motion-corrected empirical image. The -final columns are not really confounds, but book-keeping information that -reminds us which 4d DWI series the image originally came from and what -gradient direction (``grad_x``, ``grad_y``, ``grad_z``) and gradient strength -``bval`` the image came from. 
This can be useful for tracking down -mostly-corrupted scans and can indicate if the head motion model isn't -working on specific gradient strengths or directions. - -.. _qc_data: - -Quality Control data -^^^^^^^^^^^^^^^^^^^^^ - -A single-line csv file (``desc-ImageQC_dwi.csv``) is created for each output -image. This file is particularly useful for comparing the relative quality -across subjects before deciding who to include in a group analysis. The -columns in this file come from DSI Studio's QC calculation and is described -in [Yeh2019]_. Columns prefixed by ``raw_`` reflect QC measurements from the -data before preprocessing. Columns prefixed by ``t1_`` or ``mni_`` contain QC -metrics calculated on the preprocessed data. Motion parameter summaries are -also provided, such as the mean and max of framewise displacement -(``mean_fd``, ``max_fd``). The max and mean absolute values for translation -and rotation are ``max_translation`` and ``max_rotation`` and the maxima of -their derivatives are in ``max_rel_translation`` and ``max_rel_rotation``. -Finally, the difference in spatial overlap between the anatomical mask and -the anatomical brain mask and the DWI brain mask is calculated using the Dice -distance in ``t1_dice_distance`` and ``mni_dice_distance``. - -Confounds and "carpet"-plot on the visual reports -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -fMRI has been using a "carpet" visualization of the -:abbr:`BOLD (blood-oxygen level-dependent)` time-series (see [Power2016]_), -but this type of plot does not make sense for DWI data. Instead, we plot -the cross-correlation value between each raw slice and the HMC model signal -resampled into that slice. -This plot is included for each run within the corresponding visual report. -Examples of these plots follow: - - -.. figure:: _static/sub-abcd_carpetplot.svg - :scale: 100% - - For SHORELine higher scores appear more yellow, while lower scores - are more blue. 
Not all slices contain the same number of voxels, - so the number of voxels in the slice is represented in the color bar - to the left of the image plot. The more yellow the pixel, the more - voxels are present in the slice. Purple pixels reflect slices with fewer - brain voxels. - -.. figure:: _static/sub-pnc_carpetplot.png - :scale: 40% - - For eddy slices with more outliers appear more yellow, while fewer - outliers is more blue. - -.. _workflow_details: - -Preprocessing pipeline details ------------------------------- - -``qsirecon`` adapts its pipeline depending on what data and metadata are -available and are used as the input. - - -Processing the *Subject Anatomical Reference* T1w or T2w images -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -:func:`qsirecon.workflows.anatomical.init_anat_preproc_wf` - -.. workflow:: - :graph2use: orig - :simple_form: yes - - from qsirecon.workflows.anatomical import init_anat_preproc_wf - wf = init_anat_preproc_wf(omp_nthreads=1, - reportlets_dir='.', - output_dir='.', - dwi_only=False, - infant_mode=False, - template='MNI152NLin2009cAsym', - output_spaces=['T1w'], - output_resolution=1.25, - skull_strip_template='OASIS', - force_spatial_normalization=True, - freesurfer=True, - longitudinal=False, - debug=False, - hires=True, - num_t1w=1) - -As of version 0.18 QSIRecon has been changed to be very flexible with anatomical -processing workflows. Versions prior to 0.18 were focused on the T1w images and -provided only 2 possible templates. Version 0.18 introduces 2 terms that -simplify the anatomical processing and open up new opportunities for choosing -a template. First, is the *subject anatomical reference* and the second is the -*template anatomical reference*. - -As a dMRI-focused tool, QSIRecon only uses an *anatomical reference* image for an -extra-robust brain extraction and to get a tissue segmentation for visualizing -the susceptibility distortion correction results. 
The anatomical workflows
-leverage fast and powerful tools from FreeSurfer, namely ``SynthStrip`` and
-``SynthSeg`` to perform brain extraction and segmentation.
-
-Many imaging protocols acquire some high-resolution, undistorted anatomical
-reference scans. QSIRecon can use either T1-weighted or T2-weighted 3D images as
-the *anatomical reference*. To specify which contrast you'd like to use for your
-anatomical reference, be sure to specify ``--anatomical-contrast`` as either
-``T1w``, ``T2w`` or ``none``. Specifying ``none`` is equivalent to the previous
-option of ``--dwi-only``, where no anatomical images are used from the input
-data and the AC-PC alignment is based either on the adult or infant MNI
-templates.
-
-We discourage the use of ``--anatomical-contrast none`` in most cases. It is
-very rare to have dMRI data without any kind of T1w or T2w image from the
-same individual.
-
-Regardless of whether you are using T1w or T2w images as your anatomical reference,
-the following steps will be applied to the anatomical reference images:
-Processing the *Anatomical Reference* images
-
-  1. The anatomical sub-workflow begins by constructing an average image by
-     :ref:`conforming ` all found T1w or T2w images to LPS+
-     orientation and a common voxel size.
-  2. If there are multiple images of the preferred anatomical contrast, they will
-     be bias corrected using N4 and aligned to one another. If ``--longitudinal``
-     is specified they will be unbiasedly registered to each other using ANTs.
-     Otherwise all the images are registered to the first image (see
-     `Longitudinal T1w processing`_).
-  3. Brain extraction is performed using ``SynthStrip``.
-
-.. figure:: _static/brainextraction_t1.svg
-    :scale: 100%
-
-    Brain extraction
-
-  4. Brain tissue segmentation is performed using ``SynthSeg``
-
-.. figure:: _static/segmentation.svg
-    :scale: 100%
-
-    Brain tissue segmentation.
-
-  5. Rigid alignment to the *subject anatomical reference*. This can take
-     two forms.
If the *template anatomical reference* is a standard
-     template, this will effectively AC-PC align the output data. If the
-     *template anatomical reference* is another scan of the same
-     individual (e.g. the output of fmriprep), the output will be aligned
-     to this external image.
-
-  6. If the template anatomical reference is not a native scan, then ANTs'
-     ``antsRegistration`` will register the subject images to the template
-     anatomical reference in a multiscale, mutual-information based, nonlinear
-     registration scheme.
-
-
-.. _t1preproc_steps:
-
-Handling Lesions and abnormalities
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When processing images from patients with focal brain lesions (e.g. stroke, tumor
-resection), it is possible to provide a lesion mask to be used during spatial
-normalization to MNI-space [Brett2001]_.
-ANTs will use this mask to minimize warping of healthy tissue into damaged
-areas (or vice-versa).
-Lesion masks should be binary NIfTI images (damaged areas = 1, everywhere else = 0)
-in the same space and resolution as the T1 image, and follow the naming convention specified in
-`BIDS Extension Proposal 3: Common Derivatives `_
-(e.g. ``sub-001_T1w_label-lesion_roi.nii.gz``).
-This file should be placed in the ``sub-*/anat`` directory of the BIDS dataset
-to be run through ``qsirecon``.
-
-.. figure:: _static/T1MNINormalization.svg
-    :scale: 100%
-
-    Animation showing T1w to MNI normalization
-
-
-Longitudinal T1w processing
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In the case of multiple T1w images (across sessions and/or within a session),
-T1w images are merged into a single template image using FreeSurfer's
-``mri_robust_template``. This template may be *unbiased*, or equidistant from
-all source images, or aligned to the first image (determined lexicographically
-by session label).
For two images, the additional cost of estimating an unbiased -template is trivial and is the default behavior, but, for greater than two -images, the cost can be a slowdown of an order of magnitude. -Therefore, in the case of three or more images, ``qsirecon`` constructs -templates aligned to the first image, unless passed the ``--longitudinal`` -flag, which forces the estimation of an unbiased template. - -.. note:: - - The preprocessed T1w image defines the ``T1w`` space. - In the case of multiple T1w images, this space may not be precisely aligned - with any of the original images. - Reconstructed surfaces and functional datasets will be registered to the - ``T1w`` space, and not to the input images. - - -Processing Infant Data -^^^^^^^^^^^^^^^^^^^^^^ - -When processing infant DWI data, users may add ``--infant`` to their -QSIRecon call. This will swap the default MNI152NLin2009cAsym template -with the MNI infant template. It is highly advisable to also include -``--dwi-only`` to avoid problems with T1w skull-stripping. - -.. _dwi_overview: - -DWI preprocessing -^^^^^^^^^^^^^^^^^ - -:func:`qsirecon.workflows.dwi.base.init_dwi_preproc_wf` - -.. 
workflow:: - :graph2use: orig - :simple_form: yes - - from qsirecon.workflows.dwi.base import init_dwi_preproc_wf - wf = init_dwi_preproc_wf(dwi_only=False, - scan_groups={'dwi_series': ['fake.nii'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j'}, - source_file='/data/sub-1/dwi/sub-1_dwi.nii.gz', - output_prefix='', - ignore=[], - b0_threshold=100, - motion_corr_to='iterative', - b0_to_t1w_transform='Rigid', - hmc_model='3dSHORE', - hmc_transform='Rigid', - shoreline_iters=2, - impute_slice_threshold=0, - eddy_config=None, - reportlets_dir='.', - output_spaces=['T1w'], - dwi_denoise_window=5, - denoise_method='dwidenoise', - unringing_method='mrdegibbs', - b1_biascorr_stage='final', - no_b0_harmonization=False, - denoise_before_combining=True, - template='MNI152NLin2009cAsym', - output_dir='.', - omp_nthreads=1, - fmap_bspline=False, - fmap_demean=True, - use_syn=True, - force_syn=False, - low_mem=False, - sloppy=True, - layout=None) - - -Preprocessing of :abbr:`DWI (Diffusion Weighted Image)` files is -split into multiple sub-workflows described below. - -.. _fsl_wf: - -Head-motion / Eddy Current/ Distortion correction (FSL) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:func:`qsirecon.workflows.dwi.fsl.init_fsl_hmc_wf` - -FSL provides the most widely-used tools for head motion correction, eddy -current correction and susceptibility distortion correction. These tools -are designed to work directly with one another and share a file format that -is unique to their workflow. - -To ensure that the FSL workflow works as intended, all inputs are forced into -to the FSL standard orientation. The head motion, eddy current and suscebtibility -distortion corrections are applied at the end of ``eddy``, which means that -there will be *two* total interpolations in the FSL-based qsirecon workflow, as -the final interpolation into T1w/AC-PC space is done externally in ANTs. - -The FSL workflow can take three different forms. - - 1. 
No distortion correction - 2. PEPOLAR distortion correction (using topup) - 3. Fieldmap-based distortion correction - -No distortion correction -++++++++++++++++++++++++ - -If there are no fieldmap images or the user has specified ``--ignore fieldmaps``, -no distortion correction will occur. In this case, only head motion correction -and eddy current correction will be performed. The workflow looks like this: - -.. workflow:: - :graph2use: colored - :simple_form: yes - - from qsirecon.workflows.dwi.fsl import init_fsl_hmc_wf - wf = init_fsl_hmc_wf({'dwi_series':['dwi1.nii', 'dwi2.nii'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j'}, - b0_threshold=100, - impute_slice_threshold=0., - fmap_demean=False, - fmap_bspline=False, - eddy_config=None, - source_file='/path/to/dwi/sub-X_dwi.nii.gz', - omp_nthreads=1) - - -PEPOLAR (TOPUP) Distortion Correction -+++++++++++++++++++++++++++++++++++++ - -When images with different phase encoding directions are available, either -dedicated fieldmaps (in the ``fmap/`` directory) or DWI series -(in the ``dwi/`` directory), example b=0 images can be - -.. workflow:: - :graph2use: colored - :simple_form: yes - - from qsirecon.workflows.dwi.fsl import init_fsl_hmc_wf - wf = init_fsl_hmc_wf({'dwi_series': [ - '.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'}, - b0_threshold=100, - impute_slice_threshold=0., - fmap_demean=False, - fmap_bspline=False, - eddy_config=None, - source_file='/path/to/dwi/sub-X_dwi.nii.gz', - omp_nthreads=1) - - -Fieldmap-based Distortion Correction -++++++++++++++++++++++++++++++++++++ - -If a GRE fieldmap or SyN-based fieldmapless distortion correction -are detected, these will be performed on the outputs of ``eddy``. -For details see :ref:`dwi_sdc`. - -.. 
workflow:: - :graph2use: orig - :simple_form: yes - - from qsirecon.workflows.dwi.fsl import init_fsl_hmc_wf - wf = init_fsl_hmc_wf({'dwi_series': ['.../phasediff/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../phasediff/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'phasediff': '.../phasediff/sub-1/fmap/sub-1_phasediff.nii.gz', - 'magnitude1': '.../magnitude1/sub-1/fmap/sub-1_magnitude1.nii.gz', - 'suffix': 'phasediff'}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}, - b0_threshold=100, - impute_slice_threshold=0., - fmap_demean=False, - fmap_bspline=False, - eddy_config=None, - source_file='/path/to/dwi/sub-X_dwi.nii.gz', - omp_nthreads=1) - -.. _configure_eddy: - -Configuring ``eddy`` -+++++++++++++++++++++ - -``eddy`` has many configuration options. Instead of making these commandline -options, you can specify them in a JSON file and pass that to ``qsirecon`` -using the ``--eddy-config`` option. An example (default) eddy config json can -be viewed or downloaded `here -`__ - - - -.. _dwi_hmc: - -Head-motion estimation (SHORELine) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:func:`qsirecon.workflows.dwi.hmc.init_dwi_hmc_wf` - - -A long-standing issue for q-space imaging techniques, particularly DSI, has -been the lack of motion correction methods. DTI and multi-shell HARDI have -had ``eddy_correct`` and ``eddy`` in FSL, but DSI has relied on aligning the -interleaved b0 images and applying the transforms to nearby non-b0 images. - -``qsirecon`` introduces a method for head motion correction that iteratively -creates target images based on ``3dSHORE`` or ``MAPMRI`` fits. -First, all b0 images are aligned to a midpoint b0 image (or the first b0 image -if ``hmc_align_to="first"``) and each non-b0 image is transformed along with -its nearest b0 image. - -Then, for each non-b0 image, a ``3dSHORE`` or ``MAPMRI`` -model is fit to all the other images with that image left out. 
The model is then
-used to generate a target signal image for the gradient direction and magnitude
-(i.e. q-space coordinate) of the left-out image. The left-out image is registered
-to the generated target
-signal image and its vector is rotated accordingly. A new model is fit on the
-transformed images and their rotated vectors. The leave-one-out procedure is
-then repeated on this updated DWI and gradient set.
-
-If ``"none"`` is specified as the hmc_model, then only the b0 images are used
-and the non-b0 images are transformed based on their nearest b0 image. This
-is probably not a great idea.
-
-Susceptibility distortion correction is run as part of this pipeline to be
-consistent with the ``TOPUP``/``eddy`` workflow.
-
-Ultimately a list of 6 (or 12)-parameters per time-step is written and
-fed to the :ref:`confounds workflow `. These are used to
-estimate framewise displacement. Additionally, measures of model fits
-are saved for each slice for display in a carpet plot-like thing.
-
-
-.. workflow::
-    :graph2use: colored
-    :simple_form: yes
-
-    from qsirecon.workflows.dwi.hmc_sdc import init_qsirecon_hmcsdc_wf
-    wf = init_qsirecon_hmcsdc_wf({'dwi_series':['dwi1.nii', 'dwi2.nii'],
-                                 'fieldmap_info': {'suffix': None},
-                                 'dwi_series_pedir': 'j'},
-                                source_file='/data/sub-1/dwi/sub-1_dwi.nii.gz',
-                                b0_threshold=100,
-                                hmc_transform='Affine',
-                                hmc_model='3dSHORE',
-                                hmc_align_to='iterative',
-                                template='MNI152NLin2009cAsym',
-                                shoreline_iters=1,
-                                impute_slice_threshold=0,
-                                omp_nthreads=1,
-                                fmap_bspline=False,
-                                fmap_demean=False,
-                                use_syn=True,
-                                force_syn=False,
-                                name='qsirecon_hmcsdc_wf',
-                                dwi_metadata={})
-
-
-.. _dwi_sdc:
-
-Susceptibility correction methods
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-:func:`qsirecon.workflows.fieldmap.base.init_sdc_wf`
-
-.. figure:: _static/unwarping.svg
-
-There are three kinds of SDC available in qsirecon:
-
- 1.
:ref:`sdc_pepolar` (also called **blip-up/blip-down**):
-    This is the implementation from sdcflows, using 3dQwarp to
-    correct a DWI series using a fieldmap in the fmaps directory [Jezzard1995]_.
-    The reverse phase encoding direction scan can come from the fieldmaps directory
-    or the dwi directory. If using :ref:`fsl_wf`, then ``TOPUP`` is used for this correction.
-    Also relevant is :ref:`best_b0`.
-
-  2. :ref:`sdc_phasediff`: Use a B0map sequence that includes at least one magnitude
-     image and two phase images or a phasediff image.
-
-  3. :ref:`sdc_fieldmapless`: The SyN-based susceptibility distortion correction
-     implemented in FMRIPREP. To use this method, include argument ``--use-syn-sdc`` when
-     calling qsirecon. Briefly, this method estimates a SDC warp using ANTS SyN based
-     on an average fieldmap in MNI space. For details on this method, see the
-     FMRIPREP documentation.
-
-``qsirecon`` determines if a fieldmap should be used based on the ``"IntendedFor"``
-fields in the JSON sidecars in the ``fmap/`` directory.
-
-.. _best_b0:
-
-Selecting representative images for PEPOLAR
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-``TOPUP`` estimates EPI distortion based on the shapes of images with
-different phase encoding directions and total readout times (i.e. warped
-groups). It is therefore ideal to provide less-noisy images as inputs, so the
-registration has plenty of accurate anatomical features to work with.
-
-For diffusion-weighted MRI, the b=0 images are used as input to TOPUP. While
-these contain a lot of anatomical detail, they can also contain troublesome
-artefacts such as spin history, head motion and slice dropout.
-
-In QSIRecon versions up until 0.13, up to 3 b=0 images were selected per
-warped group as input to ``TOPUP``. The images were selected to be
-evenly spaced within their acquisitions.
-
-In versions 0.13 and later, QSIRecon finds the "most representative" b=0
-images per warped group.
A nearly identical approach is used in the -developmental HCP pipelines, where a pairwise spatial correlation score is -calculated between all b=0 images of the same warped group and the images -with the highest average correlation to the other images are used as input -to ``TOPUP``. To see which images were selected, examine the ``selected_for_topup`` -column in the confounds tsv file. - - -.. _dwi_only: - -Using only DWI data (bypassing the T1w workflows) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is possible to use QSIRecon to process *only* diffusion-weighted images. In -the case of infant data, where robust skull-stripping methods are not -currently available, or where anatomical preprocessing has already been -performed in another pipeline, the user can specify ``--dwi-only``. - -Instead of registering the b=0 template image to the skull-stripped T1w -image, the b=0 template is registered directly to a template and only the -rigid part of the transformation is kept. This results in an AC-PC aligned -b=0 template that maintains the shape and size of the original image. - -In this case the ``b0_anat_coreg`` workflow instead registers the b=0 reference -to an AC-PC-oriented template and the rigid components of the coregistration -transform are extracted. - -.. _dwi_ref: - -DWI reference image estimation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:func:`qsirecon.workflows.dwi.util.init_dwi_reference_wf` - -.. workflow:: - :graph2use: orig - :simple_form: yes - - from qsirecon.workflows.anatomical import init_dwi_reference_wf - wf = init_dwi_reference_wf(omp_nthreads=1, - gen_report=True, - source_file="sub-1_dwi.nii.gz", - register_t1=True) - -This workflow estimates a reference image for a DWI series. This -procedure is different from the DWI reference image workflow in the -sense that true brain masking isn't usually done until later in the -pipeline for DWIs - - -.. 
_resampling: - -Pre-processed DWIs in a different space -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:func:`qsirecon.workflows.dwi.resampling.init_dwi_trans_wf` - -.. workflow:: - :graph2use: orig - :simple_form: yes - - from qsirecon.workflows.dwi.resampling import init_dwi_trans_wf - wf = init_dwi_trans_wf(source_file='sub-1_dwi.nii.gz', - template="ACPC", - output_resolution=1.2, - use_compression=True, - to_mni=False, - write_local_bvecs=True, - mem_gb=3, - omp_nthreads=1) - -A DWI series is resampled to an output space. The ``output_resolution`` is -specified on the commandline call. All transformations, including head motion -correction, susceptibility distortion correction, coregistration and (optionally) -normalization to the template is performed in a single shot using a Lanczos kernel. - -There are two ways that the gradient vectors can be saved. This workflow always -produces a FSL-style bval/bvec pair for the image and a MRTrix .b gradient table -with the rotations from the linear transforms applied. You can also write out -a ``local_bvecs`` file that contains a 3d vector that has been rotated to account -for nonlinear transforms in each voxel. I'm not aware of any software that can -use these yet, but it's an interesting idea. - - -.. _b0_reg: - -b0 to T1w registration -^^^^^^^^^^^^^^^^^^^^^^ - -:func:`qsirecon.workflows.dwi.registration.init_b0_to_anat_registration_wf` - -.. workflow:: - :graph2use: orig - :simple_form: yes - - from qsirecon.workflows.dwi.registration import init_b0_to_anat_registration_wf - wf = init_b0_to_anat_registration_wf( - mem_gb=3, - omp_nthreads=1, - transform_type="Rigid", - write_report=False) - -This just uses `antsRegistration`. - - - -.. topic:: References - - .. [Power2016] Power JD, A simple but useful way to assess fMRI scan qualities. - NeuroImage. 2016. 
doi: `10.1016/j.neuroimage.2016.08.009 `_ diff --git a/docs/usage.rst b/docs/usage.rst index dae9e985..eb6ebf85 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -3,9 +3,10 @@ Usage ----- -The ``qsirecon`` preprocessing workflow takes as principal input the path of -the dataset that is to be processed. The input dataset is required to be in -valid :abbr:`BIDS (Brain Imaging Data Structure)` formate at least one +The ``qsirecon`` postprocessing workflow takes as principal input the path of +the preprocessing derivatives dataset that is to be processed. +The input dataset is required to be in +valid :abbr:`BIDS (Brain Imaging Data Structure)` format with at least one diffusion MRI series. The T1w image and the DWI may be in separate BIDS folders for a given subject. We highly recommend that you validate your dataset with the free, online `BIDS Validator diff --git a/long_description.rst b/long_description.rst index 0a094d75..26728156 100644 --- a/long_description.rst +++ b/long_description.rst @@ -1,10 +1,10 @@ -qsirecon borrows heavily from FMRIPREP to build workflows for preprocessing q-space images +QSIRecon borrows heavily from fMRIPrep to build workflows for preprocessing q-space images such as Diffusion Spectrum Images (DSI), multi-shell HARDI and compressed sensing DSI (CS-DSI). It utilizes Dipy and ANTs to implement a novel high-b-value head motion correction approach using q-space methods such as 3dSHORE to iteratively generate head motion target images for each gradient direction and strength. -Since qsirecon uses the FMRIPREP workflow-building strategy, it can also generate methods +Since qsirecon uses the fMRIPrep workflow-building strategy, it can also generate methods boilerplate and quality-check figures. 
Users can also reconstruct orientation distribution functions (ODFs), fiber orientation diff --git a/pyproject.toml b/pyproject.toml index 649f3659..882e16b1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "qsirecon" -description = "qsirecon builds workflows for preprocessing and reconstructing q-space images" +description = "qsirecon builds workflows for reconstructing q-space images" readme = "long_description.rst" authors = [{name = "The PennLINC developers"}] classifiers = [ diff --git a/qsirecon/cli/parser.py b/qsirecon/cli/parser.py index ea20ebb7..8ea43a31 100644 --- a/qsirecon/cli/parser.py +++ b/qsirecon/cli/parser.py @@ -145,7 +145,7 @@ def _bids_filter(value, parser): is_release = not any((currentv.is_devrelease, currentv.is_prerelease, currentv.is_postrelease)) parser = ArgumentParser( - description=f"{verstr}: q-Space Image Preprocessing workflows", + description=f"{verstr}: q-Space Image Reconstruction Workflows", formatter_class=ArgumentDefaultsHelpFormatter, **kwargs, ) @@ -168,7 +168,7 @@ def _bids_filter(value, parser): "output_dir", action="store", type=Path, - help="The output path for the outcomes of preprocessing and visual reports", + help="The output path for the outcomes of postprocessing and visual reports", ) parser.add_argument( "analysis_level", diff --git a/qsirecon/cli/run.py b/qsirecon/cli/run.py index cfdd66f2..b5b2a112 100644 --- a/qsirecon/cli/run.py +++ b/qsirecon/cli/run.py @@ -22,7 +22,7 @@ # # https://www.nipreps.org/community/licensing/ # -"""q-Space Image preprocessing and reconstruction workflows.""" +"""q-Space Image reconstruction workflows.""" from .. 
import config @@ -81,10 +81,7 @@ def main(): exitcode = retval.get("return_code", 0) qsirecon_wf = retval.get("workflow", None) - exec_mode = retval.get("exec_mode", "QSIRecon") - output_dir = ( - config.execution.qsirecon_dir if exec_mode == "QSIRecon" else config.execution.output_dir - ) + output_dir = config.execution.output_dir # CRITICAL Load the config from the file. This is necessary because the ``build_workflow`` # function executed constrained in a process may change the config (and thus the global @@ -125,9 +122,9 @@ def main(): config.loggers.workflow.log( 15, - "\n".join([f"{exec_mode} config:"] + ["\t\t%s" % s for s in config.dumps().splitlines()]), + "\n".join(["config:"] + ["\t\t%s" % s for s in config.dumps().splitlines()]), ) - config.loggers.workflow.log(25, f"{exec_mode} started!") + config.loggers.workflow.log(25, "QSIRecon started!") errno = 1 # Default is error exit unless otherwise set try: qsirecon_wf.run(**config.nipype.get_plugin()) @@ -145,7 +142,7 @@ def main(): if sentry_sdk is not None and "Workflow did not execute cleanly" not in str(e): sentry_sdk.capture_exception(e) - config.loggers.workflow.critical("%s failed: %s", exec_mode, e) + config.loggers.workflow.critical("%s failed: %s", "QSIRecon", e) raise else: config.loggers.workflow.log(25, "QSIRecon finished successfully!") @@ -233,7 +230,6 @@ def main(): exitcode = retval.get("return_code", 0) qsirecon_wf = retval.get("workflow", None) - exec_mode = "QSIRecon" # CRITICAL It would be bad to let the config file be changed between prep and recon. 
# config.load(config_file) diff --git a/qsirecon/cli/workflow.py b/qsirecon/cli/workflow.py index c7984881..cba54128 100644 --- a/qsirecon/cli/workflow.py +++ b/qsirecon/cli/workflow.py @@ -37,7 +37,7 @@ from pkg_resources import resource_filename as pkgrf -def build_workflow(config_file, exec_mode, retval): +def build_workflow(config_file, retval): """Create the Nipype Workflow that supports the whole execution graph.""" from niworkflows.utils.bids import collect_participants @@ -47,7 +47,7 @@ def build_workflow(config_file, exec_mode, retval): from .. import config from ..utils.misc import check_deps from ..viz.reports import generate_reports - from ..workflows.recon import init_qsirecon_wf + from ..workflows.base import init_qsirecon_wf config.load(config_file) build_log = config.loggers.workflow @@ -58,26 +58,7 @@ def build_workflow(config_file, exec_mode, retval): retval["return_code"] = 1 retval["workflow"] = None - # Figure out which workflow we want automatically if - if exec_mode == "auto": - if config.execution.recon_input: - exec_mode = "QSIRecon" - build_log.info("Running recon-only mode: --recon-input was used.") - if config.execution.recon_only: - config.loggers.workflow.warning( - "Argument --recon-only is not needed if --recon-input is specified." 
- ) - else: - exec_mode = "QSIRecon" - - if exec_mode == "QSIRecon": - workflow_builder = init_qsirecon_wf - elif exec_mode == "QSIRecon": - workflow_builder = init_qsirecon_wf - else: - raise Exception(f"Unknown mode: {exec_mode}") - - banner = [f"Running {exec_mode} version {version}"] + banner = [f"Running QSIRecon version {version}"] notice_path = Path(pkgrf("qsirecon", "data/NOTICE")) if notice_path.exists(): banner[0] += "\n" @@ -106,7 +87,7 @@ def build_workflow(config_file, exec_mode, retval): ) # Called with reports only - if config.execution.reports_only and exec_mode == "QSIRecon": + if config.execution.reports_only: build_log.log(25, "Running --reports-only on participants %s", ", ".join(subject_list)) session_list = ( config.execution.bids_filters.get("dwi", {}).get("session") @@ -131,13 +112,13 @@ def build_workflow(config_file, exec_mode, retval): # Build main workflow init_msg = [ - f"Building {exec_mode}'s workflow:", + "Building QSIRecon's workflow:", f"BIDS dataset path: {config.execution.bids_dir}.", f"Participant list: {subject_list}.", f"Run identifier: {config.execution.run_uuid}.", ] - if config.execution.fs_subjects_dir and exec_mode == "QSIRecon": + if config.execution.fs_subjects_dir: init_msg += [f"Pre-run FreeSurfer's SUBJECTS_DIR: {config.execution.fs_subjects_dir}."] build_log.log(25, f"\n{' ' * 11}* ".join(init_msg)) @@ -155,15 +136,13 @@ def build_workflow(config_file, exec_mode, retval): return retval # If qsirecon is being run on already preprocessed data: - retval["exec_mode"] = exec_mode - retval["workflow"] = workflow_builder() + retval["workflow"] = init_qsirecon_wf() # Check workflow for missing commands missing = check_deps(retval["workflow"]) if missing: build_log.critical( - "Cannot run %s. Missing dependencies:%s", - retval["exec_mode"], + "Cannot run QSIRecon. 
Missing dependencies:%s", "\n\t* ".join([""] + [f"{cmd} (Interface: {iface})" for iface, cmd in missing]), ) retval["return_code"] = 127 # 127 == command not found. @@ -171,8 +150,7 @@ def build_workflow(config_file, exec_mode, retval): config.to_filename(config_file) build_log.info( - "%s workflow graph with %d nodes built successfully.", - retval["exec_mode"], + "QSIRecon workflow graph with %d nodes built successfully.", len(retval["workflow"]._get_all_nodes()), ) retval["return_code"] = 0 diff --git a/qsirecon/config.py b/qsirecon/config.py index d94ad6be..744b38a6 100644 --- a/qsirecon/config.py +++ b/qsirecon/config.py @@ -454,7 +454,7 @@ class execution(_Config): work_dir = Path("work").absolute() """Path to a working directory where intermediate results will be available.""" write_graph = False - """Write out the computational graph corresponding to the planned preprocessing.""" + """Write out the computational graph corresponding to the planned postprocessing.""" dataset_links = {} """A dictionary of dataset links to be used to track Sources in sidecars.""" @@ -572,7 +572,7 @@ class workflow(_Config): contrast will be skull stripped and segmented for use in the visual reports and reconstruction. 
If --infant, T2w is forced.""" anat_only = False - """Execute the anatomical preprocessing only.""" + """Execute the anatomical postprocessing only.""" anatomical_template = None """Keeps the :py:class:`~niworkflows.utils.spaces.SpatialReferences` instance keeping standard and nonstandard spaces.""" diff --git a/qsirecon/tests/test_cli.py b/qsirecon/tests/test_cli.py index 6c912c53..610e7f73 100644 --- a/qsirecon/tests/test_cli.py +++ b/qsirecon/tests/test_cli.py @@ -1061,7 +1061,7 @@ def _run_and_generate(test_name, parameters, test_main=True): config.loggers.cli.warning(f"Saving config file to {config_file}") config.to_filename(config_file) - retval = build_workflow(config_file, exec_mode="auto", retval={}) + retval = build_workflow(config_file, retval={}) qsirecon_wf = retval["workflow"] qsirecon_wf.run() write_derivative_description(config.execution.fmri_dir, config.execution.qsirecon_dir) diff --git a/qsirecon/workflows/base.py b/qsirecon/workflows/base.py index 4184665f..cf4c4fcb 100644 --- a/qsirecon/workflows/base.py +++ b/qsirecon/workflows/base.py @@ -2,84 +2,74 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -# -# Copyright The NiPreps Developers -# -# Changed to run qsirecon workflows -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# We support and encourage derived works from this project, please read -# about our expectations at -# -# https://www.nipreps.org/community/licensing/ -# """ -qsirecon base processing workflows -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +qsirecon base reconstruction workflows +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autofunction:: init_qsirecon_wf .. autofunction:: init_single_subject_wf """ -import sys -from collections import defaultdict + +import json +import os.path as op from copy import deepcopy +from glob import glob +import nipype.pipeline.engine as pe +from bids.layout import BIDSLayout +from dipy import __version__ as dipy_ver from nilearn import __version__ as nilearn_ver -from nipype.interfaces import utility as niu -from nipype.pipeline import engine as pe +from nipype import __version__ as nipype_ver +from nipype.utils.filemanip import split_filename from packaging.version import Version +from pkg_resources import resource_filename as pkgrf from .. import config from ..engine import Workflow -from ..interfaces import ( - AboutSummary, - BIDSDataGrabber, - BIDSInfo, - DerivativesDataSink, - SubjectSummary, -) -from ..utils.bids import collect_data -from ..utils.grouping import group_dwi_scans -from ..utils.misc import fix_multi_source_name -from .anatomical.volume import init_anat_preproc_wf -from .dwi.base import init_dwi_preproc_wf -from .dwi.distortion_group_merge import init_distortion_group_merge_wf -from .dwi.finalize import init_dwi_finalize_wf -from .dwi.intramodal_template import init_intramodal_template_wf -from .dwi.util import get_source_file def init_qsirecon_wf(): - """Organize the execution of qsirecon, with a sub-workflow for each subject. + """ + This workflow organizes the execution of qsirecon, with a sub-workflow for + each subject. .. 
workflow:: :graph2use: orig :simple_form: yes - import os - os.environ['FREESURFER_HOME'] = os.getcwd() from qsirecon.workflows.base import init_qsirecon_wf - wf = init_qsirecon_wf() + + + Parameters + + """ ver = Version(config.environment.version) qsirecon_wf = Workflow(name=f"qsirecon_{ver.major}_{ver.minor}_wf") qsirecon_wf.base_dir = config.execution.work_dir - for subject_id in config.execution.participant_label: - single_subject_wf = init_single_subject_wf(subject_id) + if config.workflow.recon_input_pipeline not in ("qsirecon", "ukb"): + raise NotImplementedError( + f"{config.workflow.recon_input_pipeline} is not supported as recon-input yet." + ) + + if config.workflow.recon_input_pipeline == "qsirecon": + # This should work for --recon-input as long as the same dataset is in bids_dir + # or if the call is doing preproc+recon + to_recon_list = config.execution.participant_label + elif config.workflow.recon_input_pipeline == "ukb": + from ..utils.ingress import collect_ukb_participants, create_ukb_layout + + # The ukb input will always be specified as the bids input - we can't preproc it first + ukb_layout = create_ukb_layout(config.execution.bids_dir) + to_recon_list = collect_ukb_participants( + ukb_layout, participant_label=config.execution.participant_label + ) + + for subject_id in to_recon_list: + single_subject_wf = init_single_subject_recon_wf(subject_id=subject_id) single_subject_wf.config["execution"]["crashdump_dir"] = str( config.execution.qsirecon_dir / f"sub-{subject_id}" / "log" / config.execution.run_uuid @@ -94,377 +84,261 @@ def init_qsirecon_wf(): ) log_dir.mkdir(exist_ok=True, parents=True) config.to_filename(log_dir / "qsirecon.toml") - return qsirecon_wf - - -def init_single_subject_wf(subject_id: str): - """Organize the preprocessing pipeline for a single subject. - This workflow collects and reports information about the subject, and prepares - sub-workflows to perform anatomical and diffusion preprocessing. 
- - Anatomical preprocessing is performed in a single workflow, regardless of - the number of sessions. - Diffusion preprocessing is performed using a separate workflow for each - session's dwi series. - - .. workflow:: - :graph2use: orig - :simple_form: yes + return qsirecon_wf - from qsirecon.workflows.base import init_single_subject_wf - wf = init_single_subject_wf('qsireconXtest') +def init_single_subject_recon_wf(subject_id): + """ + This workflow organizes the reconstruction pipeline for a single subject. + Reconstruction is performed using a separate workflow for each dwi series. Parameters - ---------- - subject_id : :obj:`str` - Single subject label - - """ - if subject_id == "qsireconXtest": - # for documentation purposes - subject_data = { - "t1w": ["/completely/made/up/path/sub-01_T1w.nii.gz"], - "dwi": ["/completely/made/up/path/sub-01_dwi.nii.gz"], - "t2w": ["/completely/made/up/path/sub-01_T2w.nii.gz"], - "roi": [], - } - config.loggers.workflow.warning("Building a test workflow") - else: - subject_data = collect_data( - config.execution.layout, - subject_id, - filters=config.execution.bids_filters, - bids_validate=False, - )[0] - - # Make sure we always go through these two checks - if not config.workflow.anat_only and subject_data["dwi"] == []: - raise Exception( - "No dwi images found for participant {}. " - "All workflows require dwi images unless " - "--anat-only is specified.".format(subject_id) - ) - if not config.workflow.anat_modality == "none" and not subject_data.get( - config.workflow.anat_modality.lower() - ): - raise Exception( - "No {} images found for participant {}. 
" - "To bypass anatomical processing choose " - "--anat-modality none".format(config.workflow.anat_modality, subject_id) - ) + subject_id : str + Single subject label - additional_t2ws = 0 - if "drbuddi" in config.workflow.pepolar_method.lower() and subject_data["t2w"]: - additional_t2ws = len(subject_data["t2w"]) + """ + from ..interfaces.ingress import QsiReconDWIIngress, UKBioBankDWIIngress + from ..interfaces.interchange import ( + ReconWorkflowInputs, + anatomical_workflow_outputs, + qsirecon_output_names, + recon_workflow_anatomical_input_fields, + recon_workflow_input_fields, + ) + from .recon.anatomical import ( + init_dwi_recon_anatomical_workflow, + init_highres_recon_anatomical_wf, + ) + from .recon.build_workflow import init_dwi_recon_workflow - # Inspect the dwi data and provide advice on pipeline choices - # provide_processing_advice(subject_data, layout, unringing_method) + spec = _load_recon_spec() + dwi_recon_inputs = _get_iterable_dwi_inputs(subject_id) - workflow = Workflow(name=f"sub_{subject_id}_wf") + workflow = Workflow(name=f"sub-{subject_id}_{spec['name']}") workflow.__desc__ = f""" -Preprocessing was performed using *QSIRecon* {config.environment.version}, -which is based on *Nipype* {config.environment.nipype_version} +Reconstruction was +performed using *QSIRecon* {config.__version__}, +which is based on *Nipype* {nipype_ver} (@nipype1; @nipype2; RRID:SCR_002502). """ workflow.__postdesc__ = f""" -Many internal operations of *QSIRecon* use +Many internal operations of *qsirecon* use *Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362] and -*Dipy* [@dipy]. +*Dipy* {dipy_ver}[@dipy]. For more details of the pipeline, see [the section corresponding -to workflows in *QSIRecon*'s documentation]\ +to workflows in *qsirecon*'s documentation]\ (https://qsirecon.readthedocs.io/en/latest/workflows.html \ -"QSIRecon's documentation"). +"qsirecon's documentation"). 
### References -""" - - merging_distortion_groups = not config.workflow.distortion_group_merge.lower() == "none" + """ - inputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]), name="inputnode") + if len(dwi_recon_inputs) == 0: + config.loggers.workflow.info("No dwi files found for %s", subject_id) + return workflow - bidssrc = pe.Node( - BIDSDataGrabber( - subject_data=subject_data, - dwi_only=config.workflow.anat_modality == "none", - anat_only=config.workflow.anat_only, - anatomical_contrast=config.workflow.anat_modality, - ), - name="bidssrc", - ) + # The recon spec may need additional anatomical files to be created. + atlas_names = spec.get("atlases", []) + needs_t1w_transform = spec_needs_to_template_transform(spec) + + # This is here because qsirecon currently only makes one anatomical result per subject + # regardless of sessions. So process it on its + if config.workflow.recon_input_pipeline == "qsirecon": + anat_ingress_node, available_anatomical_data = init_highres_recon_anatomical_wf( + subject_id=subject_id, + extras_to_make=spec.get("anatomical", []), + needs_t1w_transform=needs_t1w_transform, + ) - bids_info = pe.Node(BIDSInfo(), name="bids_info", run_without_submitting=True) + # Connect the anatomical-only inputs. NOTE this is not to the inputnode! 
+ config.loggers.workflow.info( + "Anatomical (T1w) available for recon: %s", available_anatomical_data + ) - summary = pe.Node( - SubjectSummary(template=config.workflow.anatomical_template), - name="summary", - run_without_submitting=True, - ) + # create a processing pipeline for the dwis in each session + dwi_recon_wfs = {} + dwi_individual_anatomical_wfs = {} + recon_full_inputs = {} + dwi_ingress_nodes = {} + anat_ingress_nodes = {} + print(dwi_recon_inputs) + for dwi_input in dwi_recon_inputs: + dwi_file = dwi_input["bids_dwi_file"] + wf_name = _get_wf_name(dwi_file) + + # Get the preprocessed DWI and all the related preprocessed images + if config.workflow.recon_input_pipeline == "qsirecon": + dwi_ingress_nodes[dwi_file] = pe.Node( + QsiReconDWIIngress(dwi_file=dwi_file), name=wf_name + "_ingressed_dwi_data" + ) - about = pe.Node( - AboutSummary(version=config.environment.version, command=" ".join(sys.argv)), - name="about", - run_without_submitting=True, - ) + elif config.workflow.recon_input_pipeline == "ukb": + dwi_ingress_nodes[dwi_file] = pe.Node( + UKBioBankDWIIngress(dwi_file=dwi_file, data_dir=str(dwi_input["path"].absolute())), + name=wf_name + "_ingressed_ukb_dwi_data", + ) + anat_ingress_nodes[dwi_file], available_anatomical_data = ( + init_highres_recon_anatomical_wf( + subject_id=subject_id, + recon_input_dir=dwi_input["path"], + extras_to_make=spec.get("anatomical", []), + pipeline_source="ukb", + needs_t1w_transform=needs_t1w_transform, + name=wf_name + "_ingressed_ukb_anat_data", + ) + ) - ds_report_summary = pe.Node( - DerivativesDataSink(base_directory=config.execution.reportlets_dir, suffix="summary"), - name="ds_report_summary", - run_without_submitting=True, - ) + # Create scan-specific anatomical data (mask, atlas configs, odf ROIs for reports) + print(available_anatomical_data) + dwi_individual_anatomical_wfs[dwi_file], dwi_available_anatomical_data = ( + init_dwi_recon_anatomical_workflow( + atlas_names=atlas_names, + 
prefer_dwi_mask=False, + needs_t1w_transform=needs_t1w_transform, + extras_to_make=spec.get("anatomical", []), + name=wf_name + "_dwi_specific_anat_wf", + **available_anatomical_data, + ) + ) - ds_report_about = pe.Node( - DerivativesDataSink(base_directory=config.execution.reportlets_dir, suffix="about"), - name="ds_report_about", - run_without_submitting=True, - ) + # This node holds all the inputs that will go to the recon workflow. + # It is the definitive place to check what the input files are + recon_full_inputs[dwi_file] = pe.Node( + ReconWorkflowInputs(), name=wf_name + "_recon_inputs" + ) - num_anat_images = ( - 0 - if config.workflow.anat_modality == "none" - else len(subject_data[config.workflow.anat_modality.lower()]) - ) - # Preprocessing of anatomical data (includes possible registration template) - info_modality = ( - "dwi" if config.workflow.anat_modality == "none" else config.workflow.anat_modality.lower() - ) - anat_preproc_wf = init_anat_preproc_wf( - num_anat_images=num_anat_images, - num_additional_t2ws=additional_t2ws, - has_rois=bool(subject_data["roi"]), - ) + # This is the actual recon workflow for this dwi file + dwi_recon_wfs[dwi_file] = init_dwi_recon_workflow( + available_anatomical_data=dwi_available_anatomical_data, + workflow_spec=spec, + name=wf_name + "_recon_wf", + ) - workflow.connect([ - (inputnode, anat_preproc_wf, [('subjects_dir', 'inputnode.subjects_dir')]), - (bidssrc, bids_info, [ - ((info_modality, fix_multi_source_name, config.workflow.anat_modality == 'none', - config.workflow.anat_modality), 'in_file'), - ]), - (inputnode, summary, [('subjects_dir', 'subjects_dir')]), - (bidssrc, summary, [('t1w', 't1w'), ('t2w', 't2w')]), - (bids_info, summary, [('subject_id', 'subject_id')]), - (bidssrc, anat_preproc_wf, [ - ('t1w', 'inputnode.t1w'), - ('t2w', 'inputnode.t2w'), - ('roi', 'inputnode.roi'), - ('flair', 'inputnode.flair'), - ]), - (summary, anat_preproc_wf, [('subject_id', 'inputnode.subject_id')]), - (bidssrc, 
ds_report_summary, [ - ((info_modality, fix_multi_source_name, config.workflow.anat_modality == 'none', - config.workflow.anat_modality), 'source_file'), - ]), - (summary, ds_report_summary, [('out_report', 'in_file')]), - (bidssrc, ds_report_about, [ - ((info_modality, fix_multi_source_name, config.workflow.anat_modality == 'none', - config.workflow.anat_modality), 'source_file'), - ]), - (about, ds_report_about, [('out_report', 'in_file')]) - ]) # fmt:skip - - if config.workflow.anat_only: - return workflow + # Connect the collected diffusion data (gradients, etc) to the inputnode + workflow.connect([ + # The dwi data + (dwi_ingress_nodes[dwi_file], recon_full_inputs[dwi_file], [ + (trait, trait) for trait in qsirecon_output_names]), + + # Session-specific anatomical data + (dwi_ingress_nodes[dwi_file], dwi_individual_anatomical_wfs[dwi_file], + [(trait, "inputnode." + trait) for trait in qsirecon_output_names]), + + # subject dwi-specific anatomical to a special node in recon_full_inputs so + # we have a record of what went in. Otherwise it would be lost in an IdentityInterface + (dwi_individual_anatomical_wfs[dwi_file], recon_full_inputs[dwi_file], + [("outputnode." + trait, trait) for trait in recon_workflow_anatomical_input_fields]), + + # send the recon_full_inputs to the dwi recon workflow + (recon_full_inputs[dwi_file], dwi_recon_wfs[dwi_file], + [(trait, "inputnode." 
+ trait) for trait in recon_workflow_input_fields]), + + (anat_ingress_node if config.workflow.recon_input_pipeline == "qsirecon" + else anat_ingress_nodes[dwi_file], + dwi_individual_anatomical_wfs[dwi_file], + [(f"outputnode.{trait}", f"inputnode.{trait}") + for trait in anatomical_workflow_outputs]) + ]) # fmt:skip - # Handle the grouping of multiple dwi files within a session - # concatenation_scheme maps the outputs to their final concatenation group - dwi_fmap_groups, concatenation_scheme = group_dwi_scans( - subject_data, - using_fsl=True, - combine_scans=not config.workflow.separate_all_dwis, - ignore_fieldmaps="fieldmaps" in config.workflow.ignore, - concatenate_distortion_groups=merging_distortion_groups, - ) - config.loggers.workflow.info(dwi_fmap_groups) - - # If a merge is happening at the end, make sure - if merging_distortion_groups: - # create a mapping of which across-distortion-groups are contained in each merge - merged_group_names = sorted(set(concatenation_scheme.values())) - merged_to_subgroups = defaultdict(list) - for subgroup_name, destination_name in concatenation_scheme.items(): - merged_to_subgroups[destination_name].append(subgroup_name) - - merging_group_workflows = {} - for merged_group in merged_group_names: - merging_group_workflows[merged_group] = init_distortion_group_merge_wf( - template=config.workflow.anatomical_template, - output_prefix=merged_group, - source_file=merged_group + "_dwi.nii.gz", - inputs_list=merged_to_subgroups[merged_group], - name=merged_group.replace("-", "_") + "_final_merge_wf", + # Fill-in datasinks and reportlet datasinks for the anatomical workflow + for _node in workflow.list_node_names(): + node_suffix = _node.split(".")[-1] + if node_suffix.startswith("ds"): + base_dir = ( + config.execution.reportlets_dir + if "report" in node_suffix + else config.execution.output_dir ) + workflow.get_node(_node).inputs.base_directory = base_dir - workflow.connect([ - (anat_preproc_wf, 
merging_group_workflows[merged_group], [ - ('outputnode.t1_brain', 'inputnode.t1_brain'), - ('outputnode.t1_seg', 'inputnode.t1_seg'), - ('outputnode.t1_mask', 'inputnode.t1_mask'), - ]), - ]) # fmt:skip - - outputs_to_files = { - dwi_group["concatenated_bids_name"]: dwi_group for dwi_group in dwi_fmap_groups - } - if config.workflow.force_syn: - for group_name in outputs_to_files: - outputs_to_files[group_name]["fieldmap_info"] = {"suffix": "syn"} - summary.inputs.dwi_groupings = outputs_to_files - - make_intramodal_template = False - if config.workflow.intramodal_template_iters > 0: - if len(outputs_to_files) < 2: - raise Exception("Cannot make an intramodal with less than 2 groups.") - make_intramodal_template = True - - intramodal_template_wf = init_intramodal_template_wf( - t1w_source_file=fix_multi_source_name( - subject_data[info_modality], config.workflow.anat_modality == "none" - ), - inputs_list=sorted(outputs_to_files.keys()), - name="intramodal_template_wf", - ) + return workflow - if make_intramodal_template: - workflow.connect([ - (anat_preproc_wf, intramodal_template_wf, [ - ('outputnode.t1_preproc', 'inputnode.t1_preproc'), - ('outputnode.t1_brain', 'inputnode.t1_brain'), - ('outputnode.t1_mask', 'inputnode.t1_mask'), - ('outputnode.t1_seg', 'inputnode.t1_seg'), - ('outputnode.t1_aseg', 'inputnode.t1_aseg'), - ('outputnode.t1_aparc', 'inputnode.t1_aparc'), - ('outputnode.t1_2_mni_forward_transform', 'inputnode.t1_2_mni_forward_transform'), - ('outputnode.t1_2_mni_reverse_transform', 'inputnode.t1_2_mni_reverse_transform'), - ('outputnode.dwi_sampling_grid', 'inputnode.dwi_sampling_grid'), - ]), - ]) # fmt:skip - # create a processing pipeline for the dwis in each session - for output_fname, dwi_info in outputs_to_files.items(): - source_file = get_source_file(dwi_info["dwi_series"], output_fname, suffix="_dwi") - output_wfname = output_fname.replace("-", "_") - dwi_preproc_wf = init_dwi_preproc_wf( - scan_groups=dwi_info, - 
output_prefix=output_fname, - source_file=source_file, - t2w_sdc=bool(subject_data.get("t2w")), - ) - dwi_finalize_wf = init_dwi_finalize_wf( - scan_groups=dwi_info, - name=dwi_preproc_wf.name.replace("dwi_preproc", "dwi_finalize"), - output_prefix=output_fname, - source_file=source_file, - write_derivatives=not merging_distortion_groups, - ) +def spec_needs_to_template_transform(recon_spec): + """Determine whether a recon spec needs a transform from T1wACPC to a template.""" + atlases = recon_spec.get("atlases", []) + return bool(atlases) + + +def _get_wf_name(dwi_file): + basedir, fname, ext = split_filename(dwi_file) + tokens = fname.split("_") + return "_".join(tokens[:-1]).replace("-", "_") - workflow.connect([ - (anat_preproc_wf, dwi_preproc_wf, [ - ('outputnode.t1_preproc', 'inputnode.t1_preproc'), - ('outputnode.t1_brain', 'inputnode.t1_brain'), - ('outputnode.t1_mask', 'inputnode.t1_mask'), - ('outputnode.t1_seg', 'inputnode.t1_seg'), - ('outputnode.t1_aseg', 'inputnode.t1_aseg'), - ('outputnode.t1_aparc', 'inputnode.t1_aparc'), - ('outputnode.t1_2_mni_forward_transform', 'inputnode.t1_2_mni_forward_transform'), - ('outputnode.t1_2_mni_reverse_transform', 'inputnode.t1_2_mni_reverse_transform'), - ('outputnode.dwi_sampling_grid', 'inputnode.dwi_sampling_grid'), - ('outputnode.t2w_unfatsat', 'inputnode.t2w_unfatsat'), - ]), - (anat_preproc_wf, dwi_finalize_wf, [ - ('outputnode.t1_preproc', 'inputnode.t1_preproc'), - ('outputnode.t1_brain', 'inputnode.t1_brain'), - ('outputnode.t1_mask', 'inputnode.t1_mask'), - ('outputnode.t1_seg', 'inputnode.t1_seg'), - ('outputnode.t1_aseg', 'inputnode.t1_aseg'), - ('outputnode.t1_aparc', 'inputnode.t1_aparc'), - ('outputnode.t1_2_mni_forward_transform', 'inputnode.t1_2_mni_forward_transform'), - ('outputnode.t1_2_mni_reverse_transform', 'inputnode.t1_2_mni_reverse_transform'), - ('outputnode.dwi_sampling_grid', 'inputnode.dwi_sampling_grid'), - ]), - (dwi_preproc_wf, dwi_finalize_wf, [ - ('outputnode.dwi_files', 
'inputnode.dwi_files'), - ('outputnode.cnr_map', 'inputnode.cnr_map'), - ('outputnode.bval_files', 'inputnode.bval_files'), - ('outputnode.bvec_files', 'inputnode.bvec_files'), - ('outputnode.b0_ref_image', 'inputnode.b0_ref_image'), - ('outputnode.b0_indices', 'inputnode.b0_indices'), - ('outputnode.hmc_xforms', 'inputnode.hmc_xforms'), - ('outputnode.fieldwarps', 'inputnode.fieldwarps'), - ('outputnode.itk_b0_to_t1', 'inputnode.itk_b0_to_t1'), - ('outputnode.hmc_optimization_data', 'inputnode.hmc_optimization_data'), - ('outputnode.raw_qc_file', 'inputnode.raw_qc_file'), - ('outputnode.coreg_score', 'inputnode.coreg_score'), - ('outputnode.raw_concatenated', 'inputnode.raw_concatenated'), - ('outputnode.confounds', 'inputnode.confounds'), - ('outputnode.carpetplot_data', 'inputnode.carpetplot_data'), - ('outputnode.sdc_scaling_images', 'inputnode.sdc_scaling_images'), - ]), - ]) # fmt:skip - if make_intramodal_template: - input_name = "inputnode.{name}_b0_template".format(name=output_wfname) - output_name = "outputnode.{name}_transform".format(name=output_wfname) - workflow.connect([ - (dwi_preproc_wf, intramodal_template_wf, [ - ('outputnode.b0_ref_image', input_name), - ]), - (intramodal_template_wf, dwi_finalize_wf, [ - (output_name, 'inputnode.b0_to_intramodal_template_transforms'), - ( - 'outputnode.intramodal_template_to_t1_affine', - 'inputnode.intramodal_template_to_t1_affine', - ), - ( - 'outputnode.intramodal_template_to_t1_warp', - 'inputnode.intramodal_template_to_t1_warp', - ), - ('outputnode.intramodal_template', 'inputnode.intramodal_template'), - ]), - ]) # fmt:skip - - if merging_distortion_groups: - image_name = "inputnode.{name}_image".format(name=output_wfname) - bval_name = "inputnode.{name}_bval".format(name=output_wfname) - bvec_name = "inputnode.{name}_bvec".format(name=output_wfname) - original_bvec_name = "inputnode.{name}_original_bvec".format(name=output_wfname) - original_bids_name = 
"inputnode.{name}_original_image".format(name=output_wfname) - raw_concatenated_image_name = "inputnode.{name}_raw_concatenated_image".format( - name=output_wfname +def _load_recon_spec(): + from ..utils.sloppy_recon import make_sloppy + + spec_name = config.workflow.recon_spec + prepackaged_dir = pkgrf("qsirecon", "data/pipelines") + prepackaged = [op.split(fname)[1][:-5] for fname in glob(prepackaged_dir + "/*.json")] + if op.exists(spec_name): + recon_spec = spec_name + elif spec_name in prepackaged: + recon_spec = op.join(prepackaged_dir + "/{}.json".format(spec_name)) + else: + raise Exception("{} is not a file that exists or in {}".format(spec_name, prepackaged)) + with open(recon_spec, "r") as f: + try: + spec = json.load(f) + except Exception: + raise Exception("Unable to read JSON spec. Check the syntax.") + if config.execution.sloppy: + config.loggers.workflow.warning("Forcing reconstruction to use unrealistic parameters") + spec = make_sloppy(spec) + return spec + + +def _get_iterable_dwi_inputs(subject_id): + """Return inputs for the recon ingressors depending on the pipeline source. + + If qsirecon was used as the pipeline source, the iterable is going to be the + dwi files (there can be an arbitrary number of them). + + If ukb or hcpya were used there is only one dwi file per subject, so the + ingressors are sent the subject directory, which makes it easier to find + the other files needed. 
+ + """ + from ..utils.ingress import create_ukb_layout + + recon_input_directory = config.execution.recon_input + if config.workflow.recon_input_pipeline == "qsirecon": + # If recon_input is specified without qsirecon, check if we can find the subject dir + if not (recon_input_directory / f"sub-{subject_id}").exists(): + config.loggers.workflow.info( + "%s not in %s, trying recon_input=%s", + subject_id, + recon_input_directory, + recon_input_directory / "qsirecon", ) - confounds_name = "inputnode.{name}_confounds".format(name=output_wfname) - b0_ref_name = "inputnode.{name}_b0_ref".format(name=output_wfname) - cnr_name = "inputnode.{name}_cnr".format(name=output_wfname) - carpetplot_name = "inputnode.{name}_carpetplot_data".format(name=output_wfname) - final_merge_wf = merging_group_workflows[concatenation_scheme[output_fname]] - workflow.connect([ - (dwi_finalize_wf, final_merge_wf, [ - ('outputnode.bvals_t1', bval_name), - ('outputnode.bvecs_t1', bvec_name), - ('outputnode.dwi_t1', image_name), - ('outputnode.t1_b0_ref', b0_ref_name), - ('outputnode.cnr_map_t1', cnr_name), - ]), - (dwi_preproc_wf, final_merge_wf, [ - ('outputnode.raw_concatenated', raw_concatenated_image_name), - ('outputnode.original_bvecs', original_bvec_name), - ('outputnode.original_files', original_bids_name), - ('outputnode.carpetplot_data', carpetplot_name), - ('outputnode.confounds', confounds_name), - ]) - ]) # fmt:skip - return workflow + recon_input_directory = recon_input_directory / "qsirecon" + if not (recon_input_directory / f"sub-{subject_id}").exists(): + raise Exception( + "Unable to find subject directory in %s or %s" + % (config.execution.recon_input, recon_input_directory) + ) + + layout = BIDSLayout(recon_input_directory, validate=False, absolute_paths=True) + # Get all the output files that are in this space + dwi_files = [ + f.path + for f in layout.get( + suffix="dwi", subject=subject_id, absolute_paths=True, extension=["nii", "nii.gz"] + ) + if "space-T1w" in 
f.filename + ] + config.loggers.workflow.info("found %s in %s", dwi_files, recon_input_directory) + return [{"bids_dwi_file": dwi_file} for dwi_file in dwi_files] + if config.workflow.recon_input_pipeline == "ukb": + return create_ukb_layout(ukb_dir=config.execution.bids_dir, participant_label=subject_id) -def provide_processing_advice(subject_data, layout, unringing_method): - """Provide advice on preprocessing options based on the data provided.""" - # metadata = {dwi_file: layout.get_metadata(dwi_file) for dwi_file in subject_data["dwi"]} - config.loggers.utils.warn( - "Partial Fourier acquisitions found for %s. Consider using --unringing-method rpg" - ) + raise Exception("Unknown pipeline " + config.workflow.recon_input_pipeline) diff --git a/qsirecon/workflows/recon/base.py b/qsirecon/workflows/recon/base.py deleted file mode 100644 index bf3c9af3..00000000 --- a/qsirecon/workflows/recon/base.py +++ /dev/null @@ -1,344 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -qsirecon base reconstruction workflows -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. autofunction:: init_qsirecon_wf -.. autofunction:: init_single_subject_wf - -""" - -import json -import os.path as op -from copy import deepcopy -from glob import glob - -import nipype.pipeline.engine as pe -from bids.layout import BIDSLayout -from dipy import __version__ as dipy_ver -from nilearn import __version__ as nilearn_ver -from nipype import __version__ as nipype_ver -from nipype.utils.filemanip import split_filename -from packaging.version import Version -from pkg_resources import resource_filename as pkgrf - -from ... import config -from ...engine import Workflow - - -def init_qsirecon_wf(): - """ - This workflow organizes the execution of qsirecon, with a sub-workflow for - each subject. - - .. 
workflow:: - :graph2use: orig - :simple_form: yes - - from qsirecon.workflows.recon.base import init_qsirecon_wf - wf = init_qsirecon_wf() - - - Parameters - - - """ - ver = Version(config.environment.version) - qsirecon_wf = Workflow(name=f"qsirecon_{ver.major}_{ver.minor}_wf") - qsirecon_wf.base_dir = config.execution.work_dir - - if config.workflow.recon_input_pipeline not in ("qsirecon", "ukb"): - raise NotImplementedError( - f"{config.workflow.recon_input_pipeline} is not supported as recon-input yet." - ) - - if config.workflow.recon_input_pipeline == "qsirecon": - # This should work for --recon-input as long as the same dataset is in bids_dir - # or if the call is doing preproc+recon - to_recon_list = config.execution.participant_label - elif config.workflow.recon_input_pipeline == "ukb": - from ...utils.ingress import collect_ukb_participants, create_ukb_layout - - # The ukb input will always be specified as the bids input - we can't preproc it first - ukb_layout = create_ukb_layout(config.execution.bids_dir) - to_recon_list = collect_ukb_participants( - ukb_layout, participant_label=config.execution.participant_label - ) - - for subject_id in to_recon_list: - single_subject_wf = init_single_subject_recon_wf(subject_id=subject_id) - - single_subject_wf.config["execution"]["crashdump_dir"] = str( - config.execution.qsirecon_dir / f"sub-{subject_id}" / "log" / config.execution.run_uuid - ) - for node in single_subject_wf._get_all_nodes(): - node.config = deepcopy(single_subject_wf.config) - qsirecon_wf.add_nodes([single_subject_wf]) - - # Dump a copy of the config file into the log directory - log_dir = ( - config.execution.qsirecon_dir / f"sub-{subject_id}" / "log" / config.execution.run_uuid - ) - log_dir.mkdir(exist_ok=True, parents=True) - config.to_filename(log_dir / "qsirecon.toml") - - return qsirecon_wf - - -def init_single_subject_recon_wf(subject_id): - """ - This workflow organizes the reconstruction pipeline for a single subject. 
- Reconstruction is performed using a separate workflow for each dwi series. - - Parameters - - subject_id : str - Single subject label - - """ - from ...interfaces.ingress import QsiReconDWIIngress, UKBioBankDWIIngress - from ...interfaces.interchange import ( - ReconWorkflowInputs, - anatomical_workflow_outputs, - qsirecon_output_names, - recon_workflow_anatomical_input_fields, - recon_workflow_input_fields, - ) - from .anatomical import ( - init_dwi_recon_anatomical_workflow, - init_highres_recon_anatomical_wf, - ) - from .build_workflow import init_dwi_recon_workflow - - spec = _load_recon_spec() - dwi_recon_inputs = _get_iterable_dwi_inputs(subject_id) - - workflow = Workflow(name=f"sub-{subject_id}_{spec['name']}") - workflow.__desc__ = f""" -Reconstruction was -performed using *QSIRecon* {config.__version__}, -which is based on *Nipype* {nipype_ver} -(@nipype1; @nipype2; RRID:SCR_002502). - -""" - workflow.__postdesc__ = f""" - -Many internal operations of *qsirecon* use -*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362] and -*Dipy* {dipy_ver}[@dipy]. -For more details of the pipeline, see [the section corresponding -to workflows in *qsirecon*'s documentation]\ -(https://qsirecon.readthedocs.io/en/latest/workflows.html \ -"qsirecon's documentation"). - - -### References - - """ - - if len(dwi_recon_inputs) == 0: - config.loggers.workflow.info("No dwi files found for %s", subject_id) - return workflow - - # The recon spec may need additional anatomical files to be created. - atlas_names = spec.get("atlases", []) - needs_t1w_transform = spec_needs_to_template_transform(spec) - - # This is here because qsirecon currently only makes one anatomical result per subject - # regardless of sessions. 
So process it on its - if config.workflow.recon_input_pipeline == "qsirecon": - anat_ingress_node, available_anatomical_data = init_highres_recon_anatomical_wf( - subject_id=subject_id, - extras_to_make=spec.get("anatomical", []), - needs_t1w_transform=needs_t1w_transform, - ) - - # Connect the anatomical-only inputs. NOTE this is not to the inputnode! - config.loggers.workflow.info( - "Anatomical (T1w) available for recon: %s", available_anatomical_data - ) - - # create a processing pipeline for the dwis in each session - dwi_recon_wfs = {} - dwi_individual_anatomical_wfs = {} - recon_full_inputs = {} - dwi_ingress_nodes = {} - anat_ingress_nodes = {} - print(dwi_recon_inputs) - for dwi_input in dwi_recon_inputs: - dwi_file = dwi_input["bids_dwi_file"] - wf_name = _get_wf_name(dwi_file) - - # Get the preprocessed DWI and all the related preprocessed images - if config.workflow.recon_input_pipeline == "qsirecon": - dwi_ingress_nodes[dwi_file] = pe.Node( - QsiReconDWIIngress(dwi_file=dwi_file), name=wf_name + "_ingressed_dwi_data" - ) - - elif config.workflow.recon_input_pipeline == "ukb": - dwi_ingress_nodes[dwi_file] = pe.Node( - UKBioBankDWIIngress(dwi_file=dwi_file, data_dir=str(dwi_input["path"].absolute())), - name=wf_name + "_ingressed_ukb_dwi_data", - ) - anat_ingress_nodes[dwi_file], available_anatomical_data = ( - init_highres_recon_anatomical_wf( - subject_id=subject_id, - recon_input_dir=dwi_input["path"], - extras_to_make=spec.get("anatomical", []), - pipeline_source="ukb", - needs_t1w_transform=needs_t1w_transform, - name=wf_name + "_ingressed_ukb_anat_data", - ) - ) - - # Create scan-specific anatomical data (mask, atlas configs, odf ROIs for reports) - print(available_anatomical_data) - dwi_individual_anatomical_wfs[dwi_file], dwi_available_anatomical_data = ( - init_dwi_recon_anatomical_workflow( - atlas_names=atlas_names, - prefer_dwi_mask=False, - needs_t1w_transform=needs_t1w_transform, - extras_to_make=spec.get("anatomical", []), - name=wf_name 
+ "_dwi_specific_anat_wf", - **available_anatomical_data, - ) - ) - - # This node holds all the inputs that will go to the recon workflow. - # It is the definitive place to check what the input files are - recon_full_inputs[dwi_file] = pe.Node( - ReconWorkflowInputs(), name=wf_name + "_recon_inputs" - ) - - # This is the actual recon workflow for this dwi file - dwi_recon_wfs[dwi_file] = init_dwi_recon_workflow( - available_anatomical_data=dwi_available_anatomical_data, - workflow_spec=spec, - name=wf_name + "_recon_wf", - ) - - # Connect the collected diffusion data (gradients, etc) to the inputnode - workflow.connect([ - # The dwi data - (dwi_ingress_nodes[dwi_file], recon_full_inputs[dwi_file], [ - (trait, trait) for trait in qsirecon_output_names]), - - # Session-specific anatomical data - (dwi_ingress_nodes[dwi_file], dwi_individual_anatomical_wfs[dwi_file], - [(trait, "inputnode." + trait) for trait in qsirecon_output_names]), - - # subject dwi-specific anatomical to a special node in recon_full_inputs so - # we have a record of what went in. Otherwise it would be lost in an IdentityInterface - (dwi_individual_anatomical_wfs[dwi_file], recon_full_inputs[dwi_file], - [("outputnode." + trait, trait) for trait in recon_workflow_anatomical_input_fields]), - - # send the recon_full_inputs to the dwi recon workflow - (recon_full_inputs[dwi_file], dwi_recon_wfs[dwi_file], - [(trait, "inputnode." 
+ trait) for trait in recon_workflow_input_fields]), - - (anat_ingress_node if config.workflow.recon_input_pipeline == "qsirecon" - else anat_ingress_nodes[dwi_file], - dwi_individual_anatomical_wfs[dwi_file], - [(f"outputnode.{trait}", f"inputnode.{trait}") - for trait in anatomical_workflow_outputs]) - ]) # fmt:skip - - # Fill-in datasinks and reportlet datasinks for the anatomical workflow - for _node in workflow.list_node_names(): - node_suffix = _node.split(".")[-1] - if node_suffix.startswith("ds"): - base_dir = ( - config.execution.reportlets_dir - if "report" in node_suffix - else config.execution.output_dir - ) - workflow.get_node(_node).inputs.base_directory = base_dir - - return workflow - - -def spec_needs_to_template_transform(recon_spec): - """Determine whether a recon spec needs a transform from T1wACPC to a template.""" - atlases = recon_spec.get("atlases", []) - return bool(atlases) - - -def _get_wf_name(dwi_file): - basedir, fname, ext = split_filename(dwi_file) - tokens = fname.split("_") - return "_".join(tokens[:-1]).replace("-", "_") - - -def _load_recon_spec(): - from ...utils.sloppy_recon import make_sloppy - - spec_name = config.workflow.recon_spec - prepackaged_dir = pkgrf("qsirecon", "data/pipelines") - prepackaged = [op.split(fname)[1][:-5] for fname in glob(prepackaged_dir + "/*.json")] - if op.exists(spec_name): - recon_spec = spec_name - elif spec_name in prepackaged: - recon_spec = op.join(prepackaged_dir + "/{}.json".format(spec_name)) - else: - raise Exception("{} is not a file that exists or in {}".format(spec_name, prepackaged)) - with open(recon_spec, "r") as f: - try: - spec = json.load(f) - except Exception: - raise Exception("Unable to read JSON spec. 
Check the syntax.") - if config.execution.sloppy: - config.loggers.workflow.warning("Forcing reconstruction to use unrealistic parameters") - spec = make_sloppy(spec) - return spec - - -def _get_iterable_dwi_inputs(subject_id): - """Return inputs for the recon ingressors depending on the pipeline source. - - If qsirecon was used as the pipeline source, the iterable is going to be the - dwi files (there can be an arbitrary number of them). - - If ukb or hcpya were used there is only one dwi file per subject, so the - ingressors are sent the subject directory, which makes it easier to find - the other files needed. - - """ - from ...utils.ingress import create_ukb_layout - - recon_input_directory = config.execution.recon_input - if config.workflow.recon_input_pipeline == "qsirecon": - # If recon_input is specified without qsirecon, check if we can find the subject dir - if not (recon_input_directory / f"sub-{subject_id}").exists(): - config.loggers.workflow.info( - "%s not in %s, trying recon_input=%s", - subject_id, - recon_input_directory, - recon_input_directory / "qsirecon", - ) - - recon_input_directory = recon_input_directory / "qsirecon" - if not (recon_input_directory / f"sub-{subject_id}").exists(): - raise Exception( - "Unable to find subject directory in %s or %s" - % (config.execution.recon_input, recon_input_directory) - ) - - layout = BIDSLayout(recon_input_directory, validate=False, absolute_paths=True) - # Get all the output files that are in this space - dwi_files = [ - f.path - for f in layout.get( - suffix="dwi", subject=subject_id, absolute_paths=True, extension=["nii", "nii.gz"] - ) - if "space-T1w" in f.filename - ] - config.loggers.workflow.info("found %s in %s", dwi_files, recon_input_directory) - return [{"bids_dwi_file": dwi_file} for dwi_file in dwi_files] - - if config.workflow.recon_input_pipeline == "ukb": - return create_ukb_layout(ukb_dir=config.execution.bids_dir, participant_label=subject_id) - - raise Exception("Unknown pipeline " + 
config.workflow.recon_input_pipeline)