From 259a8405a603d2cbedc5479a953abd925402908c Mon Sep 17 00:00:00 2001
From: valosekj
Date: Wed, 28 Aug 2024 09:25:50 -0400
Subject: [PATCH 1/7] Rename 'packaging_ventral_rootlets' --> 'packaging'

---
 {packaging_ventral_rootlets => packaging}/README.md        | 4 ++--
 {packaging_ventral_rootlets => packaging}/requirements.txt | 0
 .../run_inference_single_subject.py                        | 0
 3 files changed, 2 insertions(+), 2 deletions(-)
 rename {packaging_ventral_rootlets => packaging}/README.md (89%)
 rename {packaging_ventral_rootlets => packaging}/requirements.txt (100%)
 rename {packaging_ventral_rootlets => packaging}/run_inference_single_subject.py (100%)

diff --git a/packaging_ventral_rootlets/README.md b/packaging/README.md
similarity index 89%
rename from packaging_ventral_rootlets/README.md
rename to packaging/README.md
index f8885e14..f9a32a36 100644
--- a/packaging_ventral_rootlets/README.md
+++ b/packaging/README.md
@@ -60,13 +60,13 @@ This assumes that the model has been downloaded (https://github.com/ivadomed/mod
 and unzipped (`unzip model-spinal-rootlets_ventral_D106_r20240523.zip`).
 
 ```bash
-python packaging_ventral_rootlets/run_inference_single_subject.py -i <INPUT> -o <OUTPUT> -path-model <PATH_TO_MODEL> -fold <FOLD>
+python packaging/run_inference_single_subject.py -i <INPUT> -o <OUTPUT> -path-model <PATH_TO_MODEL> -fold <FOLD>
 ```
 
 For example:
 
 ```bash
-python packaging_ventral_rootlets/run_inference_single_subject.py -i sub-001_T2w.nii.gz -o sub-001_T2w_label-rootlets_dseg.nii.gz -path-model ~/Downloads/model-spinal-rootlets_ventral_D106_r20240523 -fold all
+python packaging/run_inference_single_subject.py -i sub-001_T2w.nii.gz -o sub-001_T2w_label-rootlets_dseg.nii.gz -path-model ~/Downloads/model-spinal-rootlets_ventral_D106_r20240523 -fold all
 ```
 
 If the model folder contains also trainer subfolders (e.g., `nnUNetTrainer__nnUNetPlans__3d_fullres`, `nnUNetTrainerDA5__nnUNetPlans__3d_fullres`, ...), specify the trainer folder as well:

From 68e36397e615347bb4329892887f3763fa4d9c1f Mon Sep 17 00:00:00 2001
From: valosekj
Date: Wed, 28 Aug 2024 09:30:12 -0400
Subject: [PATCH 2/7] Update README

---
 packaging/README.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/packaging/README.md b/packaging/README.md
index f9a32a36..434ff943 100644
--- a/packaging/README.md
+++ b/packaging/README.md
@@ -1,7 +1,9 @@
+# Ventral and dorsal rootlets
+
 ## Getting started
 
 > [!IMPORTANT]
-> This README provides instructions on how to use the model for **_ventral_** and dorsal rootlets.
+> This README provides instructions on how to use the model for segmentation of **_ventral_** and dorsal rootlets from T2w images.
 > Please note that this model is still under development and is not yet available in the Spinal Cord Toolbox (SCT).
 
 > [!NOTE]
@@ -40,7 +42,7 @@ conda activate venv_nnunet
 3. Install the required packages with the following command:
 ```
 cd model-spinal-rootlets
-pip install -r packaging_ventral_rootlets/requirements.txt
+pip install -r packaging/requirements.txt
 ```
 
 ### Step 3: Getting the Predictions

From 2676e1d8d49fa7270ef5c9e005ce964a008df29f Mon Sep 17 00:00:00 2001
From: valosekj
Date: Wed, 28 Aug 2024 09:30:43 -0400
Subject: [PATCH 3/7] Rename README to make clear that it is related to the
 ventral model

---
 packaging/{README.md => README_ventral_rootlets.md} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename packaging/{README.md => README_ventral_rootlets.md} (100%)

diff --git a/packaging/README.md b/packaging/README_ventral_rootlets.md
similarity index 100%
rename from packaging/README.md
rename to packaging/README_ventral_rootlets.md

From 0fe446f85cc65b58aa557e8d9ca04eba49371f04 Mon Sep 17 00:00:00 2001
From: valosekj
Date: Wed, 28 Aug 2024 09:31:34 -0400
Subject: [PATCH 4/7] Move 'packaging_lumbar_rootlets/README.md' ->
 'packaging/README_lumbar_rootlets.md'

---
 .../README.md => packaging/README_lumbar_rootlets.md | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename packaging_lumbar_rootlets/README.md => packaging/README_lumbar_rootlets.md (100%)

diff --git a/packaging_lumbar_rootlets/README.md b/packaging/README_lumbar_rootlets.md
similarity index 100%
rename from packaging_lumbar_rootlets/README.md
rename to packaging/README_lumbar_rootlets.md

From c8e8f4362ef59aff3226b8313565329599bff3b9 Mon Sep 17 00:00:00 2001
From: valosekj
Date: Wed, 28 Aug 2024 09:33:51 -0400
Subject: [PATCH 5/7] Update README_lumbar_rootlets

---
 packaging/README_lumbar_rootlets.md | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/packaging/README_lumbar_rootlets.md b/packaging/README_lumbar_rootlets.md
index 0ac4aad8..f4691de4 100644
--- a/packaging/README_lumbar_rootlets.md
+++ b/packaging/README_lumbar_rootlets.md
@@ -1,3 +1,5 @@
+# Lumbar rootlets
+
 ## Getting started
 
 > [!IMPORTANT]
@@ -5,8 +7,7 @@
 > Please note that this model is still under development and is not yet available in the Spinal Cord Toolbox (SCT).
 
 > [!NOTE]
-> If you would like to use the model for _**dorsal**_ cervical rootlets only, use SCT v6.2 or higher (please refer to
-> this [README](..%2FREADME.md)).
+> For the stable model for dorsal cervical rootlets only, use SCT v6.2 or higher (please refer to this [README](..%2FREADME.md)).
 
 ### Dependencies
 
@@ -41,7 +42,7 @@ conda activate venv_nnunet
 3. Install the required packages with the following command:
 ```
 cd model-spinal-rootlets
-pip install -r packaging_lumbar_rootlets/requirements.txt
+pip install -r packaging/requirements.txt
 ```
 
 ### Step 3: Getting the Predictions
 
@@ -60,19 +61,19 @@ To segment a single image using the trained model, run the following command from the terminal.
 This assumes that the lumbar model has been downloaded and unzipped (`unzip Dataset202_LumbarRootlets_r20240527.zip` or `unzip Dataset302_LumbarRootlets_r20240723.zip`).
 
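For instance, the preparation step might look like this (a sketch; the destination folder is illustrative, and `unzip -d` simply extracts into it):

```bash
# Illustrative preparation step: extract the downloaded model zip into ~/Downloads
unzip Dataset202_LumbarRootlets_r20240527.zip -d ~/Downloads
```

The segmentation command itself (updated below to the new `packaging` path) is: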
 ```bash
-python packaging_lumbar_rootlets/run_inference_single_subject.py -i <INPUT> -o <OUTPUT> -path-model <PATH_TO_MODEL> -fold <FOLD>
+python packaging/run_inference_single_subject.py -i <INPUT> -o <OUTPUT> -path-model <PATH_TO_MODEL> -fold <FOLD>
 ```
 
 For example:
 
 ```bash
-python packaging_lumbar_rootlets/run_inference_single_subject.py -i sub-001_T2w.nii.gz -o sub-001_T2w_label-rootlets_dseg.nii.gz -path-model ~/Downloads/Dataset202_LumbarRootlets_r20240527 -fold 0
+python packaging/run_inference_single_subject.py -i sub-001_T2w.nii.gz -o sub-001_T2w_label-rootlets_dseg.nii.gz -path-model ~/Downloads/Dataset202_LumbarRootlets_r20240527 -fold 0
 ```
 
 If the model folder contains also trainer subfolders (e.g., `nnUNetTrainer__nnUNetPlans__3d_fullres`, `nnUNetTrainerDA5__nnUNetPlans__3d_fullres`, ...), specify the trainer folder as well:
 
 ```bash
-python packaging_lumbar_rootlets/run_inference_single_subject.py -i sub-001_T2w.nii.gz -o sub-001_T2w_label-rootlets_dseg.nii.gz -path-model ~/Downloads/Dataset322_LumbarRootlets/nnUNetTrainerDA5__nnUNetPlans__3d_fullres -fold 0
+python packaging/run_inference_single_subject.py -i sub-001_T2w.nii.gz -o sub-001_T2w_label-rootlets_dseg.nii.gz -path-model ~/Downloads/Dataset322_LumbarRootlets/nnUNetTrainerDA5__nnUNetPlans__3d_fullres -fold 0
 ```
 
 > [!TIP]

From bf5affa6dfface795bebf776f682ce4192fda0c1 Mon Sep 17 00:00:00 2001
From: valosekj
Date: Wed, 28 Aug 2024 09:34:06 -0400
Subject: [PATCH 6/7] Update README_ventral_rootlets

---
 packaging/README_ventral_rootlets.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/packaging/README_ventral_rootlets.md b/packaging/README_ventral_rootlets.md
index 434ff943..eb02e702 100644
--- a/packaging/README_ventral_rootlets.md
+++ b/packaging/README_ventral_rootlets.md
@@ -7,7 +7,7 @@
 > Please note that this model is still under development and is not yet available in the Spinal Cord Toolbox (SCT).
 
 > [!NOTE]
-> For the stable model for dorsal rootlets only, use SCT v6.2 or higher (please refer to this [README](..%2FREADME.md)).
+> For the stable model for dorsal cervical rootlets only, use SCT v6.2 or higher (please refer to this [README](..%2FREADME.md)).
 
 ### Dependencies
 
@@ -74,7 +74,7 @@ python packaging/run_inference_single_subject.py -i sub-001_T2w.nii.gz -o sub-00
 If the model folder contains also trainer subfolders (e.g., `nnUNetTrainer__nnUNetPlans__3d_fullres`, `nnUNetTrainerDA5__nnUNetPlans__3d_fullres`, ...), specify the trainer folder as well:
 
 ```bash
-python packaging_lumbar_rootlets/run_inference_single_subject.py -i sub-001_T2w.nii.gz -o sub-001_T2w_label-rootlets_dseg.nii.gz -path-model ~/Downloads/model-spinal-rootlets_ventral_D106/nnUNetTrainerDA5__nnUNetPlans__3d_fullres -fold 0
+python packaging/run_inference_single_subject.py -i sub-001_T2w.nii.gz -o sub-001_T2w_label-rootlets_dseg.nii.gz -path-model ~/Downloads/model-spinal-rootlets_ventral_D106/nnUNetTrainerDA5__nnUNetPlans__3d_fullres -fold 0
 ```
 
 > [!NOTE]

From b7a5b7185f93ea8b3c9041b5d001242349405b58 Mon Sep 17 00:00:00 2001
From: valosekj
Date: Wed, 28 Aug 2024 09:34:53 -0400
Subject: [PATCH 7/7] Remove packaging_lumbar_rootlets folder

---
 packaging_lumbar_rootlets/requirements.txt |   4 -
 .../run_inference_single_subject.py        | 251 ------------------
 2 files changed, 255 deletions(-)
 delete mode 100644 packaging_lumbar_rootlets/requirements.txt
 delete mode 100644 packaging_lumbar_rootlets/run_inference_single_subject.py

diff --git a/packaging_lumbar_rootlets/requirements.txt b/packaging_lumbar_rootlets/requirements.txt
deleted file mode 100644
index 179713f8..00000000
--- a/packaging_lumbar_rootlets/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-numpy
-nibabel
-nnunetv2==2.4.2
-torch
\ No newline at end of file
diff --git a/packaging_lumbar_rootlets/run_inference_single_subject.py b/packaging_lumbar_rootlets/run_inference_single_subject.py
deleted file mode 100644
index ebb1bab5..00000000
--- a/packaging_lumbar_rootlets/run_inference_single_subject.py
+++ /dev/null
@@ -1,251 +0,0 @@
"""
This script is used to run inference on a single subject using a nnUNetV2 model.

Note: a conda environment with nnUNetV2 is required to run this script.
For details on how to install nnUNetV2, see:
https://github.com/ivadomed/utilities/blob/main/quick_start_guides/nnU-Net_quick_start_guide.md#installation

Author: Jan Valosek

Example:
    python run_inference_single_subject.py
        -i sub-001_T2w.nii.gz
        -o sub-001_T2w_label-rootlet.nii.gz
        -path-model <PATH_TO_MODEL>
        -tile-step-size 0.5
        -fold 1
"""


import os
import shutil
import subprocess
import argparse
import datetime

import torch
import glob
import time
import tempfile

from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor
from batchgenerators.utilities.file_and_folder_operations import join


def get_parser():
    # parse command line arguments
    parser = argparse.ArgumentParser(description='Segment an image using an nnUNet model.')
    parser.add_argument('-i', help='Input image to segment. Example: sub-001_T2w.nii.gz', required=True)
    parser.add_argument('-o', help='Output filename. Example: sub-001_T2w_label-rootlet.nii.gz', required=True)
    parser.add_argument('-path-model', help='Path to the model folder. This folder should contain individual '
                                            'folders like fold_0, fold_1, etc. and dataset.json, '
                                            'dataset_fingerprint.json and plans.json files.', required=True, type=str)
    parser.add_argument('-use-gpu', action='store_true', default=False,
                        help='Use GPU for inference. Default: False')
    parser.add_argument('-fold', type=str, required=True,
                        help='Fold(s) to use for inference. Example(s): 2 (single fold), 2,3 (multiple folds), '
                             'all (fold_all).', choices=['0', '1', '2', '3', '4', 'all'])
    parser.add_argument('-use-best-checkpoint', action='store_true', default=False,
                        help='Use the best checkpoint (instead of the final checkpoint) for prediction. '
                             'NOTE: nnUNet by default uses the final checkpoint. Default: False')
    parser.add_argument('-tile-step-size', default=0.5, type=float,
                        help='Tile step size defining the overlap between image patches during inference. '
                             'Default: 0.5 '
                             'NOTE: changing it from 0.5 to 0.9 makes inference faster but there is a small drop in '
                             'performance.')

    return parser


def get_orientation(file):
    """
    Get the original orientation of an image
    :param file: path to the image
    :return: orig_orientation: original orientation of the image, e.g. LPI
    """

    # Fetch the original orientation from the output of sct_image
    sct_command = "sct_image -i {} -header | grep -E qform_[xyz] | awk '{{printf \"%s\", substr($2, 1, 1)}}'".format(
        file)
    orig_orientation = subprocess.check_output(sct_command, shell=True).decode('utf-8')

    return orig_orientation


def tmp_create():
    """
    Create temporary folder and return its path
    """
    prefix = f"sciseg_prediction_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_"
    tmpdir = tempfile.mkdtemp(prefix=prefix)
    print(f"Creating temporary folder ({tmpdir})")
    return tmpdir


def splitext(fname):
    """
    Split a fname (folder/file + ext) into a folder/file and extension.
    Note: for .nii.gz the extension is understandably .nii.gz, not .gz
    (``os.path.splitext()`` would want to do the latter, hence the special case).
    Taken (shamelessly) from: https://github.com/spinalcordtoolbox/manual-correction/blob/main/utils.py
    """
    dir, filename = os.path.split(fname)
    for special_ext in ['.nii.gz', '.tar.gz']:
        if filename.endswith(special_ext):
            stem, ext = filename[:-len(special_ext)], special_ext
            return os.path.join(dir, stem), ext
    # If no special case, behaves like the regular splitext
    stem, ext = os.path.splitext(filename)
    return os.path.join(dir, stem), ext


def add_suffix(fname, suffix):
    """
    Add suffix between end of file name and extension. Taken (shamelessly) from:
    https://github.com/spinalcordtoolbox/manual-correction/blob/main/utils.py
    :param fname: absolute or relative file name. Example: t2.nii.gz
    :param suffix: suffix. Example: _mean
    :return: file name with suffix. Example: t2_mean.nii

    Examples:
    - add_suffix(t2.nii, _mean) -> t2_mean.nii
    - add_suffix(t2.nii.gz, a) -> t2a.nii.gz
    """
    stem, ext = splitext(fname)
    return os.path.join(stem + suffix + ext)


def main():
    parser = get_parser()
    args = parser.parse_args()

    fname_file = os.path.expanduser(args.i)
    fname_file_out = os.path.expanduser(args.o)
    print(f'\nFound {fname_file} file.')

    # If the fname_file is .nii, gzip it
    # This is needed because the filename suffix must match the `file_ending` in `dataset.json`.
    # Context: https://github.com/ivadomed/model-spinal-rootlets/issues/49
    if not fname_file.endswith('.nii.gz'):
        print('Compressing the input image...')
        os.system('gzip -f {}'.format(fname_file))
        fname_file = fname_file + '.gz'
        print(f'Compressed {fname_file}')

    # Add .gz suffix to the output file if not already present. This is needed because we gzip the input file.
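    # Illustrative example (hypothetical filenames): with '-o seg.nii', the input was
    # gzipped above, so the prediction is written as 'seg.nii.gz' to keep the output
    # suffix consistent with the compressed input.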
    if not fname_file_out.endswith('.gz'):
        fname_file_out = fname_file_out + '.gz'

    # Create a temporary directory to store the reoriented images
    tmpdir = tmp_create()
    # Copy the file to the temporary directory using shutil.copyfile
    # NOTE: Add the `_0000` suffix, because nnUNet removes the last five characters:
    # https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunetv2/inference/predict_from_raw_data.py#L171C19-L172C51
    # Context: https://github.com/ivadomed/model-spinal-rootlets/issues/49
    fname_file_tmp = os.path.join(tmpdir, os.path.basename(add_suffix(fname_file, '_0000')))
    shutil.copyfile(fname_file, fname_file_tmp)
    print(f'Copied {fname_file} to {fname_file_tmp}')

    # Get the original orientation of the image, for example LPI
    orig_orientation = get_orientation(fname_file_tmp)

    # Reorient the image to LPI orientation if not already in LPI
    if orig_orientation != 'LPI':
        print(f'Original orientation: {orig_orientation}')
        print('Reorienting to LPI orientation...')
        # reorient the image to LPI using SCT
        os.system('sct_image -i {} -setorient LPI -o {}'.format(fname_file_tmp, fname_file_tmp))

    # Note: even a single file must be in a list of lists
    fname_file_tmp_list = [[fname_file_tmp]]

    # Use fold_all (all train/val subjects were used for training) or specific fold(s)
    folds_avail = 'all' if args.fold == 'all' else [int(f) for f in args.fold.split(',')]
    print(f'Using fold(s): {folds_avail}')

    # Create directory for nnUNet prediction
    tmpdir_nnunet = os.path.join(tmpdir, 'nnUNet_prediction')
    fname_prediction = os.path.join(tmpdir_nnunet, os.path.basename(add_suffix(fname_file_tmp, '_pred')))
    os.mkdir(tmpdir_nnunet)

    # Run nnUNet prediction
    print('Starting inference...it may take a few minutes...\n')
    start = time.time()
    # directly call the predict function
    predictor = nnUNetPredictor(
        tile_step_size=args.tile_step_size,  # changing it from 0.5 to 0.9 makes inference faster
        use_gaussian=True,  # apply Gaussian weighting when blending overlapping sliding-window patches
        use_mirroring=False,  # disable test-time augmentation by mirroring on all axes
        perform_everything_on_device=True if args.use_gpu else False,
        device=torch.device('cuda') if args.use_gpu else torch.device('cpu'),
        verbose_preprocessing=False,
        allow_tqdm=True
    )

    print('Running inference on device: {}'.format(predictor.device))

    # args.path_model can contain either 'checkpoint_latest.pth' or 'checkpoint_final.pth' (depending on the nnUNet
    # version)
    # Note: with '-fold all', folds_avail is the string 'all', so check the 'fold_all' folder directly
    fold_dir = 'fold_all' if folds_avail == 'all' else f'fold_{folds_avail[0]}'
    checkpoint_name = 'checkpoint_final.pth' if (
        os.path.isfile(os.path.join(os.path.expanduser(args.path_model), fold_dir, 'checkpoint_final.pth'))) \
        else 'checkpoint_latest.pth'
    # use 'checkpoint_best.pth' if 'args.use_best_checkpoint' is True
    if args.use_best_checkpoint:
        checkpoint_name = 'checkpoint_best.pth'

    print(f'Using checkpoint: {checkpoint_name}')

    # initializes the network architecture, loads the checkpoint
    predictor.initialize_from_trained_model_folder(
        join(os.path.expanduser(args.path_model)),
        use_folds=folds_avail,
        checkpoint_name=checkpoint_name,
    )
    print('Model loaded successfully. Fetching data...')

    # NOTE: for individual files, the image should be in a list of lists
    predictor.predict_from_files(
        list_of_lists_or_source_folder=fname_file_tmp_list,
        output_folder_or_list_of_truncated_output_files=tmpdir_nnunet,
        save_probabilities=False,
        overwrite=True,
        num_processes_preprocessing=4,
        num_processes_segmentation_export=4,
        folder_with_segs_from_prev_stage=None,
        num_parts=1,
        part_id=0
    )

    end = time.time()

    print('Inference done.')
    total_time = end - start
    print('Total inference time: {} minute(s) {} seconds\n'.format(int(total_time // 60), int(round(total_time % 60))))

    # Copy .nii.gz file from tmpdir_nnunet to tmpdir
    pred_file = glob.glob(os.path.join(tmpdir_nnunet, '*.nii.gz'))[0]
    shutil.copyfile(pred_file, fname_prediction)
    print(f'Copied {pred_file} to {fname_prediction}')

    # Reorient the image back to original orientation
    # skip if already in LPI
    if orig_orientation != 'LPI':
        print(f'Reorienting to original orientation {orig_orientation}...')
        # reorient the image to the original orientation using SCT
        os.system('sct_image -i {} -setorient {} -o {}'.format(fname_prediction, orig_orientation, fname_prediction))

    # Copy level-specific (i.e., non-binary) segmentation
    shutil.copyfile(fname_prediction, fname_file_out)
    print(f'Copied {fname_prediction} to {fname_file_out}')

    print('Deleting the temporary folder...')
    # Delete the temporary folder
    shutil.rmtree(tmpdir)

    print('-' * 50)
    print(f"Input file: {fname_file}")
    print(f"Rootlet segmentation: {fname_file_out}")
    print('-' * 50)


if __name__ == '__main__':
    main()
\ No newline at end of file
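
After this series, the helper scripts for the ventral/dorsal and lumbar models all live under a single `packaging` folder. As a quick sanity check of the renamed paths, an end-to-end run could look like the following sketch (the zip name, image name, and optional flags are illustrative; the flags themselves are those defined by `run_inference_single_subject.py` above):

```bash
# Minimal end-to-end sketch using the renamed 'packaging' folder.
# Assumes the venv_nnunet environment from the README and a model zip already in ~/Downloads.
cd model-spinal-rootlets
conda activate venv_nnunet
pip install -r packaging/requirements.txt
unzip ~/Downloads/Dataset202_LumbarRootlets_r20240527.zip -d ~/Downloads
python packaging/run_inference_single_subject.py \
    -i sub-001_T2w.nii.gz \
    -o sub-001_T2w_label-rootlets_dseg.nii.gz \
    -path-model ~/Downloads/Dataset202_LumbarRootlets_r20240527 \
    -fold 0 \
    -tile-step-size 0.9  # optional: faster inference with a small drop in performance
```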