diff --git a/.github/workflows/weekly-preview.yml b/.github/workflows/weekly-preview.yml
index 8d8cccffad..f89e0a11c4 100644
--- a/.github/workflows/weekly-preview.yml
+++ b/.github/workflows/weekly-preview.yml
@@ -66,7 +66,7 @@ jobs:
         export YEAR_WEEK=$(date +'%y%U')
         echo "Year week for tag is ${YEAR_WEEK}"
         if ! [[ $YEAR_WEEK =~ ^[0-9]{4}$ ]] ; then echo "Wrong 'year week' format. Should be 4 digits."; exit 1 ; fi
-        git tag "1.4.dev${YEAR_WEEK}"
+        git tag "1.5.dev${YEAR_WEEK}"
         git log -1
         git tag --list
         python setup.py sdist bdist_wheel
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 804508c262..e073eac56a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,98 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

 ## [Unreleased]

+## [1.4.0] - 2024-10-14
+## What's Changed
+### Added
+* Implemented Conjugate Gradient Solver to generate confidence maps (#7876)
+* Added `norm` parameter to `ResNet` (#7752, #7805)
+* Introduced `alpha` parameter to `DiceFocalLoss` for improved flexibility (#7841)
+* Integrated Tailored ControlNet Implementations (#7875)
+* Integrated Tailored Auto-Encoder Model (#7861)
+* Integrated Tailored Diffusion U-Net Model (#7867)
+* Added MAISI morphological functions (#7893)
+* Added support for downloading bundles from the NGC private registry (#7907, #7929, #8076)
+* Integrated generative refactor into the core (#7886, #7962)
+* Made `ViT` and `UNETR` models compatible with TorchScript (#7937)
+* Implemented post-download checks for MONAI bundles and compatibility warnings (#7938)
+* Added NGC prefix argument when downloading bundles (#7974)
+* Added flash attention support in the attention block for improved performance (#7977)
+* Enhanced `MLPBlock` for compatibility with VISTA-3D (#7995)
+* Added support for Neighbor-Aware Calibration Loss (NACL) for calibrated models in segmentation tasks (#7819)
+* Added `label_smoothing` parameter to `DiceCELoss` for enhanced model calibration (#8000)
+* Added `include_fc` and `use_combined_linear` arguments in the `SABlock` (#7996)
+* Added utilities, networks, and an inferer specific to VISTA-3D (#7999, #7987, #8047, #8059, #8021)
+* Integrated a new network, `CellSamWrapper`, for cell-based applications (#7981)
+* Introduced `WriteFileMapping` transform to map between input image paths and their corresponding output paths (#7769)
+* Added `TrtHandler` to accelerate models using TensorRT (#7990, #8064)
+* Added box and points conversion transforms for more flexible spatial manipulation (#8053)
+* Enhanced `RandSimulateLowResolutiond` transform with deterministic support (#8057)
+* Added a `contiguous` argument to the `Fourier` class to facilitate contiguous tensor outputs (#7969)
+* Allowed `ApplyTransformToPointsd` to receive a sequence of reference keys for more versatile point manipulation (#8063)
+* Made `MetaTensor` an optional print in `DataStats` and `DataStatsd` for more concise logging (#7814)
+#### misc.
+* Refactored Dataset to utilize Compose for handling transforms (#7784)
+* Combined `map_classes_to_indices` and `generate_label_classes_crop_centers` into a unified function (#7712)
+* Introduced metadata schema directly into the codebase for improved structure and validation (#7409)
+* Renamed `optional_packages_version` to `required_packages_version` for clearer package dependency management (#7253)
+* Replaced `pkg_resources` with the more modern packaging module for package handling (#7953)
+* Refactored MAISI-related networks to align with the new generative components (#7989, #7993, #8005)
+* Added a badge displaying monthly download statistics to enhance project visibility (#7891)
+### Fixed
+#### transforms
+* Ensured deterministic behavior in `MixUp`, `CutMix`, and `CutOut` transforms (#7813)
+* Applied a minor correction to the `AsDiscrete` transform (#7984)
+* Fixed handling of integer weightmaps in `RandomWeightedCrop` (#8097)
+* Resolved data type bug in `ScaleIntensityRangePercentile` (#8109)
+#### data
+* Fixed negative strides issue in the `NrrdReader` (#7809)
+* Addressed `WSIReader` issue with retrieving MPP (#7921)
+* Ensured location is returned as a tuple in `WSIReader` (#8007)
+* Corrected interpretation of space directions in the `NrrdReader` (#8091)
+#### metrics and losses
+* Improved memory management for `NACLLoss` (#8020)
+* Fixed reduction logic in `GeneralizedDiceScore` (#7970)
+#### networks
+* Resolved issue with loading pre-trained weights in `ResNet` (#7924)
+* Fixed error where the `torch.device` object had no attribute gpu_id during TensorRT export (#8019)
+* Corrected function for loading older weights in `DiffusionModelUNet` (#8031)
+* Switched to `torch_tensorrt.Device` instead of `torch.device` during TensorRT compilation (#8051)
+#### engines and handlers
+* Attempted to resolve the "experiment already exists" issue in `MLFlowHandler` (#7916)
+* Refactored the model export process for conversion and saving (#7934)
+#### misc.
+* Adjusted requirements to exclude NumPy version 2.0 (#7859)
+* Updated deprecated `scipy.ndimage` namespaces in optional imports (#7847, #7897)
+* Resolved `load_module()` deprecation in Python 3.12 (#7881)
+* Fixed Ruff type check issues (#7885)
+* Cleaned disk space in the conda test pipeline (#7902)
+* Replaced deprecated `pkgutil.find_loader` usage (#7906)
+* Enhanced docstrings in various modules (#7913, #8055)
+* Fixed test cases (#7905, #7794, #7808)
+* Fixed mypy issue introduced in mypy 1.11.0 (#7941)
+* Cleaned up warnings during test collection (#7914)
+* Fixed incompatible types in assignment issue (#7950)
+* Fixed outdated link in the docs (#7971)
+* Addressed CI issues (#7983, #8013)
+* Fixed an issue where a module could not be imported correctly (#8015)
+* Fixed `AttributeError` when using `torch.min` and `torch.max` (#8041)
+* Ensured synchronization by adding `cuda.synchronize` (#8058)
+* Ignored warning from nptyping as a workaround (#8062)
+* Suppressed deprecation warning when importing monai (#8067)
+* Fixed link in test bundle under MONAI-extra-test-data (#8092)
+### Changed
+* Base Docker image upgraded to `nvcr.io/nvidia/pytorch:24.08-py3` from `nvcr.io/nvidia/pytorch:23.08-py3`
+* Changed blossom-ci to ACL security format (#7843)
+* Moved PyType test to the weekly tests (#8025)
+* Adjusted to meet NumPy 2.0 requirements (#7857)
+### Deprecated
+* Dropped support for Python 3.8 (#7909)
+* Removed deprecated arguments and classes for v1.4 (#8079)
+### Removed
+* Removed use of `strtobool`, which is unavailable in Python 3.12 (#7900)
+* Removed the pipeline for publishing to testpypi (#8086)
+* Cleaned up some very old and now obsolete infrastructure (#8113, #8118, #8121)
+
 ## [1.3.2] - 2024-06-25
 ### Fixed
 #### misc.
@@ -1040,7 +1132,8 @@ the postprocessing steps should be used before calling the metrics methods

 [highlights]: https://github.com/Project-MONAI/MONAI/blob/master/docs/source/highlights.md

-[Unreleased]: https://github.com/Project-MONAI/MONAI/compare/1.3.2...HEAD
+[Unreleased]: https://github.com/Project-MONAI/MONAI/compare/1.4.0...HEAD
+[1.4.0]: https://github.com/Project-MONAI/MONAI/compare/1.3.2...1.4.0
 [1.3.2]: https://github.com/Project-MONAI/MONAI/compare/1.3.1...1.3.2
 [1.3.1]: https://github.com/Project-MONAI/MONAI/compare/1.3.0...1.3.1
 [1.3.0]: https://github.com/Project-MONAI/MONAI/compare/1.2.0...1.3.0
diff --git a/CITATION.cff b/CITATION.cff
index b535a77a9f..3cd3d0e0b1 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -6,8 +6,8 @@ title: "MONAI: Medical Open Network for AI"
 abstract: "AI Toolkit for Healthcare Imaging"
 authors:
   - name: "MONAI Consortium"
-date-released: 2024-06-26
-version: "1.3.2"
+date-released: 2024-10-14
+version: "1.4.0"
 identifiers:
   - description: "This DOI represents all versions of MONAI, and will always resolve to the latest one."
     type: doi
diff --git a/docs/images/maisi_train.png b/docs/images/maisi_train.png
new file mode 100644
index 0000000000..8c4936456d
Binary files /dev/null and b/docs/images/maisi_train.png differ
diff --git a/docs/images/vista2d.png b/docs/images/vista2d.png
new file mode 100644
index 0000000000..5d09c1a275
Binary files /dev/null and b/docs/images/vista2d.png differ
diff --git a/docs/images/vista3d.png b/docs/images/vista3d.png
new file mode 100644
index 0000000000..c8a94fbecd
Binary files /dev/null and b/docs/images/vista3d.png differ
diff --git a/docs/source/installation.md b/docs/source/installation.md
index 70a8b6f1d4..4308a07647 100644
--- a/docs/source/installation.md
+++ b/docs/source/installation.md
@@ -254,10 +254,10 @@ Since MONAI v0.2.0, the extras syntax such as `pip install 'monai[nibabel]'` is
- The options are
```
-[nibabel, skimage, scipy, pillow, tensorboard, gdown, ignite, torchvision, itk, tqdm, lmdb, psutil, cucim, openslide, pandas, einops, transformers, mlflow, clearml, matplotlib, tensorboardX, tifffile, imagecodecs, pyyaml, fire, jsonschema, ninja, pynrrd, pydicom, h5py, nni, optuna, onnx, onnxruntime, zarr, lpips, pynvml, huggingface_hub, segment-anything]
+[nibabel, skimage, scipy, pillow, tensorboard, gdown, ignite, torchvision, itk, tqdm, lmdb, psutil, cucim, openslide, pandas, einops, transformers, mlflow, clearml, matplotlib, tensorboardX, tifffile, imagecodecs, pyyaml, fire, jsonschema, ninja, pynrrd, pydicom, h5py, nni, optuna, onnx, onnxruntime, zarr, lpips, pynvml, huggingface_hub, pyamg]
```
which correspond to `nibabel`, `scikit-image`, `scipy`, `pillow`, `tensorboard`,
-`gdown`, `pytorch-ignite`, `torchvision`, `itk`, `tqdm`, `lmdb`, `psutil`, `cucim`, `openslide-python`, `pandas`, `einops`, `transformers`, `mlflow`, `clearml`, `matplotlib`, `tensorboardX`, `tifffile`, `imagecodecs`, `pyyaml`, `fire`, `jsonschema`, `ninja`, `pynrrd`, `pydicom`, `h5py`, `nni`, `optuna`, `onnx`, `onnxruntime`, `zarr`, `lpips`, `nvidia-ml-py`, `huggingface_hub`, `pyamg` and `segment-anything` respectively.
+`gdown`, `pytorch-ignite`, `torchvision`, `itk`, `tqdm`, `lmdb`, `psutil`, `cucim`, `openslide-python`, `pandas`, `einops`, `transformers`, `mlflow`, `clearml`, `matplotlib`, `tensorboardX`, `tifffile`, `imagecodecs`, `pyyaml`, `fire`, `jsonschema`, `ninja`, `pynrrd`, `pydicom`, `h5py`, `nni`, `optuna`, `onnx`, `onnxruntime`, `zarr`, `lpips`, `nvidia-ml-py`, `huggingface_hub` and `pyamg` respectively.
- `pip install 'monai[all]'` installs all the optional dependencies.
diff --git a/docs/source/whatsnew.rst b/docs/source/whatsnew.rst
index a12dbe6959..b1f6b2dac7 100644
--- a/docs/source/whatsnew.rst
+++ b/docs/source/whatsnew.rst
@@ -6,6 +6,7 @@ What's New
 .. toctree::
    :maxdepth: 1

+   whatsnew_1_4.md
    whatsnew_1_3.md
    whatsnew_1_2.md
    whatsnew_1_1.md
diff --git a/docs/source/whatsnew_1_3.md b/docs/source/whatsnew_1_3.md
index c4b14810b5..6480547eec 100644
--- a/docs/source/whatsnew_1_3.md
+++ b/docs/source/whatsnew_1_3.md
@@ -1,4 +1,4 @@
-# What's new in 1.3 🎉🎉
+# What's new in 1.3

 - Bundle usability enhancements
 - Integrating MONAI Generative into MONAI core
diff --git a/docs/source/whatsnew_1_4.md b/docs/source/whatsnew_1_4.md
new file mode 100644
index 0000000000..0fc82ff820
--- /dev/null
+++ b/docs/source/whatsnew_1_4.md
@@ -0,0 +1,68 @@
+# What's new in 1.4 🎉🎉
+
+- MAISI: state-of-the-art 3D Latent Diffusion Model
+- VISTA-3D: interactive foundation model for segmenting and annotating human anatomies
+- VISTA-2D: cell segmentation pipeline
+- Integrating MONAI Generative into MONAI core
+- Lazy TensorRT export via `trt_compile`
+- Geometric Data Support
+
+
+## MAISI: state-of-the-art 3D Latent Diffusion Model
+
+![maisi](../images/maisi_train.png)
+
+MAISI (Medical AI for Synthetic Imaging) is a state-of-the-art three-dimensional (3D) Latent Diffusion Model designed for generating high-quality synthetic CT images with or without anatomical annotations. The model excels at data augmentation and at creating realistic medical imaging data to supplement datasets that are limited by privacy concerns or the rarity of certain conditions. It can also significantly enhance the performance of other medical imaging AI models by generating diverse and realistic training data.
+
+A tutorial for generating large CT images accompanied by corresponding segmentation masks using MAISI is provided within
+[`project-monai/tutorials`](https://github.com/Project-MONAI/tutorials/blob/main/generation/maisi).
+MAISI offers the following features:
+- A foundation Variational Auto-Encoder (VAE) model for latent feature compression that works for both CT and MRI with flexible volume size and voxel size
+- A foundation Diffusion model that can generate large CT volumes of up to 512 × 512 × 768 voxels, with flexible volume size and voxel size
+- A ControlNet to generate image/mask pairs that can improve downstream tasks, with controllable organ/tumor size
+
+## VISTA-3D: interactive foundation model for segmenting and annotating human anatomies
+
+![vista-3d](../images/vista3d.png)
+
+VISTA-3D is a specialized interactive foundation model for 3D medical imaging. It excels in providing accurate and adaptable segmentation analysis across anatomies and modalities. Utilizing a multi-head architecture, VISTA-3D adapts to varying conditions and anatomical areas, helping guide users' annotation workflow.
+
+A tutorial showing how to fine-tune VISTA-3D on the spleen dataset is provided within
+[`project-monai/tutorials`](https://github.com/Project-MONAI/tutorials/blob/main/vista_3d).
+It supports three core workflows:
+- Segment everything: Enables whole body exploration, crucial for understanding complex diseases affecting multiple organs and for holistic treatment planning.
+- Segment using class: Provides detailed sectional views based on specific classes, essential for targeted disease analysis or organ mapping, such as tumor identification in critical organs.
+- Segment point prompts: Enhances segmentation precision through user-directed, click-based selection.
+  This interactive approach accelerates the creation of accurate ground-truth data, essential in medical imaging analysis.
+
+## VISTA-2D: cell segmentation pipeline
+
+![vista-2d](../images/vista2d.png)
+
+VISTA-2D is a comprehensive training and inference pipeline for cell segmentation in imaging applications. For more information, refer to this [blog post](https://developer.nvidia.com/blog/advancing-cell-segmentation-and-morphology-analysis-with-nvidia-ai-foundation-model-vista-2d/).
+
+Key features of the model include:
+- A robust deep learning algorithm utilizing transformers
+- A foundation model, as compared to specialist models
+- Support for a wide variety of datasets and file formats
+- The ability to handle multiple imaging modalities
+- Multi-GPU and multi-node training support
+
+A tutorial demonstrating how to train a cell segmentation model using the MONAI framework on the Cellpose dataset can be found in [`project-monai/tutorials`](https://github.com/Project-MONAI/tutorials/blob/main/vista_2d).
+
+## Integrating MONAI Generative into MONAI Core
+
+Key modules originally developed in the [MONAI GenerativeModels](https://github.com/Project-MONAI/GenerativeModels) repository have been integrated into the core MONAI codebase. This integration ensures consistent maintenance and streamlined release of essential components for generative AI. In this version, all utilities, networks, diffusion schedulers, inferers, and engines have been migrated into the core codebase. Special care has been taken to ensure that saved weights from models trained using GenerativeModels can be loaded into the networks now integrated into core.
+
+Additionally, several tutorials have been ported and are available within [`project-monai/tutorials`](https://github.com/Project-MONAI/tutorials/blob/main/generation).
+
+## Lazy TensorRT export via `trt_compile`
+
+This release expands TensorRT optimization options for MONAI bundles with the `trt_compile` API.
+The existing `trt_export` API requires the user to run a separate export script to prepare a TensorRT engine-based TorchScript model.
+`trt_compile` builds and saves a TensorRT engine the first time a bundle is run and provides limited dependency support.
+It also allows partial TensorRT export where only a certain submodule is optimized, which improves usability.
+A few bundles in the MONAI model zoo, like the new [VISTA-3D](https://github.com/Project-MONAI/model-zoo/tree/dev/models/vista3d)
+and [VISTA-2D](https://github.com/Project-MONAI/model-zoo/tree/dev/models/vista2d) bundles, already come with `trt_inference.json` config files which use `trt_compile`.
+
+## Geometric Data Support
+
+MONAI introduces support for geometric data transformations as a key feature. As a starting point, the `ApplyTransformToPoints` transform is added to facilitate matrix operations on points, enabling flexible and efficient handling of geometric transformations. Alongside this, the framework now supports conversions between boxes and points, providing seamless interoperability within detection pipelines. These updates have been integrated into existing pipelines, such as the [detection tutorial](https://github.com/Project-MONAI/tutorials/blob/main/detection) and the [3D registration workflow](https://github.com/Project-MONAI/tutorials/blob/main/3d_registration/learn2reg_nlst_paired_lung_ct.ipynb), leveraging the latest APIs for improved functionality.
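+As a hedged illustration of the lazy TensorRT workflow, the sketch below assumes the 1.4 signature `trt_compile(model, base_path)`, where the compiled engine is cached on disk under a path derived from `base_path`; it also assumes a CUDA device and a TensorRT-enabled environment. The network choice and paths are arbitrary, for demonstration only.
+
+```python
+import torch
+from monai.networks import trt_compile
+from monai.networks.nets import UNet
+
+model = UNet(spatial_dims=3, in_channels=1, out_channels=2, channels=(8, 16, 32), strides=(2, 2)).cuda()
+# wrap the model: the TensorRT engine is built lazily on the first forward
+# pass and cached for reuse (assumed caching behavior; see the API docs)
+model = trt_compile(model, "models/unet_demo")
+with torch.no_grad():
+    y = model(torch.rand(1, 1, 64, 64, 64, device="cuda"))
+```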
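+Similarly, a minimal sketch of the new point transform: the constructor arguments `affine` and `invert_affine` follow the description above, while the `(channel, num_points, coord_dim)` input layout shown here is an assumption to verify against the `ApplyTransformToPoints` docstring.
+
+```python
+import torch
+from monai.transforms import ApplyTransformToPoints
+
+points = torch.tensor([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]])  # assumed (C, N, 3) layout
+affine = torch.eye(4)
+affine[:3, 3] = torch.tensor([10.0, 10.0, 10.0])  # translate by (10, 10, 10)
+
+# invert_affine=False applies the affine as given rather than its inverse
+transform = ApplyTransformToPoints(affine=affine, invert_affine=False)
+transformed = transform(points)  # each point shifted by (10, 10, 10)
+```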
diff --git a/monai/apps/deepgrow/transforms.py b/monai/apps/deepgrow/transforms.py
index c2f97091fd..721c0db489 100644
--- a/monai/apps/deepgrow/transforms.py
+++ b/monai/apps/deepgrow/transforms.py
@@ -803,6 +803,14 @@ class RestoreLabeld(MapTransform):
         original_shape_key: key that records original shape for foreground.
         cropped_shape_key: key that records cropped shape for foreground.
         allow_missing_keys: don't raise exception if key is missing.
+        restore_resizing: used to enable or disable resizing restoration, default is True.
+            If True, the transform will resize the items back to their original shape.
+        restore_cropping: used to enable or disable cropping restoration, default is True.
+            If True, the transform will restore the items to their uncropped size.
+        restore_spacing: used to enable or disable spacing restoration, default is True.
+            If True, the transform will resample the items back to the spacing they had before being altered.
+        restore_slicing: used to enable or disable slicing restoration, default is True.
+            If True, the transform will reassemble the full volume by restoring the slices to their original positions.
     """

     def __init__(
@@ -819,6 +827,10 @@ def __init__(
         original_shape_key: str = "foreground_original_shape",
         cropped_shape_key: str = "foreground_cropped_shape",
         allow_missing_keys: bool = False,
+        restore_resizing: bool = True,
+        restore_cropping: bool = True,
+        restore_spacing: bool = True,
+        restore_slicing: bool = True,
     ) -> None:
         super().__init__(keys, allow_missing_keys)
         self.ref_image = ref_image
@@ -833,6 +845,10 @@ def __init__(
         self.end_coord_key = end_coord_key
         self.original_shape_key = original_shape_key
         self.cropped_shape_key = cropped_shape_key
+        self.restore_resizing = restore_resizing
+        self.restore_cropping = restore_cropping
+        self.restore_spacing = restore_spacing
+        self.restore_slicing = restore_slicing

     def __call__(self, data: Any) -> dict:
         d = dict(data)
@@ -842,38 +858,45 @@ def __call__(self, data: Any) -> dict:
             image = d[key]

             # Undo Resize
-            current_shape = image.shape
-            cropped_shape = meta_dict[self.cropped_shape_key]
-            if np.any(np.not_equal(current_shape, cropped_shape)):
-                resizer = Resize(spatial_size=cropped_shape[1:], mode=mode)
-                image = resizer(image, mode=mode, align_corners=align_corners)
+            if self.restore_resizing:
+                current_shape = image.shape
+                cropped_shape = meta_dict[self.cropped_shape_key]
+                if np.any(np.not_equal(current_shape, cropped_shape)):
+                    resizer = Resize(spatial_size=cropped_shape[1:], mode=mode)
+                    image = resizer(image, mode=mode, align_corners=align_corners)

             # Undo Crop
-            original_shape = meta_dict[self.original_shape_key]
-            result = np.zeros(original_shape, dtype=np.float32)
-            box_start = meta_dict[self.start_coord_key]
-            box_end = meta_dict[self.end_coord_key]
-
-            spatial_dims = min(len(box_start), len(image.shape[1:]))
-            slices = tuple(
-                [slice(None)] + [slice(s, e) for s, e in zip(box_start[:spatial_dims], box_end[:spatial_dims])]
-            )
-            result[slices] = image
+            if self.restore_cropping:
+                original_shape = meta_dict[self.original_shape_key]
+                result = np.zeros(original_shape, dtype=np.float32)
+                box_start = meta_dict[self.start_coord_key]
+                box_end = meta_dict[self.end_coord_key]
+
+                spatial_dims = min(len(box_start), len(image.shape[1:]))
+                slices = tuple(
+                    [slice(None)] + [slice(s, e) for s, e in zip(box_start[:spatial_dims], box_end[:spatial_dims])]
+                )
+                result[slices] = image
+            else:
+                result = image

             # Undo Spacing
-            current_size = result.shape[1:]
-            # change spatial_shape from HWD to DHW
-            spatial_shape = list(np.roll(meta_dict["spatial_shape"], 1))
-            spatial_size = spatial_shape[-len(current_size) :]
+            if self.restore_spacing:
+                current_size = result.shape[1:]
+                # change spatial_shape from HWD to DHW
+                spatial_shape = list(np.roll(meta_dict["spatial_shape"], 1))
+                spatial_size = spatial_shape[-len(current_size) :]

-            if np.any(np.not_equal(current_size, spatial_size)):
-                resizer = Resize(spatial_size=spatial_size, mode=mode)
-                result = resizer(result, mode=mode, align_corners=align_corners)  # type: ignore
+                if np.any(np.not_equal(current_size, spatial_size)):
+                    resizer = Resize(spatial_size=spatial_size, mode=mode)
+                    result = resizer(result, mode=mode, align_corners=align_corners)  # type: ignore

             # Undo Slicing
             slice_idx = meta_dict.get("slice_idx")
             final_result: NdarrayOrTensor
-            if slice_idx is None or self.slice_only:
+            if not self.restore_slicing:  # do nothing if restore slicing isn't requested
+                final_result = result
+            elif slice_idx is None or self.slice_only:
                 final_result = result if len(result.shape) <= 3 else result[0]
             else:
                 slice_idx = meta_dict["slice_idx"][0]
diff --git a/monai/networks/schedulers/ddim.py b/monai/networks/schedulers/ddim.py
index 2a0121d063..50a680336d 100644
--- a/monai/networks/schedulers/ddim.py
+++ b/monai/networks/schedulers/ddim.py
@@ -220,7 +220,7 @@ def step(
         if eta > 0:
             # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
             device: torch.device = torch.device(model_output.device if torch.is_tensor(model_output) else "cpu")
-            noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
+            noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator, device=device)
             variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

         pred_prev_sample = pred_prev_sample + variance
diff --git a/monai/networks/schedulers/ddpm.py b/monai/networks/schedulers/ddpm.py
index 93ad833031..d64e11d379 100644
--- a/monai/networks/schedulers/ddpm.py
+++ b/monai/networks/schedulers/ddpm.py
@@ -241,8 +241,12 @@ def step(
         variance = 0
         if timestep > 0:
             noise = torch.randn(
-                model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
-            ).to(model_output.device)
+                model_output.size(),
+                dtype=model_output.dtype,
+                layout=model_output.layout,
+                generator=generator,
+                device=model_output.device,
+            )
             variance = (self._get_variance(timestep, predicted_variance=predicted_variance) ** 0.5) * noise

         pred_prev_sample = pred_prev_sample + variance
diff --git a/monai/transforms/regularization/array.py b/monai/transforms/regularization/array.py
index 4bf6cff649..66a5116c1a 100644
--- a/monai/transforms/regularization/array.py
+++ b/monai/transforms/regularization/array.py
@@ -112,6 +112,11 @@ class CutMix(Mixer):
     the mixing weight but also the size of the random rectangles used for mixing. Please refer to
     the paper for details.

+    Please note that there is a change in behavior starting from version 1.4.0. In the previous
+    implementation, the transform would generate a different label each time it was called.
+    To ensure determinism, the new implementation now generates the same label for
+    the same input image when using the same operation.
+
     The most common use case is something close to:

     .. code-block:: python
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 6d0ccd378a..72654d3534 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -22,7 +22,7 @@ isort>=5.1
 ruff
 pytype>=2020.6.1; platform_system != "Windows"
 types-setuptools
-mypy>=1.5.0
+mypy>=1.5.0, <1.12.0
 ninja
 torchvision
 psutil
diff --git a/tests/test_deepgrow_transforms.py b/tests/test_deepgrow_transforms.py
index a491a8004b..091d00afcd 100644
--- a/tests/test_deepgrow_transforms.py
+++ b/tests/test_deepgrow_transforms.py
@@ -141,6 +141,21 @@
 DATA_12 = {"image": np.arange(27).reshape(3, 3, 3), PostFix.meta("image"): {}, "guidance": [[0, 0, 0], [0, 1, 1], 1]}

+DATA_13 = {
+    "image": np.arange(64).reshape((1, 4, 4, 4)),
+    PostFix.meta("image"): {
+        "spatial_shape": [8, 8, 4],
+        "foreground_start_coord": np.array([1, 1, 1]),
+        "foreground_end_coord": np.array([3, 3, 3]),
+        "foreground_original_shape": (1, 4, 4, 4),
+        "foreground_cropped_shape": (1, 2, 2, 2),
+        "original_affine": np.array(
+            [[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]]
+        ),
+    },
+    "pred": np.array([[[[10, 20], [30, 40]], [[50, 60], [70, 80]]]]),
+}
+
 FIND_SLICE_TEST_CASE_1 = [{"label": "label", "sids": "sids"}, DATA_1, [0]]

 FIND_SLICE_TEST_CASE_2 = [{"label": "label", "sids": "sids"}, DATA_2, [0, 1]]
@@ -329,6 +344,74 @@
 RESTORE_LABEL_TEST_CASE_2 = [{"keys": ["pred"], "ref_image": "image", "mode": "nearest"}, DATA_11, RESULT]

+RESTORE_LABEL_TEST_CASE_3_RESULT = np.zeros((10, 20, 20))
+RESTORE_LABEL_TEST_CASE_3_RESULT[:5, 0:10, 0:10] = 1
+RESTORE_LABEL_TEST_CASE_3_RESULT[:5, 0:10, 10:20] = 2
+RESTORE_LABEL_TEST_CASE_3_RESULT[:5, 10:20, 0:10] = 3
+RESTORE_LABEL_TEST_CASE_3_RESULT[:5, 10:20, 10:20] = 4
+RESTORE_LABEL_TEST_CASE_3_RESULT[5:10, 0:10, 0:10] = 5
+RESTORE_LABEL_TEST_CASE_3_RESULT[5:10, 0:10, 10:20] = 6
+RESTORE_LABEL_TEST_CASE_3_RESULT[5:10, 10:20, 0:10] = 7
+RESTORE_LABEL_TEST_CASE_3_RESULT[5:10, 10:20, 10:20] = 8
+
+RESTORE_LABEL_TEST_CASE_3 = [
+    {"keys": ["pred"], "ref_image": "image", "mode": "nearest", "restore_cropping": False},
+    DATA_11,
+    RESTORE_LABEL_TEST_CASE_3_RESULT,
+]
+
+RESTORE_LABEL_TEST_CASE_4_RESULT = np.zeros((4, 8, 8))
+RESTORE_LABEL_TEST_CASE_4_RESULT[1, 2:6, 2:6] = np.array(
+    [[10.0, 10.0, 20.0, 20.0], [10.0, 10.0, 20.0, 20.0], [30.0, 30.0, 40.0, 40.0], [30.0, 30.0, 40.0, 40.0]]
+)
+RESTORE_LABEL_TEST_CASE_4_RESULT[2, 2:6, 2:6] = np.array(
+    [[50.0, 50.0, 60.0, 60.0], [50.0, 50.0, 60.0, 60.0], [70.0, 70.0, 80.0, 80.0], [70.0, 70.0, 80.0, 80.0]]
+)
+
+RESTORE_LABEL_TEST_CASE_4 = [
+    {"keys": ["pred"], "ref_image": "image", "mode": "nearest", "restore_resizing": False},
+    DATA_13,
+    RESTORE_LABEL_TEST_CASE_4_RESULT,
+]
+
+RESTORE_LABEL_TEST_CASE_5_RESULT = np.zeros((4, 4, 4))
+RESTORE_LABEL_TEST_CASE_5_RESULT[1, 1:3, 1:3] = np.array([[10.0, 20.0], [30.0, 40.0]])
+RESTORE_LABEL_TEST_CASE_5_RESULT[2, 1:3, 1:3] = np.array([[50.0, 60.0], [70.0, 80.0]])
+
+RESTORE_LABEL_TEST_CASE_5 = [
+    {"keys": ["pred"], "ref_image": "image", "mode": "nearest", "restore_spacing": False},
+    DATA_13,
+    RESTORE_LABEL_TEST_CASE_5_RESULT,
+]
+
+RESTORE_LABEL_TEST_CASE_6_RESULT = np.zeros((1, 4, 8, 8))
+RESTORE_LABEL_TEST_CASE_6_RESULT[-1, 1, 2:6, 2:6] = np.array(
+    [[10.0, 10.0, 20.0, 20.0], [10.0, 10.0, 20.0, 20.0], [30.0, 30.0, 40.0, 40.0], [30.0, 30.0, 40.0, 40.0]]
+)
+RESTORE_LABEL_TEST_CASE_6_RESULT[-1, 2, 2:6, 2:6] = np.array(
+    [[50.0, 50.0, 60.0, 60.0], [50.0, 50.0, 60.0, 60.0], [70.0, 70.0, 80.0, 80.0], [70.0, 70.0, 80.0, 80.0]]
+)
+
+RESTORE_LABEL_TEST_CASE_6 = [
+    {"keys": ["pred"], "ref_image": "image", "mode": "nearest", "restore_slicing": False},
+    DATA_13,
+    RESTORE_LABEL_TEST_CASE_6_RESULT,
+]
+
+RESTORE_LABEL_TEST_CASE_7 = [
+    {
+        "keys": ["pred"],
+        "ref_image": "image",
+        "mode": "nearest",
+        "restore_resizing": False,
+        "restore_cropping": False,
+        "restore_spacing": False,
+        "restore_slicing": False,
+    },
+    DATA_11,
+    np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]),
+]
+
 FETCH_2D_SLICE_TEST_CASE_1 = [
     {"keys": ["image"], "guidance": "guidance"},
     DATA_12,
@@ -445,7 +528,17 @@ def test_correct_results(self, arguments, input_data, expected_result):

 class TestRestoreLabeld(unittest.TestCase):
-    @parameterized.expand([RESTORE_LABEL_TEST_CASE_1, RESTORE_LABEL_TEST_CASE_2])
+    @parameterized.expand(
+        [
+            RESTORE_LABEL_TEST_CASE_1,
+            RESTORE_LABEL_TEST_CASE_2,
+            RESTORE_LABEL_TEST_CASE_3,
+            RESTORE_LABEL_TEST_CASE_4,
+            RESTORE_LABEL_TEST_CASE_5,
+            RESTORE_LABEL_TEST_CASE_6,
+            RESTORE_LABEL_TEST_CASE_7,
+        ]
+    )
     def test_correct_results(self, arguments, input_data, expected_result):
         result = RestoreLabeld(**arguments)(input_data)
         np.testing.assert_allclose(result["pred"], expected_result)
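For reference, a minimal usage sketch of the new `restore_*` flags, mirroring `RESTORE_LABEL_TEST_CASE_5` above: spacing restoration is disabled, so the prediction is restored into the uncropped volume but not resampled to the reference `spatial_shape`. The data and arguments are taken directly from the test cases; only the variable names are new.

```python
import numpy as np
from monai.apps.deepgrow.transforms import RestoreLabeld
from monai.utils.enums import PostFix

data = {
    "image": np.arange(64).reshape((1, 4, 4, 4)),
    PostFix.meta("image"): {
        "spatial_shape": [8, 8, 4],
        "foreground_start_coord": np.array([1, 1, 1]),
        "foreground_end_coord": np.array([3, 3, 3]),
        "foreground_original_shape": (1, 4, 4, 4),
        "foreground_cropped_shape": (1, 2, 2, 2),
        "original_affine": np.eye(4)[None],
    },
    "pred": np.array([[[[10, 20], [30, 40]], [[50, 60], [70, 80]]]], dtype=np.float32),
}

# skip only the resampling step; resizing, cropping, and slicing are still undone
restore = RestoreLabeld(keys=["pred"], ref_image="image", mode="nearest", restore_spacing=False)
result = restore(data)  # result["pred"] has shape (4, 4, 4) instead of (4, 8, 8)
```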