Merge branch 'dev' into dev
KumoLiu authored Sep 6, 2024
2 parents 2e7e04c + 19cc6f0 commit cfbeb0e
Showing 45 changed files with 2,536 additions and 111 deletions.
24 changes: 12 additions & 12 deletions .github/workflows/cron.yml
@@ -13,24 +13,24 @@ jobs:
strategy:
matrix:
environment:
- "PT191+CUDA113"
- "PT110+CUDA113"
- "PT113+CUDA113"
- "PTLATEST+CUDA121"
- "PT113+CUDA118"
- "PT210+CUDA121"
- "PTLATEST+CUDA124"
include:
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes
- environment: PT110+CUDA113
pytorch: "torch==1.10.2 torchvision==0.11.3 --extra-index-url https://download.pytorch.org/whl/cu113"
base: "nvcr.io/nvidia/pytorch:21.06-py3" # CUDA 11.3
- environment: PT113+CUDA113
pytorch: "torch==1.13.1 torchvision==0.14.1 --extra-index-url https://download.pytorch.org/whl/cu113"
base: "nvcr.io/nvidia/pytorch:21.06-py3" # CUDA 11.3
- environment: PT113+CUDA122
- environment: PT113+CUDA118
pytorch: "torch==1.13.1 torchvision==0.14.1 --extra-index-url https://download.pytorch.org/whl/cu121"
base: "nvcr.io/nvidia/pytorch:23.08-py3" # CUDA 12.2
base: "nvcr.io/nvidia/pytorch:22.10-py3" # CUDA 11.8
- environment: PT210+CUDA121
pytorch: "pytorch==2.1.0 torchvision==0.16.0 --extra-index-url https://download.pytorch.org/whl/cu121"
base: "nvcr.io/nvidia/pytorch:23.08-py3" # CUDA 12.1
- environment: PTLATEST+CUDA124
pytorch: "-U torch torchvision --extra-index-url https://download.pytorch.org/whl/cu121"
base: "nvcr.io/nvidia/pytorch:24.03-py3" # CUDA 12.4
base: "nvcr.io/nvidia/pytorch:24.08-py3" # CUDA 12.4
container:
image: ${{ matrix.base }}
options: "--gpus all"
@@ -80,7 +80,7 @@ jobs:
if: github.repository == 'Project-MONAI/MONAI'
strategy:
matrix:
container: ["pytorch:23.08", "pytorch:24.03"]
container: ["pytorch:23.08", "pytorch:24.08"]
container:
image: nvcr.io/nvidia/${{ matrix.container }}-py3 # testing with the latest pytorch base image
options: "--gpus all"
@@ -129,7 +129,7 @@ jobs:
if: github.repository == 'Project-MONAI/MONAI'
strategy:
matrix:
container: ["pytorch:24.03"]
container: ["pytorch:24.08"]
container:
image: nvcr.io/nvidia/${{ matrix.container }}-py3 # testing with the latest pytorch base image
options: "--gpus all"
@@ -233,7 +233,7 @@ jobs:
if: github.repository == 'Project-MONAI/MONAI'
needs: cron-gpu # so that monai itself is verified first
container:
image: nvcr.io/nvidia/pytorch:24.03-py3 # testing with the latest pytorch base image
image: nvcr.io/nvidia/pytorch:24.08-py3 # testing with the latest pytorch base image
options: "--gpus all --ipc=host"
runs-on: [self-hosted, linux, x64, integration]
steps:
4 changes: 2 additions & 2 deletions .github/workflows/pythonapp-gpu.yml
@@ -44,9 +44,9 @@ jobs:
pytorch: "-h" # we explicitly set pytorch to -h to avoid pip install error
base: "nvcr.io/nvidia/pytorch:23.08-py3"
- environment: PT210+CUDA121DOCKER
# 24.03: 2.3.0a0+40ec155e58.nv24.3
# 24.08: 2.3.0a0+40ec155e58.nv24.3
pytorch: "-h" # we explicitly set pytorch to -h to avoid pip install error
base: "nvcr.io/nvidia/pytorch:24.03-py3"
base: "nvcr.io/nvidia/pytorch:24.08-py3"
container:
image: ${{ matrix.base }}
options: --gpus all --env NVIDIA_DISABLE_REQUIRE=true # workaround for unsatisfied condition: cuda>=11.6
3 changes: 2 additions & 1 deletion Dockerfile
@@ -11,7 +11,7 @@

# To build with a different base image
# please run `docker build` using the `--build-arg PYTORCH_IMAGE=...` flag.
ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:24.03-py3
ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:24.08-py3
FROM ${PYTORCH_IMAGE}

LABEL maintainer="[email protected]"
@@ -56,4 +56,5 @@ RUN apt-get update \
&& rm -rf /var/lib/apt/lists/*
# append /opt/tools to runtime path for NGC CLI to be accessible from all file system locations
ENV PATH=${PATH}:/opt/tools
ENV POLYGRAPHY_AUTOINSTALL_DEPS=1
WORKDIR /opt/monai
1 change: 1 addition & 0 deletions docs/requirements.txt
@@ -42,3 +42,4 @@ zarr
huggingface_hub
pyamg>=5.0.0
packaging
polygraphy
42 changes: 42 additions & 0 deletions docs/source/config_syntax.md
@@ -16,6 +16,7 @@ Content:
- [`$` to evaluate as Python expressions](#to-evaluate-as-python-expressions)
- [`%` to textually replace configuration elements](#to-textually-replace-configuration-elements)
- [`_target_` (`_disabled_`, `_desc_`, `_requires_`, `_mode_`) to instantiate a Python object](#instantiate-a-python-object)
- [`+` to alter semantics of merging config keys from multiple configuration files](#multiple-config-files)
- [The command line interface](#the-command-line-interface)
- [Recommendations](#recommendations)

@@ -175,6 +176,47 @@ _Description:_ `_requires_`, `_disabled_`, `_desc_`, and `_mode_` are optional keys
- `"debug"` -- execute with debug prompt and return the return value of ``pdb.runcall(_target_, **kwargs)``,
see also [`pdb.runcall`](https://docs.python.org/3/library/pdb.html#pdb.runcall).

## Multiple config files

_Description:_ Multiple config files may be specified on the command line.
The contents of those config files are merged. When the same key is specified in more than one config file,
the value associated with that key is overridden, in the order the config files are specified.
If the desired behaviour is to merge values from both files, the key in the second config file should be prefixed with `+`.
The value types of the merged contents must match: both `dict` or both `list`.
`dict` values are merged via `update()`, while `list` values are concatenated via `extend()`.
Here is an example. In this case, the "amp" value is overridden by extra_config.json,
while the `imports` and `preprocessing#transforms` lists are merged. An error is raised if the value type of `"+imports"` is not `list`:

config.json:
```json
{
    "amp": "$True",
    "imports": [
        "$import torch"
    ],
    "preprocessing": {
        "_target_": "Compose",
        "transforms": [
            "$@t1",
            "$@t2"
        ]
    }
}
```

extra_config.json:
```json
{
    "amp": "$False",
    "+imports": [
        "$from monai.networks import trt_compile"
    ],
    "+preprocessing#transforms": [
        "$@t3"
    ]
}
```
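
The following is an editorial sketch (not part of the bundle documentation itself) of the same merge performed programmatically with `ConfigParser.load_config_files`, the classmethod updated in this commit; the expected values in the comments follow the merge rules described above.

```python
# Minimal sketch: merge the two example files above via ConfigParser.
# The expected results in the comments follow the documented "+" merge rules.
from monai.bundle import ConfigParser

merged = ConfigParser.load_config_files(["config.json", "extra_config.json"])
print(merged["amp"])      # "$False" -- plain key, overridden by extra_config.json
print(merged["imports"])  # both import expressions -- "+imports" extended the list
```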

## The command line interface

In addition to the Pythonic APIs, a few command line interfaces (CLI) are provided to interact with the bundle.
48 changes: 48 additions & 0 deletions docs/source/transforms.rst
@@ -554,6 +554,12 @@ IO
:members:
:special-members: __call__

`WriteFileMapping`
""""""""""""""""""
.. autoclass:: WriteFileMapping
:members:
:special-members: __call__


NVIDIA Tool Extension (NVTX)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -970,6 +976,18 @@ Spatial
:members:
:special-members: __call__

`ConvertBoxToPoints`
""""""""""""""""""""
.. autoclass:: ConvertBoxToPoints
:members:
:special-members: __call__

`ConvertPointsToBoxes`
""""""""""""""""""""""
.. autoclass:: ConvertPointsToBoxes
:members:
:special-members: __call__
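
An editorial, hedged sketch of the newly documented box/point conversion transforms follows; the `mode` argument, the assumed `xyzxyz` box layout, and the exact output shapes are assumptions not taken from this diff.

```python
# Hedged round-trip sketch for the new transforms (constructor arguments and
# tensor layouts are assumptions, not confirmed by this diff).
import torch
from monai.transforms import ConvertBoxToPoints, ConvertPointsToBoxes

boxes = torch.tensor([[0.0, 0.0, 0.0, 10.0, 12.0, 8.0]])  # one box, assumed xyzxyz layout
points = ConvertBoxToPoints(mode="xyzxyz")(boxes)          # corner points, e.g. shape [1, 8, 3]
boxes_back = ConvertPointsToBoxes()(points)                # back to [1, 6] boxes
```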


Smooth Field
^^^^^^^^^^^^
@@ -1216,6 +1234,12 @@ Utility
:members:
:special-members: __call__

`ApplyTransformToPoints`
""""""""""""""""""""""""
.. autoclass:: ApplyTransformToPoints
:members:
:special-members: __call__

Dictionary Transforms
---------------------

@@ -1642,6 +1666,12 @@ IO (Dict)
:members:
:special-members: __call__

`WriteFileMappingd`
"""""""""""""""""""
.. autoclass:: WriteFileMappingd
:members:
:special-members: __call__

Post-processing (Dict)
^^^^^^^^^^^^^^^^^^^^^^

@@ -1961,6 +1991,18 @@ Spatial (Dict)
:members:
:special-members: __call__

`ConvertBoxToPointsd`
"""""""""""""""""""""
.. autoclass:: ConvertBoxToPointsd
:members:
:special-members: __call__

`ConvertPointsToBoxesd`
"""""""""""""""""""""""
.. autoclass:: ConvertPointsToBoxesd
:members:
:special-members: __call__


Smooth Field (Dict)
^^^^^^^^^^^^^^^^^^^
@@ -2265,6 +2307,12 @@ Utility (Dict)
:members:
:special-members: __call__

`ApplyTransformToPointsd`
"""""""""""""""""""""""""
.. autoclass:: ApplyTransformToPointsd
:members:
:special-members: __call__


MetaTensor
^^^^^^^^^^
2 changes: 1 addition & 1 deletion monai/apps/vista3d/inferer.py
@@ -100,7 +100,7 @@ def point_based_window_inferer(
point_labels=point_labels,
class_vector=class_vector,
prompt_class=prompt_class,
patch_coords=unravel_slice,
patch_coords=[unravel_slice],
prev_mask=prev_mask,
**kwargs,
)
29 changes: 18 additions & 11 deletions monai/apps/vista3d/sampler.py
@@ -20,8 +20,6 @@
import torch
from torch import Tensor

__all__ = ["sample_prompt_pairs"]

ENABLE_SPECIAL = True
SPECIAL_INDEX = (23, 24, 25, 26, 27, 57, 128)
MERGE_LIST = {
@@ -30,6 +28,8 @@
132: [57], # overlap with trachea merge into airway
}

__all__ = ["sample_prompt_pairs"]


def _get_point_label(id: int) -> tuple[int, int]:
if id in SPECIAL_INDEX and ENABLE_SPECIAL:
@@ -66,22 +66,29 @@ def sample_prompt_pairs(
max_backprompt: int, max number of prompt from background.
max_point: maximum number of points for each object.
include_background: if include 0 into training prompt. If included, background 0 is treated
the same as foreground. Always be False for multi-partial-dataset training. If needed,
can be true for finetuning specific dataset, .
the same as foreground and points will be sampled. Can be true only if the user wants to segment
background 0 with point clicks; otherwise it should always be false.
drop_label_prob: probability to drop label prompt.
drop_point_prob: probability to drop point prompt.
point_sampler: sampler to augment masks with supervoxel.
point_sampler_kwargs: arguments for point_sampler.
Returns:
label_prompt: [B, 1]. The classes used for training automatic segmentation.
point: [B, N, 3]. The corresponding points for each class.
Note that background label prompt requires matching point as well ([0,0,0] is used).
point_label: [B, N]. The corresponding point labels for each point (negative or positive).
-1 is used for padding the background label prompt and will be ignored.
prompt_class: [B, 1], exactly the same with label_prompt for label indexing for training loss.
label_prompt can be None, and prompt_class is used to identify point classes.
tuple:
- label_prompt (Tensor | None): Tensor of shape [B, 1] containing the classes used for
training automatic segmentation.
- point (Tensor | None): Tensor of shape [B, N, 3] representing the corresponding points
for each class. Note that background label prompts require matching points as well
(e.g., [0, 0, 0] is used).
- point_label (Tensor | None): Tensor of shape [B, N] representing the corresponding point
labels for each point (negative or positive). -1 is used for padding the background
label prompt and will be ignored.
- prompt_class (Tensor | None): Tensor of shape [B, 1], exactly the same as label_prompt
for label indexing during training. If label_prompt is None, prompt_class is used to
identify point classes.
"""

# class label number
if not labels.shape[0] == 1:
raise ValueError("only support batch size 1")
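
A hedged usage sketch for `sample_prompt_pairs` follows; the `label_set` argument name and the `[1, 1, H, W, D]` label layout are assumptions not visible in this hunk.

```python
# Hedged usage sketch (label_set and the label tensor layout are assumptions).
import torch
from monai.apps.vista3d.sampler import sample_prompt_pairs

labels = torch.randint(0, 3, (1, 1, 64, 64, 64))  # batch size must be 1
label_prompt, point, point_label, prompt_class = sample_prompt_pairs(
    labels, label_set=[1, 2], max_backprompt=1, max_point=5
)
```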
8 changes: 5 additions & 3 deletions monai/bundle/config_parser.py
@@ -20,7 +20,7 @@

from monai.bundle.config_item import ComponentLocator, ConfigComponent, ConfigExpression, ConfigItem
from monai.bundle.reference_resolver import ReferenceResolver
from monai.bundle.utils import ID_REF_KEY, ID_SEP_KEY, MACRO_KEY
from monai.bundle.utils import ID_REF_KEY, ID_SEP_KEY, MACRO_KEY, merge_kv
from monai.config import PathLike
from monai.utils import ensure_tuple, look_up_option, optional_import
from monai.utils.misc import CheckKeyDuplicatesYamlLoader, check_key_duplicates
@@ -423,8 +423,10 @@ def load_config_files(cls, files: PathLike | Sequence[PathLike] | dict, **kwargs
if isinstance(files, str) and not Path(files).is_file() and "," in files:
files = files.split(",")
for i in ensure_tuple(files):
for k, v in (cls.load_config_file(i, **kwargs)).items():
parser[k] = v
config_dict = cls.load_config_file(i, **kwargs)
for k, v in config_dict.items():
merge_kv(parser, k, v)

return parser.get() # type: ignore

@classmethod
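
The new `merge_kv` helper is imported from `monai.bundle.utils`, but its body is not part of this diff; the sketch below is an editorial illustration assuming it implements the `+`-prefix semantics documented in `config_syntax.md` above (the actual implementation may differ).

```python
# Illustrative sketch only -- the real monai.bundle.utils.merge_kv is not shown here.
# "+"-prefixed keys merge into an existing entry (dict.update / list.extend);
# plain keys simply override, mirroring the documented config-merging rules.
def merge_kv(args, k, v):
    if k.startswith("+") and k[1:] in args:
        id_ = k[1:]
        if isinstance(args[id_], dict) and isinstance(v, dict):
            args[id_].update(v)
        elif isinstance(args[id_], list) and isinstance(v, list):
            args[id_].extend(v)
        else:
            raise ValueError(f"config must be dict or list for key `{k}`, but got {type(v)}.")
    else:
        args[k] = v
```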
4 changes: 2 additions & 2 deletions monai/bundle/scripts.py
@@ -32,7 +32,7 @@
from monai.apps.utils import _basename, download_url, extractall, get_logger
from monai.bundle.config_item import ConfigComponent
from monai.bundle.config_parser import ConfigParser
from monai.bundle.utils import DEFAULT_INFERENCE, DEFAULT_METADATA
from monai.bundle.utils import DEFAULT_INFERENCE, DEFAULT_METADATA, merge_kv
from monai.bundle.workflows import BundleWorkflow, ConfigWorkflow
from monai.config import IgniteInfo, PathLike
from monai.data import load_net_with_metadata, save_net_with_metadata
@@ -105,7 +105,7 @@ def update_kwargs(args: str | dict | None = None, ignore_none: bool = True, **kw
if isinstance(v, dict) and isinstance(args_.get(k), dict):
args_[k] = update_kwargs(args_[k], ignore_none, **v)
else:
args_[k] = v
merge_kv(args_, k, v)
return args_


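
With `update_kwargs` now routing keys through `merge_kv`, `+`-prefixed keyword overrides extend existing values instead of replacing them; the sketch below is a hedged illustration of that behaviour (the exact result is an assumption based on this diff).

```python
# Hedged sketch: "+"-prefixed overrides are merged rather than replaced.
from monai.bundle.scripts import update_kwargs

base = {"imports": ["$import torch"]}
merged = update_kwargs(base, **{"+imports": ["$import os"]})
# expected: merged["imports"] == ["$import torch", "$import os"]
```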
