From a9a23b93bfed9331bcedfbe00f9fc448d1db26e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Apr 2024 10:03:59 +0200 Subject: [PATCH 01/43] Bump sqlparse from 0.4.4 to 0.5.0 (#558) Bumps [sqlparse](https://github.com/andialbrecht/sqlparse) from 0.4.4 to 0.5.0. - [Changelog](https://github.com/andialbrecht/sqlparse/blob/master/CHANGELOG) - [Commits](https://github.com/andialbrecht/sqlparse/compare/0.4.4...0.5.0) --- updated-dependencies: - dependency-name: sqlparse dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index 188a4728a..e0971e36c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3408,19 +3408,18 @@ sqlcipher = ["sqlcipher3_binary"] [[package]] name = "sqlparse" -version = "0.4.4" +version = "0.5.0" description = "A non-validating SQL parser." optional = false -python-versions = ">=3.5" +python-versions = ">=3.8" files = [ - {file = "sqlparse-0.4.4-py3-none-any.whl", hash = "sha256:5430a4fe2ac7d0f93e66f1efc6e1338a41884b7ddf2a350cedd20ccc4d9d28f3"}, - {file = "sqlparse-0.4.4.tar.gz", hash = "sha256:d446183e84b8349fa3061f0fe7f06ca94ba65b426946ffebe6e3e8295332420c"}, + {file = "sqlparse-0.5.0-py3-none-any.whl", hash = "sha256:c204494cd97479d0e39f28c93d46c0b2d5959c7b9ab904762ea6c7af211c8663"}, + {file = "sqlparse-0.5.0.tar.gz", hash = "sha256:714d0a4932c059d16189f58ef5411ec2287a4360f17cdd0edd2d09d4c5087c93"}, ] [package.extras] -dev = ["build", "flake8"] +dev = ["build", "hatch"] doc = ["sphinx"] -test = ["pytest", "pytest-cov"] [[package]] name = "sympy" From 36eb46f371616b35921aebfc6362726c586b0754 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 4 May 2024 09:50:19 +0200 Subject: [PATCH 02/43] Bump tqdm from 4.66.1 to 4.66.3 (#569) Bumps [tqdm](https://github.com/tqdm/tqdm) from 4.66.1 to 4.66.3. - [Release notes](https://github.com/tqdm/tqdm/releases) - [Commits](https://github.com/tqdm/tqdm/compare/v4.66.1...v4.66.3) --- updated-dependencies: - dependency-name: tqdm dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index e0971e36c..64d65d07e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3667,13 +3667,13 @@ scipy = ["scipy"] [[package]] name = "tqdm" -version = "4.66.1" +version = "4.66.3" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"}, - {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"}, + {file = "tqdm-4.66.3-py3-none-any.whl", hash = "sha256:4f41d54107ff9a223dca80b53efe4fb654c67efaba7f47bada3ee9d50e05bd53"}, + {file = "tqdm-4.66.3.tar.gz", hash = "sha256:23097a41eba115ba99ecae40d06444c15d1c0c698d527a01c6c8bd1c5d0647e5"}, ] [package.dependencies] From fa7f0f1f21578fc56410b593782f3aacae182a48 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 May 2024 08:51:38 +0200 Subject: [PATCH 03/43] Bump werkzeug from 3.0.1 to 3.0.3 (#570) Bumps [werkzeug](https://github.com/pallets/werkzeug) from 3.0.1 to 3.0.3. - [Release notes](https://github.com/pallets/werkzeug/releases) - [Changelog](https://github.com/pallets/werkzeug/blob/main/CHANGES.rst) - [Commits](https://github.com/pallets/werkzeug/compare/3.0.1...3.0.3) --- updated-dependencies: - dependency-name: werkzeug dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 64d65d07e..80ed6f4c9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3889,13 +3889,13 @@ watchmedo = ["PyYAML (>=3.10)"] [[package]] name = "werkzeug" -version = "3.0.1" +version = "3.0.3" description = "The comprehensive WSGI web application library." optional = false python-versions = ">=3.8" files = [ - {file = "werkzeug-3.0.1-py3-none-any.whl", hash = "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"}, - {file = "werkzeug-3.0.1.tar.gz", hash = "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc"}, + {file = "werkzeug-3.0.3-py3-none-any.whl", hash = "sha256:fc9645dc43e03e4d630d23143a04a7f947a9a3b5727cd535fdfe155a17cc48c8"}, + {file = "werkzeug-3.0.3.tar.gz", hash = "sha256:097e5bfda9f0aba8da6b8545146def481d06aa7d3266e7448e2cccf67dd8bd18"}, ] [package.dependencies] From a05fcd5963fd5135e799b408ff7094ae8d95eac5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 May 2024 08:52:55 +0200 Subject: [PATCH 04/43] Bump jinja2 from 3.1.3 to 3.1.4 (#571) Bumps [jinja2](https://github.com/pallets/jinja) from 3.1.3 to 3.1.4. - [Release notes](https://github.com/pallets/jinja/releases) - [Changelog](https://github.com/pallets/jinja/blob/main/CHANGES.rst) - [Commits](https://github.com/pallets/jinja/compare/3.1.3...3.1.4) --- updated-dependencies: - dependency-name: jinja2 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 80ed6f4c9..04501eadb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1095,13 +1095,13 @@ files = [ [[package]] name = "jinja2" -version = "3.1.3" +version = "3.1.4" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ - {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, - {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, ] [package.dependencies] From b2fc3e6d975a47a9d2897aad4cf8cf8bcb8244bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 May 2024 09:14:52 +0200 Subject: [PATCH 05/43] Bump mlflow from 2.10.1 to 2.12.1 (#575) Bumps [mlflow](https://github.com/mlflow/mlflow) from 2.10.1 to 2.12.1. - [Release notes](https://github.com/mlflow/mlflow/releases) - [Changelog](https://github.com/mlflow/mlflow/blob/master/CHANGELOG.md) - [Commits](https://github.com/mlflow/mlflow/compare/v2.10.1...v2.12.1) --- updated-dependencies: - dependency-name: mlflow dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 138 +++++++++++++++++++++++++++------------------------- 1 file changed, 73 insertions(+), 65 deletions(-) diff --git a/poetry.lock b/poetry.lock index 04501eadb..4b48efdc8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -32,6 +32,20 @@ typing-extensions = ">=4" [package.extras] tz = ["backports.zoneinfo"] +[[package]] +name = "aniso8601" +version = "9.0.1" +description = "A library for parsing ISO 8601 strings." 
+optional = false +python-versions = "*" +files = [ + {file = "aniso8601-9.0.1-py2.py3-none-any.whl", hash = "sha256:1d2b7ef82963909e93c4f24ce48d4de9e66009a21bf1c1e1c85bdd0812fe412f"}, + {file = "aniso8601-9.0.1.tar.gz", hash = "sha256:72e3117667eedf66951bb2d93f4296a56b94b078a8a95905a052611fb3f1b973"}, +] + +[package.extras] +dev = ["black", "coverage", "isort", "pre-commit", "pyenchant", "pylint"] + [[package]] name = "appdirs" version = "1.4.4" @@ -456,26 +470,6 @@ files = [ docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] tests = ["pytest", "pytest-cov", "pytest-xdist"] -[[package]] -name = "databricks-cli" -version = "0.18.0" -description = "A command line interface for Databricks" -optional = false -python-versions = ">=3.7" -files = [ - {file = "databricks-cli-0.18.0.tar.gz", hash = "sha256:87569709eda9af3e9db8047b691e420b5e980c62ef01675575c0d2b9b4211eb7"}, - {file = "databricks_cli-0.18.0-py2.py3-none-any.whl", hash = "sha256:1176a5f42d3e8af4abfc915446fb23abc44513e325c436725f5898cbb9e3384b"}, -] - -[package.dependencies] -click = ">=7.0" -oauthlib = ">=3.1.0" -pyjwt = ">=1.7.0" -requests = ">=2.17.3" -six = ">=1.10.0" -tabulate = ">=0.7.7" -urllib3 = ">=1.26.7,<3" - [[package]] name = "deprecated" version = "1.2.14" @@ -806,6 +800,51 @@ requests-oauthlib = ">=0.7.0" [package.extras] tool = ["click (>=6.0.0)"] +[[package]] +name = "graphene" +version = "3.3" +description = "GraphQL Framework for Python" +optional = false +python-versions = "*" +files = [ + {file = "graphene-3.3-py2.py3-none-any.whl", hash = "sha256:bb3810be33b54cb3e6969506671eb72319e8d7ba0d5ca9c8066472f75bf35a38"}, + {file = "graphene-3.3.tar.gz", hash = "sha256:529bf40c2a698954217d3713c6041d69d3f719ad0080857d7ee31327112446b0"}, +] + +[package.dependencies] +aniso8601 = ">=8,<10" +graphql-core = ">=3.1,<3.3" +graphql-relay = ">=3.1,<3.3" + +[package.extras] +dev = ["black (==22.3.0)", "coveralls (>=3.3,<4)", "flake8 (>=4,<5)", "iso8601 (>=1,<2)", "mock (>=4,<5)", "pytest (>=6,<7)", "pytest-asyncio (>=0.16,<2)", "pytest-benchmark (>=3.4,<4)", "pytest-cov (>=3,<4)", "pytest-mock (>=3,<4)", "pytz (==2022.1)", "snapshottest (>=0.6,<1)"] +test = ["coveralls (>=3.3,<4)", "iso8601 (>=1,<2)", "mock (>=4,<5)", "pytest (>=6,<7)", "pytest-asyncio (>=0.16,<2)", "pytest-benchmark (>=3.4,<4)", "pytest-cov (>=3,<4)", "pytest-mock (>=3,<4)", "pytz (==2022.1)", "snapshottest (>=0.6,<1)"] + +[[package]] +name = "graphql-core" +version = "3.2.3" +description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL." 
+optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "graphql-core-3.2.3.tar.gz", hash = "sha256:06d2aad0ac723e35b1cb47885d3e5c45e956a53bc1b209a9fc5369007fe46676"}, + {file = "graphql_core-3.2.3-py3-none-any.whl", hash = "sha256:5766780452bd5ec8ba133f8bf287dc92713e3868ddd83aee4faab9fc3e303dc3"}, +] + +[[package]] +name = "graphql-relay" +version = "3.2.0" +description = "Relay library for graphql-core" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "graphql-relay-3.2.0.tar.gz", hash = "sha256:1ff1c51298356e481a0be009ccdff249832ce53f30559c1338f22a0e0d17250c"}, + {file = "graphql_relay-3.2.0-py3-none-any.whl", hash = "sha256:c9b22bd28b170ba1fe674c74384a8ff30a76c8e26f88ac3aa1584dd3179953e5"}, +] + +[package.dependencies] +graphql-core = ">=3.2,<3.3" + [[package]] name = "greenlet" version = "3.0.3" @@ -1635,25 +1674,25 @@ files = [ [[package]] name = "mlflow" -version = "2.10.1" -description = "MLflow: A Platform for ML Development and Productionization" +version = "2.12.2" +description = "MLflow is an open source platform for the complete machine learning lifecycle" optional = false python-versions = ">=3.8" files = [ - {file = "mlflow-2.10.1-py3-none-any.whl", hash = "sha256:3dddb8a011ab3671d0c6da806549fdc84d39eb853b1bc29e8b3df50115ba5b6c"}, - {file = "mlflow-2.10.1.tar.gz", hash = "sha256:d534e658a979517f56478fc7f0b1a19451700078a725242e789fe63c87d46815"}, + {file = "mlflow-2.12.2-py3-none-any.whl", hash = "sha256:38dd04710fe64ee8229b7233b4d91db32c3ff887934c40d926246a566c886c0b"}, + {file = "mlflow-2.12.2.tar.gz", hash = "sha256:d712f1af9d44f1eb9e1baee8ca64f7311e185b7572fc3c1e0a83a4c8ceff6aad"}, ] [package.dependencies] alembic = "<1.10.0 || >1.10.0,<2" click = ">=7.0,<9" cloudpickle = "<4" -databricks-cli = ">=0.8.7,<1" docker = ">=4.0.0,<8" entrypoints = "<1" Flask = "<4" -gitpython = ">=2.1.0,<4" -gunicorn = {version = "<22", markers = "platform_system != \"Windows\""} +gitpython = ">=3.1.9,<4" +graphene = "<4" +gunicorn = {version = "<23", markers = "platform_system != \"Windows\""} importlib-metadata = ">=3.7.0,<4.7.0 || >4.7.0,<8" Jinja2 = [ {version = ">=2.11,<4", markers = "platform_system != \"Windows\""}, @@ -1662,11 +1701,11 @@ Jinja2 = [ markdown = ">=3.3,<4" matplotlib = "<4" numpy = "<2" -packaging = "<24" +packaging = "<25" pandas = "<3" protobuf = ">=3.12.0,<5" pyarrow = ">=4.0.0,<16" -pytz = "<2024" +pytz = "<2025" pyyaml = ">=5.1,<7" querystring-parser = "<2" requests = ">=2.17.3,<3" @@ -1674,14 +1713,14 @@ scikit-learn = "<2" scipy = "<2" sqlalchemy = ">=1.4.0,<3" sqlparse = ">=0.4.0,<1" -waitress = {version = "<3", markers = "platform_system == \"Windows\""} +waitress = {version = "<4", markers = "platform_system == \"Windows\""} [package.extras] aliyun-oss = ["aliyunstoreplugin"] -databricks = ["azure-storage-file-datalake (>12)", "boto3 (>1)", "botocore (>1.34)", "google-cloud-storage (>=1.30.0)"] -extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "mlserver (>=1.2.0,!=1.3.1)", "mlserver-mlflow (>=1.2.0,!=1.3.1)", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] -gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] -genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] +databricks = ["azure-storage-file-datalake 
(>12)", "boto3 (>1)", "botocore", "google-cloud-storage (>=1.30.0)"] +extras = ["azureml-core (>=1.2.0)", "boto3", "botocore", "google-cloud-storage (>=1.30.0)", "kubernetes", "mlserver (>=1.2.0,!=1.3.1,<1.4.0)", "mlserver-mlflow (>=1.2.0,!=1.3.1,<1.4.0)", "prometheus-flask-exporter", "pyarrow", "pysftp", "requests-auth-aws-sigv4", "virtualenv"] +gateway = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] +genai = ["aiohttp (<4)", "boto3 (>=1.28.56,<2)", "fastapi (<1)", "pydantic (>=1.0,<3)", "slowapi (>=0.1.9,<1)", "tiktoken (<1)", "uvicorn[standard] (<1)", "watchfiles (<1)"] sqlserver = ["mlflow-dbstore"] xethub = ["mlflow-xethub"] @@ -2373,23 +2412,6 @@ files = [ plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] -[[package]] -name = "pyjwt" -version = "2.8.0" -description = "JSON Web Token implementation in Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"}, - {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"}, -] - -[package.extras] -crypto = ["cryptography (>=3.4.0)"] -dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] -docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] -tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] - [[package]] name = "pymdown-extensions" version = "10.7" @@ -3435,20 +3457,6 @@ files = [ [package.dependencies] mpmath = ">=0.19" -[[package]] -name = "tabulate" -version = "0.9.0" -description = "Pretty-print tabular data" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, - {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, -] - -[package.extras] -widechars = ["wcwidth"] - [[package]] name = "tensorboard" version = "2.14.0" From 495d5b9454becba63fb4ba23b1852cc05f210307 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 May 2024 09:35:45 +0200 Subject: [PATCH 06/43] Bump gunicorn from 21.2.0 to 22.0.0 (#576) Bumps [gunicorn](https://github.com/benoitc/gunicorn) from 21.2.0 to 22.0.0. - [Release notes](https://github.com/benoitc/gunicorn/releases) - [Commits](https://github.com/benoitc/gunicorn/compare/21.2.0...22.0.0) --- updated-dependencies: - dependency-name: gunicorn dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/poetry.lock b/poetry.lock index 4b48efdc8..2f2b3b9b9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -984,22 +984,23 @@ protobuf = ["grpcio-tools (>=1.60.1)"] [[package]] name = "gunicorn" -version = "21.2.0" +version = "22.0.0" description = "WSGI HTTP Server for UNIX" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" files = [ - {file = "gunicorn-21.2.0-py3-none-any.whl", hash = "sha256:3213aa5e8c24949e792bcacfc176fef362e7aac80b76c56f6b5122bf350722f0"}, - {file = "gunicorn-21.2.0.tar.gz", hash = "sha256:88ec8bff1d634f98e61b9f65bc4bf3cd918a90806c6f5c48bc5603849ec81033"}, + {file = "gunicorn-22.0.0-py3-none-any.whl", hash = "sha256:350679f91b24062c86e386e198a15438d53a7a8207235a78ba1b53df4c4378d9"}, + {file = "gunicorn-22.0.0.tar.gz", hash = "sha256:4a0b436239ff76fb33f11c07a16482c521a7e09c1ce3cc293c2330afe01bec63"}, ] [package.dependencies] packaging = "*" [package.extras] -eventlet = ["eventlet (>=0.24.1)"] +eventlet = ["eventlet (>=0.24.1,!=0.36.0)"] gevent = ["gevent (>=1.4.0)"] setproctitle = ["setproctitle"] +testing = ["coverage", "eventlet", "gevent", "pytest", "pytest-cov"] tornado = ["tornado (>=0.2)"] [[package]] From bdd102a6e42197a5a416625225798f47bf8314b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 09:14:39 +0200 Subject: [PATCH 07/43] Bump requests from 2.31.0 to 2.32.0 (#578) updated-dependencies: - dependency-name: requests dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2f2b3b9b9..7ff18c8fc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2909,13 +2909,13 @@ files = [ [[package]] name = "requests" -version = "2.31.0" +version = "2.32.0" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.0-py3-none-any.whl", hash = "sha256:f2c3881dddb70d056c5bd7600a4fae312b2a300e39be6a118d30b90bd27262b5"}, + {file = "requests-2.32.0.tar.gz", hash = "sha256:fa5490319474c82ef1d2c9bc459d3652e3ae4ef4c4ebdd18a21145a47ca4b6b8"}, ] [package.dependencies] From beccd4cddccee82ea305537489323e1df8237d82 Mon Sep 17 00:00:00 2001 From: Gensollen Date: Wed, 22 May 2024 15:35:13 +0200 Subject: [PATCH 08/43] [CI] Run tests through GitHub Actions (#573) * try a simple workflow first * try running on new ubuntu VM * fixes * bump poetry version to 1.8.3 * try removing caching.. 
* add workflow for testing tsv tools --- .github/workflows/test_cli.yml | 46 +++++++++++++++++++++++++++ .github/workflows/test_tsvtools.yml | 48 +++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 .github/workflows/test_cli.yml create mode 100644 .github/workflows/test_tsvtools.yml diff --git a/.github/workflows/test_cli.yml b/.github/workflows/test_cli.yml new file mode 100644 index 000000000..d8309b2e1 --- /dev/null +++ b/.github/workflows/test_cli.yml @@ -0,0 +1,46 @@ +name: CLI Tests + +on: + push: + branches: ["dev"] + pull_request: + branches: ["dev"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-cli: + runs-on: + - self-hosted + - Linux + - ubuntu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run CLI tests + run: | + make env.conda + source /builds/miniconda3/etc/profile.d/conda.sh + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_cli_report.xml \ + --disable-warnings \ + --verbose \ + test_cli.py diff --git a/.github/workflows/test_tsvtools.yml b/.github/workflows/test_tsvtools.yml new file mode 100644 index 000000000..bddbb80d2 --- /dev/null +++ b/.github/workflows/test_tsvtools.yml @@ -0,0 +1,48 @@ +name: TSV Tools Tests + +on: + push: + branches: ["dev"] + pull_request: + branches: ["dev"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-tsvtools: + runs-on: + - self-hosted + - Linux + - ubuntu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for TSV tools + run: | + make env.conda + source /builds/miniconda3/etc/profile.d/conda.sh + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_tsvtools_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/tmp \ + --input_data_directory=/mnt/data/data_ci \ + test_tsvtools.py From 2861e9d8da889f7546be9776e1a496bb8cd83e61 Mon Sep 17 00:00:00 2001 From: Gensollen Date: Thu, 23 May 2024 14:22:24 +0200 Subject: [PATCH 09/43] [CI] Skip tests when PR is in draft mode (#592) * try skipping test_tsvtools when PR is in draft mode * trigger CI * add a cpu tag to avoid running cpu tests on gpu machines * run also on refactoring branch --- .github/workflows/test_cli.yml | 6 ++++-- .github/workflows/test_tsvtools.yml | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test_cli.yml b/.github/workflows/test_cli.yml index d8309b2e1..750f1cd00 100644 --- a/.github/workflows/test_cli.yml +++ b/.github/workflows/test_cli.yml @@ -2,9 +2,9 @@ name: CLI Tests on: push: - branches: ["dev"] + branches: ["dev", "refactoring"] pull_request: - branches: ["dev"] + branches: ["dev", "refactoring"] 
permissions: contents: read @@ -19,10 +19,12 @@ env: jobs: test-cli: + if: github.event.pull_request.draft == false runs-on: - self-hosted - Linux - ubuntu + - cpu steps: - uses: actions/checkout@v4 - uses: snok/install-poetry@v1 diff --git a/.github/workflows/test_tsvtools.yml b/.github/workflows/test_tsvtools.yml index bddbb80d2..5a8c7896a 100644 --- a/.github/workflows/test_tsvtools.yml +++ b/.github/workflows/test_tsvtools.yml @@ -2,9 +2,9 @@ name: TSV Tools Tests on: push: - branches: ["dev"] + branches: ["dev", "refactoring"] pull_request: - branches: ["dev"] + branches: ["dev", "refactoring"] permissions: contents: read @@ -19,10 +19,12 @@ env: jobs: test-tsvtools: + if: github.event.pull_request.draft == false runs-on: - self-hosted - Linux - ubuntu + - cpu steps: - uses: actions/checkout@v4 - uses: snok/install-poetry@v1 From f5de25105e2db3e87619b7782eb6873a0066c3c6 Mon Sep 17 00:00:00 2001 From: Gensollen Date: Thu, 23 May 2024 14:28:57 +0200 Subject: [PATCH 10/43] [CI] Test train workflow on GPU machine (#590) * add test workflow on GPU for train * fix conda path * fix conflicting workdir * only run on non-draft PRs * run also on refactoring branch --- .github/workflows/test_train.yml | 53 ++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 .github/workflows/test_train.yml diff --git a/.github/workflows/test_train.yml b/.github/workflows/test_train.yml new file mode 100644 index 000000000..a65a92a56 --- /dev/null +++ b/.github/workflows/test_train.yml @@ -0,0 +1,53 @@ +name: Train Tests (GPU) + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-train-gpu: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - gpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for Train on GPU + run: | + make env.conda + source "${HOME}/miniconda3/etc/profile.d/conda.sh" + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_train_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/actions_runner_workdir/train \ + --input_data_directory=/mnt/data/clinicadl_data_ci/data_ci \ + -k test_train + - name: Cleaning + run: | + rm -rf $HOME/actions_runner_workdir/train/* From 69b3538d5397c94e0c3b7e306648ca1dd0720b7a Mon Sep 17 00:00:00 2001 From: Gensollen Date: Thu, 23 May 2024 15:51:54 +0200 Subject: [PATCH 11/43] [CI] Port remaining GPU tests to GitHub Actions (#593) * add workflow for testing interpretation task * add workflow for testing random search task * add workflow for testing resume task * add workflow for testing transfer learning task * trigger CI * trigger CI --- .github/workflows/test_interpret.yml | 53 ++++++++++++++++++++ .github/workflows/test_random_search.yml | 53 ++++++++++++++++++++ .github/workflows/test_resume.yml | 53 ++++++++++++++++++++ .github/workflows/test_transfer_learning.yml | 53 ++++++++++++++++++++ 4 files changed, 212 insertions(+) create mode 100644 .github/workflows/test_interpret.yml 
create mode 100644 .github/workflows/test_random_search.yml create mode 100644 .github/workflows/test_resume.yml create mode 100644 .github/workflows/test_transfer_learning.yml diff --git a/.github/workflows/test_interpret.yml b/.github/workflows/test_interpret.yml new file mode 100644 index 000000000..0163bf583 --- /dev/null +++ b/.github/workflows/test_interpret.yml @@ -0,0 +1,53 @@ +name: Interpretation Tests (GPU) + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-interpret-gpu: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - gpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for Interpret task on GPU + run: | + make env.conda + source "${HOME}/miniconda3/etc/profile.d/conda.sh" + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_interpret_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/actions_runner_workdir/interpret \ + --input_data_directory=/mnt/data/clinicadl_data_ci/data_ci \ + test_interpret.py + - name: Cleaning + run: | + rm -rf $HOME/actions_runner_workdir/interpret/* diff --git a/.github/workflows/test_random_search.yml b/.github/workflows/test_random_search.yml new file mode 100644 index 000000000..529f1fda1 --- /dev/null +++ b/.github/workflows/test_random_search.yml @@ -0,0 +1,53 @@ +name: Random Search Tests (GPU) + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-random-search-gpu: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - gpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run Random Search tests on GPU + run: | + make env.conda + source "${HOME}/miniconda3/etc/profile.d/conda.sh" + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_random_search_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/actions_runner_workdir/random_search \ + --input_data_directory=/mnt/data/clinicadl_data_ci/data_ci \ + test_random_search.py + - name: Cleaning + run: | + rm -rf $HOME/actions_runner_workdir/random_search/* diff --git a/.github/workflows/test_resume.yml b/.github/workflows/test_resume.yml new file mode 100644 index 000000000..b789a21f6 --- /dev/null +++ b/.github/workflows/test_resume.yml @@ -0,0 +1,53 @@ +name: Resume Tests (GPU) + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: 
read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-resume-gpu: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - gpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run resume tests on GPU + run: | + make env.conda + source "${HOME}/miniconda3/etc/profile.d/conda.sh" + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_resume_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/actions_runner_workdir/resume \ + --input_data_directory=/mnt/data/clinicadl_data_ci/data_ci \ + test_resume.py + - name: Cleaning + run: | + rm -rf $HOME/actions_runner_workdir/resume/* diff --git a/.github/workflows/test_transfer_learning.yml b/.github/workflows/test_transfer_learning.yml new file mode 100644 index 000000000..61238d4e1 --- /dev/null +++ b/.github/workflows/test_transfer_learning.yml @@ -0,0 +1,53 @@ +name: Transfer Learning Tests (GPU) + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-transfer-learning-gpu: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - gpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for Transfer Learning on GPU + run: | + make env.conda + source "${HOME}/miniconda3/etc/profile.d/conda.sh" + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_transfer_learning_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/actions_runner_workdir/transfer_learning \ + --input_data_directory=/mnt/data/clinicadl_data_ci/data_ci \ + test_transfer_learning.py + - name: Cleaning + run: | + rm -rf $HOME/actions_runner_workdir/transfer_learning/* From c9d9252ae4436a7a17d8812fdea97f2b01e0c0cb Mon Sep 17 00:00:00 2001 From: Gensollen Date: Fri, 24 May 2024 09:43:01 +0200 Subject: [PATCH 12/43] [CI] Remove GPU pipeline from Jenkinsfile (#594) --- .jenkins/Jenkinsfile | 207 ------------------------------------------- 1 file changed, 207 deletions(-) diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile index f7bd3dafb..033182681 100644 --- a/.jenkins/Jenkinsfile +++ b/.jenkins/Jenkinsfile @@ -252,214 +252,7 @@ pipeline { } } } - stage('GPU') { - agent { - label 'gpu' - } - environment { - CONDA_HOME = "$HOME/miniconda3" - CONDA_ENV = "$WORKSPACE/env" - PATH = "$HOME/.local/bin:$PATH" - TMP_DIR = "$HOME/tmp" - INPUT_DATA_DIR = '/mnt/data/clinicadl_data_ci/data_ci' - } - stages { - stage('Build Env') { - steps { - echo 'Installing clinicadl sources in Linux...' 
- echo "My branch name is ${BRANCH_NAME}" - sh "echo 'My branch name is ${BRANCH_NAME}'" - sh 'printenv' - sh "echo 'Agent name: ${NODE_NAME}'" - sh '''#!/usr/bin/env bash - source "${CONDA_HOME}/etc/profile.d/conda.sh" - make env.conda - conda activate "${CONDA_ENV}" - conda info - echo "Install clinicadl using poetry..." - cd $WORKSPACE - make env - # Show clinicadl help message - echo "Display clinicadl help message" - clinicadl --help - conda deactivate - ''' - } - } - stage('Train tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing train task...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh '''#!/usr/bin/env bash - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - clinicadl --help - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_train_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - -k "test_train" - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_train_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Transfer learning tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing transfer learning...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh '''#!/usr/bin/env bash - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - clinicadl --help - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_transfer_learning_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_transfer_learning.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_transfer_learning_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Resume tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing resume...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh '''#!/usr/bin/env bash - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - clinicadl --help - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_resume_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_resume.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_resume_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Interpretation tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing interpret task...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh '''#!/usr/bin/env bash - set +x - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - clinicadl --help - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_interpret_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_interpret.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_interpret_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Random search tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing random search...' 
- sh "echo 'Agent name: ${NODE_NAME}'" - sh '''#!/usr/bin/env bash - set +x - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - clinicadl --help - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_random_search_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_random_search.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_random_search_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - } - post { - // Clean after build - cleanup { - cleanWs(deleteDirs: true, - notFailBuild: true, - patterns: [[pattern: 'env', type: 'INCLUDE']]) - } - } - } } } } -// post { -// failure { -// mail to: 'clinicadl-ci@inria.fr', -// subject: "Failed Pipeline: ${currentBuild.fullDisplayName}", -// body: "Something is wrong with ${env.BUILD_URL}" -// mattermostSend( -// color: "#FF0000", -// message: "ClinicaDL Build FAILED: ${env.JOB_NAME} #${env.BUILD_NUMBER} (<${env.BUILD_URL}|Link to build>)" -// ) -// } -// } } From 753f04e49e266ec3767cd91bcefda18370718fec Mon Sep 17 00:00:00 2001 From: Gensollen Date: Fri, 24 May 2024 12:06:06 +0200 Subject: [PATCH 13/43] [CI] Port remaining non GPU tests to GitHub Actions (#581) * add cleaning step to test_tsvtools pipeline * add test_generate pipeline * add test_predict pipeline * add test_prepare_data pipeline * add test_quality_checks pipeline * add refactoring target branch, cpu tag, and draft PR filter * trigger CI --- .github/workflows/test_generate.yml | 53 +++++++++++++++++++++++ .github/workflows/test_predict.yml | 53 +++++++++++++++++++++++ .github/workflows/test_prepare_data.yml | 53 +++++++++++++++++++++++ .github/workflows/test_quality_checks.yml | 53 +++++++++++++++++++++++ .github/workflows/test_tsvtools.yml | 5 ++- 5 files changed, 216 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/test_generate.yml create mode 100644 .github/workflows/test_predict.yml create mode 100644 .github/workflows/test_prepare_data.yml create mode 100644 .github/workflows/test_quality_checks.yml diff --git a/.github/workflows/test_generate.yml b/.github/workflows/test_generate.yml new file mode 100644 index 000000000..51ac863b2 --- /dev/null +++ b/.github/workflows/test_generate.yml @@ -0,0 +1,53 @@ +name: Generate Tests + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-generate: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - cpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for generate task + run: | + make env.conda + source /builds/miniconda3/etc/profile.d/conda.sh + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_generate_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/tmp/generate \ + --input_data_directory=/mnt/data/data_ci \ + test_generate.py + - name: Cleaning + run: | + rm -rf $HOME/tmp/generate diff 
--git a/.github/workflows/test_predict.yml b/.github/workflows/test_predict.yml new file mode 100644 index 000000000..8ec5976e4 --- /dev/null +++ b/.github/workflows/test_predict.yml @@ -0,0 +1,53 @@ +name: Predict Tests + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-predict: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - cpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for predict task + run: | + make env.conda + source /builds/miniconda3/etc/profile.d/conda.sh + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_predict_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/tmp/predict \ + --input_data_directory=/mnt/data/data_ci \ + test_predict.py + - name: Cleaning + run: | + rm -rf $HOME/tmp/predict/* diff --git a/.github/workflows/test_prepare_data.yml b/.github/workflows/test_prepare_data.yml new file mode 100644 index 000000000..8dccd217f --- /dev/null +++ b/.github/workflows/test_prepare_data.yml @@ -0,0 +1,53 @@ +name: Prepare data Tests + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-prepare-data: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - Linux + - ubuntu + - cpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for prepare data task + run: | + make env.conda + source /builds/miniconda3/etc/profile.d/conda.sh + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_prepare_data_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/tmp/prepare_data \ + --input_data_directory=/mnt/data/data_ci \ + test_prepare_data.py + - name: Cleaning + run: | + rm -rf $HOME/tmp/prepare_data/* diff --git a/.github/workflows/test_quality_checks.yml b/.github/workflows/test_quality_checks.yml new file mode 100644 index 000000000..1cf0414e2 --- /dev/null +++ b/.github/workflows/test_quality_checks.yml @@ -0,0 +1,53 @@ +name: Quality Check Tests + +on: + push: + branches: ["dev", "refactoring"] + pull_request: + branches: ["dev", "refactoring"] + +permissions: + contents: read + +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +env: + POETRY_VERSION: '1.8.3' + PYTHON_VERSION: '3.11' + +jobs: + test-quality-check: + if: github.event.pull_request.draft == false + runs-on: + - self-hosted + - 
Linux + - ubuntu + - cpu + steps: + - uses: actions/checkout@v4 + - uses: snok/install-poetry@v1 + with: + version: ${{ env.POETRY_VERSION }} + virtualenvs-create: false + - uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Run tests for Quality Check + run: | + make env.conda + source /builds/miniconda3/etc/profile.d/conda.sh + conda activate "${{ github.workspace }}"/env + make install + cd tests + poetry run pytest --verbose \ + --junitxml=./test-reports/test_quality_check_report.xml \ + --disable-warnings \ + --verbose \ + --basetemp=$HOME/tmp/quality_checks \ + --input_data_directory=/mnt/data/data_ci \ + test_qc.py + - name: Cleaning + run: | + rm -rf $HOME/tmp/quality_checks/* diff --git a/.github/workflows/test_tsvtools.yml b/.github/workflows/test_tsvtools.yml index 5a8c7896a..811c6d4f4 100644 --- a/.github/workflows/test_tsvtools.yml +++ b/.github/workflows/test_tsvtools.yml @@ -45,6 +45,9 @@ jobs: --junitxml=./test-reports/test_tsvtools_report.xml \ --disable-warnings \ --verbose \ - --basetemp=$HOME/tmp \ + --basetemp=$HOME/tmp/tsv_tools \ --input_data_directory=/mnt/data/data_ci \ test_tsvtools.py + - name: Cleaning + run: | + rm -rf $HOME/tmp/tsv_tools/* From c424d77f2273966d89571f5c9a0da08fffc5dff4 Mon Sep 17 00:00:00 2001 From: Gensollen Date: Fri, 24 May 2024 13:07:56 +0200 Subject: [PATCH 14/43] [CI] Remove jenkins related things (#595) --- .jenkins/Jenkinsfile | 258 ---------------------------- .jenkins/scripts/find_env.sh | 39 ----- .jenkins/scripts/generate_wheels.sh | 31 ---- 3 files changed, 328 deletions(-) delete mode 100644 .jenkins/Jenkinsfile delete mode 100755 .jenkins/scripts/find_env.sh delete mode 100755 .jenkins/scripts/generate_wheels.sh diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile deleted file mode 100644 index 033182681..000000000 --- a/.jenkins/Jenkinsfile +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/env groovy - -// Continuous Integration script for clinicadl -// Author: mauricio.diaz@inria.fr - -pipeline { - options { - timeout(time: 1, unit: 'HOURS') - disableConcurrentBuilds(abortPrevious: true) - } - agent none - stages { - stage('Functional tests') { - failFast false - parallel { - stage('No GPU') { - agent { - label 'cpu' - } - environment { - CONDA_HOME = "$HOME/miniconda" - CONDA_ENV = "$WORKSPACE/env" - PATH = "$HOME/.local/bin:$PATH" - TMP_DIR = "$HOME/tmp" - INPUT_DATA_DIR = '/mnt/data/clinicadl_data_ci/data_ci' - } - stages { - stage('Build Env') { - steps { - echo 'Installing clinicadl sources in Linux...' - echo "My branch name is ${BRANCH_NAME}" - sh "echo 'My branch name is ${BRANCH_NAME}'" - sh 'printenv' - sh "echo 'Agent name: ${NODE_NAME}'" - sh ''' - set +x - source "${CONDA_HOME}/etc/profile.d/conda.sh" - make env.conda - conda activate "${CONDA_ENV}" - conda info - echo "Install clinicadl using poetry..." - cd $WORKSPACE - make env - # Show clinicadl help message - echo "Display clinicadl help message" - clinicadl --help - conda deactivate - ''' - } - } - stage('CLI tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing pipeline instantiation...' 
- sh 'echo "Agent name: ${NODE_NAME}"' - sh ''' - set +x - echo $WORKSPACE - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - conda list - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_cli_report.xml \ - --verbose \ - --disable-warnings \ - test_cli.py - conda deactivate - ''' - } - } - } - stage('tsvtools tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing tsvtool tasks...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh ''' - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_tsvtool_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_tsvtools.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_tsvtool_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Quality check tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing quality check tasks...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh ''' - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_quality_check_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_qc.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_quality_check_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Generate tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing generate task...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh ''' - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_generate_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_generate.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_generate_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Prepare data tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing prepare_data task...' - sh "echo 'Agent name: ${NODE_NAME}'" - sh ''' - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_prepare_data_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_prepare_data.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_prepare_data_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - stage('Predict tests Linux') { - steps { - catchError(buildResult: 'FAILURE', stageResult: 'UNSTABLE') { - echo 'Testing predict...' 
- sh "echo 'Agent name: ${NODE_NAME}'" - sh ''' - source "${CONDA_HOME}/etc/profile.d/conda.sh" - conda activate "${CONDA_ENV}" - cd $WORKSPACE/tests - poetry run pytest \ - --junitxml=./test-reports/test_predict_report.xml \ - --verbose \ - --disable-warnings \ - --basetemp=$TMP_DIR \ - --input_data_directory=$INPUT_DATA_DIR \ - test_predict.py - conda deactivate - ''' - } - } - post { - always { - junit 'tests/test-reports/test_predict_report.xml' - } - success { - sh 'rm -rf ${TMP_DIR}/*' - } - } - } - // stage('Meta-maps analysis') { - // environment { - // PATH = "$HOME/miniconda3/bin:$HOME/miniconda/bin:$PATH" - // } - // steps { - // echo 'Testing maps-analysis task...' - // sh 'echo "Agent name: ${NODE_NAME}"' - // sh '''#!/usr/bin/env bash - // set +x - // eval "$(conda shell.bash hook)" - // conda activate "${WORKSPACE}/env" - // cd $WORKSPACE/tests - // pytest \ - // --junitxml=./test-reports/test_meta-analysis_report.xml \ - // --verbose \ - // --disable-warnings \ - // test_meta_maps.py - // conda deactivate - // ''' - // } - // post { - // always { - // junit 'tests/test-reports/test_meta-analysis_report.xml' - // sh 'rm -rf $WORKSPACE/tests/data/dataset' - // } - // } - // } - } - post { - // Clean after build - cleanup { - cleanWs(deleteDirs: true, - notFailBuild: true, - patterns: [[pattern: 'env', type: 'INCLUDE']]) - } - } - } - } - } - } -} diff --git a/.jenkins/scripts/find_env.sh b/.jenkins/scripts/find_env.sh deleted file mode 100755 index a68fff821..000000000 --- a/.jenkins/scripts/find_env.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -# A shell script to launch clinica in CI machines - -# Name of the Conda environment according to the branch -CLINICA_ENV_BRANCH="clinicadl_test" - -set -e -set +x - -ENV_EXISTS=0 -# Verify that the conda environment corresponding to the branch exists, otherwise -# create it. -ENVS=$(conda env list | awk '{print $1}' ) -echo $ENVS - -for ENV in $ENVS -do - if [[ "$ENV " == *"$CLINICA_ENV_BRANCH "* ]] - then - echo "Find Conda environment named $ENV, continue." - conda activate $CLINICA_ENV_BRANCH - cd $WORKSPACE/ - poetry install - conda deactivate - ENV_EXISTS=1 - break - fi; -done -if [ "$ENV_EXISTS" = 0 ]; then - echo "Conda env $CLINICA_ENV_BRANCH not found... Creating" - conda create -y -f environment.yml - echo "Conda env $CLINICA_ENV_BRANCH was created." - conda activate $CLINICA_ENV_BRANCH - cd $WORKSPACE/ - poetry install - echo "ClinicaDL has been installed in $CLINICA_ENV_BRANCH." - conda deactivate - cd $WORKSPACE -fi diff --git a/.jenkins/scripts/generate_wheels.sh b/.jenkins/scripts/generate_wheels.sh deleted file mode 100755 index 326d55074..000000000 --- a/.jenkins/scripts/generate_wheels.sh +++ /dev/null @@ -1,31 +0,0 @@ -#! /bin/sh - -#--------------------------------------# -# ClinicaDL package creations ( wheel) -#--------------------------------------# -# -# WARNING: Activate a conda environment with the right pip version. -# Use at your own risk. 
- - -CURRENT_DIR=$(pwd) -echo $CURRENT_DIR - -# ensure we are in the right dir -SCRIPT_DIR=`(dirname $0)` -cd "$SCRIPT_DIR" -echo "Entering ${SCRIPT_DIR}/../../" -cd "${SCRIPT_DIR}/../../" -ls - -# clean pycache stuff -rm -rf dist build clinicadl.egg-info/ -find -name "*__pycache__*" -exec rm {} \-rf \; -find -name "*.pyc*" -exec rm {} \-rf \; - -set -o errexit -set -e -# generate wheel -poetry build -# come back to directory of -cd $CURRENT_DIR From 4281c73a96a9b6188059e1285421202bc5f979e1 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 28 May 2024 10:42:45 +0200 Subject: [PATCH 15/43] add simulate-gpu option --- tests/conftest.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 7251b5b8f..3a603f44f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -14,10 +14,19 @@ def pytest_addoption(parser): action="store", help="Directory for (only-read) inputs for tests", ) + parser.addoption( + "--simulate-gpu", + action="store_true", + help="""To simulate the presence of a gpu on a cpu-only device. Default is False. + To use carefully, only to run tests locally. Should not be used in final CI tests. + Concretely, the tests won't fail if gpu option if false in the output MAPS whereas + it should be true.""", + ) @pytest.fixture def cmdopt(request): config_param = {} config_param["input"] = request.config.getoption("--input_data_directory") + config_param["simulate gpu"] = request.config.getoption("--simulate-gpu") return config_param From 52d7561f8b6aab078d6ef2c33a6a9b97ad3e852f Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Thu, 30 May 2024 11:34:05 +0200 Subject: [PATCH 16/43] Add flags to run CI tests locally (#596) --- tests/conftest.py | 19 +++++++++++++++++++ tests/test_train_ae.py | 14 ++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 7251b5b8f..e5a4a7302 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -14,10 +14,29 @@ def pytest_addoption(parser): action="store", help="Directory for (only-read) inputs for tests", ) + parser.addoption( + "--no-gpu", + action="store_true", + help="""To run tests on cpu. Default is False. + To use carefully, only to run tests locally. Should not be used in final CI tests. + Concretely, the tests won't fail if gpu option is false in the output MAPS whereas + it is true in the reference MAPS.""", + ) + parser.addoption( + "--adapt-base-dir", + action="store_true", + help="""To virtually change the base directory in the paths stored in the MAPS of the CI data. + Default is False. + To use carefully, only to run tests locally. Should not be used in final CI tests. 
+ Concretely, the tests won't fail if only the base directories differ in the paths stored + in the output and reference MAPS.""", + ) @pytest.fixture def cmdopt(request): config_param = {} config_param["input"] = request.config.getoption("--input_data_directory") + config_param["no-gpu"] = request.config.getoption("--no-gpu") + config_param["adapt-base-dir"] = request.config.getoption("--adapt-base-dir") return config_param diff --git a/tests/test_train_ae.py b/tests/test_train_ae.py index ab9c057ff..311e145d0 100644 --- a/tests/test_train_ae.py +++ b/tests/test_train_ae.py @@ -88,6 +88,9 @@ def test_train_ae(cmdopt, tmp_path, test_name): else: raise NotImplementedError(f"Test {test_name} is not implemented.") + if cmdopt["no-gpu"]: + test_input.append("--no-gpu") + if tmp_out_dir.is_dir(): shutil.rmtree(tmp_out_dir) @@ -101,6 +104,17 @@ def test_train_ae(cmdopt, tmp_path, test_name): if test_name == "patch_multi_ae": json_data_out["multi_network"] = True + if cmdopt["no-gpu"]: + json_data_ref["gpu"] = False + if cmdopt["adapt-base-dir"]: + base_dir = base_dir.resolve() + ref_base_dir = Path(json_data_ref["caps_directory"]).parents[2] + json_data_ref["caps_directory"] = str( + base_dir / Path(json_data_ref["caps_directory"]).relative_to(ref_base_dir) + ) + json_data_ref["tsv_path"] = str( + base_dir / Path(json_data_ref["tsv_path"]).relative_to(ref_base_dir) + ) assert json_data_out == json_data_ref # ["mode"] == mode assert compare_folders( From 39d22fddbdf613ea1b793e6593d8b84d4440db0a Mon Sep 17 00:00:00 2001 From: Gensollen Date: Thu, 30 May 2024 13:43:39 +0200 Subject: [PATCH 17/43] [CI] Remove duplicated verbose flag in test pipelines (#598) --- .github/workflows/test_cli.yml | 2 +- .github/workflows/test_generate.yml | 2 +- .github/workflows/test_interpret.yml | 2 +- .github/workflows/test_predict.yml | 2 +- .github/workflows/test_prepare_data.yml | 2 +- .github/workflows/test_quality_checks.yml | 2 +- .github/workflows/test_random_search.yml | 2 +- .github/workflows/test_resume.yml | 2 +- .github/workflows/test_train.yml | 2 +- .github/workflows/test_transfer_learning.yml | 2 +- .github/workflows/test_tsvtools.yml | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/test_cli.yml b/.github/workflows/test_cli.yml index 750f1cd00..802511c9d 100644 --- a/.github/workflows/test_cli.yml +++ b/.github/workflows/test_cli.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_cli_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_generate.yml b/.github/workflows/test_generate.yml index 51ac863b2..e0149760f 100644 --- a/.github/workflows/test_generate.yml +++ b/.github/workflows/test_generate.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_generate_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_interpret.yml b/.github/workflows/test_interpret.yml index 0163bf583..7cfd9d56b 100644 --- a/.github/workflows/test_interpret.yml +++ b/.github/workflows/test_interpret.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_interpret_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_predict.yml 
b/.github/workflows/test_predict.yml index 8ec5976e4..f12740dac 100644 --- a/.github/workflows/test_predict.yml +++ b/.github/workflows/test_predict.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_predict_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_prepare_data.yml b/.github/workflows/test_prepare_data.yml index 8dccd217f..8209b9fa2 100644 --- a/.github/workflows/test_prepare_data.yml +++ b/.github/workflows/test_prepare_data.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_prepare_data_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_quality_checks.yml b/.github/workflows/test_quality_checks.yml index 1cf0414e2..701460266 100644 --- a/.github/workflows/test_quality_checks.yml +++ b/.github/workflows/test_quality_checks.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_quality_check_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_random_search.yml b/.github/workflows/test_random_search.yml index 529f1fda1..314afc353 100644 --- a/.github/workflows/test_random_search.yml +++ b/.github/workflows/test_random_search.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_random_search_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_resume.yml b/.github/workflows/test_resume.yml index b789a21f6..78a229913 100644 --- a/.github/workflows/test_resume.yml +++ b/.github/workflows/test_resume.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_resume_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_train.yml b/.github/workflows/test_train.yml index a65a92a56..599725225 100644 --- a/.github/workflows/test_train.yml +++ b/.github/workflows/test_train.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_train_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_transfer_learning.yml b/.github/workflows/test_transfer_learning.yml index 61238d4e1..4664a97a3 100644 --- a/.github/workflows/test_transfer_learning.yml +++ b/.github/workflows/test_transfer_learning.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_transfer_learning_report.xml \ --disable-warnings \ --verbose \ diff --git a/.github/workflows/test_tsvtools.yml b/.github/workflows/test_tsvtools.yml index 811c6d4f4..9f3bfeb02 100644 --- a/.github/workflows/test_tsvtools.yml +++ b/.github/workflows/test_tsvtools.yml @@ -41,7 +41,7 @@ jobs: conda activate "${{ github.workspace }}"/env make install cd tests - poetry run pytest --verbose \ + poetry run pytest \ --junitxml=./test-reports/test_tsvtools_report.xml \ --disable-warnings \ --verbose \ From 
571662c4598101fd969158b1d337ff5046974a1d Mon Sep 17 00:00:00 2001 From: Gensollen Date: Thu, 30 May 2024 15:54:16 +0200 Subject: [PATCH 18/43] [DOC] Update the Python version used for creating the conda environment in README (#600) * update python version used for creating conda env in README * investigate * fix --- .github/workflows/test_cli.yml | 2 +- .github/workflows/test_generate.yml | 2 +- .github/workflows/test_interpret.yml | 2 +- .github/workflows/test_predict.yml | 2 +- .github/workflows/test_prepare_data.yml | 2 +- .github/workflows/test_quality_checks.yml | 2 +- .github/workflows/test_random_search.yml | 2 +- .github/workflows/test_resume.yml | 2 +- .github/workflows/test_train.yml | 2 +- .github/workflows/test_transfer_learning.yml | 2 +- .github/workflows/test_tsvtools.yml | 2 +- README.md | 19 +++++++++---------- 12 files changed, 20 insertions(+), 21 deletions(-) diff --git a/.github/workflows/test_cli.yml b/.github/workflows/test_cli.yml index 802511c9d..4efd2b024 100644 --- a/.github/workflows/test_cli.yml +++ b/.github/workflows/test_cli.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run CLI tests run: | - make env.conda source /builds/miniconda3/etc/profile.d/conda.sh + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_generate.yml b/.github/workflows/test_generate.yml index e0149760f..8fc54c025 100644 --- a/.github/workflows/test_generate.yml +++ b/.github/workflows/test_generate.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for generate task run: | - make env.conda source /builds/miniconda3/etc/profile.d/conda.sh + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_interpret.yml b/.github/workflows/test_interpret.yml index 7cfd9d56b..13db354a9 100644 --- a/.github/workflows/test_interpret.yml +++ b/.github/workflows/test_interpret.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for Interpret task on GPU run: | - make env.conda source "${HOME}/miniconda3/etc/profile.d/conda.sh" + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_predict.yml b/.github/workflows/test_predict.yml index f12740dac..e6d790b2a 100644 --- a/.github/workflows/test_predict.yml +++ b/.github/workflows/test_predict.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for predict task run: | - make env.conda source /builds/miniconda3/etc/profile.d/conda.sh + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_prepare_data.yml b/.github/workflows/test_prepare_data.yml index 8209b9fa2..753634f76 100644 --- a/.github/workflows/test_prepare_data.yml +++ b/.github/workflows/test_prepare_data.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for prepare data task run: | - make env.conda source /builds/miniconda3/etc/profile.d/conda.sh + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_quality_checks.yml b/.github/workflows/test_quality_checks.yml index 701460266..25d1bc752 100644 --- a/.github/workflows/test_quality_checks.yml +++ b/.github/workflows/test_quality_checks.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for Quality Check run: | - make 
env.conda source /builds/miniconda3/etc/profile.d/conda.sh + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_random_search.yml b/.github/workflows/test_random_search.yml index 314afc353..78ddc2df0 100644 --- a/.github/workflows/test_random_search.yml +++ b/.github/workflows/test_random_search.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run Random Search tests on GPU run: | - make env.conda source "${HOME}/miniconda3/etc/profile.d/conda.sh" + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_resume.yml b/.github/workflows/test_resume.yml index 78a229913..6d145339b 100644 --- a/.github/workflows/test_resume.yml +++ b/.github/workflows/test_resume.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run resume tests on GPU run: | - make env.conda source "${HOME}/miniconda3/etc/profile.d/conda.sh" + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_train.yml b/.github/workflows/test_train.yml index 599725225..b3852bb09 100644 --- a/.github/workflows/test_train.yml +++ b/.github/workflows/test_train.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for Train on GPU run: | - make env.conda source "${HOME}/miniconda3/etc/profile.d/conda.sh" + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_transfer_learning.yml b/.github/workflows/test_transfer_learning.yml index 4664a97a3..9a3a583a3 100644 --- a/.github/workflows/test_transfer_learning.yml +++ b/.github/workflows/test_transfer_learning.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for Transfer Learning on GPU run: | - make env.conda source "${HOME}/miniconda3/etc/profile.d/conda.sh" + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/.github/workflows/test_tsvtools.yml b/.github/workflows/test_tsvtools.yml index 9f3bfeb02..9e388b8e7 100644 --- a/.github/workflows/test_tsvtools.yml +++ b/.github/workflows/test_tsvtools.yml @@ -36,8 +36,8 @@ jobs: python-version: ${{ env.PYTHON_VERSION }} - name: Run tests for TSV tools run: | - make env.conda source /builds/miniconda3/etc/profile.d/conda.sh + make env.conda conda activate "${{ github.workspace }}"/env make install cd tests diff --git a/README.md b/README.md index 05b5f3a09..dae486a72 100755 --- a/README.md +++ b/README.md @@ -33,34 +33,33 @@ ## About the project This repository hosts ClinicaDL, the deep learning extension of [Clinica](https://github.com/aramis-lab/clinica), -a python library to process neuroimaging data in [BIDS](https://bids.neuroimaging.io/index.html) format. +a Python library to process neuroimaging data in [BIDS](https://bids.neuroimaging.io/index.html) format. > **Disclaimer:** this software is **under development**. Some features can change between different releases and/or commits. -To access the full documentation of the project, follow the link -[https://clinicadl.readthedocs.io/](https://clinicadl.readthedocs.io/). +To access the full documentation of the project, follow the link [https://clinicadl.readthedocs.io/](https://clinicadl.readthedocs.io/). 
If you find a problem when using it or if you want to provide us with feedback, please [open an issue](https://github.com/aramis-lab/ad-dl/issues) or write on the [forum](https://groups.google.com/forum/#!forum/clinica-user). ## Getting started + ClinicaDL currently supports macOS and Linux. We recommend using `conda` or `virtualenv` for the installation of ClinicaDL -as it guarantees the correct management of libraries depending on common -packages: +as it guarantees the correct management of libraries depending on common packages: ```{.sourceCode .bash} -conda create --name ClinicaDL python=3.8 +conda create --name ClinicaDL python=3.10 conda activate ClinicaDL pip install clinicadl ``` -## Tutorial -Visit our [hands-on tutorial web -site](https://aramislab.paris.inria.fr/clinicadl/tuto) to start -using **ClinicaDL** directly in a Google Colab instance! +## Tutorial + +Visit our [hands-on tutorial web site](https://aramislab.paris.inria.fr/clinicadl/tuto) +to start using **ClinicaDL** directly in a Google Colab instance! ## Related Repositories From d54d59cfd1bc954ff026cf2cbba9a0ba9647c4ea Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Fri, 31 May 2024 13:38:12 +0200 Subject: [PATCH 19/43] Flag for local tests (#608) * add no-gpu and adapt-base-dir flag --- tests/test_interpret.py | 3 ++ tests/test_random_search.py | 12 ++++--- tests/test_resume.py | 26 ++++++++++++-- tests/test_train_ae.py | 25 +++++-------- tests/test_train_cnn.py | 13 +++++-- tests/test_train_from_json.py | 28 ++++++++++----- tests/test_transfer_learning.py | 42 +++++++++++++++------- tests/testing_tools.py | 62 ++++++++++++++++++++++++++++++++- 8 files changed, 164 insertions(+), 47 deletions(-) diff --git a/tests/test_interpret.py b/tests/test_interpret.py index 8030e4c98..d84147e97 100644 --- a/tests/test_interpret.py +++ b/tests/test_interpret.py @@ -61,6 +61,9 @@ def test_interpret(cmdopt, tmp_path, test_name): else: raise NotImplementedError(f"Test {test_name} is not implemented.") + if cmdopt["no-gpu"]: + cnn_input.append("--no-gpu") + run_interpret(cnn_input, tmp_out_dir, ref_dir) diff --git a/tests/test_random_search.py b/tests/test_random_search.py index e1c530513..5b68787e8 100644 --- a/tests/test_random_search.py +++ b/tests/test_random_search.py @@ -1,6 +1,5 @@ # coding: utf8 -import json import os import shutil from os.path import join @@ -8,7 +7,7 @@ import pytest -from tests.testing_tools import compare_folders +from .testing_tools import change_gpu_in_toml, compare_folders # random search for ROI with CNN @@ -34,10 +33,12 @@ def test_random_search(cmdopt, tmp_path, test_name): else: raise NotImplementedError(f"Test {test_name} is not implemented.") - run_test_random_search(toml_path, generate_input, tmp_out_dir, ref_dir) + run_test_random_search( + toml_path, generate_input, tmp_out_dir, ref_dir, cmdopt["no-gpu"] + ) -def run_test_random_search(toml_path, generate_input, tmp_out_dir, ref_dir): +def run_test_random_search(toml_path, generate_input, tmp_out_dir, ref_dir, no_gpu): if os.path.exists(tmp_out_dir): shutil.rmtree(tmp_out_dir) # Write random_search.toml file os.makedirs(tmp_out_dir, exist_ok=True) shutil.copy(toml_path, tmp_out_dir) + if no_gpu: + change_gpu_in_toml(tmp_out_dir / "random_search.toml") + flag_error_generate = not os.system("clinicadl " + " ".join(generate_input)) performances_flag = os.path.exists( tmp_out_dir / "job-1" / "split-0" / "best-loss" / "train" ) diff --git
a/tests/test_resume.py b/tests/test_resume.py index 3cf883c32..cdf6031ee 100644 --- a/tests/test_resume.py +++ b/tests/test_resume.py @@ -1,15 +1,14 @@ # coding: utf8 import json -import os import shutil from os import system -from os.path import join from pathlib import Path import pytest from clinicadl import MapsManager -from tests.testing_tools import compare_folders + +from .testing_tools import modify_maps @pytest.fixture( @@ -33,6 +32,18 @@ def test_resume(cmdopt, tmp_path, test_name): shutil.copytree(input_dir / test_name, tmp_out_dir / test_name) maps_stopped = tmp_out_dir / test_name + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: # modify the input MAPS + with open(maps_stopped / "maps.json", "r") as f: + config = json.load(f) + config = modify_maps( + maps=config, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) + with open(maps_stopped / "maps.json", "w") as f: + json.dump(config, f) + flag_error = not system(f"clinicadl -vv train resume {maps_stopped}") assert flag_error @@ -48,4 +59,13 @@ def test_resume(cmdopt, tmp_path, test_name): json_data_out = json.load(out) with open(ref_dir / "maps_image_cnn" / "maps.json", "r") as ref: json_data_ref = json.load(ref) + + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: + json_data_ref = modify_maps( + maps=json_data_ref, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) + assert json_data_ref == json_data_out diff --git a/tests/test_train_ae.py b/tests/test_train_ae.py index 311e145d0..b20749258 100644 --- a/tests/test_train_ae.py +++ b/tests/test_train_ae.py @@ -3,12 +3,11 @@ import json import os import shutil -from os.path import join from pathlib import Path import pytest -from tests.testing_tools import clean_folder, compare_folders +from .testing_tools import clean_folder, compare_folders, modify_maps @pytest.fixture( @@ -27,8 +26,8 @@ def test_train_ae(cmdopt, tmp_path, test_name): base_dir = Path(cmdopt["input"]) input_dir = base_dir / "train" / "in" ref_dir = base_dir / "train" / "ref" - tmp_out_dir = base_dir / "train" / "out" - # tmp_out_dir.mkdir(parents=True) + tmp_out_dir = tmp_path / "train" / "out" + tmp_out_dir.mkdir(parents=True) clean_folder(tmp_out_dir, recreate=True) @@ -102,18 +101,12 @@ def test_train_ae(cmdopt, tmp_path, test_name): with open(ref_dir / ("maps_" + test_name) / "maps.json", "r") as ref: json_data_ref = json.load(ref) - if test_name == "patch_multi_ae": - json_data_out["multi_network"] = True - if cmdopt["no-gpu"]: - json_data_ref["gpu"] = False - if cmdopt["adapt-base-dir"]: - base_dir = base_dir.resolve() - ref_base_dir = Path(json_data_ref["caps_directory"]).parents[2] - json_data_ref["caps_directory"] = str( - base_dir / Path(json_data_ref["caps_directory"]).relative_to(ref_base_dir) - ) - json_data_ref["tsv_path"] = str( - base_dir / Path(json_data_ref["tsv_path"]).relative_to(ref_base_dir) + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: + json_data_ref = modify_maps( + maps=json_data_ref, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], ) assert json_data_out == json_data_ref # ["mode"] == mode diff --git a/tests/test_train_cnn.py b/tests/test_train_cnn.py index da5b3a3f1..761fedbee 100644 --- a/tests/test_train_cnn.py +++ b/tests/test_train_cnn.py @@ -3,12 +3,11 @@ import json import os import shutil -from os.path import join from pathlib import Path import pytest -from tests.testing_tools import compare_folders +from .testing_tools import 
compare_folders, modify_maps @pytest.fixture( @@ -101,6 +100,9 @@ def test_train_cnn(cmdopt, tmp_path, test_name): else: raise NotImplementedError(f"Test {test_name} is not implemented.") + if cmdopt["no-gpu"]: + test_input.append("--no-gpu") + if tmp_out_dir.is_dir(): shutil.rmtree(tmp_out_dir) @@ -117,6 +119,13 @@ def test_train_cnn(cmdopt, tmp_path, test_name): with open(ref_dir / ("maps_" + test_name) / "maps.json", "r") as ref: json_data_ref = json.load(ref) + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: + json_data_ref = modify_maps( + maps=json_data_ref, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) assert json_data_out == json_data_ref # ["mode"] == mode assert compare_folders( diff --git a/tests/test_train_from_json.py b/tests/test_train_from_json.py index f48791d31..363af9aff 100644 --- a/tests/test_train_from_json.py +++ b/tests/test_train_from_json.py @@ -1,17 +1,14 @@ -import os -import pathlib +import json import shutil -from os import path, system -from os.path import join +from os import system from pathlib import Path -from .testing_tools import compare_folders_with_hashes, create_hashes_dict, models_equal +from .testing_tools import compare_folders_with_hashes, create_hashes_dict, modify_maps def test_json_compatibility(cmdopt, tmp_path): base_dir = Path(cmdopt["input"]) input_dir = base_dir / "train_from_json" / "in" - ref_dir = base_dir / "train_from_json" / "ref" tmp_out_dir = tmp_path / "train_from_json" / "out" tmp_out_dir.mkdir(parents=True) @@ -22,6 +19,19 @@ def test_json_compatibility(cmdopt, tmp_path): if reproduced_maps_dir.exists(): shutil.rmtree(reproduced_maps_dir) + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: # virtually modify the input MAPS + with open(config_json, "r") as f: + config = json.load(f) + config_json = tmp_out_dir / "modified_maps.json" + config = modify_maps( + maps=config, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) + with open(config_json, "w+") as f: + json.dump(config, f) + flag_error = not system( f"clinicadl train from_json {str(config_json)} {str(reproduced_maps_dir)} -s {split}" ) @@ -31,7 +41,6 @@ def test_json_compatibility(cmdopt, tmp_path): def test_determinism(cmdopt, tmp_path): base_dir = Path(cmdopt["input"]) input_dir = base_dir / "train_from_json" / "in" - ref_dir = base_dir / "train_from_json" / "ref" tmp_out_dir = tmp_path / "train_from_json" / "out" tmp_out_dir.mkdir(parents=True) @@ -50,8 +59,11 @@ def test_determinism(cmdopt, tmp_path): str(maps_dir), "-c", str(input_dir / "reproducibility_config.toml"), - "--no-gpu", ] + + if cmdopt["no-gpu"]: + test_input.append("--no-gpu") + # Run first experiment flag_error = not system("clinicadl " + " ".join(test_input)) assert flag_error diff --git a/tests/test_transfer_learning.py b/tests/test_transfer_learning.py index 95713d7ad..b9c3f999b 100644 --- a/tests/test_transfer_learning.py +++ b/tests/test_transfer_learning.py @@ -1,12 +1,11 @@ import json import os import shutil -from os.path import join from pathlib import Path import pytest -from tests.testing_tools import compare_folders +from .testing_tools import compare_folders, modify_maps # Everything is tested on roi except for cnn --> multicnn (patch) as multicnn is not implemented for roi. 
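# As a rough sketch, the flow exercised below chains two CLI calls (subcommands
# and paths are illustrative; the real argument lists are built further down
# in this file):
#   clinicadl train ... <caps_roi> <extract_roi> <labels> <tmp_out>/maps_roi_ae -c <config>
#   clinicadl train ... -c <config> --transfer_path <tmp_out>/maps_roi_ae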
@@ -41,7 +40,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): str(caps_roi_path), extract_roi_str, str(labels_path), - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_ae"), "-c", str(config_path), ] @@ -55,7 +54,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): "-c", str(config_path), "--transfer_path", - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_ae"), ] name = "aeTOae" elif test_name == "transfer_ae_cnn": @@ -65,7 +64,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): str(caps_roi_path), extract_roi_str, str(labels_path), - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_ae"), "-c", str(config_path), ] @@ -79,7 +78,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): "-c", str(config_path), "--transfer_path", - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_ae"), ] name = "aeTOcnn" elif test_name == "transfer_cnn_cnn": @@ -89,7 +88,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): str(caps_roi_path), extract_roi_str, str(labels_path), - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_cnn"), "-c", str(config_path), ] @@ -103,7 +102,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): "-c", str(config_path), "--transfer_path", - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_cnn"), ] name = "cnnTOcnn" elif test_name == "transfer_cnn_multicnn": @@ -113,7 +112,7 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): str(caps_roi_path), extract_roi_str, str(labels_path), - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_cnn"), "-c", str(config_path), ] @@ -127,12 +126,17 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): "-c", str(config_path), "--transfer_path", - str(tmp_out_dir), + str(tmp_out_dir / "maps_roi_cnn"), + "--multi_network", ] - name = "cnnTOcnn" + name = "cnnTOmulticnn" else: raise NotImplementedError(f"Test {test_name} is not implemented.") + if cmdopt["no-gpu"]: + source_task.append("--no-gpu") + target_task.append("--no-gpu") + if tmp_out_dir.exists(): shutil.rmtree(tmp_out_dir) if tmp_target_dir.exists(): @@ -148,9 +152,21 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): with open(ref_dir / ("maps_roi_" + name) / "maps.json", "r") as ref: json_data_ref = json.load(ref) - json_data_ref["transfer_path"] = json_data_out["transfer_path"] - json_data_ref["gpu"] = json_data_out["gpu"] + ref_source_dir = Path(json_data_ref["transfer_path"]).parent + json_data_ref["transfer_path"] = str( + tmp_out_dir / Path(json_data_ref["transfer_path"]).relative_to(ref_source_dir) + ) + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: + json_data_ref = modify_maps( + maps=json_data_ref, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) + # TODO: remove and update data json_data_ref["caps_directory"] = json_data_out["caps_directory"] + json_data_ref["gpu"] = json_data_out["gpu"] + ### assert json_data_out == json_data_ref # ["mode"] == mode assert compare_folders( diff --git a/tests/testing_tools.py b/tests/testing_tools.py index d4cb29c8a..ff7eb97b1 100644 --- a/tests/testing_tools.py +++ b/tests/testing_tools.py @@ -1,7 +1,7 @@ import pathlib from os import PathLike from pathlib import Path -from typing import Dict, List +from typing import Any, Dict, List def ignore_pattern(file_path: pathlib.Path, ignore_pattern_list: List[str]) -> bool: @@ -166,3 +166,63 @@ def clean_folder(path, recreate=True): rmtree(abs_path) if recreate: makedirs(abs_path) + + +def modify_maps( + maps: Dict[str, Any], + base_dir: Path, + no_gpu: bool = False, + adapt_base_dir: 
bool = False, +) -> Dict[str, Any]: + """ + Modifies a MAPS dictionary if the user passed --no-gpu or --adapt-base-dir flags. + + Parameters + ---------- + maps : Dict[str, Any] + The MAPS dictionary. + base_dir : Path + The base directory, where CI data are stored. + no_gpu : bool (optional, default=False) + Whether the user activated the --no-gpu flag. + adapt_base_dir : bool (optional, default=False) + Whether the user activated the --adapt-base-dir flag. + + Returns + ------- + Dict[str, Any] + The modified MAPS dictionary. + """ + if no_gpu: + maps["gpu"] = False + if adapt_base_dir: + base_dir = base_dir.resolve() + ref_base_dir = Path(maps["caps_directory"]).parents[2] + maps["caps_directory"] = str( + base_dir / Path(maps["caps_directory"]).relative_to(ref_base_dir) + ) + maps["tsv_path"] = str( + base_dir / Path(maps["tsv_path"]).relative_to(ref_base_dir) + ) + return maps + + +def change_gpu_in_toml(toml_path: Path) -> None: + """ + Changes GPU to false in a TOML config file. + + Parameters + ---------- + toml_path : Path + The TOML file. + """ + import toml + + config = toml.load(toml_path) + try: + config["Computational"]["gpu"] = False + except KeyError: + config["Computational"] = {"gpu": False} + f = open(toml_path, "w") + toml.dump(config, f) + f.close() From f20e7fb31abe444d9204ce92665af43265f284d9 Mon Sep 17 00:00:00 2001 From: HuguesRoy <149707970+HuguesRoy@users.noreply.github.com> Date: Tue, 4 Jun 2024 13:53:33 +0200 Subject: [PATCH 20/43] Update quality_check.py (#609) * Update quality_check.py --- clinicadl/quality_check/t1_linear/quality_check.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/clinicadl/quality_check/t1_linear/quality_check.py b/clinicadl/quality_check/t1_linear/quality_check.py index c684858ea..86d85366d 100755 --- a/clinicadl/quality_check/t1_linear/quality_check.py +++ b/clinicadl/quality_check/t1_linear/quality_check.py @@ -141,7 +141,10 @@ def quality_check( qc_df = pd.DataFrame(columns=columns) qc_df["pass"] = qc_df["pass"].astype(bool) softmax = torch.nn.Softmax(dim=1) - logger.info(f"Quality check will be performed over {len(dataloader)} images.") + + logger.info( + f"Quality check will be performed over {len(dataloader.dataset)} images." 
+ ) for data in dataloader: logger.debug(f"Processing subject {data['participant_id']}.") From f6f382aba02e4cb8cc3a2f79f355ea7e8f1f54ed Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 4 Jun 2024 14:22:11 +0200 Subject: [PATCH 21/43] Fix issue in compare_folders (#610) * add FileNotFound error in tree --- tests/test_generate.py | 4 +- tests/test_predict.py | 69 ++++++++++++++++++++++----------- tests/test_qc.py | 14 +++---- tests/test_random_search.py | 24 ++++++------ tests/test_resume.py | 2 +- tests/test_train_ae.py | 13 +++---- tests/test_train_from_json.py | 2 +- tests/test_transfer_learning.py | 25 ++++++------ tests/testing_tools.py | 51 ++++++++++++++++++------ 9 files changed, 128 insertions(+), 76 deletions(-) diff --git a/tests/test_generate.py b/tests/test_generate.py index 78ad55156..9fc03535b 100644 --- a/tests/test_generate.py +++ b/tests/test_generate.py @@ -46,12 +46,12 @@ def test_generate(cmdopt, tmp_path, test_name): "t1-linear", ] elif test_name == "hypometabolic_example": - output_folder = str(tmp_out_dir / test_name) + output_folder = tmp_out_dir / test_name test_input = [ "generate", "hypometabolic", data_caps_pet, - output_folder, + str(output_folder), "--n_subjects", "2", "--pathology", diff --git a/tests/test_predict.py b/tests/test_predict.py index 34427eeeb..c6b6a39fa 100644 --- a/tests/test_predict.py +++ b/tests/test_predict.py @@ -1,6 +1,5 @@ # coding: utf8 import json -import os import shutil from os.path import exists from pathlib import Path @@ -8,7 +7,8 @@ import pytest from clinicadl import MapsManager -from tests.testing_tools import clean_folder, compare_folders + +from .testing_tools import compare_folders, modify_maps @pytest.fixture( @@ -33,46 +33,71 @@ def test_predict(cmdopt, tmp_path, test_name): tmp_out_dir.mkdir(parents=True) if test_name == "predict_image_classification": - model_folder = input_dir / "maps_image_cnn" + maps_name = "maps_image_cnn" modes = ["image"] use_labels = True elif test_name == "predict_slice_classification": - model_folder = input_dir / "maps_slice_cnn" + maps_name = "maps_slice_cnn" modes = ["image", "slice"] use_labels = True elif test_name == "predict_patch_regression": - model_folder = input_dir / "maps_patch_cnn" + maps_name = "maps_patch_cnn" modes = ["image", "patch"] use_labels = False elif test_name == "predict_roi_regression": - model_folder = input_dir / "maps_roi_cnn" + maps_name = "maps_roi_cnn" modes = ["image", "roi"] use_labels = False elif test_name == "predict_patch_multi_classification": - model_folder = input_dir / "maps_patch_multi_cnn" + maps_name = "maps_patch_multi_cnn" modes = ["image", "patch"] use_labels = False elif test_name == "predict_roi_reconstruction": - model_folder = input_dir / "maps_roi_ae" + maps_name = "maps_roi_ae" modes = ["roi"] use_labels = False else: raise NotImplementedError(f"Test {test_name} is not implemented.") - out_dir = str(model_folder / "split-0/best-loss/test-RANDOM") + shutil.copytree(input_dir / maps_name, tmp_out_dir / maps_name) + model_folder = tmp_out_dir / maps_name + + if cmdopt["adapt-base-dir"]: + with open(model_folder / "maps.json", "r") as f: + config = json.load(f) + config = modify_maps( + maps=config, + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) + with open(model_folder / "maps.json", "w") as f: + json.dump(config, f, skipkeys=True, indent=4) + + with open(model_folder / "groups/test-RANDOM/maps.json", "r") as f: + config = json.load(f) + config 
= modify_maps( + maps=config, + base_dir=base_dir, + no_gpu=False, + adapt_base_dir=cmdopt["adapt-base-dir"], + ) + with open(model_folder / "groups/test-RANDOM/maps.json", "w") as f: + json.dump(config, f, skipkeys=True, indent=4) - if exists(out_dir): - shutil.rmtree(out_dir) + tmp_out_subdir = str(model_folder / "split-0/best-loss/test-RANDOM") + if exists(tmp_out_subdir): + shutil.rmtree(tmp_out_subdir) - # Correction of JSON file for ROI - if "roi" in modes: - json_path = model_folder / "maps.json" - with open(json_path, "r") as f: - parameters = json.load(f) - parameters["roi_list"] = ["leftHippocampusBox", "rightHippocampusBox"] - json_data = json.dumps(parameters, skipkeys=True, indent=4) - with open(json_path, "w") as f: - f.write(json_data) + # # Correction of JSON file for ROI + # if "roi" in modes: + # json_path = model_folder / "maps.json" + # with open(json_path, "r") as f: + # parameters = json.load(f) + # parameters["roi_list"] = ["leftHippocampusBox", "rightHippocampusBox"] + # json_data = json.dumps(parameters, skipkeys=True, indent=4) + # with open(json_path, "w") as f: + # f.write(json_data) maps_manager = MapsManager(model_folder, verbose="debug") maps_manager.predict( @@ -91,7 +116,7 @@ def test_predict(cmdopt, tmp_path, test_name): maps_manager.get_metrics(data_group="test-RANDOM", mode=mode) assert compare_folders( - tmp_out_dir / test_name, - ref_dir / test_name, + tmp_out_dir / maps_name, + input_dir / maps_name, tmp_out_dir, ) diff --git a/tests/test_qc.py b/tests/test_qc.py index 910c357d4..9b03c2151 100644 --- a/tests/test_qc.py +++ b/tests/test_qc.py @@ -22,29 +22,29 @@ def test_qc(cmdopt, tmp_path, test_name): tmp_out_dir.mkdir(parents=True) if test_name == "t1-linear": - out_tsv = str(tmp_out_dir / "QC.tsv") + out_tsv = tmp_out_dir / "QC.tsv" test_input = [ "t1-linear", str(input_dir / "caps"), - out_tsv, + str(out_tsv), "--no-gpu", ] elif test_name == "t1-volume": - out_dir = str(tmp_out_dir / "QC_T1V") + out_dir = tmp_out_dir / "QC_T1V" test_input = [ "t1-volume", str(input_dir / "caps_T1V"), - out_dir, + str(out_dir), "Ixi549Space", ] elif test_name == "pet-linear": - out_tsv = str(tmp_out_dir / "QC_pet.tsv") + out_tsv = tmp_out_dir / "QC_pet.tsv" test_input = [ "pet-linear", str(input_dir / "caps_pet"), - out_tsv, + str(out_tsv), "18FFDG", "cerebellumPons2", "--threshold", @@ -73,7 +73,7 @@ def test_qc(cmdopt, tmp_path, test_name): assert out_df.equals(ref_df) elif test_name == "t1-volume": - assert compare_folders(out_dir, str(ref_dir / "QC_T1V"), tmp_out_dir) + assert compare_folders(out_dir, ref_dir / "QC_T1V", tmp_out_dir) elif test_name == "pet-linear": out_df = pd.read_csv(out_tsv, sep="\t") diff --git a/tests/test_random_search.py b/tests/test_random_search.py index 5b68787e8..864f8b1fa 100644 --- a/tests/test_random_search.py +++ b/tests/test_random_search.py @@ -7,7 +7,7 @@ import pytest -from .testing_tools import change_gpu_in_toml, compare_folders +from .testing_tools import compare_folders, modify_toml # random search for ROI with CNN @@ -25,6 +25,9 @@ def test_random_search(cmdopt, tmp_path, test_name): input_dir = base_dir / "randomSearch" / "in" ref_dir = base_dir / "randomSearch" / "ref" tmp_out_dir = tmp_path / "randomSearch" / "out" + + if os.path.exists(tmp_out_dir): + shutil.rmtree(tmp_out_dir) tmp_out_dir.mkdir(parents=True) if test_name == "rs_roi_cnn": @@ -33,21 +36,16 @@ def test_random_search(cmdopt, tmp_path, test_name): else: raise NotImplementedError(f"Test {test_name} is not implemented.") - run_test_random_search( -
toml_path, generate_input, tmp_out_dir, ref_dir, cmdopt["no-gpu"] - ) - - -def run_test_random_search(toml_path, generate_input, tmp_out_dir, ref_dir, no_gpu): - if os.path.exists(tmp_out_dir): - shutil.rmtree(tmp_out_dir) - # Write random_search.toml file - os.makedirs(tmp_out_dir, exist_ok=True) shutil.copy(toml_path, tmp_out_dir) - if no_gpu: - change_gpu_in_toml(tmp_out_dir / "random_search.toml") + if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: + modify_toml( + toml_path=tmp_out_dir / "random_search.toml", + base_dir=base_dir, + no_gpu=cmdopt["no-gpu"], + adapt_base_dir=cmdopt["adapt-base-dir"], + ) flag_error_generate = not os.system("clinicadl " + " ".join(generate_input)) performances_flag = os.path.exists( diff --git a/tests/test_resume.py b/tests/test_resume.py index cdf6031ee..5827bda0f 100644 --- a/tests/test_resume.py +++ b/tests/test_resume.py @@ -42,7 +42,7 @@ def test_resume(cmdopt, tmp_path, test_name): adapt_base_dir=cmdopt["adapt-base-dir"], ) with open(maps_stopped / "maps.json", "w") as f: - json.dump(config, f) + json.dump(config, f, skipkeys=True, indent=4) flag_error = not system(f"clinicadl -vv train resume {maps_stopped}") assert flag_error diff --git a/tests/test_train_ae.py b/tests/test_train_ae.py index b20749258..c7fbcb276 100644 --- a/tests/test_train_ae.py +++ b/tests/test_train_ae.py @@ -33,8 +33,10 @@ def test_train_ae(cmdopt, tmp_path, test_name): labels_path = str(input_dir / "labels_list" / "2_fold") config_path = str(input_dir / "train_config.toml") + split = 0 + if test_name == "image_ae": - split = [0, 0] + split = 1 test_input = [ "train", "reconstruction", @@ -45,10 +47,9 @@ def test_train_ae(cmdopt, tmp_path, test_name): "-c", config_path, "--split", - "1", + str(split), ] elif test_name == "patch_multi_ae": - split = [0, 0] test_input = [ "train", "reconstruction", @@ -61,7 +62,6 @@ def test_train_ae(cmdopt, tmp_path, test_name): "--multi_network", ] elif test_name == "roi_ae": - split = [0, 0] test_input = [ "train", "reconstruction", @@ -73,7 +73,6 @@ def test_train_ae(cmdopt, tmp_path, test_name): config_path, ] elif test_name == "slice_ae": - split = [0, 0] test_input = [ "train", "reconstruction", @@ -116,7 +115,7 @@ def test_train_ae(cmdopt, tmp_path, test_name): tmp_path, ) assert compare_folders( - tmp_out_dir / f"split-{split[0]}" / "best-loss", - ref_dir / ("maps_" + test_name) / f"split-{split[1]}" / "best-loss", + tmp_out_dir / f"split-{split}" / "best-loss", + ref_dir / ("maps_" + test_name) / f"split-{split}" / "best-loss", tmp_path, ) diff --git a/tests/test_train_from_json.py b/tests/test_train_from_json.py index 363af9aff..06b307b0f 100644 --- a/tests/test_train_from_json.py +++ b/tests/test_train_from_json.py @@ -30,7 +30,7 @@ def test_json_compatibility(cmdopt, tmp_path): adapt_base_dir=cmdopt["adapt-base-dir"], ) with open(config_json, "w+") as f: - json.dump(config, f) + json.dump(config, f, skipkeys=True, indent=4) flag_error = not system( f"clinicadl train from_json {str(config_json)} {str(reproduced_maps_dir)} -s {split}" diff --git a/tests/test_transfer_learning.py b/tests/test_transfer_learning.py index b9c3f999b..d49cbd61f 100644 --- a/tests/test_transfer_learning.py +++ b/tests/test_transfer_learning.py @@ -152,20 +152,23 @@ def test_transfer_learning(cmdopt, tmp_path, test_name): with open(ref_dir / ("maps_roi_" + name) / "maps.json", "r") as ref: json_data_ref = json.load(ref) - ref_source_dir = Path(json_data_ref["transfer_path"]).parent - json_data_ref["transfer_path"] = str( - tmp_out_dir / 
Path(json_data_ref["transfer_path"]).relative_to(ref_source_dir) - ) - if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: - json_data_ref = modify_maps( - maps=json_data_ref, - base_dir=base_dir, - no_gpu=cmdopt["no-gpu"], - adapt_base_dir=cmdopt["adapt-base-dir"], - ) + # TODO : uncomment when CI data are correct + # ref_source_dir = Path(json_data_ref["transfer_path"]).parent + # json_data_ref["transfer_path"] = str( + # tmp_out_dir / Path(json_data_ref["transfer_path"]).relative_to(ref_source_dir) + # ) + # if cmdopt["no-gpu"] or cmdopt["adapt-base-dir"]: + # json_data_ref = modify_maps( + # maps=json_data_ref, + # base_dir=base_dir, + # no_gpu=cmdopt["no-gpu"], + # adapt_base_dir=cmdopt["adapt-base-dir"], + # ) # TODO: remove and update data json_data_ref["caps_directory"] = json_data_out["caps_directory"] json_data_ref["gpu"] = json_data_out["gpu"] + json_data_ref["transfer_path"] = json_data_out["transfer_path"] + json_data_ref["tsv_path"] = json_data_out["tsv_path"] ### assert json_data_out == json_data_ref # ["mode"] == mode diff --git a/tests/testing_tools.py b/tests/testing_tools.py index ff7eb97b1..4044d1022 100644 --- a/tests/testing_tools.py +++ b/tests/testing_tools.py @@ -95,6 +95,9 @@ def tree(dir_: PathLike, file_out: PathLike): """ from pathlib import Path + if not dir_.is_dir(): + raise FileNotFoundError(f"No directory found at {dir_}.") + file_content = "" for path in sorted(Path(dir_).rglob("*")): @@ -104,8 +107,6 @@ def tree(dir_: PathLike, file_out: PathLike): spacer = " " * depth file_content = file_content + f"{spacer}+ {path.name}\n" - print(file_content) - Path(file_out).write_text(file_content) @@ -201,28 +202,54 @@ def modify_maps( maps["caps_directory"] = str( base_dir / Path(maps["caps_directory"]).relative_to(ref_base_dir) ) - maps["tsv_path"] = str( - base_dir / Path(maps["tsv_path"]).relative_to(ref_base_dir) - ) + try: + maps["tsv_path"] = str( + base_dir / Path(maps["tsv_path"]).relative_to(ref_base_dir) + ) + except KeyError: # maps with only caps directory + pass return maps -def change_gpu_in_toml(toml_path: Path) -> None: +def modify_toml( + toml_path: Path, + base_dir: Path, + no_gpu: bool = False, + adapt_base_dir: bool = False, +) -> None: """ - Changes GPU to false in a TOML config file. + Modifies a TOML file if the user passed --no-gpu or --adapt-base-dir flags. Parameters ---------- toml_path : Path - The TOML file. + The path of the TOML file. + base_dir : Path + The base directory, where CI data are stored. + no_gpu : bool (optional, default=False) + Whether the user activated the --no-gpu flag. + adapt_base_dir : bool (optional, default=False) + Whether the user activated the --adapt-base-dir flag. 
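+
+    Notes
+    -----
+    ``no_gpu`` and ``adapt_base_dir`` mirror the ``--no-gpu`` and
+    ``--adapt-base-dir`` pytest flags defined in conftest. A hypothetical
+    direct call (paths are illustrative only):
+    ``modify_toml(Path("out/random_search.toml"), base_dir=Path("/mnt/ci_data"), no_gpu=True)``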
""" import toml config = toml.load(toml_path) - try: - config["Computational"]["gpu"] = False - except KeyError: - config["Computational"] = {"gpu": False} + if no_gpu: + try: + config["Computational"]["gpu"] = False + except KeyError: + config["Computational"] = {"gpu": False} + if adapt_base_dir: + random_search_config = config["Random_Search"] + base_dir = base_dir.resolve() + ref_base_dir = Path(random_search_config["caps_directory"]).parents[2] + random_search_config["caps_directory"] = str( + base_dir + / Path(random_search_config["caps_directory"]).relative_to(ref_base_dir) + ) + random_search_config["tsv_path"] = str( + base_dir / Path(random_search_config["tsv_path"]).relative_to(ref_base_dir) + ) f = open(toml_path, "w") toml.dump(config, f) f.close() From 523563d985f1ad28e97e754d6a0d10ea9799e263 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 4 Jun 2024 15:35:56 +0200 Subject: [PATCH 22/43] revert change on poetry --- poetry.lock | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/poetry.lock b/poetry.lock index c9b87b84f..eafdc75ff 100644 --- a/poetry.lock +++ b/poetry.lock @@ -46,6 +46,20 @@ files = [ [package.extras] dev = ["black", "coverage", "isort", "pre-commit", "pyenchant", "pylint"] +[[package]] +name = "annotated-types" +version = "0.6.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, + {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + [[package]] name = "appdirs" version = "1.4.4" From 4971fa7b77b8dcf5bc742b59b328d26ab2dbca2e Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 4 Jun 2024 15:43:48 +0200 Subject: [PATCH 23/43] correction of wrong conflict choice in rebasing --- tests/test_predict.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_predict.py b/tests/test_predict.py index ebc9d0fd1..93d3a4a38 100644 --- a/tests/test_predict.py +++ b/tests/test_predict.py @@ -6,7 +6,7 @@ import pytest -from clinicadl import MapsManager +from clinicadl.predict.predict_manager import PredictManager from .testing_tools import compare_folders, modify_maps From 52f94928e57b0c407e75c232684bf2479fd2d1ef Mon Sep 17 00:00:00 2001 From: Gensollen Date: Tue, 4 Jun 2024 18:45:20 +0200 Subject: [PATCH 24/43] [INFRA] Update the Makefile `check.lock` target (#603) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5c8ca37d1..a7b469b16 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ help: Makefile .PHONY: check.lock check.lock: - @$(POETRY) lock --check + @$(POETRY) check --lock ## build : Build the package. 
.PHONY: build From 996cdd51e02bcbd4899a41aaf89addebc90183ee Mon Sep 17 00:00:00 2001 From: Gensollen Date: Fri, 7 Jun 2024 16:23:13 +0200 Subject: [PATCH 25/43] [CI] Run unit tests and linter on refactoring branch (#618) * run unit tests on refactoring * run linter on refactoring --- .github/workflows/lint.yaml | 4 ++-- .github/workflows/test.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 466dd1c2f..1a2b6f7d9 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -2,9 +2,9 @@ name: 'Lint codebase' on: pull_request: - branches: [ dev ] + branches: [ "dev", "refactoring" ] push: - branches: [ dev ] + branches: [ "dev", "refactoring" ] permissions: contents: read diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5e0fdde33..219e86c2b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -2,9 +2,9 @@ name: Test on: push: - branches: [dev] + branches: ["dev", "refactoring"] pull_request: - branches: [dev] + branches: ["dev", "refactoring"] permissions: contents: read From 752bc2b2264e34fd7ce654dd560e72d2da5bc759 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Fri, 7 Jun 2024 16:54:40 +0200 Subject: [PATCH 26/43] remove outdated tests --- tests/unittests/generate/test_hypo_config.py | 97 ------------------- .../unittests/generate/test_trivial_config.py | 0 2 files changed, 97 deletions(-) delete mode 100644 tests/unittests/generate/test_hypo_config.py delete mode 100644 tests/unittests/generate/test_trivial_config.py diff --git a/tests/unittests/generate/test_hypo_config.py b/tests/unittests/generate/test_hypo_config.py deleted file mode 100644 index 9d59270ee..000000000 --- a/tests/unittests/generate/test_hypo_config.py +++ /dev/null @@ -1,97 +0,0 @@ -import pytest -from pydantic import ValidationError - - -@pytest.mark.parametrize( - "parameters", - [ - { - "caps_directory": "", - "generated_caps_directory": "", - "participants_list": "", - "preprocessing_cls": "flair-linear", - "n_subjects": 3, - "n_proc": 1, - "pathology_cls": "lvppa", - "anomaly_degree": 6, - "sigma": 5, - "use_uncropped_image": False, - }, - { - "caps_directory": "", - "generated_caps_directory": "", - "participants_list": "", - "preprocessing_cls": "t1-linear", - "n_subjects": 3, - "n_proc": 1, - "pathology_cls": "alzheimer", - "anomaly_degree": 6, - "sigma": 5, - "use_uncropped_image": False, - }, - { - "caps_directory": "", - "generated_caps_directory": "", - "participants_list": "", - "preprocessing_cls": "t1-linear", - "n_subjects": 3, - "n_proc": 1, - "pathology_cls": "lvppa", - "anomaly_degree": 6, - "sigma": 40.2, - "use_uncropped_image": True, - }, - ], -) -def test_fails_validations(parameters): - from clinicadl.generate.generate_config import GenerateHypometabolicConfig - - with pytest.raises(ValidationError): - GenerateHypometabolicConfig(**parameters) - - -@pytest.mark.parametrize( - "parameters", - [ - { - "caps_directory": "", - "generated_caps_directory": "", - "participants_list": "", - "preprocessing_cls": "t1-linear", - "n_subjects": 3, - "n_proc": 2, - "pathology_cls": "lvppa", - "anomaly_degree": 30.5, - "sigma": 35, - "use_uncropped_image": False, - }, - { - "caps_directory": "", - "generated_caps_directory": "", - "participants_list": "", - "preprocessing_cls": "pet-linear", - "n_subjects": 3, - "n_proc": 1, - "pathology_cls": "ad", - "anomaly_degree": 6.6, - "sigma": 20, - 
"use_uncropped_image": True, - }, - { - "caps_directory": "", - "generated_caps_directory": "", - "participants_list": "", - "preprocessing_cls": "t1-linear", - "n_subjects": 3, - "n_proc": 1, - "pathology_cls": "pca", - "anomaly_degree": 6, - "sigma": 5, - "use_uncropped_image": True, - }, - ], -) -def test_passes_validations(parameters): - from clinicadl.generate.generate_config import GenerateHypometabolicConfig - - GenerateHypometabolicConfig(**parameters) diff --git a/tests/unittests/generate/test_trivial_config.py b/tests/unittests/generate/test_trivial_config.py deleted file mode 100644 index e69de29bb..000000000 From 405f4d8c81c33c16a10bd1dac2cfbd9a81bb4470 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Wed, 12 Jun 2024 10:10:09 +0200 Subject: [PATCH 27/43] new model module --- clinicadl/losses/__init__.py | 15 + clinicadl/network/pythae/__init__.py | 3 + clinicadl/network/pythae/ae/__init__.py | 0 clinicadl/network/pythae/ae/ae_config.py | 18 + clinicadl/network/pythae/ae/ae_model.py | 52 +++ clinicadl/network/pythae/ae/ae_utils.py | 40 ++ clinicadl/network/pythae/ae/builtin_models.py | 106 +++++ clinicadl/network/pythae/base/__init__.py | 2 + clinicadl/network/pythae/base/base_config.py | 34 ++ clinicadl/network/pythae/base/base_model.py | 111 +++++ clinicadl/network/pythae/cnn/__init__.py | 0 clinicadl/network/pythae/cnn/cnn_config.py | 13 + clinicadl/network/pythae/cnn/cnn_model.py | 40 ++ clinicadl/network/pythae/nn/__init__.py | 0 .../network/pythae/nn/layers/__init__.py | 0 .../pythae/nn/layers/factory/__init__.py | 3 + .../network/pythae/nn/layers/factory/conv.py | 26 ++ .../network/pythae/nn/layers/factory/norm.py | 62 +++ .../network/pythae/nn/layers/factory/pool.py | 53 +++ clinicadl/network/pythae/nn/layers/pool.py | 81 ++++ clinicadl/network/pythae/nn/layers/unpool.py | 32 ++ .../network/pythae/nn/networks/__init__.py | 0 clinicadl/network/pythae/nn/networks/cnn.py | 394 ++++++++++++++++++ clinicadl/network/pythae/nn/utils/SECNN.py | 143 +++++++ clinicadl/network/pythae/nn/utils/__init__.py | 0 clinicadl/network/pythae/nn/utils/resnet.py | 63 +++ clinicadl/network/pythae/nn/utils/resnet3D.py | 100 +++++ clinicadl/network/pythae/utils.py | 27 ++ clinicadl/network/pythae/vae/__init__.py | 0 clinicadl/network/pythae/vae/ae_config.py | 18 + clinicadl/network/pythae/vae/vae_model.py | 52 +++ clinicadl/utils/enum.py | 11 + 32 files changed, 1499 insertions(+) create mode 100644 clinicadl/losses/__init__.py create mode 100644 clinicadl/network/pythae/__init__.py create mode 100644 clinicadl/network/pythae/ae/__init__.py create mode 100644 clinicadl/network/pythae/ae/ae_config.py create mode 100644 clinicadl/network/pythae/ae/ae_model.py create mode 100644 clinicadl/network/pythae/ae/ae_utils.py create mode 100644 clinicadl/network/pythae/ae/builtin_models.py create mode 100644 clinicadl/network/pythae/base/__init__.py create mode 100644 clinicadl/network/pythae/base/base_config.py create mode 100644 clinicadl/network/pythae/base/base_model.py create mode 100644 clinicadl/network/pythae/cnn/__init__.py create mode 100644 clinicadl/network/pythae/cnn/cnn_config.py create mode 100644 clinicadl/network/pythae/cnn/cnn_model.py create mode 100644 clinicadl/network/pythae/nn/__init__.py create mode 100644 clinicadl/network/pythae/nn/layers/__init__.py create mode 100644 clinicadl/network/pythae/nn/layers/factory/__init__.py create mode 100644 clinicadl/network/pythae/nn/layers/factory/conv.py create mode 100644 
clinicadl/network/pythae/nn/layers/factory/norm.py create mode 100644 clinicadl/network/pythae/nn/layers/factory/pool.py create mode 100644 clinicadl/network/pythae/nn/layers/pool.py create mode 100644 clinicadl/network/pythae/nn/layers/unpool.py create mode 100644 clinicadl/network/pythae/nn/networks/__init__.py create mode 100644 clinicadl/network/pythae/nn/networks/cnn.py create mode 100644 clinicadl/network/pythae/nn/utils/SECNN.py create mode 100644 clinicadl/network/pythae/nn/utils/__init__.py create mode 100644 clinicadl/network/pythae/nn/utils/resnet.py create mode 100644 clinicadl/network/pythae/nn/utils/resnet3D.py create mode 100644 clinicadl/network/pythae/utils.py create mode 100644 clinicadl/network/pythae/vae/__init__.py create mode 100644 clinicadl/network/pythae/vae/ae_config.py create mode 100644 clinicadl/network/pythae/vae/vae_model.py diff --git a/clinicadl/losses/__init__.py b/clinicadl/losses/__init__.py new file mode 100644 index 000000000..14d3963f0 --- /dev/null +++ b/clinicadl/losses/__init__.py @@ -0,0 +1,15 @@ +from enum import Enum + + +class ImplementedLosses(str, Enum): + Conv5_FC3 = "Conv5_FC3" + Conv4_FC3 = "Conv4_FC3" + Stride_Conv5_FC3 = "Stride_Conv5_FC3" + RESNET = "resnet18" + + @classmethod + def _missing_(cls, value): + raise ValueError( + f"{value} is not implemented. Implemented networks are: " + + ", ".join([repr(m.value) for m in cls]) + ) diff --git a/clinicadl/network/pythae/__init__.py b/clinicadl/network/pythae/__init__.py new file mode 100644 index 000000000..42e5facd8 --- /dev/null +++ b/clinicadl/network/pythae/__init__.py @@ -0,0 +1,3 @@ +from .base.base_config import ModelConfig +from .base.base_model import ClinicaDLModel +from .base.base_utils import ModelOutput diff --git a/clinicadl/network/pythae/ae/__init__.py b/clinicadl/network/pythae/ae/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/clinicadl/network/pythae/ae/ae_config.py b/clinicadl/network/pythae/ae/ae_config.py new file mode 100644 index 000000000..a91447d43 --- /dev/null +++ b/clinicadl/network/pythae/ae/ae_config.py @@ -0,0 +1,18 @@ +from pydantic import PositiveFloat, PositiveInt + +from clinicadl.network.pythae import ModelConfig +from clinicadl.utils.enum import Normalization, ReconstructionLoss + +from .ae_utils import AENetworks + + +class AEConfig(ModelConfig): + network: AENetworks = AENetworks.AE_Conv5_FC3 + loss: ReconstructionLoss = ReconstructionLoss.MSELoss + latent_space_size: PositiveInt = 128 + feature_size: PositiveInt = 1024 + n_conv: PositiveInt = 4 + io_layer_channels: PositiveInt = 8 + recons_weight: PositiveFloat = 1.0 + kl_weight: PositiveFloat = 1.0 + normalization: Normalization = Normalization.BATCH diff --git a/clinicadl/network/pythae/ae/ae_model.py b/clinicadl/network/pythae/ae/ae_model.py new file mode 100644 index 000000000..cb96bf76e --- /dev/null +++ b/clinicadl/network/pythae/ae/ae_model.py @@ -0,0 +1,52 @@ +from typing import Callable, Type, TypeVar + +import torch +import torch.nn as nn + +from clinicadl.network.pythae import ClinicaDLModel +from clinicadl.network.pythae.utils import PythaeModel + +from .ae_config import AEConfig +from .ae_utils import PythaeAEWrapper + +T = TypeVar("T", bound="AE") + + +class AE(ClinicaDLModel): + def __init__( + self, + encoder: nn.Module, + decoder: nn.Module, + reconstruction_loss: Callable[ + [torch.Tensor, torch.Tensor], torch.Tensor + ] = nn.MSELoss(), + ) -> None: + super().__init__() + self.encoder = encoder + self.decoder = decoder + self.reconstruction_loss = 
reconstruction_loss + + @classmethod + def from_config(cls: Type[T], config: AEConfig) -> T: + pass + + @staticmethod + def from_pythae(model: PythaeModel) -> PythaeAEWrapper: + return PythaeAEWrapper(model) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.reconstruct(self.embed(x)) + + def embed(self, x: torch.Tensor) -> torch.Tensor: + return self.encoder(x) + + def reconstruct(self, x: torch.Tensor) -> torch.Tensor: + return self.decoder(x) + + def training_step(self, x: torch.Tensor) -> torch.Tensor: + recon_x = self.forward(x) + loss = self.compute_loss(recon_x, x) + return loss + + def compute_loss(self, recon_x: torch.Tensor, x: torch.Tensor) -> torch.Tensor: + return self.reconstruction_loss(recon_x, x) diff --git a/clinicadl/network/pythae/ae/ae_utils.py b/clinicadl/network/pythae/ae/ae_utils.py new file mode 100644 index 000000000..14501882b --- /dev/null +++ b/clinicadl/network/pythae/ae/ae_utils.py @@ -0,0 +1,40 @@ +from enum import Enum + +import torch + +from clinicadl.network.pythae import ClinicaDLModel +from clinicadl.network.pythae.utils import PythaeModel + + +class AENetworks(str, Enum): + AE_Conv5_FC3 = "AE_Conv5_FC3" + AE_Conv4_FC3 = "AE_Conv4_FC3" + CAE_half = "CAE_half" + + +class PythaeAEWrapper(ClinicaDLModel): + def __init__(self, model: PythaeModel): + super().__init__() + self.pythae_model = model + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.reconstruct(self.embed(x)) + + def embed(self, x: torch.Tensor) -> torch.Tensor: + return self.pythae_model.encoder(x) + + def reconstruct(self, x: torch.Tensor) -> torch.Tensor: + return self.pythae_model.decoder(x) + + def training_step(self, x: torch.Tensor) -> torch.Tensor: + inputs = {"data": x} + loss = self.pythae_model.forward(inputs).loss + return loss + + def compute_loss( + self, recon_x: torch.Tensor, x: torch.Tensor, **kwargs + ) -> torch.Tensor: + loss = self.pythae_model.loss_function(recon_x, x, **kwargs) + if isinstance(loss, tuple): + return loss[0] + return loss diff --git a/clinicadl/network/pythae/ae/builtin_models.py b/clinicadl/network/pythae/ae/builtin_models.py new file mode 100644 index 000000000..316fed0a4 --- /dev/null +++ b/clinicadl/network/pythae/ae/builtin_models.py @@ -0,0 +1,106 @@ +from torch import nn + +from clinicadl.network.autoencoder.cnn_transformer import CNN_Transformer +from clinicadl.network.cnn.models import Conv4_FC3, Conv5_FC3, resnet18 +from clinicadl.network.sub_network import AutoEncoder +from clinicadl.network.vae.vae_layers import ( + DecoderLayer3D, + EncoderLayer3D, + Flatten, + Unflatten3D, +) + +from .ae_model import AE + + +class AE_Conv5_FC3(AE): + """ + Autoencoder derived from the convolutional part of CNN Conv5_FC3. + """ + + def __init__(self, input_size, gpu=True): + # fmt: off + cnn_model = Conv5_FC3(input_size=input_size, gpu=gpu) + autoencoder = CNN_Transformer(cnn_model) + # fmt: on + super().__init__( + encoder=autoencoder.encoder, decoder=autoencoder.decoder, gpu=gpu + ) + + @staticmethod + def get_input_size(): + return "1@128x128" + + @staticmethod + def get_dimension(): + return "2D" + + @staticmethod + def get_task(): + return ["reconstruction"] + + +class AE_Conv4_FC3(AutoEncoder): + """ + Autoencoder derived from the convolutional part of CNN Conv4_FC3. 
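+
+    A minimal usage sketch (the input size is illustrative):
+    ``model = AE_Conv4_FC3(input_size=(1, 128, 128), gpu=False)``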
+ """ + + def __init__(self, input_size, gpu=True): + # fmt: off + cnn_model = Conv4_FC3(input_size=input_size, gpu=gpu) + autoencoder = CNN_Transformer(cnn_model) + # fmt: on + super().__init__( + encoder=autoencoder.encoder, decoder=autoencoder.decoder, gpu=gpu + ) + + @staticmethod + def get_input_size(): + return "1@128x128" + + @staticmethod + def get_dimension(): + return "2D" + + @staticmethod + def get_task(): + return ["reconstruction"] + + +class CAE_half(AutoEncoder): + """ + 3D Autoencoder derived from CVAE + """ + + def __init__(self, input_size, latent_space_size, gpu=True): + # fmt: off + self.encoder = nn.Sequential( + EncoderLayer3D(1, 32, kernel_size=3), + EncoderLayer3D(32, 64, kernel_size=3), + EncoderLayer3D(64, 128, kernel_size=3), + Flatten(), + nn.Linear(153600, latent_space_size) + ) + self.decoder = nn.Sequential( + nn.Linear(latent_space_size, 307200), + Unflatten3D(256, 10, 12, 10), + DecoderLayer3D(256, 128, kernel_size=3), + DecoderLayer3D(128, 64, kernel_size=3), + DecoderLayer3D(64, 1, kernel_size=3) + ) + # fmt: on + super(CAE_half, self).__init__( + encoder=self.encoder, decoder=self.decoder, gpu=gpu + ) + + @staticmethod + def get_input_size(): + return "1@dxhxw" + + @staticmethod + def get_dimension(): + return "3D" + + @staticmethod + def get_task(): + return ["reconstruction"] diff --git a/clinicadl/network/pythae/base/__init__.py b/clinicadl/network/pythae/base/__init__.py new file mode 100644 index 000000000..0b911ccee --- /dev/null +++ b/clinicadl/network/pythae/base/__init__.py @@ -0,0 +1,2 @@ +from .base_config import ModelConfig +from .base_model import ClinicaDLModel diff --git a/clinicadl/network/pythae/base/base_config.py b/clinicadl/network/pythae/base/base_config.py new file mode 100644 index 000000000..a4595f95d --- /dev/null +++ b/clinicadl/network/pythae/base/base_config.py @@ -0,0 +1,34 @@ +from abc import ABC, abstractmethod + +from pydantic import ( + BaseModel, + ConfigDict, + NonNegativeFloat, + field_validator, + model_validator, +) + + +class ModelConfig(ABC, BaseModel): + """ + Abstract base config class for ClinicaDL Models. + + network and loss are specific to the type of models + (e.g. CNN or AE) and must be specified in subclasses. + """ + + network: str + dropout: NonNegativeFloat = 0.0 + loss: str + # pydantic config + model_config = ConfigDict( + validate_assignment=True, validate_return=True, validate_default=True + ) + + @field_validator("dropout") + @classmethod + def validator_dropout(cls, v): + assert ( + 0 <= v <= 1 + ), f"dropout must be between 0 and 1 but it has been set to {v}." + return v diff --git a/clinicadl/network/pythae/base/base_model.py b/clinicadl/network/pythae/base/base_model.py new file mode 100644 index 000000000..e0bb768b1 --- /dev/null +++ b/clinicadl/network/pythae/base/base_model.py @@ -0,0 +1,111 @@ +from abc import ABC, abstractmethod +from pathlib import Path +from typing import Tuple, Type, TypeVar, Union + +import torch +import torch.nn as nn + +from .base_config import ModelConfig + +T = TypeVar("T", bound="ClinicaDLModel") + + +class ClinicaDLModel(ABC, nn.Module): + """Abstract template for ClinicaDL Models.""" + + def __init__(self) -> None: + super().__init__() + + @classmethod + @abstractmethod + def from_config(cls: Type[T], config: ModelConfig) -> T: + """ + Creates a ClinicaDL Model from a config class. + + Parameters + ---------- + config : ModelConfig + The config class. + + Returns + ------- + ClinicaDLModel + The ClinicaDL Model. 
+ """ + pass + + @abstractmethod + def forward(self, x: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]: + """ + Pass forward in the network. + + Parameters + ---------- + x : torch.Tensor + Input data. + + Returns + ------- + Union[torch.Tensor, Tuple[torch.Tensor, ...]] + The output. Either a PyTorch tensor (e.g. output of a CNN) or a tuple of tensors + (e.g. embedding and output of an AutoEncoder). + """ + pass + + @abstractmethod + def training_step(self, x: torch.Tensor) -> torch.Tensor: + """ + Pass forward and loss computation. + + Parameters + ---------- + x : torch.Tensor + The batch. + + Returns + ------- + torch.Tensor + The loss on which backpropagation will be applied. A 1-item tensor. + """ + pass + + @abstractmethod + def predict(self, x: torch.Tensor) -> torch.Tensor: + """ + Makes predictions. + + Parameters + ---------- + x : torch.Tensor + The input data. + + Returns + ------- + torch.Tensor + The predictions. + """ + pass + + @abstractmethod + def save_weights(self, path: Path) -> None: + """ + Saves network weights. + + Parameters + ---------- + path : Path + The file where the weights will be stored. + """ + pass + + @abstractmethod + def load_weights(self, path: Path) -> None: + """ + Loads network weights. + + Parameters + ---------- + path : Path + The file where the weights are stored. + """ + pass diff --git a/clinicadl/network/pythae/cnn/__init__.py b/clinicadl/network/pythae/cnn/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/clinicadl/network/pythae/cnn/cnn_config.py b/clinicadl/network/pythae/cnn/cnn_config.py new file mode 100644 index 000000000..15cbe83e5 --- /dev/null +++ b/clinicadl/network/pythae/cnn/cnn_config.py @@ -0,0 +1,13 @@ +from typing import Tuple + +from pydantic import NonNegativeInt + +from clinicadl.network.pythae.base import ModelConfig +from clinicadl.network.pythae.nn.networks.cnn import ImplementedCNN + + +class CNNConfig(ModelConfig): + network: ImplementedCNN = ImplementedCNN.Conv5_FC3 + loss: ClassificationLoss = ClassificationLoss.CrossEntropyLoss + input_size: Tuple[NonNegativeInt, ...] 
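+    # input_size is expected as (channels, *spatial_dims), e.g. (1, 128, 128)
+    # for a 2D image (illustrative values); dimensionality is inferred from it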
+    output_size: NonNegativeInt = 1
diff --git a/clinicadl/network/pythae/cnn/cnn_model.py b/clinicadl/network/pythae/cnn/cnn_model.py
new file mode 100644
index 000000000..a51bf2cd9
--- /dev/null
+++ b/clinicadl/network/pythae/cnn/cnn_model.py
@@ -0,0 +1,39 @@
+from typing import Callable, Type, TypeVar
+
+import torch
+import torch.nn as nn
+
+from clinicadl.network.pythae import ClinicaDLModel
+
+from .cnn_config import CNNConfig
+
+T = TypeVar("T", bound="CNN")
+
+
+class CNN(ClinicaDLModel):
+    def __init__(
+        self,
+        network: nn.Module,
+        loss: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
+    ):
+        super().__init__()
+        self.network = network
+        self.loss = loss
+
+    @classmethod
+    def from_config(cls: Type[T], config: CNNConfig) -> T:
+        pass
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.network(x)
+
+    def training_step(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
+        y_pred = self.forward(x)
+        loss = self.compute_loss(y_pred, y)
+        return loss
+
+    def compute_loss(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
+        return self.loss(y_pred, y)
+
+    def predict(self, x: torch.Tensor) -> torch.Tensor:
+        return self.forward(x)
diff --git a/clinicadl/network/pythae/nn/__init__.py b/clinicadl/network/pythae/nn/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/clinicadl/network/pythae/nn/layers/__init__.py b/clinicadl/network/pythae/nn/layers/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/clinicadl/network/pythae/nn/layers/factory/__init__.py b/clinicadl/network/pythae/nn/layers/factory/__init__.py
new file mode 100644
index 000000000..28423743d
--- /dev/null
+++ b/clinicadl/network/pythae/nn/layers/factory/__init__.py
@@ -0,0 +1,3 @@
+from .conv import ConvLayer
+from .norm import NormLayer
+from .pool import PoolLayer
diff --git a/clinicadl/network/pythae/nn/layers/factory/conv.py b/clinicadl/network/pythae/nn/layers/factory/conv.py
new file mode 100644
index 000000000..495f71c0e
--- /dev/null
+++ b/clinicadl/network/pythae/nn/layers/factory/conv.py
@@ -0,0 +1,26 @@
+from typing import Type
+
+import torch.nn as nn
+
+
+class ConvLayer:
+    """Factory object for creating Convolutional layers."""
+
+    def __new__(cls, dim: int) -> Type[nn.Module]:
+        """
+        Creates a Convolutional layer.
+
+        Parameters
+        ----------
+        dim : int
+            Dimension of the image.
+
+        Returns
+        -------
+        Type[nn.Module]
+            The Convolutional layer.
+        """
+        assert dim in {2, 3}, "Input dimension must be 2 or 3."
+
+        layers = [nn.Conv2d, nn.Conv3d]
+        return layers[dim - 2]
diff --git a/clinicadl/network/pythae/nn/layers/factory/norm.py b/clinicadl/network/pythae/nn/layers/factory/norm.py
new file mode 100644
index 000000000..dc6ff086b
--- /dev/null
+++ b/clinicadl/network/pythae/nn/layers/factory/norm.py
@@ -0,0 +1,62 @@
+from typing import Type, Union
+
+import torch.nn as nn
+
+from clinicadl.utils.enum import BaseEnum
+
+
+class Normalization(str, BaseEnum):
+    """Available normalization layers in ClinicaDL."""
+
+    BATCH = "BatchNorm"
+    GROUP = "GroupNorm"
+    INSTANCE = "InstanceNorm"
+
+
+class NormLayer:
+    """Factory object for creating Normalization layers."""
+
+    def __new__(
+        cls, normalization: Union[str, Normalization], dim: int
+    ) -> Type[nn.Module]:
+        """
+        Creates a Normalization layer.
+
+        Parameters
+        ----------
+        normalization : Normalization
+            Type of normalization.
+        dim : int
+            Dimension of the image.
+ + Returns + ------- + Type[nn.Module] + The normalization layer. + """ + assert dim in {2, 3}, "Input dimension must be 2 or 3." + normalization = Normalization(normalization) + + if normalization == Normalization.BATCH: + factory = _batch_norm_factory + elif normalization == Normalization.INSTANCE: + factory = _instance_norm_factory + elif normalization == Normalization.GROUP: + factory = _group_norm_factory + return factory(dim) + + +def _instance_norm_factory( + dim: int, +) -> Union[Type[nn.InstanceNorm2d], Type[nn.InstanceNorm3d]]: + layers = (nn.InstanceNorm2d, nn.InstanceNorm3d) + return layers[dim - 2] + + +def _batch_norm_factory(dim: int) -> Union[Type[nn.BatchNorm2d], Type[nn.BatchNorm3d]]: + layers = (nn.BatchNorm2d, nn.BatchNorm3d) + return layers[dim - 2] + + +def _group_norm_factory(dim: int) -> Type[nn.GroupNorm]: + return nn.GroupNorm diff --git a/clinicadl/network/pythae/nn/layers/factory/pool.py b/clinicadl/network/pythae/nn/layers/factory/pool.py new file mode 100644 index 000000000..f6396011d --- /dev/null +++ b/clinicadl/network/pythae/nn/layers/factory/pool.py @@ -0,0 +1,53 @@ +from typing import Type, Union + +import torch.nn as nn + +from clinicadl.utils.enum import BaseEnum + +from ..pool import PadMaxPool2d, PadMaxPool3d + + +class Pooling(str, BaseEnum): + """Available pooling layers in ClinicaDL.""" + + MAX = "MaxPool" + PADMAX = "PadMaxPool" + + +class PoolLayer: + """Factory object for creating Pooling layers.""" + + def __new__(cls, pooling: Union[str, Pooling], dim: int) -> Type[nn.Module]: + """ + Creates a Pooling layer. + + Parameters + ---------- + pooling : Pooling + Type of pooling. + dim : int + Dimension of the image. + + Returns + ------- + Type[nn.Module] + The normalization layer. + """ + assert dim in {2, 3}, "Input dimension must be 2 or 3." 
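+        # e.g. PoolLayer("PadMaxPool", dim=3) returns the PadMaxPool3d class
+        # itself; instantiation happens at the call site, e.g. PadMaxPool3d(2, 2)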
+ pooling = Pooling(pooling) + + if pooling == Pooling.MAX: + factory = _max_pool_factory + elif pooling == Pooling.PADMAX: + factory = _pad_max_pool_factory + return factory(dim) + + +def _max_pool_factory(dim: int) -> Union[Type[nn.MaxPool2d], Type[nn.MaxPool3d]]: + layers = (nn.MaxPool2d, nn.MaxPool3d) + return layers[dim - 2] + + +def _pad_max_pool_factory(dim: int) -> Union[Type[PadMaxPool2d], Type[PadMaxPool3d]]: + layers = (PadMaxPool2d, PadMaxPool3d) + return layers[dim - 2] diff --git a/clinicadl/network/pythae/nn/layers/pool.py b/clinicadl/network/pythae/nn/layers/pool.py new file mode 100644 index 000000000..92ff2d5dd --- /dev/null +++ b/clinicadl/network/pythae/nn/layers/pool.py @@ -0,0 +1,81 @@ +import torch.nn as nn + + +class PadMaxPool3d(nn.Module): + def __init__(self, kernel_size, stride, return_indices=False, return_pad=False): + super(PadMaxPool3d, self).__init__() + self.kernel_size = kernel_size + self.stride = stride + self.pool = nn.MaxPool3d(kernel_size, stride, return_indices=return_indices) + self.pad = nn.ConstantPad3d(padding=0, value=0) + self.return_indices = return_indices + self.return_pad = return_pad + + def set_new_return(self, return_indices=True, return_pad=True): + self.return_indices = return_indices + self.return_pad = return_pad + self.pool.return_indices = return_indices + + def forward(self, f_maps): + coords = [self.stride - f_maps.size(i + 2) % self.stride for i in range(3)] + for i, coord in enumerate(coords): + if coord == self.stride: + coords[i] = 0 + + self.pad.padding = (coords[2], 0, coords[1], 0, coords[0], 0) + + if self.return_indices: + output, indices = self.pool(self.pad(f_maps)) + + if self.return_pad: + return output, indices, (coords[2], 0, coords[1], 0, coords[0], 0) + else: + return output, indices + + else: + output = self.pool(self.pad(f_maps)) + + if self.return_pad: + return output, (coords[2], 0, coords[1], 0, coords[0], 0) + else: + return output + + +class PadMaxPool2d(nn.Module): + def __init__(self, kernel_size, stride, return_indices=False, return_pad=False): + super(PadMaxPool2d, self).__init__() + self.kernel_size = kernel_size + self.stride = stride + self.pool = nn.MaxPool2d(kernel_size, stride, return_indices=return_indices) + self.pad = nn.ConstantPad2d(padding=0, value=0) + self.return_indices = return_indices + self.return_pad = return_pad + + def set_new_return(self, return_indices=True, return_pad=True): + self.return_indices = return_indices + self.return_pad = return_pad + self.pool.return_indices = return_indices + + def forward(self, f_maps): + coords = [self.stride - f_maps.size(i + 2) % self.stride for i in range(2)] + for i, coord in enumerate(coords): + if coord == self.stride: + coords[i] = 0 + + self.pad.padding = (coords[1], 0, coords[0], 0) + + if self.return_indices: + output, indices = self.pool(self.pad(f_maps)) + + if self.return_pad: + return output, indices, (coords[1], 0, coords[0], 0) + else: + return output, indices + + else: + output = self.pool(self.pad(f_maps)) + + if self.return_pad: + return output, (coords[1], 0, coords[0], 0) + else: + return output diff --git a/clinicadl/network/pythae/nn/layers/unpool.py b/clinicadl/network/pythae/nn/layers/unpool.py new file mode 100644 index 000000000..90da20a3e --- /dev/null +++ b/clinicadl/network/pythae/nn/layers/unpool.py @@ -0,0 +1,32 @@ +import torch.nn as nn + + +class CropMaxUnpool3d(nn.Module): + def __init__(self, kernel_size, stride): + super(CropMaxUnpool3d, self).__init__() + self.unpool = nn.MaxUnpool3d(kernel_size, stride) + + 
def forward(self, f_maps, indices, padding=None): + output = self.unpool(f_maps, indices) + if padding is not None: + x1 = padding[4] + y1 = padding[2] + z1 = padding[0] + output = output[:, :, x1::, y1::, z1::] + + return output + + +class CropMaxUnpool2d(nn.Module): + def __init__(self, kernel_size, stride): + super(CropMaxUnpool2d, self).__init__() + self.unpool = nn.MaxUnpool2d(kernel_size, stride) + + def forward(self, f_maps, indices, padding=None): + output = self.unpool(f_maps, indices) + if padding is not None: + x1 = padding[2] + y1 = padding[0] + output = output[:, :, x1::, y1::] + + return output diff --git a/clinicadl/network/pythae/nn/networks/__init__.py b/clinicadl/network/pythae/nn/networks/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/clinicadl/network/pythae/nn/networks/cnn.py b/clinicadl/network/pythae/nn/networks/cnn.py new file mode 100644 index 000000000..caa1a53a1 --- /dev/null +++ b/clinicadl/network/pythae/nn/networks/cnn.py @@ -0,0 +1,394 @@ +from enum import Enum + +import numpy as np +import torch +import torch.utils.model_zoo as model_zoo +from torch import nn +from torchvision.models.resnet import BasicBlock + +from clinicadl.network.pythae.nn.layers.factory import ConvLayer, NormLayer, PoolLayer +from clinicadl.network.pythae.nn.utils.resnet import ResNetDesigner, model_urls + +# from clinicadl.network.pythae.nn.utils.resnet3D import ResNetDesigner3D +# from clinicadl.network.pythae.nn.utils.SECNN import SECNNDesigner3D +# from clinicadl.network.sub_network import CNN, CNN_SSDA + + +class ImplementedCNN(str, Enum): + Conv5_FC3 = "Conv5_FC3" + Conv4_FC3 = "Conv4_FC3" + Stride_Conv5_FC3 = "Stride_Conv5_FC3" + RESNET = "resnet18" + + @classmethod + def _missing_(cls, value): + raise ValueError( + f"{value} is not implemented. 
Implemented networks are: " + + ", ".join([repr(m.value) for m in cls]) + ) + + +class Conv5_FC3(nn.Module): + """A Convolutional Neural Network with 5 convolution and 3 fully-connected layers.""" + + def __init__(self, input_size, output_size, dropout): + dim = len(input_size) - 1 + in_channels = input_size[1] + + conv = ConvLayer(dim) + norm = PoolLayer("PadMaxPool", dim=dim) + pool = NormLayer("BatchNorm", dim=dim) + + self.convolutions = nn.Sequential( + conv(in_channels, 8, 3, padding=1), + norm(8), + nn.ReLU(), + pool(2, 2), + conv(8, 16, 3, padding=1), + norm(16), + nn.ReLU(), + pool(2, 2), + conv(16, 32, 3, padding=1), + norm(32), + nn.ReLU(), + pool(2, 2), + conv(32, 64, 3, padding=1), + norm(64), + nn.ReLU(), + pool(2, 2), + conv(64, 128, 3, padding=1), + norm(128), + nn.ReLU(), + pool(2, 2), + ) + + input_tensor = torch.zeros(input_size).unsqueeze(0) + output_shape = self.convolutions(input_tensor).shape + + self.fc = nn.Sequential( + nn.Flatten(), + nn.Dropout(p=dropout), + nn.Linear(np.prod(list(output_shape)).item(), 1300), + nn.ReLU(), + nn.Linear(1300, 50), + nn.ReLU(), + nn.Linear(50, output_size), + ) + + def forward(self, x): + x = self.convolutions(x) + return self.fc(x) + + +class Conv4_FC3(nn.Module): + """A Convolutional Neural Network with 4 convolution and 3 fully-connected layers.""" + + def __init__(self, input_size, output_size, dropout): + dim = len(input_size) - 1 + in_channels = input_size[1] + + conv = ConvLayer(dim) + norm = PoolLayer("PadMaxPool", dim=dim) + pool = NormLayer("BatchNorm", dim=dim) + + self.convolutions = nn.Sequential( + conv(in_channels, 8, 3, padding=1), + norm(8), + nn.ReLU(), + pool(2, 2), + conv(8, 16, 3, padding=1), + norm(16), + nn.ReLU(), + pool(2, 2), + conv(16, 32, 3, padding=1), + norm(32), + nn.ReLU(), + pool(2, 2), + conv(32, 64, 3, padding=1), + norm(64), + nn.ReLU(), + pool(2, 2), + conv(64, 128, 3, padding=1), + norm(128), + nn.ReLU(), + pool(2, 2), + ) + + input_tensor = torch.zeros(input_size).unsqueeze(0) + output_shape = self.convolutions(input_tensor).shape + + self.fc = nn.Sequential( + nn.Flatten(), + nn.Dropout(p=dropout), + nn.Linear(np.prod(list(output_shape)).item(), 50), + nn.ReLU(), + nn.Linear(50, 40), + nn.ReLU(), + nn.Linear(40, output_size), + ) + + def forward(self, x): + x = self.convolutions(x) + return self.fc(x) + + +class Stride_Conv5_FC3(nn.Module): + """A Convolutional Neural Network with 5 convolution and 3 fully-connected layers and a stride of 2 for each convolutional layer.""" + + def __init__(self, input_size, output_size, dropout): + dim = len(input_size) - 1 + in_channels = input_size[1] + + conv = ConvLayer(dim) + norm = PoolLayer("PadMaxPool", dim=dim) + + self.convolutions = nn.Sequential( + conv(in_channels, 8, 3, padding=1, stride=2), + norm(8), + nn.ReLU(), + conv(8, 16, 3, padding=1, stride=2), + norm(16), + nn.ReLU(), + conv(16, 32, 3, padding=1, stride=2), + norm(32), + nn.ReLU(), + conv(32, 64, 3, padding=1, stride=2), + norm(64), + nn.ReLU(), + conv(64, 128, 3, padding=1, stride=2), + norm(128), + nn.ReLU(), + ) + + input_tensor = torch.zeros(input_size).unsqueeze(0) + output_shape = self.convolutions(input_tensor).shape + + self.fc = nn.Sequential( + nn.Flatten(), + nn.Dropout(p=dropout), + nn.Linear(np.prod(list(output_shape)).item(), 1300), + nn.ReLU(), + nn.Linear(1300, 50), + nn.ReLU(), + nn.Linear(50, output_size), + ) + + def forward(self, x): + x = self.convolutions(x) + return self.fc(x) + + +class resnet18(nn.Module): + """ + ResNet-18 is a neural network that is 18 layers 
deep based on residual block. + It uses skip connections or shortcuts to jump over some layers. + It is an image classification pre-trained model. + The model input has 3 channels in RGB order. + + Reference: Kaiming He et al., Deep Residual Learning for Image Recognition. + https://arxiv.org/abs/1512.03385?context=cs + """ + + def __init__(self, input_size, output_size, dropout): + model = ResNetDesigner(input_size, BasicBlock, [2, 2, 2, 2]) + model.load_state_dict(model_zoo.load_url(model_urls["resnet18"])) + + self.convolutions = nn.Sequential( + model.conv1, + model.bn1, + model.relu, + model.maxpool, + model.layer1, + model.layer2, + model.layer3, + model.layer4, + model.avgpool, + ) + + # add a fc layer on top of the transfer_learning model and a softmax classifier + self.fc = nn.Sequential(nn.Flatten(), model.fc) + self.fc.add_module("drop_out", nn.Dropout(p=dropout)) + self.fc.add_module("fc_out", nn.Linear(1000, output_size)) + + def forward(self, x): + x = self.convolutions(x) + return self.fc(x) + + +# TODO : check the following networks # + +# class ResNet3D(nn.Module): +# """ +# ResNet3D is a 3D neural network composed of 5 residual blocks. Each residual block +# is compose of 3D convolutions followed by a batch normalization and an activation function. +# It uses skip connections or shortcuts to jump over some layers. It's a 3D version of the +# original implementation of Kaiming He et al. + +# Reference: Kaiming He et al., Deep Residual Learning for Image Recognition. +# https://arxiv.org/abs/1512.03385?context=cs +# """ + +# def __init__(self, input_size, dropout, output_size=1): +# model = ResNetDesigner3D(input_size) + +# self.convolutions = nn.Sequential( +# model.layer0, model.layer1, model.layer2, model.layer3, model.layer4 +# ) + +# self.fc = model.fc + +# def forward(self, x): +# x = self.convolutions(x) +# return self.fc(x) + + +# class SqueezeExcitationCNN(CNN): +# """ +# SE-CNN is a combination of a ResNet-101 with Squeeze and Excitation blocks which was successfully +# tested on brain tumour classification by Ghosal et al. 2019. SE blocks are composed of a squeeze +# and an excitation step. The squeeze operation is obtained through an average pooling layer and +# provides a global understanding of each channel. + +# The excitation part consists of a two-layer feed-forward network that outputs a vector of n values +# corresponding to the weights of each channel of the feature maps. + +# Reference: Ghosal et al. Brain Tumor Classification Using ResNet-101 Based Squeeze and Excitation Deep Neural Network +# https://ieeexplore.ieee.org/document/8882973 + +# """ + +# def __init__( +# self, input_size=[1, 169, 208, 179], gpu=True, output_size=2, dropout=0.5 +# ): +# model = SECNNDesigner3D() + +# convolutions = nn.Sequential( +# model.layer0, model.layer1, model.layer2, model.layer3, model.layer4 +# ) + +# fc = model.fc + +# super().__init__( +# convolutions=convolutions, +# fc=fc, +# n_classes=output_size, +# gpu=gpu, +# ) + +# @staticmethod +# def get_input_size(): +# return "1@169x207x179" + +# @staticmethod +# def get_dimension(): +# return "3D" + +# @staticmethod +# def get_task(): +# return ["classification"] + + +# class Conv5_FC3_SSDA(CNN_SSDA): +# """ +# Reduce the 2D or 3D input image to an array of size output_size. 
+# """ + +# def __init__(self, input_size, gpu=True, output_size=2, dropout=0.5): +# conv, norm, pool = get_layers_fn(input_size) +# # fmt: off +# convolutions = nn.Sequential( +# conv(input_size[0], 8, 3, padding=1), +# norm(8), +# nn.ReLU(), +# pool(2, 2), + +# conv(8, 16, 3, padding=1), +# norm(16), +# nn.ReLU(), +# pool(2, 2), + +# conv(16, 32, 3, padding=1), +# norm(32), +# nn.ReLU(), +# pool(2, 2), + +# conv(32, 64, 3, padding=1), +# norm(64), +# nn.ReLU(), +# pool(2, 2), + +# conv(64, 128, 3, padding=1), +# norm(128), +# nn.ReLU(), +# pool(2, 2), + +# # conv(128, 256, 3, padding=1), +# # norm(256), +# # nn.ReLU(), +# # pool(2, 2), +# ) + +# # Compute the size of the first FC layer +# input_tensor = torch.zeros(input_size).unsqueeze(0) +# output_convolutions = convolutions(input_tensor) + +# fc_class_source = nn.Sequential( +# nn.Flatten(), +# nn.Dropout(p=dropout), + +# nn.Linear(np.prod(list(output_convolutions.shape)).item(), 1300), +# nn.ReLU(), + +# nn.Linear(1300, 50), +# nn.ReLU(), + +# nn.Linear(50, output_size) +# ) + + +# fc_class_target= nn.Sequential( +# nn.Flatten(), +# nn.Dropout(p=dropout), + +# nn.Linear(np.prod(list(output_convolutions.shape)).item(), 1300), +# nn.ReLU(), + +# nn.Linear(1300, 50), +# nn.ReLU(), + +# nn.Linear(50, output_size) +# ) + +# fc_domain = nn.Sequential( +# nn.Flatten(), +# nn.Dropout(p=dropout), + +# nn.Linear(np.prod(list(output_convolutions.shape)).item(), 1300), +# nn.ReLU(), + +# nn.Linear(1300, 50), +# nn.ReLU(), + +# nn.Linear(50, output_size) +# ) +# # fmt: on +# super().__init__( +# convolutions=convolutions, +# fc_class_source=fc_class_source, +# fc_class_target=fc_class_target, +# fc_domain=fc_domain, +# n_classes=output_size, +# gpu=gpu, +# ) + +# @staticmethod +# def get_input_size(): +# return "1@128x128" + +# @staticmethod +# def get_dimension(): +# return "2D or 3D" + +# @staticmethod +# def get_task(): +# return ["classification", "regression"] diff --git a/clinicadl/network/pythae/nn/utils/SECNN.py b/clinicadl/network/pythae/nn/utils/SECNN.py new file mode 100644 index 000000000..4e34cd1ed --- /dev/null +++ b/clinicadl/network/pythae/nn/utils/SECNN.py @@ -0,0 +1,143 @@ +import torch +import torch.nn as nn + + +class Flatten(nn.Module): + def forward(self, input): + return input.view(input.size(0), -1) + + +class SE_Blocks(nn.Module): + def __init__(self, num_channels, ratio_channel): + super(SE_Blocks, self).__init__() + self.num_channels = num_channels + self.avg_pooling_3D = nn.AdaptiveAvgPool3d(1) + num_channels_reduced = num_channels // ratio_channel + self.fc1 = nn.Linear(num_channels, num_channels_reduced) + self.fc2 = nn.Linear(num_channels_reduced, num_channels) + self.act1 = nn.ReLU() + self.act2 = nn.Sigmoid() + + def forward(self, input_tensor): + """ + Parameters + ---------- + input_tensor: pt tensor + X, shape = (batch_size, num_channels, D, H, W) + + Returns + ------- + output_tensor: pt tensor + """ + batch_size, num_channels, D, H, W = input_tensor.size() + # Average along each channel + squeeze_tensor = self.avg_pooling_3D(input_tensor) + + # channel excitation + fc_out_1 = self.act1(self.fc1(squeeze_tensor.view(batch_size, num_channels))) + fc_out_2 = self.act2(self.fc2(fc_out_1)) + + output_tensor = torch.mul( + input_tensor, fc_out_2.view(batch_size, num_channels, 1, 1, 1) + ) + + return output_tensor + + +class ResBlock_SE(nn.Module): + def __init__(self, block_number, input_size, num_channels, ration_channel=8): + super(ResBlock_SE, self).__init__() + + layer_in = input_size if input_size is not None 
else 2 ** (block_number + 1) + layer_out = 2 ** (block_number + 2) + + self.conv1 = nn.Conv3d( + layer_in, layer_out, kernel_size=3, stride=1, padding=1, bias=False + ) + self.bn1 = nn.BatchNorm3d(layer_out) + self.act1 = nn.ReLU() + + self.conv2 = nn.Conv3d( + layer_out, layer_out, kernel_size=3, stride=1, padding=1, bias=False + ) + self.bn2 = nn.BatchNorm3d(layer_out) + + self.se_block = SE_Blocks(num_channels, ration_channel) + + # shortcut + self.shortcut = nn.Sequential( + nn.Conv3d( + layer_in, layer_out, kernel_size=1, stride=1, padding=0, bias=False + ) + ) + + self.act2 = nn.ReLU() + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.act1(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.se_block(out) + out += self.shortcut(x) + out = self.act2(out) + return out + + +class SECNNDesigner3D(nn.Module): + def __init__(self, input_size=[1, 169, 208, 179]): + super(SECNNDesigner3D, self).__init__() + + assert ( + len(input_size) == 4 + ), "input must be in 3d with the corresponding number of channels" + + self.layer0 = self._make_block(1, 8, 8, input_size[0]) + self.layer1 = self._make_block(2, 16) + self.layer2 = self._make_block(3, 32) + self.layer3 = self._make_block(4, 64) + self.layer4 = self._make_block(5, 128) + + input_tensor = torch.zeros(input_size).unsqueeze(0) + out = self.layer0(input_tensor) + out = self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + out = self.layer4(out) + + d, h, w = self._maxpool_output_size(input_size[1::], nb_layers=5) + self.fc = nn.Sequential( + Flatten(), + nn.Dropout(p=0.5), + nn.Linear(128 * d * h * w, 256), # t1 image + nn.ReLU(), + nn.Linear(256, 2), + ) + + for layer in self.fc: + out = layer(out) + + def _make_block( + self, block_number, num_channels, ration_channel=8, input_size=None + ): + return nn.Sequential( + ResBlock_SE(block_number, input_size, num_channels, ration_channel), + nn.MaxPool3d(3, stride=2), + ) + + def _maxpool_output_size( + self, input_size, kernel_size=(3, 3, 3), stride=(2, 2, 2), nb_layers=1 + ): + import math + + d = math.floor((input_size[0] - kernel_size[0]) / stride[0] + 1) + h = math.floor((input_size[1] - kernel_size[1]) / stride[1] + 1) + w = math.floor((input_size[2] - kernel_size[2]) / stride[2] + 1) + + if nb_layers == 1: + return d, h, w + return self._maxpool_output_size( + (d, h, w), kernel_size=kernel_size, stride=stride, nb_layers=nb_layers - 1 + ) diff --git a/clinicadl/network/pythae/nn/utils/__init__.py b/clinicadl/network/pythae/nn/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/clinicadl/network/pythae/nn/utils/resnet.py b/clinicadl/network/pythae/nn/utils/resnet.py new file mode 100644 index 000000000..27f04fd38 --- /dev/null +++ b/clinicadl/network/pythae/nn/utils/resnet.py @@ -0,0 +1,63 @@ +import math + +import torch +from torch import nn + +model_urls = {"resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth"} + + +class ResNetDesigner(nn.Module): + def __init__(self, input_size, block, layers, num_classes=1000): + self.inplanes = 64 + super(ResNetDesigner, self).__init__() + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 
= self._make_layer(block, 512, layers[3], stride=2) + + # Compute avgpool size + input_tensor = torch.zeros(input_size).unsqueeze(0) + out = self.conv1(input_tensor) + out = self.relu(self.bn1(out)) + out = self.maxpool(out) + out = self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + out = self.layer4(out) + + self.avgpool = nn.AvgPool2d((out.size(2), out.size(3)), stride=1) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2.0 / n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False, + ), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) diff --git a/clinicadl/network/pythae/nn/utils/resnet3D.py b/clinicadl/network/pythae/nn/utils/resnet3D.py new file mode 100644 index 000000000..5e3fd7baf --- /dev/null +++ b/clinicadl/network/pythae/nn/utils/resnet3D.py @@ -0,0 +1,100 @@ +import torch +import torch.nn as nn + + +class Flatten(nn.Module): + def forward(self, input): + return input.view(input.size(0), -1) + + +class ResBlock(nn.Module): + def __init__(self, block_number, input_size): + super(ResBlock, self).__init__() + + layer_in = input_size if input_size is not None else 2 ** (block_number + 1) + layer_out = 2 ** (block_number + 2) + + self.conv1 = nn.Conv3d( + layer_in, layer_out, kernel_size=3, stride=1, padding=1, bias=False + ) + self.bn1 = nn.BatchNorm3d(layer_out) + self.act1 = nn.ELU() + + self.conv2 = nn.Conv3d( + layer_out, layer_out, kernel_size=3, stride=1, padding=1, bias=False + ) + self.bn2 = nn.BatchNorm3d(layer_out) + + # shortcut + self.shortcut = nn.Sequential( + nn.Conv3d( + layer_in, layer_out, kernel_size=1, stride=1, padding=0, bias=False + ) + ) + + self.act2 = nn.ELU() + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.act1(out) + + out = self.conv2(out) + out = self.bn2(out) + out += self.shortcut(x) + out = self.act2(out) + return out + + +class ResNetDesigner3D(nn.Module): + def __init__(self, input_size=[1, 169, 208, 179]): + super(ResNetDesigner3D, self).__init__() + + assert ( + len(input_size) == 4 + ), "Input must be in 3D with the corresponding number of channels." 
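+
+        # Five ResBlock + MaxPool3d stages; the input size of the fully-connected
+        # head is computed analytically by _maxpool_output_size, and the dummy
+        # forward pass below sanity-checks the resulting shapes.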
+
+        self.layer0 = self._make_block(1, input_size[0])
+        self.layer1 = self._make_block(2)
+        self.layer2 = self._make_block(3)
+        self.layer3 = self._make_block(4)
+        self.layer4 = self._make_block(5)
+
+        input_tensor = torch.zeros(input_size).unsqueeze(0)
+        out = self.layer0(input_tensor)
+        out = self.layer1(out)
+        out = self.layer2(out)
+        out = self.layer3(out)
+        out = self.layer4(out)
+
+        d, h, w = self._maxpool_output_size(input_size[1::], nb_layers=5)
+        self.fc = nn.Sequential(
+            Flatten(),
+            nn.Linear(128 * d * h * w, 256),  # t1 image
+            nn.ELU(),
+            nn.Dropout(p=0.8),
+            nn.Linear(256, 2),
+        )
+
+        for layer in self.fc:
+            out = layer(out)
+
+    def _make_block(self, block_number, input_size=None):
+        return nn.Sequential(
+            ResBlock(block_number, input_size), nn.MaxPool3d(3, stride=2)
+        )
+
+    def _maxpool_output_size(
+        self, input_size, kernel_size=(3, 3, 3), stride=(2, 2, 2), nb_layers=1
+    ):
+        import math
+
+        d = math.floor((input_size[0] - kernel_size[0]) / stride[0] + 1)
+        h = math.floor((input_size[1] - kernel_size[1]) / stride[1] + 1)
+        w = math.floor((input_size[2] - kernel_size[2]) / stride[2] + 1)
+
+        if nb_layers == 1:
+            return d, h, w
+        return self._maxpool_output_size(
+            (d, h, w), kernel_size=kernel_size, stride=stride, nb_layers=nb_layers - 1
+        )
diff --git a/clinicadl/network/pythae/utils.py b/clinicadl/network/pythae/utils.py
new file mode 100644
index 000000000..15662c4b9
--- /dev/null
+++ b/clinicadl/network/pythae/utils.py
@@ -0,0 +1,27 @@
+from abc import ABC, abstractmethod
+from collections import OrderedDict
+from typing import Tuple, Union
+
+import torch
+import torch.nn as nn
+from pydantic.dataclasses import dataclass
+
+
+class PythaeModelOutput(ABC):
+    loss: torch.Tensor
+
+
+class PythaeModel(ABC):
+    model_config: dataclass
+    encoder: nn.Module
+    decoder: nn.Module
+
+    @abstractmethod
+    def loss_function(
+        self, recon_x: torch.Tensor, x: torch.Tensor
+    ) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
+        pass
+
+    @abstractmethod
+    def forward(self, inputs: OrderedDict, **kwargs) -> PythaeModelOutput:
+        pass
diff --git a/clinicadl/network/pythae/vae/__init__.py b/clinicadl/network/pythae/vae/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/clinicadl/network/pythae/vae/ae_config.py b/clinicadl/network/pythae/vae/ae_config.py
new file mode 100644
index 000000000..a91447d43
--- /dev/null
+++ b/clinicadl/network/pythae/vae/ae_config.py
@@ -0,0 +1,18 @@
+from pydantic import PositiveFloat, PositiveInt
+
+from clinicadl.network.pythae import ModelConfig
+from clinicadl.utils.enum import Normalization, ReconstructionLoss
+
+from ..ae.ae_utils import AENetworks
+
+
+class AEConfig(ModelConfig):
+    network: AENetworks = AENetworks.AE_Conv5_FC3
+    loss: ReconstructionLoss = ReconstructionLoss.MSELoss
+    latent_space_size: PositiveInt = 128
+    feature_size: PositiveInt = 1024
+    n_conv: PositiveInt = 4
+    io_layer_channels: PositiveInt = 8
+    recons_weight: PositiveFloat = 1.0
+    kl_weight: PositiveFloat = 1.0
+    normalization: Normalization = Normalization.BATCH
diff --git a/clinicadl/network/pythae/vae/vae_model.py b/clinicadl/network/pythae/vae/vae_model.py
new file mode 100644
index 000000000..dde81d20f
--- /dev/null
+++ b/clinicadl/network/pythae/vae/vae_model.py
@@ -0,0 +1,52 @@
+from typing import Callable, Type, TypeVar
+
+import torch
+import torch.nn as nn
+
+from clinicadl.network.pythae import ClinicaDLModel
+from clinicadl.network.pythae.utils import PythaeModel
+
+from ..ae.ae_utils import PythaeAEWrapper
+from .ae_config import AEConfig
+
+T = 
TypeVar("T", bound="VAE") + + +class VAE(ClinicaDLModel): + def __init__( + self, + encoder: nn.Module, + decoder: nn.Module, + reconstruction_loss: Callable[ + [torch.Tensor, torch.Tensor], torch.Tensor + ] = nn.MSELoss(), + ) -> None: + super().__init__() + self.encoder = encoder + self.decoder = decoder + self.reconstruction_loss = reconstruction_loss + + @classmethod + def from_config(cls: Type[T], config: AEConfig) -> T: + pass + + @staticmethod + def from_pythae(model: PythaeModel) -> PythaeAEWrapper: + return PythaeAEWrapper(model) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.reconstruct(self.embed(x)) + + def embed(self, x: torch.Tensor) -> torch.Tensor: + return self.encoder(x) + + def reconstruct(self, x: torch.Tensor) -> torch.Tensor: + return self.decoder(x) + + def training_step(self, x: torch.Tensor) -> torch.Tensor: + recon_x = self.forward(x) + loss = self.compute_loss(recon_x, x) + return loss + + def compute_loss(self, recon_x: torch.Tensor, x: torch.Tensor) -> torch.Tensor: + return self.reconstruction_loss(recon_x, x) diff --git a/clinicadl/utils/enum.py b/clinicadl/utils/enum.py index 813836e08..9885021e4 100644 --- a/clinicadl/utils/enum.py +++ b/clinicadl/utils/enum.py @@ -1,6 +1,17 @@ from enum import Enum +class BaseEnum(Enum): + """Base Enum object that will print valid inputs if the value passed is not valid.""" + + @classmethod + def _missing_(cls, value): + raise ValueError( + f"{value} is not a valid {cls.__name__}. Valid ones are: " + + ", ".join([repr(m.value) for m in cls]) + ) + + class Task(str, Enum): """Tasks that can be performed in ClinicaDL.""" From e0275520c77728f01f4090b7fc787b103fdd1b26 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Wed, 12 Jun 2024 10:35:33 +0200 Subject: [PATCH 28/43] add otrch interface in loss --- clinicadl/losses/__init__.py | 15 --------------- clinicadl/losses/torch_interface.py | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 15 deletions(-) create mode 100644 clinicadl/losses/torch_interface.py diff --git a/clinicadl/losses/__init__.py b/clinicadl/losses/__init__.py index 14d3963f0..e69de29bb 100644 --- a/clinicadl/losses/__init__.py +++ b/clinicadl/losses/__init__.py @@ -1,15 +0,0 @@ -from enum import Enum - - -class ImplementedLosses(str, Enum): - Conv5_FC3 = "Conv5_FC3" - Conv4_FC3 = "Conv4_FC3" - Stride_Conv5_FC3 = "Stride_Conv5_FC3" - RESNET = "resnet18" - - @classmethod - def _missing_(cls, value): - raise ValueError( - f"{value} is not implemented. Implemented networks are: " - + ", ".join([repr(m.value) for m in cls]) - ) diff --git a/clinicadl/losses/torch_interface.py b/clinicadl/losses/torch_interface.py new file mode 100644 index 000000000..14d3963f0 --- /dev/null +++ b/clinicadl/losses/torch_interface.py @@ -0,0 +1,15 @@ +from enum import Enum + + +class ImplementedLosses(str, Enum): + Conv5_FC3 = "Conv5_FC3" + Conv4_FC3 = "Conv4_FC3" + Stride_Conv5_FC3 = "Stride_Conv5_FC3" + RESNET = "resnet18" + + @classmethod + def _missing_(cls, value): + raise ValueError( + f"{value} is not implemented. 
Implemented networks are: " + + ", ".join([repr(m.value) for m in cls]) + ) From 17ea4c8b971da1b646d4bb6b46b615a9a3d1befb Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Thu, 13 Jun 2024 11:24:34 +0200 Subject: [PATCH 29/43] modify nn --- clinicadl/losses/__init__.py | 1 + clinicadl/losses/factory.py | 60 ++++++ clinicadl/losses/torch_interface.py | 15 -- clinicadl/network/pythae/__init__.py | 1 - .../network/pythae/nn/blocks/__init__.py | 2 + clinicadl/network/pythae/nn/blocks/decoder.py | 185 ++++++++++++++++++ clinicadl/network/pythae/nn/blocks/encoder.py | 169 ++++++++++++++++ .../network/pythae/nn/layers/__init__.py | 5 + .../pythae/nn/layers/factory/__init__.py | 6 +- .../network/pythae/nn/layers/factory/conv.py | 38 ++-- .../network/pythae/nn/layers/factory/norm.py | 107 +++++++--- .../network/pythae/nn/layers/factory/pool.py | 76 ++++--- clinicadl/network/pythae/nn/layers/flatten.py | 6 + .../network/pythae/nn/layers/unflatten.py | 26 +++ clinicadl/network/pythae/nn/networks/ae.py | 0 clinicadl/network/pythae/nn/networks/cnn.py | 61 ++++-- clinicadl/network/pythae/nn/networks/utils.py | 124 ++++++++++++ clinicadl/network/pythae/nn/networks/vae.py | 1 + tests/unittests/losses/test_factory.py | 10 + .../network/pythae/nn/blocks/test_decoder.py | 59 ++++++ .../network/pythae/nn/blocks/test_encoder.py | 46 +++++ .../nn/layers/factory/test_factories.py | 27 +++ .../network/pythae/nn/layers/test_layers.py | 43 ++++ .../network/pythae/nn/networks/test_cnn.py | 32 +++ 24 files changed, 992 insertions(+), 108 deletions(-) create mode 100644 clinicadl/losses/factory.py delete mode 100644 clinicadl/losses/torch_interface.py create mode 100644 clinicadl/network/pythae/nn/blocks/__init__.py create mode 100644 clinicadl/network/pythae/nn/blocks/decoder.py create mode 100644 clinicadl/network/pythae/nn/blocks/encoder.py create mode 100644 clinicadl/network/pythae/nn/layers/flatten.py create mode 100644 clinicadl/network/pythae/nn/layers/unflatten.py create mode 100644 clinicadl/network/pythae/nn/networks/ae.py create mode 100644 clinicadl/network/pythae/nn/networks/utils.py create mode 100644 clinicadl/network/pythae/nn/networks/vae.py create mode 100644 tests/unittests/losses/test_factory.py create mode 100644 tests/unittests/network/pythae/nn/blocks/test_decoder.py create mode 100644 tests/unittests/network/pythae/nn/blocks/test_encoder.py create mode 100644 tests/unittests/network/pythae/nn/layers/factory/test_factories.py create mode 100644 tests/unittests/network/pythae/nn/layers/test_layers.py create mode 100644 tests/unittests/network/pythae/nn/networks/test_cnn.py diff --git a/clinicadl/losses/__init__.py b/clinicadl/losses/__init__.py index e69de29bb..4bd74d41e 100644 --- a/clinicadl/losses/__init__.py +++ b/clinicadl/losses/__init__.py @@ -0,0 +1 @@ +from .factory import ImplementedLoss, get_loss_function diff --git a/clinicadl/losses/factory.py b/clinicadl/losses/factory.py new file mode 100644 index 000000000..a6e192b30 --- /dev/null +++ b/clinicadl/losses/factory.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from enum import Enum +from typing import TYPE_CHECKING, Type, Union + +if TYPE_CHECKING: + import torch.nn as nn + + +class ClassificationLoss(str, Enum): + """Losses that can be used only for classification.""" + + CrossENTROPY = "CrossEntropyLoss" + MultiMargin = "MultiMarginLoss" + + +class ImplementedLoss(str, Enum): + """Implemented losses in ClinicaDL.""" + + CrossENTROPY = "CrossEntropyLoss" + MultiMargin = 
"MultiMarginLoss" + L1 = "L1Loss" + MSE = "MSELoss" + HUBER = "HuberLoss" + SmoothL1 = "SmoothL1Loss" + + @classmethod + def _missing_(cls, value): + raise ValueError( + f"{value} is not implemented. Implemented losses are: " + + ", ".join([repr(m.value) for m in cls]) + ) + + +def get_loss_function(loss: Union[str, ImplementedLoss]) -> Type[nn.Module]: + """ + Factory function to get a loss function from its name. + + Parameters + ---------- + loss : Union[str, ImplementedLoss] + The name of the loss. + + Returns + ------- + Type[nn.Module] + The loss function object. + """ + import torch.nn as nn + + loss = ImplementedLoss(loss) + return getattr(nn, loss.value) + + +# TODO : what about them? +# "KLDivLoss", +# "BCEWithLogitsLoss", +# "VAEGaussianLoss", +# "VAEBernoulliLoss", +# "VAEContinuousBernoulliLoss", diff --git a/clinicadl/losses/torch_interface.py b/clinicadl/losses/torch_interface.py deleted file mode 100644 index 14d3963f0..000000000 --- a/clinicadl/losses/torch_interface.py +++ /dev/null @@ -1,15 +0,0 @@ -from enum import Enum - - -class ImplementedLosses(str, Enum): - Conv5_FC3 = "Conv5_FC3" - Conv4_FC3 = "Conv4_FC3" - Stride_Conv5_FC3 = "Stride_Conv5_FC3" - RESNET = "resnet18" - - @classmethod - def _missing_(cls, value): - raise ValueError( - f"{value} is not implemented. Implemented networks are: " - + ", ".join([repr(m.value) for m in cls]) - ) diff --git a/clinicadl/network/pythae/__init__.py b/clinicadl/network/pythae/__init__.py index 42e5facd8..5d83d9556 100644 --- a/clinicadl/network/pythae/__init__.py +++ b/clinicadl/network/pythae/__init__.py @@ -1,3 +1,2 @@ from .base.base_config import ModelConfig from .base.base_model import ClinicaDLModel -from .base.base_utils import ModelOutput diff --git a/clinicadl/network/pythae/nn/blocks/__init__.py b/clinicadl/network/pythae/nn/blocks/__init__.py new file mode 100644 index 000000000..a8b1ab550 --- /dev/null +++ b/clinicadl/network/pythae/nn/blocks/__init__.py @@ -0,0 +1,2 @@ +from .decoder import Decoder2D, Decoder3D, VAE_Decoder2D +from .encoder import Encoder2D, Encoder3D, VAE_Encoder2D diff --git a/clinicadl/network/pythae/nn/blocks/decoder.py b/clinicadl/network/pythae/nn/blocks/decoder.py new file mode 100644 index 000000000..50c8c3242 --- /dev/null +++ b/clinicadl/network/pythae/nn/blocks/decoder.py @@ -0,0 +1,185 @@ +import torch.nn as nn +import torch.nn.functional as F + +from clinicadl.network.pythae.nn.layers import Unflatten2D, get_norm_layer + +__all__ = [ + "Decoder2D", + "Decoder3D", + "VAE_Decoder2D", +] + + +class Decoder2D(nn.Module): + """ + Class defining the decoder's part of the Autoencoder. + This layer is composed of one 2D transposed convolutional layer, + a batch normalization layer with a relu activation function. + """ + + def __init__( + self, + input_channels, + output_channels, + kernel_size=4, + stride=2, + padding=1, + output_padding=0, + normalization="BatchNorm", + ): + super(Decoder2D, self).__init__() + self.layer = nn.Sequential( + nn.ConvTranspose2d( + input_channels, + output_channels, + kernel_size, + stride=stride, + padding=padding, + output_padding=output_padding, + bias=False, + ), + get_norm_layer(normalization, dim=2)(output_channels), + ) + + def forward(self, x): + x = F.relu(self.layer(x), inplace=True) + return x + + +class Decoder3D(nn.Module): + """ + Class defining the decoder's part of the Autoencoder. + This layer is composed of one 3D transposed convolutional layer, + a batch normalization layer with a relu activation function. 
+ """ + + def __init__( + self, + input_channels, + output_channels, + kernel_size=4, + stride=2, + padding=1, + output_padding=0, + normalization="BatchNorm", + ): + super(Decoder3D, self).__init__() + self.layer = nn.Sequential( + nn.ConvTranspose3d( + input_channels, + output_channels, + kernel_size, + stride=stride, + padding=padding, + output_padding=output_padding, + bias=False, + ), + get_norm_layer(normalization, dim=3)(output_channels), + ) + + def forward(self, x): + x = F.relu(self.layer(x), inplace=True) + return x + + +class VAE_Decoder2D(nn.Module): + def __init__( + self, + input_shape, + latent_size, + n_conv=4, + last_layer_channels=32, + latent_dim=1, + feature_size=1024, + padding=None, + ): + """ + Feature size is the size of the vector if latent_dim=1 + or is the W/H of the output channels if laten_dim=2 + """ + super(VAE_Decoder2D, self).__init__() + + self.input_c = input_shape[0] + self.input_h = input_shape[1] + self.input_w = input_shape[2] + + if not padding: + output_padding = [[0, 0] for _ in range(n_conv)] + else: + output_padding = padding + + self.layers = [] + + if latent_dim == 1: + n_pix = ( + last_layer_channels + * 2 ** (n_conv - 1) + * (self.input_h // (2**n_conv)) + * (self.input_w // (2**n_conv)) + ) + self.layers.append( + nn.Sequential( + nn.Linear(latent_size, feature_size), + nn.ReLU(), + nn.Linear(feature_size, n_pix), + nn.ReLU(), + Unflatten2D( + last_layer_channels * 2 ** (n_conv - 1), + self.input_h // (2**n_conv), + self.input_w // (2**n_conv), + ), + nn.ReLU(), + ) + ) + elif latent_dim == 2: + self.layers.append( + nn.Sequential( + nn.ConvTranspose2d( + latent_size, feature_size, 3, stride=1, padding=1, bias=False + ), + nn.ReLU(), + nn.ConvTranspose2d( + feature_size, + last_layer_channels * 2 ** (n_conv - 1), + 3, + stride=1, + padding=1, + bias=False, + ), + nn.ReLU(), + ) + ) + else: + raise AttributeError( + "Bad latent dimension specified. Latent dimension must be 1 or 2" + ) + + for i in range(n_conv - 1, 0, -1): + self.layers.append( + Decoder2D( + last_layer_channels * 2 ** (i), + last_layer_channels * 2 ** (i - 1), + output_padding=output_padding[i], + ) + ) + + self.layers.append( + nn.Sequential( + nn.ConvTranspose2d( + last_layer_channels, + self.input_c, + 4, + stride=2, + padding=1, + output_padding=output_padding[0], + bias=False, + ), + nn.Sigmoid(), + ) + ) + + self.sequential = nn.Sequential(*self.layers) + + def forward(self, z): + y = self.sequential(z) + return y diff --git a/clinicadl/network/pythae/nn/blocks/encoder.py b/clinicadl/network/pythae/nn/blocks/encoder.py new file mode 100644 index 000000000..729bbfbe1 --- /dev/null +++ b/clinicadl/network/pythae/nn/blocks/encoder.py @@ -0,0 +1,169 @@ +import torch.nn as nn +import torch.nn.functional as F + +from clinicadl.network.pythae.nn.layers import Flatten, get_norm_layer + +__all__ = [ + "Encoder2D", + "Encoder3D", + "VAE_Encoder2D", +] + + +class Encoder2D(nn.Module): + """ + Class defining the encoder's part of the Autoencoder. + This layer is composed of one 2D convolutional layer, + a batch normalization layer with a leaky relu + activation function. 
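+    With the default parameters (kernel_size=4, stride=2, padding=1), each
+    block halves the spatial size of its input (rounding down for odd sizes).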
+ """ + + def __init__( + self, + input_channels, + output_channels, + kernel_size=4, + stride=2, + padding=1, + normalization="BatchNorm", + ): + super(Encoder2D, self).__init__() + self.layer = nn.Sequential( + nn.Conv2d( + input_channels, + output_channels, + kernel_size, + stride=stride, + padding=padding, + bias=False, + ), + get_norm_layer(normalization, dim=2)( + output_channels + ), # TODO : will raise an error if GroupNorm + ) + + def forward(self, x): + x = F.leaky_relu(self.layer(x), negative_slope=0.2, inplace=True) + return x + + +class Encoder3D(nn.Module): + """ + Class defining the encoder's part of the Autoencoder. + This layer is composed of one 3D convolutional layer, + a batch normalization layer with a leaky relu + activation function. + """ + + def __init__( + self, + input_channels, + output_channels, + kernel_size=4, + stride=2, + padding=1, + normalization="BatchNorm", + ): + super(Encoder3D, self).__init__() + self.layer = nn.Sequential( + nn.Conv3d( + input_channels, + output_channels, + kernel_size, + stride=stride, + padding=padding, + bias=False, + ), + get_norm_layer(normalization, dim=3)(output_channels), + ) + + def forward(self, x): + x = F.leaky_relu(self.layer(x), negative_slope=0.2, inplace=True) + return x + + +class VAE_Encoder2D(nn.Module): + def __init__( + self, + input_shape, + n_conv=4, + first_layer_channels=32, + latent_dim=1, + feature_size=1024, + ): + """ + Feature size is the size of the vector if latent_dim=1 + or is the number of feature maps (number of channels) if latent_dim=2 + """ + super(VAE_Encoder2D, self).__init__() + + self.input_c = input_shape[0] + self.input_h = input_shape[1] + self.input_w = input_shape[2] + + decoder_padding = [] + tensor_h, tensor_w = self.input_h, self.input_w + + self.layers = [] + + # Input Layer + self.layers.append(Encoder2D(self.input_c, first_layer_channels)) + padding_h, padding_w = 0, 0 + if tensor_h % 2 != 0: + padding_h = 1 + if tensor_w % 2 != 0: + padding_w = 1 + decoder_padding.append([padding_h, padding_w]) + tensor_h, tensor_w = tensor_h // 2, tensor_w // 2 + # Conv Layers + for i in range(n_conv - 1): + self.layers.append( + Encoder2D( + first_layer_channels * 2**i, first_layer_channels * 2 ** (i + 1) + ) + ) + padding_h, padding_w = 0, 0 + if tensor_h % 2 != 0: + padding_h = 1 + if tensor_w % 2 != 0: + padding_w = 1 + decoder_padding.append([padding_h, padding_w]) + tensor_h, tensor_w = tensor_h // 2, tensor_w // 2 + + self.decoder_padding = decoder_padding + + # Final Layer + if latent_dim == 1: + n_pix = ( + first_layer_channels + * 2 ** (n_conv - 1) + * (self.input_h // (2**n_conv)) + * (self.input_w // (2**n_conv)) + ) + self.layers.append( + nn.Sequential(Flatten(), nn.Linear(n_pix, feature_size), nn.ReLU()) + ) + elif latent_dim == 2: + self.layers.append( + nn.Sequential( + nn.Conv2d( + first_layer_channels * 2 ** (n_conv - 1), + feature_size, + 3, + stride=1, + padding=1, + bias=False, + ), + nn.ReLU(), + ) + ) + else: + raise AttributeError( + "Bad latent dimension specified. 
Latent dimension must be 1 or 2" + ) + + self.sequential = nn.Sequential(*self.layers) + + def forward(self, x): + z = self.sequential(x) + return z diff --git a/clinicadl/network/pythae/nn/layers/__init__.py b/clinicadl/network/pythae/nn/layers/__init__.py index e69de29bb..b27ccb5a2 100644 --- a/clinicadl/network/pythae/nn/layers/__init__.py +++ b/clinicadl/network/pythae/nn/layers/__init__.py @@ -0,0 +1,5 @@ +from .factory import get_conv_layer, get_norm_layer, get_pool_layer +from .flatten import Flatten +from .pool import PadMaxPool2d, PadMaxPool3d +from .unflatten import Unflatten2D, Unflatten3D +from .unpool import CropMaxUnpool2d, CropMaxUnpool3d diff --git a/clinicadl/network/pythae/nn/layers/factory/__init__.py b/clinicadl/network/pythae/nn/layers/factory/__init__.py index 28423743d..55988c334 100644 --- a/clinicadl/network/pythae/nn/layers/factory/__init__.py +++ b/clinicadl/network/pythae/nn/layers/factory/__init__.py @@ -1,3 +1,3 @@ -from .conv import ConvLayer -from .norm import NormLayer -from .pool import PoolLayer +from .conv import get_conv_layer +from .norm import get_norm_layer +from .pool import get_pool_layer diff --git a/clinicadl/network/pythae/nn/layers/factory/conv.py b/clinicadl/network/pythae/nn/layers/factory/conv.py index 495f71c0e..3a789da61 100644 --- a/clinicadl/network/pythae/nn/layers/factory/conv.py +++ b/clinicadl/network/pythae/nn/layers/factory/conv.py @@ -1,26 +1,28 @@ -from typing import Type +from typing import Type, Union import torch.nn as nn -class ConvLayer: - """Factory object for creating Convolutional layers.""" +def get_conv_layer(dim: int) -> Union[Type[nn.Conv2d], Type[nn.Conv3d]]: + """ + A factory function for creating Convolutional layers. - def __new__(cls, dim: int) -> Type[nn.Module]: - """ - Creates a Convolutional layer. + Parameters + ---------- + dim : int + Dimension of the image. - Parameters - ---------- - dim : int - Dimension of the image. + Returns + ------- + Type[nn.Module] + The Convolutional layer. - Returns - ------- - Type[nn.Module] - The Convolutional layer. - """ - assert dim in {2, 3}, "Input dimension must be 2 or 3." + Raises + ------ + AssertionError + If dim is not 2 or 3. + """ + assert dim in {2, 3}, "Input dimension must be 2 or 3." - layers = [nn.Conv2d, nn.Conv3d] - return layers[dim - 2] + layers = (nn.Conv2d, nn.Conv3d) + return layers[dim - 2] diff --git a/clinicadl/network/pythae/nn/layers/factory/norm.py b/clinicadl/network/pythae/nn/layers/factory/norm.py index dc6ff086b..a95022924 100644 --- a/clinicadl/network/pythae/nn/layers/factory/norm.py +++ b/clinicadl/network/pythae/nn/layers/factory/norm.py @@ -5,7 +5,7 @@ from clinicadl.utils.enum import BaseEnum -class Normalization(str, BaseEnum): +class Normalization(str, BaseEnum): # TODO : remove from global enum """Available normalization layers in ClinicaDL.""" BATCH = "BatchNorm" @@ -13,50 +13,93 @@ class Normalization(str, BaseEnum): INSTANCE = "InstanceNorm" -class NormLayer: - """Factory object for creating Normalization layers.""" - - def __new__( - cls, normalization: Union[str, Normalization], dim: int - ) -> Type[nn.Module]: - """ - Creates a Normalization layer. - - Parameters - ---------- - normalization : Normalization - Type of normalization. - dim : int - Dimension of the image. - - Returns - ------- - Type[nn.Module] - The normalization layer. - """ - assert dim in {2, 3}, "Input dimension must be 2 or 3." 
- normalization = Normalization(normalization) - - if normalization == Normalization.BATCH: - factory = _batch_norm_factory - elif normalization == Normalization.INSTANCE: - factory = _instance_norm_factory - elif normalization == Normalization.GROUP: - factory = _group_norm_factory - return factory(dim) +def get_norm_layer( + normalization: Union[str, Normalization], dim: int +) -> Type[nn.Module]: + """ + A factory function for creating Normalization layers. + + Parameters + ---------- + normalization : Normalization + Type of normalization. + dim : int + Dimension of the image. + + Returns + ------- + Type[nn.Module] + The normalization layer. + + Raises + ------ + AssertionError + If dim is not 2 or 3. + """ + assert dim in {2, 3}, "Input dimension must be 2 or 3." + normalization = Normalization(normalization) + + if normalization == Normalization.BATCH: + factory = _batch_norm_factory + elif normalization == Normalization.INSTANCE: + factory = _instance_norm_factory + elif normalization == Normalization.GROUP: + factory = _group_norm_factory + return factory(dim) def _instance_norm_factory( dim: int, ) -> Union[Type[nn.InstanceNorm2d], Type[nn.InstanceNorm3d]]: + """ + A factory function for creating Instance Normalization layers. + + Parameters + ---------- + dim : int + Dimension of the image. + + Returns + ------- + Union[Type[nn.InstanceNorm2d], Type[nn.InstanceNorm3d]] + The normalization layer. + """ layers = (nn.InstanceNorm2d, nn.InstanceNorm3d) return layers[dim - 2] def _batch_norm_factory(dim: int) -> Union[Type[nn.BatchNorm2d], Type[nn.BatchNorm3d]]: + """ + A factory function for creating Batch Normalization layers. + + Parameters + ---------- + dim : int + Dimension of the image. + + Returns + ------- + Union[Type[nn.BatchNorm2d], Type[nn.BatchNorm3d]] + The normalization layer. + """ layers = (nn.BatchNorm2d, nn.BatchNorm3d) return layers[dim - 2] def _group_norm_factory(dim: int) -> Type[nn.GroupNorm]: + """ + A dummy function that returns a Group Normalization layer. + + To match other factory functions. + + Parameters + ---------- + dim : int + Dimension of the image. + + Returns + ------- + Type[nn.GroupNorm] + The normalization layer. + """ return nn.GroupNorm diff --git a/clinicadl/network/pythae/nn/layers/factory/pool.py b/clinicadl/network/pythae/nn/layers/factory/pool.py index f6396011d..b48cf7465 100644 --- a/clinicadl/network/pythae/nn/layers/factory/pool.py +++ b/clinicadl/network/pythae/nn/layers/factory/pool.py @@ -1,4 +1,4 @@ -from typing import Type, Union +from typing import TYPE_CHECKING, Type, Union import torch.nn as nn @@ -14,40 +14,68 @@ class Pooling(str, BaseEnum): PADMAX = "PadMaxPool" -class PoolLayer: - """Factory object for creating Pooling layers.""" +def get_pool_layer(pooling: Union[str, Pooling], dim: int) -> Type[nn.Module]: + """ + A factory object for creating Pooling layers. - def __new__(cls, pooling: Union[str, Pooling], dim: int) -> Type[nn.Module]: - """ - Creates a Pooling layer. + Parameters + ---------- + pooling : Pooling + Type of pooling. + dim : int + Dimension of the image. - Parameters - ---------- - pooling : Pooling - Type of pooling. - dim : int - Dimension of the image. + Returns + ------- + Type[nn.Module] + The pooling layer. - Returns - ------- - Type[nn.Module] - The normalization layer. - """ - assert dim in {2, 3}, "Input dimension must be 2 or 3." - pooling = Pooling(pooling) + Raises + ------ + AssertionError + If dim is not 2 or 3. + """ + assert dim in {2, 3}, "Input dimension must be 2 or 3." 
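+    # Accepts either the enum member or its string value, e.g.
+    # get_pool_layer("PadMaxPool", dim=2) and get_pool_layer(Pooling.PADMAX, dim=2)
+    # both return the PadMaxPool2d class (not an instance)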
+ pooling = Pooling(pooling) - if pooling == Pooling.MAX: - factory = _max_pool_factory - elif pooling == Pooling.PADMAX: - factory = _pad_max_pool_factory - return factory(dim) + if pooling == Pooling.MAX: + factory = _max_pool_factory + elif pooling == Pooling.PADMAX: + factory = _pad_max_pool_factory + return factory(dim) def _max_pool_factory(dim: int) -> Union[Type[nn.MaxPool2d], Type[nn.MaxPool3d]]: + """ + A factory object for creating Max Pooling layers. + + Parameters + ---------- + dim : int + Dimension of the image. + + Returns + ------- + Union[Type[nn.MaxPool2d], Type[nn.MaxPool3d]] + The pooling layer. + """ layers = (nn.MaxPool2d, nn.MaxPool3d) return layers[dim - 2] def _pad_max_pool_factory(dim: int) -> Union[Type[PadMaxPool2d], Type[PadMaxPool3d]]: + """ + A factory object for creating Pad-Max Pooling layers. + + Parameters + ---------- + dim : int + Dimension of the image. + + Returns + ------- + Union[Type[PadMaxPool2d], Type[PadMaxPool3d]] + The pooling layer. + """ layers = (PadMaxPool2d, PadMaxPool3d) return layers[dim - 2] diff --git a/clinicadl/network/pythae/nn/layers/flatten.py b/clinicadl/network/pythae/nn/layers/flatten.py new file mode 100644 index 000000000..d78f66318 --- /dev/null +++ b/clinicadl/network/pythae/nn/layers/flatten.py @@ -0,0 +1,6 @@ +import torch.nn as nn + + +class Flatten(nn.Module): + def forward(self, input): + return input.view(input.size(0), -1) diff --git a/clinicadl/network/pythae/nn/layers/unflatten.py b/clinicadl/network/pythae/nn/layers/unflatten.py new file mode 100644 index 000000000..7515c35ab --- /dev/null +++ b/clinicadl/network/pythae/nn/layers/unflatten.py @@ -0,0 +1,26 @@ +import torch.nn as nn + + +class Unflatten2D(nn.Module): + def __init__(self, channel, height, width): + super(Unflatten2D, self).__init__() + self.channel = channel + self.height = height + self.width = width + + def forward(self, input): + return input.view(input.size(0), self.channel, self.height, self.width) + + +class Unflatten3D(nn.Module): + def __init__(self, channel, height, width, depth): + super(Unflatten3D, self).__init__() + self.channel = channel + self.height = height + self.width = width + self.depth = depth + + def forward(self, input): + return input.view( + input.size(0), self.channel, self.height, self.width, self.depth + ) diff --git a/clinicadl/network/pythae/nn/networks/ae.py b/clinicadl/network/pythae/nn/networks/ae.py new file mode 100644 index 000000000..e69de29bb diff --git a/clinicadl/network/pythae/nn/networks/cnn.py b/clinicadl/network/pythae/nn/networks/cnn.py index caa1a53a1..d0ec23934 100644 --- a/clinicadl/network/pythae/nn/networks/cnn.py +++ b/clinicadl/network/pythae/nn/networks/cnn.py @@ -1,20 +1,42 @@ -from enum import Enum - import numpy as np import torch import torch.utils.model_zoo as model_zoo from torch import nn from torchvision.models.resnet import BasicBlock -from clinicadl.network.pythae.nn.layers.factory import ConvLayer, NormLayer, PoolLayer +from clinicadl.network.pythae.nn.layers.factory import ( + get_conv_layer, + get_norm_layer, + get_pool_layer, +) from clinicadl.network.pythae.nn.utils.resnet import ResNetDesigner, model_urls +from clinicadl.utils.enum import BaseEnum # from clinicadl.network.pythae.nn.utils.resnet3D import ResNetDesigner3D # from clinicadl.network.pythae.nn.utils.SECNN import SECNNDesigner3D # from clinicadl.network.sub_network import CNN, CNN_SSDA -class ImplementedCNN(str, Enum): +class CNN2d(str, BaseEnum): + """Neural Networks compatible with 2D inputs.""" + + Conv5_FC3 = 
"Conv5_FC3" + Conv4_FC3 = "Conv4_FC3" + Stride_Conv5_FC3 = "Stride_Conv5_FC3" + RESNET = "resnet18" + + +class CNN3d(str, BaseEnum): + """Neural Networks compatible with 3D inputs.""" + + Conv5_FC3 = "Conv5_FC3" + Conv4_FC3 = "Conv4_FC3" + Stride_Conv5_FC3 = "Stride_Conv5_FC3" + + +class ImplementedCNN(str, BaseEnum): + """Implemented Neural Networks in ClinicaDL.""" + Conv5_FC3 = "Conv5_FC3" Conv4_FC3 = "Conv4_FC3" Stride_Conv5_FC3 = "Stride_Conv5_FC3" @@ -28,16 +50,19 @@ def _missing_(cls, value): ) +# Networks # class Conv5_FC3(nn.Module): """A Convolutional Neural Network with 5 convolution and 3 fully-connected layers.""" def __init__(self, input_size, output_size, dropout): + super().__init__() + dim = len(input_size) - 1 - in_channels = input_size[1] + in_channels = input_size[0] - conv = ConvLayer(dim) - norm = PoolLayer("PadMaxPool", dim=dim) - pool = NormLayer("BatchNorm", dim=dim) + conv = get_conv_layer(dim) + pool = get_pool_layer("PadMaxPool", dim=dim) + norm = get_norm_layer("BatchNorm", dim=dim) self.convolutions = nn.Sequential( conv(in_channels, 8, 3, padding=1), @@ -84,12 +109,14 @@ class Conv4_FC3(nn.Module): """A Convolutional Neural Network with 4 convolution and 3 fully-connected layers.""" def __init__(self, input_size, output_size, dropout): + super().__init__() + dim = len(input_size) - 1 - in_channels = input_size[1] + in_channels = input_size[0] - conv = ConvLayer(dim) - norm = PoolLayer("PadMaxPool", dim=dim) - pool = NormLayer("BatchNorm", dim=dim) + conv = get_conv_layer(dim) + pool = get_pool_layer("PadMaxPool", dim=dim) + norm = get_norm_layer("BatchNorm", dim=dim) self.convolutions = nn.Sequential( conv(in_channels, 8, 3, padding=1), @@ -136,11 +163,13 @@ class Stride_Conv5_FC3(nn.Module): """A Convolutional Neural Network with 5 convolution and 3 fully-connected layers and a stride of 2 for each convolutional layer.""" def __init__(self, input_size, output_size, dropout): + super().__init__() + dim = len(input_size) - 1 - in_channels = input_size[1] + in_channels = input_size[0] - conv = ConvLayer(dim) - norm = PoolLayer("PadMaxPool", dim=dim) + conv = get_conv_layer(dim) + norm = get_norm_layer("BatchNorm", dim=dim) self.convolutions = nn.Sequential( conv(in_channels, 8, 3, padding=1, stride=2), @@ -190,6 +219,8 @@ class resnet18(nn.Module): """ def __init__(self, input_size, output_size, dropout): + super().__init__() + model = ResNetDesigner(input_size, BasicBlock, [2, 2, 2, 2]) model.load_state_dict(model_zoo.load_url(model_urls["resnet18"])) diff --git a/clinicadl/network/pythae/nn/networks/utils.py b/clinicadl/network/pythae/nn/networks/utils.py new file mode 100644 index 000000000..b1224f624 --- /dev/null +++ b/clinicadl/network/pythae/nn/networks/utils.py @@ -0,0 +1,124 @@ +from copy import deepcopy + +from torch import nn + +from clinicadl.network.pythae.nn.layers import ( + CropMaxUnpool2d, + CropMaxUnpool3d, + PadMaxPool2d, + PadMaxPool3d, + Reshape, +) + + +class CNN_Transformer(nn.Module): + def __init__(self, model=None): + """ + Construct an autoencoder from a given CNN. The encoder part corresponds to the convolutional part of the CNN. + + :param model: (Module) a CNN. The convolutional part must be comprised in a 'features' class variable. 
+ """ + from copy import deepcopy + + super(CNN_Transformer, self).__init__() + + self.level = 0 + + if model is not None: + self.encoder = deepcopy(model.convolutions) + self.decoder = self.construct_inv_layers(model) + + for i, layer in enumerate(self.encoder): + if isinstance(layer, PadMaxPool3d) or isinstance(layer, PadMaxPool2d): + self.encoder[i].set_new_return() + elif isinstance(layer, nn.MaxPool3d) or isinstance(layer, nn.MaxPool2d): + self.encoder[i].return_indices = True + else: + self.encoder = nn.Sequential() + self.decoder = nn.Sequential() + + def __len__(self): + return len(self.encoder) + + def construct_inv_layers(self, model): + """ + Implements the decoder part from the CNN. The decoder part is the symmetrical list of the encoder + in which some layers are replaced by their transpose counterpart. + ConvTranspose and ReLU layers are inverted in the end. + + :param model: (Module) a CNN. The convolutional part must be comprised in a 'features' class variable. + :return: (Module) decoder part of the Autoencoder + """ + inv_layers = [] + for i, layer in enumerate(self.encoder): + if isinstance(layer, nn.Conv3d): + inv_layers.append( + nn.ConvTranspose3d( + layer.out_channels, + layer.in_channels, + layer.kernel_size, + stride=layer.stride, + padding=layer.padding, + ) + ) + self.level += 1 + elif isinstance(layer, nn.Conv2d): + inv_layers.append( + nn.ConvTranspose2d( + layer.out_channels, + layer.in_channels, + layer.kernel_size, + stride=layer.stride, + padding=layer.padding, + ) + ) + self.level += 1 + elif isinstance(layer, PadMaxPool3d): + inv_layers.append( + CropMaxUnpool3d(layer.kernel_size, stride=layer.stride) + ) + elif isinstance(layer, PadMaxPool2d): + inv_layers.append( + CropMaxUnpool2d(layer.kernel_size, stride=layer.stride) + ) + elif isinstance(layer, nn.Linear): + inv_layers.append(nn.Linear(layer.out_features, layer.in_features)) + elif isinstance(layer, nn.Flatten): + inv_layers.append(Reshape(model.flattened_shape)) + elif isinstance(layer, nn.LeakyReLU): + inv_layers.append(nn.LeakyReLU(negative_slope=1 / layer.negative_slope)) + else: + inv_layers.append(deepcopy(layer)) + inv_layers = self.replace_relu(inv_layers) + inv_layers.reverse() + return nn.Sequential(*inv_layers) + + @staticmethod + def replace_relu(inv_layers): + """ + Invert convolutional and ReLU layers (give empirical better results) + + :param inv_layers: (list) list of the layers of decoder part of the Auto-Encoder + :return: (list) the layers with the inversion + """ + idx_relu, idx_conv = -1, -1 + for idx, layer in enumerate(inv_layers): + if isinstance(layer, nn.ConvTranspose3d): + idx_conv = idx + elif isinstance(layer, nn.ReLU) or isinstance(layer, nn.LeakyReLU): + idx_relu = idx + + if idx_conv != -1 and idx_relu != -1: + inv_layers[idx_relu], inv_layers[idx_conv] = ( + inv_layers[idx_conv], + inv_layers[idx_relu], + ) + idx_conv, idx_relu = -1, -1 + + # Check if number of features of batch normalization layers is still correct + for idx, layer in enumerate(inv_layers): + if isinstance(layer, nn.BatchNorm3d): + conv = inv_layers[idx + 1] + inv_layers[idx] = nn.BatchNorm3d(conv.out_channels) + + return inv_layers diff --git a/clinicadl/network/pythae/nn/networks/vae.py b/clinicadl/network/pythae/nn/networks/vae.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/clinicadl/network/pythae/nn/networks/vae.py @@ -0,0 +1 @@ + diff --git a/tests/unittests/losses/test_factory.py b/tests/unittests/losses/test_factory.py new file mode 100644 index 000000000..43df2831c 
--- /dev/null
+++ b/tests/unittests/losses/test_factory.py
@@ -0,0 +1,10 @@
+import pytest
+
+
+def test_get_loss_function():
+    from clinicadl.losses import ImplementedLoss, get_loss_function
+
+    for loss in [e.value for e in ImplementedLoss]:
+        get_loss_function(loss)
+    with pytest.raises(ValueError):
+        get_loss_function("abc")
diff --git a/tests/unittests/network/pythae/nn/blocks/test_decoder.py b/tests/unittests/network/pythae/nn/blocks/test_decoder.py
new file mode 100644
index 000000000..dc9c05e4a
--- /dev/null
+++ b/tests/unittests/network/pythae/nn/blocks/test_decoder.py
@@ -0,0 +1,59 @@
+import pytest
+import torch
+
+import clinicadl.network.pythae.nn.blocks.decoder as decoder
+
+
+@pytest.fixture
+def input_2d():
+    return torch.randn(2, 1, 10, 10)
+
+
+@pytest.fixture
+def input_3d():
+    return torch.randn(2, 1, 10, 10, 10)
+
+
+@pytest.fixture
+def latent_vector():
+    return torch.randn(2, 3)
+
+
+@pytest.fixture(params=["latent_vector", "input_2d"])
+def to_decode(request):
+    return request.getfixturevalue(request.param)
+
+
+def test_decoder2d(input_2d):
+    network = decoder.Decoder2D(
+        input_channels=input_2d.shape[1], output_channels=(input_2d.shape[1] + 3)
+    )
+    output_2d = network(input_2d)
+    assert output_2d.shape[1] == input_2d.shape[1] + 3
+    assert len(output_2d.shape) == 4
+
+
+def test_vae_decoder2d(to_decode):
+    latent_dim = 1 if len(to_decode.shape) == 2 else 2
+
+    network = decoder.VAE_Decoder2D(
+        input_shape=(1, 5, 5),
+        latent_size=to_decode.shape[1],
+        n_conv=1,
+        last_layer_channels=2,
+        latent_dim=latent_dim,
+        feature_size=4,
+    )
+    output_2d = network(to_decode)
+    assert len(output_2d.shape) == 4
+    assert output_2d.shape[0] == 2
+    assert output_2d.shape[1] == 1
+
+
+def test_decoder3d(input_3d):
+    network = decoder.Decoder3D(
+        input_channels=input_3d.shape[1], output_channels=(input_3d.shape[1] + 3)
+    )
+    output_3d = network(input_3d)
+    assert output_3d.shape[1] == input_3d.shape[1] + 3
+    assert len(output_3d.shape) == 5
diff --git a/tests/unittests/network/pythae/nn/blocks/test_encoder.py b/tests/unittests/network/pythae/nn/blocks/test_encoder.py
new file mode 100644
index 000000000..34574e8fa
--- /dev/null
+++ b/tests/unittests/network/pythae/nn/blocks/test_encoder.py
@@ -0,0 +1,46 @@
+import pytest
+import torch
+
+import clinicadl.network.pythae.nn.blocks.encoder as encoder
+
+
+@pytest.fixture
+def input_2d():
+    return torch.randn(2, 1, 10, 10)
+
+
+@pytest.fixture
+def input_3d():
+    return torch.randn(2, 1, 10, 10, 10)
+
+
+def test_encoder2d(input_2d):
+    network = encoder.Encoder2D(
+        input_channels=input_2d.shape[1], output_channels=(input_2d.shape[1] + 3)
+    )
+    output_2d = network(input_2d)
+    assert output_2d.shape[1] == input_2d.shape[1] + 3
+    assert len(output_2d.shape) == 4
+
+
+@pytest.mark.parametrize("latent_dim", [1, 2])
+def test_vae_encoder2d(latent_dim, input_2d):
+    network = encoder.VAE_Encoder2D(
+        input_shape=(1, 10, 10),
+        n_conv=1,
+        first_layer_channels=4,
+        latent_dim=latent_dim,
+        feature_size=4,
+    )
+    output = network(input_2d)
+    assert output.shape[0] == 2
+    assert len(output.shape) == (2 if latent_dim == 1 else 4)
+
+
+def test_encoder3d(input_3d):
+    network = encoder.Encoder3D(
+        input_channels=input_3d.shape[1], output_channels=(input_3d.shape[1] + 3)
+    )
+    output_3d = network(input_3d)
+    assert output_3d.shape[1] == input_3d.shape[1] + 3
+    assert len(output_3d.shape) == 5
diff --git a/tests/unittests/network/pythae/nn/layers/factory/test_factories.py b/tests/unittests/network/pythae/nn/layers/factory/test_factories.py
new file
mode 100644
index 000000000..40eea7b14
--- /dev/null
+++ b/tests/unittests/network/pythae/nn/layers/factory/test_factories.py
@@ -0,0 +1,27 @@
+import pytest
+import torch.nn as nn
+
+
+def test_get_conv_layer():
+    from clinicadl.network.pythae.nn.layers.factory import get_conv_layer
+
+    assert get_conv_layer(2) == nn.Conv2d
+    assert get_conv_layer(3) == nn.Conv3d
+    with pytest.raises(AssertionError):
+        get_conv_layer(1)
+
+
+def test_get_norm_layer():
+    from clinicadl.network.pythae.nn.layers.factory import get_norm_layer
+
+    assert get_norm_layer("InstanceNorm", 2) == nn.InstanceNorm2d
+    assert get_norm_layer("BatchNorm", 3) == nn.BatchNorm3d
+    assert get_norm_layer("GroupNorm", 3) == nn.GroupNorm
+
+
+def test_get_pool_layer():
+    from clinicadl.network.pythae.nn.layers import PadMaxPool3d
+    from clinicadl.network.pythae.nn.layers.factory import get_pool_layer
+
+    assert get_pool_layer("MaxPool", 2) == nn.MaxPool2d
+    assert get_pool_layer("PadMaxPool", 3) == PadMaxPool3d
diff --git a/tests/unittests/network/pythae/nn/layers/test_layers.py b/tests/unittests/network/pythae/nn/layers/test_layers.py
new file mode 100644
index 000000000..dcea8a65c
--- /dev/null
+++ b/tests/unittests/network/pythae/nn/layers/test_layers.py
@@ -0,0 +1,43 @@
+import pytest
+import torch
+
+import clinicadl.network.pythae.nn.layers as layers
+
+
+@pytest.fixture
+def input_2d():
+    return torch.randn(2, 1, 5, 5)
+
+
+@pytest.fixture
+def input_3d():
+    return torch.randn(2, 1, 5, 5, 5)
+
+
+def test_pool_layers(input_2d, input_3d):  # TODO : test unpool
+    output_3d = layers.PadMaxPool3d(kernel_size=2, stride=1)(input_3d)
+    output_2d = layers.PadMaxPool2d(kernel_size=2, stride=1)(input_2d)
+
+    assert len(output_3d.shape) == 5  # TODO : test more precisely
+    assert output_3d.shape[0] == 2
+    assert len(output_2d.shape) == 4
+    assert output_2d.shape[0] == 2
+
+
+def test_flatten_layers(input_2d, input_3d):
+    output_3d = layers.Flatten()(input_3d)
+    output_2d = layers.Flatten()(input_2d)
+
+    assert output_3d.shape == torch.Size((2, 1 * 5 * 5 * 5))
+    assert output_2d.shape == torch.Size((2, 1 * 5 * 5))
+
+
+def test_unflatten_layers():
+    flattened_2d = torch.randn(2, 1 * 5 * 4)
+    flattened_3d = torch.randn(2, 1 * 5 * 4 * 3)
+
+    output_3d = layers.Unflatten3D(channel=1, height=5, width=4, depth=3)(flattened_3d)
+    output_2d = layers.Unflatten2D(channel=1, height=5, width=4)(flattened_2d)
+
+    assert output_3d.shape == torch.Size((2, 1, 5, 4, 3))
+    assert output_2d.shape == torch.Size((2, 1, 5, 4))
diff --git a/tests/unittests/network/pythae/nn/networks/test_cnn.py b/tests/unittests/network/pythae/nn/networks/test_cnn.py
new file mode 100644
index 000000000..ff48c9a3a
--- /dev/null
+++ b/tests/unittests/network/pythae/nn/networks/test_cnn.py
@@ -0,0 +1,32 @@
+import pytest
+import torch
+
+import clinicadl.network.pythae.nn.networks.cnn as cnn
+
+
+@pytest.fixture
+def input_2d():
+    return torch.randn(2, 1, 50, 100)
+
+
+@pytest.fixture
+def input_3d():
+    return torch.randn(2, 1, 100, 100, 100)
+
+
+@pytest.mark.parametrize("network", [net.value for net in cnn.CNN2d])
+def test_2d_cnn(network, input_2d):
+    network = getattr(cnn, network)(
+        input_size=input_2d.shape[1:], output_size=3, dropout=0.5
+    )
+    output_2d = network(input_2d)
+    assert output_2d.shape == (2, 3)
+
+
+@pytest.mark.parametrize("network", [net.value for net in cnn.CNN3d])
+def test_3d_cnn(network, input_3d):
+    network = getattr(cnn, network)(
+        input_size=input_3d.shape[1:], output_size=1, dropout=0.5
+    )
+    output_2d = network(input_3d)
+    assert
output_2d.shape == (2, 1) From c646edd88ee852d667b3ca81c96faaa4f90d3335 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Mon, 22 Jul 2024 09:11:29 +0200 Subject: [PATCH 30/43] add nn module --- clinicadl/network/pythae/__init__.py | 2 - clinicadl/network/pythae/ae/ae_config.py | 18 - clinicadl/network/pythae/ae/ae_model.py | 52 -- clinicadl/network/pythae/ae/ae_utils.py | 40 -- clinicadl/network/pythae/ae/builtin_models.py | 106 ---- clinicadl/network/pythae/base/__init__.py | 2 - clinicadl/network/pythae/base/base_config.py | 34 -- clinicadl/network/pythae/base/base_model.py | 111 ---- clinicadl/network/pythae/cnn/__init__.py | 0 clinicadl/network/pythae/cnn/cnn_config.py | 13 - clinicadl/network/pythae/cnn/cnn_model.py | 40 -- clinicadl/network/pythae/nn/__init__.py | 0 .../network/pythae/nn/blocks/__init__.py | 2 - clinicadl/network/pythae/nn/layers/flatten.py | 6 - .../network/pythae/nn/networks/__init__.py | 0 clinicadl/network/pythae/nn/networks/ae.py | 0 clinicadl/network/pythae/nn/networks/cnn.py | 425 ------------- clinicadl/network/pythae/nn/networks/utils.py | 124 ---- clinicadl/network/pythae/nn/networks/vae.py | 1 - clinicadl/network/pythae/nn/utils/__init__.py | 0 clinicadl/network/pythae/nn/utils/resnet3D.py | 100 ---- clinicadl/network/pythae/utils.py | 27 - clinicadl/network/pythae/vae/__init__.py | 0 clinicadl/network/pythae/vae/ae_config.py | 18 - clinicadl/network/pythae/vae/vae_model.py | 52 -- .../{network/pythae/ae => nn}/__init__.py | 0 clinicadl/nn/blocks/__init__.py | 5 + .../{network/pythae => }/nn/blocks/decoder.py | 2 +- .../{network/pythae => }/nn/blocks/encoder.py | 4 +- clinicadl/nn/blocks/residual.py | 40 ++ .../nn/utils/SECNN.py => nn/blocks/se.py} | 71 +-- clinicadl/nn/blocks/unet.py | 71 +++ .../pythae => }/nn/layers/__init__.py | 4 +- .../pythae => }/nn/layers/factory/__init__.py | 0 .../pythae => }/nn/layers/factory/conv.py | 0 .../pythae => }/nn/layers/factory/norm.py | 0 .../pythae => }/nn/layers/factory/pool.py | 0 .../{network/pythae => }/nn/layers/pool.py | 0 clinicadl/nn/layers/reverse.py | 30 + .../pythae => }/nn/layers/unflatten.py | 9 + .../{network/pythae => }/nn/layers/unpool.py | 0 clinicadl/nn/networks/__init__.py | 21 + clinicadl/nn/networks/ae.py | 147 +++++ clinicadl/nn/networks/cnn.py | 288 +++++++++ clinicadl/nn/networks/factory/__init__.py | 3 + clinicadl/nn/networks/factory/ae.py | 142 +++++ .../utils => nn/networks/factory}/resnet.py | 56 ++ clinicadl/nn/networks/factory/secnn.py | 61 ++ clinicadl/nn/networks/random.py | 222 +++++++ clinicadl/nn/networks/ssda.py | 111 ++++ clinicadl/nn/networks/unet.py | 39 ++ clinicadl/nn/networks/vae.py | 566 ++++++++++++++++++ clinicadl/nn/utils.py | 74 +++ 53 files changed, 1895 insertions(+), 1244 deletions(-) delete mode 100644 clinicadl/network/pythae/__init__.py delete mode 100644 clinicadl/network/pythae/ae/ae_config.py delete mode 100644 clinicadl/network/pythae/ae/ae_model.py delete mode 100644 clinicadl/network/pythae/ae/ae_utils.py delete mode 100644 clinicadl/network/pythae/ae/builtin_models.py delete mode 100644 clinicadl/network/pythae/base/__init__.py delete mode 100644 clinicadl/network/pythae/base/base_config.py delete mode 100644 clinicadl/network/pythae/base/base_model.py delete mode 100644 clinicadl/network/pythae/cnn/__init__.py delete mode 100644 clinicadl/network/pythae/cnn/cnn_config.py delete mode 100644 clinicadl/network/pythae/cnn/cnn_model.py delete mode 100644 clinicadl/network/pythae/nn/__init__.py delete mode 100644 
clinicadl/network/pythae/nn/blocks/__init__.py delete mode 100644 clinicadl/network/pythae/nn/layers/flatten.py delete mode 100644 clinicadl/network/pythae/nn/networks/__init__.py delete mode 100644 clinicadl/network/pythae/nn/networks/ae.py delete mode 100644 clinicadl/network/pythae/nn/networks/cnn.py delete mode 100644 clinicadl/network/pythae/nn/networks/utils.py delete mode 100644 clinicadl/network/pythae/nn/networks/vae.py delete mode 100644 clinicadl/network/pythae/nn/utils/__init__.py delete mode 100644 clinicadl/network/pythae/nn/utils/resnet3D.py delete mode 100644 clinicadl/network/pythae/utils.py delete mode 100644 clinicadl/network/pythae/vae/__init__.py delete mode 100644 clinicadl/network/pythae/vae/ae_config.py delete mode 100644 clinicadl/network/pythae/vae/vae_model.py rename clinicadl/{network/pythae/ae => nn}/__init__.py (100%) create mode 100644 clinicadl/nn/blocks/__init__.py rename clinicadl/{network/pythae => }/nn/blocks/decoder.py (98%) rename clinicadl/{network/pythae => }/nn/blocks/encoder.py (96%) create mode 100644 clinicadl/nn/blocks/residual.py rename clinicadl/{network/pythae/nn/utils/SECNN.py => nn/blocks/se.py} (51%) create mode 100644 clinicadl/nn/blocks/unet.py rename clinicadl/{network/pythae => }/nn/layers/__init__.py (63%) rename clinicadl/{network/pythae => }/nn/layers/factory/__init__.py (100%) rename clinicadl/{network/pythae => }/nn/layers/factory/conv.py (100%) rename clinicadl/{network/pythae => }/nn/layers/factory/norm.py (100%) rename clinicadl/{network/pythae => }/nn/layers/factory/pool.py (100%) rename clinicadl/{network/pythae => }/nn/layers/pool.py (100%) create mode 100644 clinicadl/nn/layers/reverse.py rename clinicadl/{network/pythae => }/nn/layers/unflatten.py (76%) rename clinicadl/{network/pythae => }/nn/layers/unpool.py (100%) create mode 100644 clinicadl/nn/networks/__init__.py create mode 100644 clinicadl/nn/networks/ae.py create mode 100644 clinicadl/nn/networks/cnn.py create mode 100644 clinicadl/nn/networks/factory/__init__.py create mode 100644 clinicadl/nn/networks/factory/ae.py rename clinicadl/{network/pythae/nn/utils => nn/networks/factory}/resnet.py (56%) create mode 100644 clinicadl/nn/networks/factory/secnn.py create mode 100644 clinicadl/nn/networks/random.py create mode 100644 clinicadl/nn/networks/ssda.py create mode 100644 clinicadl/nn/networks/unet.py create mode 100644 clinicadl/nn/networks/vae.py create mode 100644 clinicadl/nn/utils.py diff --git a/clinicadl/network/pythae/__init__.py b/clinicadl/network/pythae/__init__.py deleted file mode 100644 index 5d83d9556..000000000 --- a/clinicadl/network/pythae/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .base.base_config import ModelConfig -from .base.base_model import ClinicaDLModel diff --git a/clinicadl/network/pythae/ae/ae_config.py b/clinicadl/network/pythae/ae/ae_config.py deleted file mode 100644 index a91447d43..000000000 --- a/clinicadl/network/pythae/ae/ae_config.py +++ /dev/null @@ -1,18 +0,0 @@ -from pydantic import PositiveFloat, PositiveInt - -from clinicadl.network.pythae import ModelConfig -from clinicadl.utils.enum import Normalization, ReconstructionLoss - -from .ae_utils import AENetworks - - -class AEConfig(ModelConfig): - network: AENetworks = AENetworks.AE_Conv5_FC3 - loss: ReconstructionLoss = ReconstructionLoss.MSELoss - latent_space_size: PositiveInt = 128 - feature_size: PositiveInt = 1024 - n_conv: PositiveInt = 4 - io_layer_channels: PositiveInt = 8 - recons_weight: PositiveFloat = 1.0 - kl_weight: PositiveFloat = 1.0 - normalization: 
Normalization = Normalization.BATCH diff --git a/clinicadl/network/pythae/ae/ae_model.py b/clinicadl/network/pythae/ae/ae_model.py deleted file mode 100644 index cb96bf76e..000000000 --- a/clinicadl/network/pythae/ae/ae_model.py +++ /dev/null @@ -1,52 +0,0 @@ -from typing import Callable, Type, TypeVar - -import torch -import torch.nn as nn - -from clinicadl.network.pythae import ClinicaDLModel -from clinicadl.network.pythae.utils import PythaeModel - -from .ae_config import AEConfig -from .ae_utils import PythaeAEWrapper - -T = TypeVar("T", bound="AE") - - -class AE(ClinicaDLModel): - def __init__( - self, - encoder: nn.Module, - decoder: nn.Module, - reconstruction_loss: Callable[ - [torch.Tensor, torch.Tensor], torch.Tensor - ] = nn.MSELoss(), - ) -> None: - super().__init__() - self.encoder = encoder - self.decoder = decoder - self.reconstruction_loss = reconstruction_loss - - @classmethod - def from_config(cls: Type[T], config: AEConfig) -> T: - pass - - @staticmethod - def from_pythae(model: PythaeModel) -> PythaeAEWrapper: - return PythaeAEWrapper(model) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - return self.reconstruct(self.embed(x)) - - def embed(self, x: torch.Tensor) -> torch.Tensor: - return self.encoder(x) - - def reconstruct(self, x: torch.Tensor) -> torch.Tensor: - return self.decoder(x) - - def training_step(self, x: torch.Tensor) -> torch.Tensor: - recon_x = self.forward(x) - loss = self.compute_loss(recon_x, x) - return loss - - def compute_loss(self, recon_x: torch.Tensor, x: torch.Tensor) -> torch.Tensor: - return self.reconstruction_loss(recon_x, x) diff --git a/clinicadl/network/pythae/ae/ae_utils.py b/clinicadl/network/pythae/ae/ae_utils.py deleted file mode 100644 index 14501882b..000000000 --- a/clinicadl/network/pythae/ae/ae_utils.py +++ /dev/null @@ -1,40 +0,0 @@ -from enum import Enum - -import torch - -from clinicadl.network.pythae import ClinicaDLModel -from clinicadl.network.pythae.utils import PythaeModel - - -class AENetworks(str, Enum): - AE_Conv5_FC3 = "AE_Conv5_FC3" - AE_Conv4_FC3 = "AE_Conv4_FC3" - CAE_half = "CAE_half" - - -class PythaeAEWrapper(ClinicaDLModel): - def __init__(self, model: PythaeModel): - super().__init__() - self.pythae_model = model - - def forward(self, x: torch.Tensor) -> torch.Tensor: - return self.reconstruct(self.embed(x)) - - def embed(self, x: torch.Tensor) -> torch.Tensor: - return self.pythae_model.encoder(x) - - def reconstruct(self, x: torch.Tensor) -> torch.Tensor: - return self.pythae_model.decoder(x) - - def training_step(self, x: torch.Tensor) -> torch.Tensor: - inputs = {"data": x} - loss = self.pythae_model.forward(inputs).loss - return loss - - def compute_loss( - self, recon_x: torch.Tensor, x: torch.Tensor, **kwargs - ) -> torch.Tensor: - loss = self.pythae_model.loss_function(recon_x, x, **kwargs) - if isinstance(loss, tuple): - return loss[0] - return loss diff --git a/clinicadl/network/pythae/ae/builtin_models.py b/clinicadl/network/pythae/ae/builtin_models.py deleted file mode 100644 index 316fed0a4..000000000 --- a/clinicadl/network/pythae/ae/builtin_models.py +++ /dev/null @@ -1,106 +0,0 @@ -from torch import nn - -from clinicadl.network.autoencoder.cnn_transformer import CNN_Transformer -from clinicadl.network.cnn.models import Conv4_FC3, Conv5_FC3, resnet18 -from clinicadl.network.sub_network import AutoEncoder -from clinicadl.network.vae.vae_layers import ( - DecoderLayer3D, - EncoderLayer3D, - Flatten, - Unflatten3D, -) - -from .ae_model import AE - - -class AE_Conv5_FC3(AE): - """ - 
Autoencoder derived from the convolutional part of CNN Conv5_FC3. - """ - - def __init__(self, input_size, gpu=True): - # fmt: off - cnn_model = Conv5_FC3(input_size=input_size, gpu=gpu) - autoencoder = CNN_Transformer(cnn_model) - # fmt: on - super().__init__( - encoder=autoencoder.encoder, decoder=autoencoder.decoder, gpu=gpu - ) - - @staticmethod - def get_input_size(): - return "1@128x128" - - @staticmethod - def get_dimension(): - return "2D" - - @staticmethod - def get_task(): - return ["reconstruction"] - - -class AE_Conv4_FC3(AutoEncoder): - """ - Autoencoder derived from the convolutional part of CNN Conv4_FC3. - """ - - def __init__(self, input_size, gpu=True): - # fmt: off - cnn_model = Conv4_FC3(input_size=input_size, gpu=gpu) - autoencoder = CNN_Transformer(cnn_model) - # fmt: on - super().__init__( - encoder=autoencoder.encoder, decoder=autoencoder.decoder, gpu=gpu - ) - - @staticmethod - def get_input_size(): - return "1@128x128" - - @staticmethod - def get_dimension(): - return "2D" - - @staticmethod - def get_task(): - return ["reconstruction"] - - -class CAE_half(AutoEncoder): - """ - 3D Autoencoder derived from CVAE - """ - - def __init__(self, input_size, latent_space_size, gpu=True): - # fmt: off - self.encoder = nn.Sequential( - EncoderLayer3D(1, 32, kernel_size=3), - EncoderLayer3D(32, 64, kernel_size=3), - EncoderLayer3D(64, 128, kernel_size=3), - Flatten(), - nn.Linear(153600, latent_space_size) - ) - self.decoder = nn.Sequential( - nn.Linear(latent_space_size, 307200), - Unflatten3D(256, 10, 12, 10), - DecoderLayer3D(256, 128, kernel_size=3), - DecoderLayer3D(128, 64, kernel_size=3), - DecoderLayer3D(64, 1, kernel_size=3) - ) - # fmt: on - super(CAE_half, self).__init__( - encoder=self.encoder, decoder=self.decoder, gpu=gpu - ) - - @staticmethod - def get_input_size(): - return "1@dxhxw" - - @staticmethod - def get_dimension(): - return "3D" - - @staticmethod - def get_task(): - return ["reconstruction"] diff --git a/clinicadl/network/pythae/base/__init__.py b/clinicadl/network/pythae/base/__init__.py deleted file mode 100644 index 0b911ccee..000000000 --- a/clinicadl/network/pythae/base/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .base_config import ModelConfig -from .base_model import ClinicaDLModel diff --git a/clinicadl/network/pythae/base/base_config.py b/clinicadl/network/pythae/base/base_config.py deleted file mode 100644 index a4595f95d..000000000 --- a/clinicadl/network/pythae/base/base_config.py +++ /dev/null @@ -1,34 +0,0 @@ -from abc import ABC, abstractmethod - -from pydantic import ( - BaseModel, - ConfigDict, - NonNegativeFloat, - field_validator, - model_validator, -) - - -class ModelConfig(ABC, BaseModel): - """ - Abstract base config class for ClinicaDL Models. - - network and loss are specific to the type of models - (e.g. CNN or AE) and must be specified in subclasses. - """ - - network: str - dropout: NonNegativeFloat = 0.0 - loss: str - # pydantic config - model_config = ConfigDict( - validate_assignment=True, validate_return=True, validate_default=True - ) - - @field_validator("dropout") - @classmethod - def validator_dropout(cls, v): - assert ( - 0 <= v <= 1 - ), f"dropout must be between 0 and 1 but it has been set to {v}." 
- return v diff --git a/clinicadl/network/pythae/base/base_model.py b/clinicadl/network/pythae/base/base_model.py deleted file mode 100644 index e0bb768b1..000000000 --- a/clinicadl/network/pythae/base/base_model.py +++ /dev/null @@ -1,111 +0,0 @@ -from abc import ABC, abstractmethod -from pathlib import Path -from typing import Tuple, Type, TypeVar, Union - -import torch -import torch.nn as nn - -from .base_config import ModelConfig - -T = TypeVar("T", bound="ClinicaDLModel") - - -class ClinicaDLModel(ABC, nn.Module): - """Abstract template for ClinicaDL Models.""" - - def __init__(self) -> None: - super().__init__() - - @classmethod - @abstractmethod - def from_config(cls: Type[T], config: ModelConfig) -> T: - """ - Creates a ClinicaDL Model from a config class. - - Parameters - ---------- - config : ModelConfig - The config class. - - Returns - ------- - ClinicaDLModel - The ClinicaDL Model. - """ - pass - - @abstractmethod - def forward(self, x: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]: - """ - Pass forward in the network. - - Parameters - ---------- - x : torch.Tensor - Input data. - - Returns - ------- - Union[torch.Tensor, Tuple[torch.Tensor, ...]] - The output. Either a PyTorch tensor (e.g. output of a CNN) or a tuple of tensors - (e.g. embedding and output of an AutoEncoder). - """ - pass - - @abstractmethod - def training_step(self, x: torch.Tensor) -> torch.Tensor: - """ - Pass forward and loss computation. - - Parameters - ---------- - x : torch.Tensor - The batch. - - Returns - ------- - torch.Tensor - The loss on which backpropagation will be applied. A 1-item tensor. - """ - pass - - @abstractmethod - def predict(self, x: torch.Tensor) -> torch.Tensor: - """ - Makes predictions. - - Parameters - ---------- - x : torch.Tensor - The input data. - - Returns - ------- - torch.Tensor - The predictions. - """ - pass - - @abstractmethod - def save_weights(self, path: Path) -> None: - """ - Saves network weights. - - Parameters - ---------- - path : Path - The file where the weights will be stored. - """ - pass - - @abstractmethod - def load_weights(self, path: Path) -> None: - """ - Loads network weights. - - Parameters - ---------- - path : Path - The file where the weights are stored. - """ - pass diff --git a/clinicadl/network/pythae/cnn/__init__.py b/clinicadl/network/pythae/cnn/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/clinicadl/network/pythae/cnn/cnn_config.py b/clinicadl/network/pythae/cnn/cnn_config.py deleted file mode 100644 index 15cbe83e5..000000000 --- a/clinicadl/network/pythae/cnn/cnn_config.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Tuple - -from pydantic import NonNegativeInt - -from clinicadl.network.pythae.base import ModelConfig -from clinicadl.network.pythae.nn.networks.cnn import ImplementedCNN - - -class CNNConfig(ModelConfig): - network: ImplementedCNN = ImplementedCNN.Conv5_FC3 - loss: ClassificationLoss = ClassificationLoss.CrossEntropyLoss - input_size: Tuple[NonNegativeInt, ...] 
- output_size: NonNegativeInt = 1 diff --git a/clinicadl/network/pythae/cnn/cnn_model.py b/clinicadl/network/pythae/cnn/cnn_model.py deleted file mode 100644 index a51bf2cd9..000000000 --- a/clinicadl/network/pythae/cnn/cnn_model.py +++ /dev/null @@ -1,40 +0,0 @@ -from typing import Callable, Type, TypeVar - -import torch -import torch.nn as nn - -from clinicadl.network.pythae import ClinicaDLModel -from clinicadl.network.pythae.utils import PythaeModel - -from .ae_config import AEConfig - -T = TypeVar("T", bound="AE") - - -class CNN(ClinicaDLModel): - def __init__( - self, - network: nn.Module, - loss: Callable[[torch.Tensor, torch.Tensor], torch.Tensor], - ): - super().__init__() - self.network = network - self.loss = loss - - @classmethod - def from_config(cls: Type[T], config: AEConfig) -> T: - pass - - def forward(self, x: torch.Tensor) -> torch.Tensor: - return self.network(x) - - def training_step(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: - y_pred = self.forward(x) - loss = self.compute_loss(y_pred, y) - return loss - - def compute_loss(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor: - return self.loss(y_pred, y) - - def predict(self, x: torch.Tensor) -> torch.Tensor: - return self.forward(x) diff --git a/clinicadl/network/pythae/nn/__init__.py b/clinicadl/network/pythae/nn/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/clinicadl/network/pythae/nn/blocks/__init__.py b/clinicadl/network/pythae/nn/blocks/__init__.py deleted file mode 100644 index a8b1ab550..000000000 --- a/clinicadl/network/pythae/nn/blocks/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .decoder import Decoder2D, Decoder3D, VAE_Decoder2D -from .encoder import Encoder2D, Encoder3D, VAE_Encoder2D diff --git a/clinicadl/network/pythae/nn/layers/flatten.py b/clinicadl/network/pythae/nn/layers/flatten.py deleted file mode 100644 index d78f66318..000000000 --- a/clinicadl/network/pythae/nn/layers/flatten.py +++ /dev/null @@ -1,6 +0,0 @@ -import torch.nn as nn - - -class Flatten(nn.Module): - def forward(self, input): - return input.view(input.size(0), -1) diff --git a/clinicadl/network/pythae/nn/networks/__init__.py b/clinicadl/network/pythae/nn/networks/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/clinicadl/network/pythae/nn/networks/ae.py b/clinicadl/network/pythae/nn/networks/ae.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/clinicadl/network/pythae/nn/networks/cnn.py b/clinicadl/network/pythae/nn/networks/cnn.py deleted file mode 100644 index d0ec23934..000000000 --- a/clinicadl/network/pythae/nn/networks/cnn.py +++ /dev/null @@ -1,425 +0,0 @@ -import numpy as np -import torch -import torch.utils.model_zoo as model_zoo -from torch import nn -from torchvision.models.resnet import BasicBlock - -from clinicadl.network.pythae.nn.layers.factory import ( - get_conv_layer, - get_norm_layer, - get_pool_layer, -) -from clinicadl.network.pythae.nn.utils.resnet import ResNetDesigner, model_urls -from clinicadl.utils.enum import BaseEnum - -# from clinicadl.network.pythae.nn.utils.resnet3D import ResNetDesigner3D -# from clinicadl.network.pythae.nn.utils.SECNN import SECNNDesigner3D -# from clinicadl.network.sub_network import CNN, CNN_SSDA - - -class CNN2d(str, BaseEnum): - """Neural Networks compatible with 2D inputs.""" - - Conv5_FC3 = "Conv5_FC3" - Conv4_FC3 = "Conv4_FC3" - Stride_Conv5_FC3 = "Stride_Conv5_FC3" - RESNET = "resnet18" - - -class CNN3d(str, BaseEnum): - """Neural Networks compatible with 3D inputs.""" - 
- Conv5_FC3 = "Conv5_FC3" - Conv4_FC3 = "Conv4_FC3" - Stride_Conv5_FC3 = "Stride_Conv5_FC3" - - -class ImplementedCNN(str, BaseEnum): - """Implemented Neural Networks in ClinicaDL.""" - - Conv5_FC3 = "Conv5_FC3" - Conv4_FC3 = "Conv4_FC3" - Stride_Conv5_FC3 = "Stride_Conv5_FC3" - RESNET = "resnet18" - - @classmethod - def _missing_(cls, value): - raise ValueError( - f"{value} is not implemented. Implemented networks are: " - + ", ".join([repr(m.value) for m in cls]) - ) - - -# Networks # -class Conv5_FC3(nn.Module): - """A Convolutional Neural Network with 5 convolution and 3 fully-connected layers.""" - - def __init__(self, input_size, output_size, dropout): - super().__init__() - - dim = len(input_size) - 1 - in_channels = input_size[0] - - conv = get_conv_layer(dim) - pool = get_pool_layer("PadMaxPool", dim=dim) - norm = get_norm_layer("BatchNorm", dim=dim) - - self.convolutions = nn.Sequential( - conv(in_channels, 8, 3, padding=1), - norm(8), - nn.ReLU(), - pool(2, 2), - conv(8, 16, 3, padding=1), - norm(16), - nn.ReLU(), - pool(2, 2), - conv(16, 32, 3, padding=1), - norm(32), - nn.ReLU(), - pool(2, 2), - conv(32, 64, 3, padding=1), - norm(64), - nn.ReLU(), - pool(2, 2), - conv(64, 128, 3, padding=1), - norm(128), - nn.ReLU(), - pool(2, 2), - ) - - input_tensor = torch.zeros(input_size).unsqueeze(0) - output_shape = self.convolutions(input_tensor).shape - - self.fc = nn.Sequential( - nn.Flatten(), - nn.Dropout(p=dropout), - nn.Linear(np.prod(list(output_shape)).item(), 1300), - nn.ReLU(), - nn.Linear(1300, 50), - nn.ReLU(), - nn.Linear(50, output_size), - ) - - def forward(self, x): - x = self.convolutions(x) - return self.fc(x) - - -class Conv4_FC3(nn.Module): - """A Convolutional Neural Network with 4 convolution and 3 fully-connected layers.""" - - def __init__(self, input_size, output_size, dropout): - super().__init__() - - dim = len(input_size) - 1 - in_channels = input_size[0] - - conv = get_conv_layer(dim) - pool = get_pool_layer("PadMaxPool", dim=dim) - norm = get_norm_layer("BatchNorm", dim=dim) - - self.convolutions = nn.Sequential( - conv(in_channels, 8, 3, padding=1), - norm(8), - nn.ReLU(), - pool(2, 2), - conv(8, 16, 3, padding=1), - norm(16), - nn.ReLU(), - pool(2, 2), - conv(16, 32, 3, padding=1), - norm(32), - nn.ReLU(), - pool(2, 2), - conv(32, 64, 3, padding=1), - norm(64), - nn.ReLU(), - pool(2, 2), - conv(64, 128, 3, padding=1), - norm(128), - nn.ReLU(), - pool(2, 2), - ) - - input_tensor = torch.zeros(input_size).unsqueeze(0) - output_shape = self.convolutions(input_tensor).shape - - self.fc = nn.Sequential( - nn.Flatten(), - nn.Dropout(p=dropout), - nn.Linear(np.prod(list(output_shape)).item(), 50), - nn.ReLU(), - nn.Linear(50, 40), - nn.ReLU(), - nn.Linear(40, output_size), - ) - - def forward(self, x): - x = self.convolutions(x) - return self.fc(x) - - -class Stride_Conv5_FC3(nn.Module): - """A Convolutional Neural Network with 5 convolution and 3 fully-connected layers and a stride of 2 for each convolutional layer.""" - - def __init__(self, input_size, output_size, dropout): - super().__init__() - - dim = len(input_size) - 1 - in_channels = input_size[0] - - conv = get_conv_layer(dim) - norm = get_norm_layer("BatchNorm", dim=dim) - - self.convolutions = nn.Sequential( - conv(in_channels, 8, 3, padding=1, stride=2), - norm(8), - nn.ReLU(), - conv(8, 16, 3, padding=1, stride=2), - norm(16), - nn.ReLU(), - conv(16, 32, 3, padding=1, stride=2), - norm(32), - nn.ReLU(), - conv(32, 64, 3, padding=1, stride=2), - norm(64), - nn.ReLU(), - conv(64, 128, 3, padding=1, 
stride=2), - norm(128), - nn.ReLU(), - ) - - input_tensor = torch.zeros(input_size).unsqueeze(0) - output_shape = self.convolutions(input_tensor).shape - - self.fc = nn.Sequential( - nn.Flatten(), - nn.Dropout(p=dropout), - nn.Linear(np.prod(list(output_shape)).item(), 1300), - nn.ReLU(), - nn.Linear(1300, 50), - nn.ReLU(), - nn.Linear(50, output_size), - ) - - def forward(self, x): - x = self.convolutions(x) - return self.fc(x) - - -class resnet18(nn.Module): - """ - ResNet-18 is a neural network that is 18 layers deep based on residual block. - It uses skip connections or shortcuts to jump over some layers. - It is an image classification pre-trained model. - The model input has 3 channels in RGB order. - - Reference: Kaiming He et al., Deep Residual Learning for Image Recognition. - https://arxiv.org/abs/1512.03385?context=cs - """ - - def __init__(self, input_size, output_size, dropout): - super().__init__() - - model = ResNetDesigner(input_size, BasicBlock, [2, 2, 2, 2]) - model.load_state_dict(model_zoo.load_url(model_urls["resnet18"])) - - self.convolutions = nn.Sequential( - model.conv1, - model.bn1, - model.relu, - model.maxpool, - model.layer1, - model.layer2, - model.layer3, - model.layer4, - model.avgpool, - ) - - # add a fc layer on top of the transfer_learning model and a softmax classifier - self.fc = nn.Sequential(nn.Flatten(), model.fc) - self.fc.add_module("drop_out", nn.Dropout(p=dropout)) - self.fc.add_module("fc_out", nn.Linear(1000, output_size)) - - def forward(self, x): - x = self.convolutions(x) - return self.fc(x) - - -# TODO : check the following networks # - -# class ResNet3D(nn.Module): -# """ -# ResNet3D is a 3D neural network composed of 5 residual blocks. Each residual block -# is compose of 3D convolutions followed by a batch normalization and an activation function. -# It uses skip connections or shortcuts to jump over some layers. It's a 3D version of the -# original implementation of Kaiming He et al. - -# Reference: Kaiming He et al., Deep Residual Learning for Image Recognition. -# https://arxiv.org/abs/1512.03385?context=cs -# """ - -# def __init__(self, input_size, dropout, output_size=1): -# model = ResNetDesigner3D(input_size) - -# self.convolutions = nn.Sequential( -# model.layer0, model.layer1, model.layer2, model.layer3, model.layer4 -# ) - -# self.fc = model.fc - -# def forward(self, x): -# x = self.convolutions(x) -# return self.fc(x) - - -# class SqueezeExcitationCNN(CNN): -# """ -# SE-CNN is a combination of a ResNet-101 with Squeeze and Excitation blocks which was successfully -# tested on brain tumour classification by Ghosal et al. 2019. SE blocks are composed of a squeeze -# and an excitation step. The squeeze operation is obtained through an average pooling layer and -# provides a global understanding of each channel. - -# The excitation part consists of a two-layer feed-forward network that outputs a vector of n values -# corresponding to the weights of each channel of the feature maps. - -# Reference: Ghosal et al. 
Brain Tumor Classification Using ResNet-101 Based Squeeze and Excitation Deep Neural Network -# https://ieeexplore.ieee.org/document/8882973 - -# """ - -# def __init__( -# self, input_size=[1, 169, 208, 179], gpu=True, output_size=2, dropout=0.5 -# ): -# model = SECNNDesigner3D() - -# convolutions = nn.Sequential( -# model.layer0, model.layer1, model.layer2, model.layer3, model.layer4 -# ) - -# fc = model.fc - -# super().__init__( -# convolutions=convolutions, -# fc=fc, -# n_classes=output_size, -# gpu=gpu, -# ) - -# @staticmethod -# def get_input_size(): -# return "1@169x207x179" - -# @staticmethod -# def get_dimension(): -# return "3D" - -# @staticmethod -# def get_task(): -# return ["classification"] - - -# class Conv5_FC3_SSDA(CNN_SSDA): -# """ -# Reduce the 2D or 3D input image to an array of size output_size. -# """ - -# def __init__(self, input_size, gpu=True, output_size=2, dropout=0.5): -# conv, norm, pool = get_layers_fn(input_size) -# # fmt: off -# convolutions = nn.Sequential( -# conv(input_size[0], 8, 3, padding=1), -# norm(8), -# nn.ReLU(), -# pool(2, 2), - -# conv(8, 16, 3, padding=1), -# norm(16), -# nn.ReLU(), -# pool(2, 2), - -# conv(16, 32, 3, padding=1), -# norm(32), -# nn.ReLU(), -# pool(2, 2), - -# conv(32, 64, 3, padding=1), -# norm(64), -# nn.ReLU(), -# pool(2, 2), - -# conv(64, 128, 3, padding=1), -# norm(128), -# nn.ReLU(), -# pool(2, 2), - -# # conv(128, 256, 3, padding=1), -# # norm(256), -# # nn.ReLU(), -# # pool(2, 2), -# ) - -# # Compute the size of the first FC layer -# input_tensor = torch.zeros(input_size).unsqueeze(0) -# output_convolutions = convolutions(input_tensor) - -# fc_class_source = nn.Sequential( -# nn.Flatten(), -# nn.Dropout(p=dropout), - -# nn.Linear(np.prod(list(output_convolutions.shape)).item(), 1300), -# nn.ReLU(), - -# nn.Linear(1300, 50), -# nn.ReLU(), - -# nn.Linear(50, output_size) -# ) - - -# fc_class_target= nn.Sequential( -# nn.Flatten(), -# nn.Dropout(p=dropout), - -# nn.Linear(np.prod(list(output_convolutions.shape)).item(), 1300), -# nn.ReLU(), - -# nn.Linear(1300, 50), -# nn.ReLU(), - -# nn.Linear(50, output_size) -# ) - -# fc_domain = nn.Sequential( -# nn.Flatten(), -# nn.Dropout(p=dropout), - -# nn.Linear(np.prod(list(output_convolutions.shape)).item(), 1300), -# nn.ReLU(), - -# nn.Linear(1300, 50), -# nn.ReLU(), - -# nn.Linear(50, output_size) -# ) -# # fmt: on -# super().__init__( -# convolutions=convolutions, -# fc_class_source=fc_class_source, -# fc_class_target=fc_class_target, -# fc_domain=fc_domain, -# n_classes=output_size, -# gpu=gpu, -# ) - -# @staticmethod -# def get_input_size(): -# return "1@128x128" - -# @staticmethod -# def get_dimension(): -# return "2D or 3D" - -# @staticmethod -# def get_task(): -# return ["classification", "regression"] diff --git a/clinicadl/network/pythae/nn/networks/utils.py b/clinicadl/network/pythae/nn/networks/utils.py deleted file mode 100644 index b1224f624..000000000 --- a/clinicadl/network/pythae/nn/networks/utils.py +++ /dev/null @@ -1,124 +0,0 @@ -from copy import deepcopy - -from torch import nn - -from clinicadl.network.pythae.nn.layers import ( - CropMaxUnpool2d, - CropMaxUnpool3d, - PadMaxPool2d, - PadMaxPool3d, - Reshape, -) - - -class CNN_Transformer(nn.Module): - def __init__(self, model=None): - """ - Construct an autoencoder from a given CNN. The encoder part corresponds to the convolutional part of the CNN. - - :param model: (Module) a CNN. The convolutional part must be comprised in a 'features' class variable. 
- """ - from copy import deepcopy - - super(CNN_Transformer, self).__init__() - - self.level = 0 - - if model is not None: - self.encoder = deepcopy(model.convolutions) - self.decoder = self.construct_inv_layers(model) - - for i, layer in enumerate(self.encoder): - if isinstance(layer, PadMaxPool3d) or isinstance(layer, PadMaxPool2d): - self.encoder[i].set_new_return() - elif isinstance(layer, nn.MaxPool3d) or isinstance(layer, nn.MaxPool2d): - self.encoder[i].return_indices = True - else: - self.encoder = nn.Sequential() - self.decoder = nn.Sequential() - - def __len__(self): - return len(self.encoder) - - def construct_inv_layers(self, model): - """ - Implements the decoder part from the CNN. The decoder part is the symmetrical list of the encoder - in which some layers are replaced by their transpose counterpart. - ConvTranspose and ReLU layers are inverted in the end. - - :param model: (Module) a CNN. The convolutional part must be comprised in a 'features' class variable. - :return: (Module) decoder part of the Autoencoder - """ - inv_layers = [] - for i, layer in enumerate(self.encoder): - if isinstance(layer, nn.Conv3d): - inv_layers.append( - nn.ConvTranspose3d( - layer.out_channels, - layer.in_channels, - layer.kernel_size, - stride=layer.stride, - padding=layer.padding, - ) - ) - self.level += 1 - elif isinstance(layer, nn.Conv2d): - inv_layers.append( - nn.ConvTranspose2d( - layer.out_channels, - layer.in_channels, - layer.kernel_size, - stride=layer.stride, - padding=layer.padding, - ) - ) - self.level += 1 - elif isinstance(layer, PadMaxPool3d): - inv_layers.append( - CropMaxUnpool3d(layer.kernel_size, stride=layer.stride) - ) - elif isinstance(layer, PadMaxPool2d): - inv_layers.append( - CropMaxUnpool2d(layer.kernel_size, stride=layer.stride) - ) - elif isinstance(layer, nn.Linear): - inv_layers.append(nn.Linear(layer.out_features, layer.in_features)) - elif isinstance(layer, nn.Flatten): - inv_layers.append(Reshape(model.flattened_shape)) - elif isinstance(layer, nn.LeakyReLU): - inv_layers.append(nn.LeakyReLU(negative_slope=1 / layer.negative_slope)) - else: - inv_layers.append(deepcopy(layer)) - inv_layers = self.replace_relu(inv_layers) - inv_layers.reverse() - return nn.Sequential(*inv_layers) - - @staticmethod - def replace_relu(inv_layers): - """ - Invert convolutional and ReLU layers (give empirical better results) - - :param inv_layers: (list) list of the layers of decoder part of the Auto-Encoder - :return: (list) the layers with the inversion - """ - idx_relu, idx_conv = -1, -1 - for idx, layer in enumerate(inv_layers): - if isinstance(layer, nn.ConvTranspose3d): - idx_conv = idx - elif isinstance(layer, nn.ReLU) or isinstance(layer, nn.LeakyReLU): - idx_relu = idx - - if idx_conv != -1 and idx_relu != -1: - inv_layers[idx_relu], inv_layers[idx_conv] = ( - inv_layers[idx_conv], - inv_layers[idx_relu], - ) - idx_conv, idx_relu = -1, -1 - - # Check if number of features of batch normalization layers is still correct - for idx, layer in enumerate(inv_layers): - if isinstance(layer, nn.BatchNorm3d): - conv = inv_layers[idx + 1] - inv_layers[idx] = nn.BatchNorm3d(conv.out_channels) - - return inv_layers diff --git a/clinicadl/network/pythae/nn/networks/vae.py b/clinicadl/network/pythae/nn/networks/vae.py deleted file mode 100644 index 8b1378917..000000000 --- a/clinicadl/network/pythae/nn/networks/vae.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/clinicadl/network/pythae/nn/utils/__init__.py b/clinicadl/network/pythae/nn/utils/__init__.py deleted file mode 100644 index 
e69de29bb..000000000 diff --git a/clinicadl/network/pythae/nn/utils/resnet3D.py b/clinicadl/network/pythae/nn/utils/resnet3D.py deleted file mode 100644 index 5e3fd7baf..000000000 --- a/clinicadl/network/pythae/nn/utils/resnet3D.py +++ /dev/null @@ -1,100 +0,0 @@ -import torch -import torch.nn as nn - - -class Flatten(nn.Module): - def forward(self, input): - return input.view(input.size(0), -1) - - -class ResBlock(nn.Module): - def __init__(self, block_number, input_size): - super(ResBlock, self).__init__() - - layer_in = input_size if input_size is not None else 2 ** (block_number + 1) - layer_out = 2 ** (block_number + 2) - - self.conv1 = nn.Conv3d( - layer_in, layer_out, kernel_size=3, stride=1, padding=1, bias=False - ) - self.bn1 = nn.BatchNorm3d(layer_out) - self.act1 = nn.ELU() - - self.conv2 = nn.Conv3d( - layer_out, layer_out, kernel_size=3, stride=1, padding=1, bias=False - ) - self.bn2 = nn.BatchNorm3d(layer_out) - - # shortcut - self.shortcut = nn.Sequential( - nn.Conv3d( - layer_in, layer_out, kernel_size=1, stride=1, padding=0, bias=False - ) - ) - - self.act2 = nn.ELU() - - def forward(self, x): - out = self.conv1(x) - out = self.bn1(out) - out = self.act1(out) - - out = self.conv2(out) - out = self.bn2(out) - out += self.shortcut(x) - out = self.act2(out) - return out - - -class ResNetDesigner3D(nn.Module): - def __init__(self, input_size=[1, 169, 208, 179]): - super(ResNetDesigner3D, self).__init__() - - assert ( - len(input_size) == 4 - ), "Input must be in 3D with the corresponding number of channels." - - self.layer0 = self._make_block(1, input_size[0]) - self.layer1 = self._make_block(2) - self.layer2 = self._make_block(3) - self.layer3 = self._make_block(4) - self.layer4 = self._make_block(5) - - input_tensor = torch.zeros(input_size).unsqueeze(0) - out = self.layer0(input_tensor) - out = self.layer1(out) - out = self.layer2(out) - out = self.layer3(out) - out = self.layer4(out) - - d, h, w = self._maxpool_output_size(input_size[1::], nb_layers=5) - self.fc = nn.Sequential( - Flatten(), - nn.Linear(128 * d * h * w, 256), # t1 image - nn.ELU(), - nn.Dropout(p=0.8), - nn.Linear(256, 2), - ) - - for layer in self.fc: - out = layer(out) - - def _make_block(self, block_number, input_size=None): - return nn.Sequential( - ResBlock(block_number, input_size), nn.MaxPool3d(3, stride=2) - ) - - def _maxpool_output_size( - self, input_size, kernel_size=(3, 3, 3), stride=(2, 2, 2), nb_layers=1 - ): - import math - - d = math.floor((input_size[0] - kernel_size[0]) / stride[0] + 1) - h = math.floor((input_size[1] - kernel_size[1]) / stride[1] + 1) - w = math.floor((input_size[2] - kernel_size[2]) / stride[2] + 1) - - if nb_layers == 1: - return d, h, w - return self._maxpool_output_size( - (d, h, w), kernel_size=kernel_size, stride=stride, nb_layers=nb_layers - 1 - ) diff --git a/clinicadl/network/pythae/utils.py b/clinicadl/network/pythae/utils.py deleted file mode 100644 index 15662c4b9..000000000 --- a/clinicadl/network/pythae/utils.py +++ /dev/null @@ -1,27 +0,0 @@ -from abc import ABC, abstractmethod -from collections import OrderedDict -from typing import Tuple, Union - -import torch -import torch.nn as nn -from pydantic.dataclasses import dataclass - - -class PythaeModelOuput(ABC): - loss: torch.Tensor - - -class PythaeModel(ABC): - model_config: dataclass - encoder: nn.Module - decoder: nn.Module - - @abstractmethod - def loss_function( - self, recon_x: torch.Tensor, x: torch.Tensor - ) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]: - pass - - @abstractmethod - def 
forward(self, inputs: OrderedDict, **kwargs) -> PythaeModelOuput: - pass diff --git a/clinicadl/network/pythae/vae/__init__.py b/clinicadl/network/pythae/vae/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/clinicadl/network/pythae/vae/ae_config.py b/clinicadl/network/pythae/vae/ae_config.py deleted file mode 100644 index a91447d43..000000000 --- a/clinicadl/network/pythae/vae/ae_config.py +++ /dev/null @@ -1,18 +0,0 @@ -from pydantic import PositiveFloat, PositiveInt - -from clinicadl.network.pythae import ModelConfig -from clinicadl.utils.enum import Normalization, ReconstructionLoss - -from .ae_utils import AENetworks - - -class AEConfig(ModelConfig): - network: AENetworks = AENetworks.AE_Conv5_FC3 - loss: ReconstructionLoss = ReconstructionLoss.MSELoss - latent_space_size: PositiveInt = 128 - feature_size: PositiveInt = 1024 - n_conv: PositiveInt = 4 - io_layer_channels: PositiveInt = 8 - recons_weight: PositiveFloat = 1.0 - kl_weight: PositiveFloat = 1.0 - normalization: Normalization = Normalization.BATCH diff --git a/clinicadl/network/pythae/vae/vae_model.py b/clinicadl/network/pythae/vae/vae_model.py deleted file mode 100644 index dde81d20f..000000000 --- a/clinicadl/network/pythae/vae/vae_model.py +++ /dev/null @@ -1,52 +0,0 @@ -from typing import Callable, Type, TypeVar - -import torch -import torch.nn as nn - -from clinicadl.network.pythae import ClinicaDLModel -from clinicadl.network.pythae.utils import PythaeModel - -from .ae_config import AEConfig -from .ae_utils import PythaeAEWrapper - -T = TypeVar("T", bound="VAE") - - -class VAE(ClinicaDLModel): - def __init__( - self, - encoder: nn.Module, - decoder: nn.Module, - reconstruction_loss: Callable[ - [torch.Tensor, torch.Tensor], torch.Tensor - ] = nn.MSELoss(), - ) -> None: - super().__init__() - self.encoder = encoder - self.decoder = decoder - self.reconstruction_loss = reconstruction_loss - - @classmethod - def from_config(cls: Type[T], config: AEConfig) -> T: - pass - - @staticmethod - def from_pythae(model: PythaeModel) -> PythaeAEWrapper: - return PythaeAEWrapper(model) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - return self.reconstruct(self.embed(x)) - - def embed(self, x: torch.Tensor) -> torch.Tensor: - return self.encoder(x) - - def reconstruct(self, x: torch.Tensor) -> torch.Tensor: - return self.decoder(x) - - def training_step(self, x: torch.Tensor) -> torch.Tensor: - recon_x = self.forward(x) - loss = self.compute_loss(recon_x, x) - return loss - - def compute_loss(self, recon_x: torch.Tensor, x: torch.Tensor) -> torch.Tensor: - return self.reconstruction_loss(recon_x, x) diff --git a/clinicadl/network/pythae/ae/__init__.py b/clinicadl/nn/__init__.py similarity index 100% rename from clinicadl/network/pythae/ae/__init__.py rename to clinicadl/nn/__init__.py diff --git a/clinicadl/nn/blocks/__init__.py b/clinicadl/nn/blocks/__init__.py new file mode 100644 index 000000000..1f15bafb2 --- /dev/null +++ b/clinicadl/nn/blocks/__init__.py @@ -0,0 +1,5 @@ +from .decoder import Decoder2D, Decoder3D, VAE_Decoder2D +from .encoder import Encoder2D, Encoder3D, VAE_Encoder2D +from .residual import ResBlock +from .se import ResBlock_SE, SE_Block +from .unet import UNetDown, UNetFinalLayer, UNetUp diff --git a/clinicadl/network/pythae/nn/blocks/decoder.py b/clinicadl/nn/blocks/decoder.py similarity index 98% rename from clinicadl/network/pythae/nn/blocks/decoder.py rename to clinicadl/nn/blocks/decoder.py index 50c8c3242..27938c8d7 100644 --- 
a/clinicadl/network/pythae/nn/blocks/decoder.py +++ b/clinicadl/nn/blocks/decoder.py @@ -1,7 +1,7 @@ import torch.nn as nn import torch.nn.functional as F -from clinicadl.network.pythae.nn.layers import Unflatten2D, get_norm_layer +from clinicadl.nn.layers import Unflatten2D, get_norm_layer __all__ = [ "Decoder2D", diff --git a/clinicadl/network/pythae/nn/blocks/encoder.py b/clinicadl/nn/blocks/encoder.py similarity index 96% rename from clinicadl/network/pythae/nn/blocks/encoder.py rename to clinicadl/nn/blocks/encoder.py index 729bbfbe1..fde13b956 100644 --- a/clinicadl/network/pythae/nn/blocks/encoder.py +++ b/clinicadl/nn/blocks/encoder.py @@ -1,7 +1,7 @@ import torch.nn as nn import torch.nn.functional as F -from clinicadl.network.pythae.nn.layers import Flatten, get_norm_layer +from clinicadl.nn.layers import get_norm_layer __all__ = [ "Encoder2D", @@ -141,7 +141,7 @@ def __init__( * (self.input_w // (2**n_conv)) ) self.layers.append( - nn.Sequential(Flatten(), nn.Linear(n_pix, feature_size), nn.ReLU()) + nn.Sequential(nn.Flatten(), nn.Linear(n_pix, feature_size), nn.ReLU()) ) elif latent_dim == 2: self.layers.append( diff --git a/clinicadl/nn/blocks/residual.py b/clinicadl/nn/blocks/residual.py new file mode 100644 index 000000000..ec0a07316 --- /dev/null +++ b/clinicadl/nn/blocks/residual.py @@ -0,0 +1,40 @@ +import torch.nn as nn + + +class ResBlock(nn.Module): + def __init__(self, block_number, input_size): + super(ResBlock, self).__init__() + + layer_in = input_size if input_size is not None else 2 ** (block_number + 1) + layer_out = 2 ** (block_number + 2) + + self.conv1 = nn.Conv3d( + layer_in, layer_out, kernel_size=3, stride=1, padding=1, bias=False + ) + self.bn1 = nn.BatchNorm3d(layer_out) + self.act1 = nn.ELU() + + self.conv2 = nn.Conv3d( + layer_out, layer_out, kernel_size=3, stride=1, padding=1, bias=False + ) + self.bn2 = nn.BatchNorm3d(layer_out) + + # shortcut + self.shortcut = nn.Sequential( + nn.Conv3d( + layer_in, layer_out, kernel_size=1, stride=1, padding=0, bias=False + ) + ) + + self.act2 = nn.ELU() + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.act1(out) + + out = self.conv2(out) + out = self.bn2(out) + out += self.shortcut(x) + out = self.act2(out) + return out diff --git a/clinicadl/network/pythae/nn/utils/SECNN.py b/clinicadl/nn/blocks/se.py similarity index 51% rename from clinicadl/network/pythae/nn/utils/SECNN.py rename to clinicadl/nn/blocks/se.py index 4e34cd1ed..f406b7a92 100644 --- a/clinicadl/network/pythae/nn/utils/SECNN.py +++ b/clinicadl/nn/blocks/se.py @@ -2,14 +2,9 @@ import torch.nn as nn -class Flatten(nn.Module): - def forward(self, input): - return input.view(input.size(0), -1) - - -class SE_Blocks(nn.Module): +class SE_Block(nn.Module): def __init__(self, num_channels, ratio_channel): - super(SE_Blocks, self).__init__() + super().__init__() self.num_channels = num_channels self.avg_pooling_3D = nn.AdaptiveAvgPool3d(1) num_channels_reduced = num_channels // ratio_channel @@ -30,6 +25,7 @@ def forward(self, input_tensor): output_tensor: pt tensor """ batch_size, num_channels, D, H, W = input_tensor.size() + # Average along each channel squeeze_tensor = self.avg_pooling_3D(input_tensor) @@ -45,7 +41,7 @@ def forward(self, input_tensor): class ResBlock_SE(nn.Module): - def __init__(self, block_number, input_size, num_channels, ration_channel=8): + def __init__(self, block_number, input_size, num_channels, ratio_channel=8): super(ResBlock_SE, self).__init__() layer_in = input_size if input_size is not None 
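The ResBlock added here keeps the spatial size unchanged (3x3x3 convolutions, stride 1, padding 1) and moves from 2**(block_number + 1) to 2**(block_number + 2) channels, with a 1x1x1 convolution on the shortcut to match the channel count. A minimal sketch of the expected behaviour, with an illustrative input shape (the import path is the refactored one introduced by this patch):

import torch
from clinicadl.nn.blocks import ResBlock

block = ResBlock(block_number=1, input_size=None)  # 4 channels in, 8 channels out
x = torch.randn(2, 4, 16, 16, 16)
print(block(x).shape)  # torch.Size([2, 8, 16, 16, 16]): channels doubled, spatial dims kept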
else 2 ** (block_number + 1) @@ -62,7 +58,7 @@ def __init__(self, block_number, input_size, num_channels, ration_channel=8): ) self.bn2 = nn.BatchNorm3d(layer_out) - self.se_block = SE_Blocks(num_channels, ration_channel) + self.se_block = SE_Block(layer_out, ratio_channel) # shortcut self.shortcut = nn.Sequential( @@ -84,60 +80,3 @@ def forward(self, x): out += self.shortcut(x) out = self.act2(out) return out - - -class SECNNDesigner3D(nn.Module): - def __init__(self, input_size=[1, 169, 208, 179]): - super(SECNNDesigner3D, self).__init__() - - assert ( - len(input_size) == 4 - ), "input must be in 3d with the corresponding number of channels" - - self.layer0 = self._make_block(1, 8, 8, input_size[0]) - self.layer1 = self._make_block(2, 16) - self.layer2 = self._make_block(3, 32) - self.layer3 = self._make_block(4, 64) - self.layer4 = self._make_block(5, 128) - - input_tensor = torch.zeros(input_size).unsqueeze(0) - out = self.layer0(input_tensor) - out = self.layer1(out) - out = self.layer2(out) - out = self.layer3(out) - out = self.layer4(out) - - d, h, w = self._maxpool_output_size(input_size[1::], nb_layers=5) - self.fc = nn.Sequential( - Flatten(), - nn.Dropout(p=0.5), - nn.Linear(128 * d * h * w, 256), # t1 image - nn.ReLU(), - nn.Linear(256, 2), - ) - - for layer in self.fc: - out = layer(out) - - def _make_block( - self, block_number, num_channels, ration_channel=8, input_size=None - ): - return nn.Sequential( - ResBlock_SE(block_number, input_size, num_channels, ration_channel), - nn.MaxPool3d(3, stride=2), - ) - - def _maxpool_output_size( - self, input_size, kernel_size=(3, 3, 3), stride=(2, 2, 2), nb_layers=1 - ): - import math - - d = math.floor((input_size[0] - kernel_size[0]) / stride[0] + 1) - h = math.floor((input_size[1] - kernel_size[1]) / stride[1] + 1) - w = math.floor((input_size[2] - kernel_size[2]) / stride[2] + 1) - - if nb_layers == 1: - return d, h, w - return self._maxpool_output_size( - (d, h, w), kernel_size=kernel_size, stride=stride, nb_layers=nb_layers - 1 - ) diff --git a/clinicadl/nn/blocks/unet.py b/clinicadl/nn/blocks/unet.py new file mode 100644 index 000000000..4ca275cbd --- /dev/null +++ b/clinicadl/nn/blocks/unet.py @@ -0,0 +1,71 @@ +import torch +from torch import nn + + +class UNetDown(nn.Module): + """Descending block of the U-Net. + + Args: + in_size: (int) number of channels in the input image. + out_size : (int) number of channels in the output image. + + """ + + def __init__(self, in_size, out_size): + super(UNetDown, self).__init__() + self.model = nn.Sequential( + nn.Conv3d(in_size, out_size, kernel_size=3, stride=2, padding=1), + nn.InstanceNorm3d(out_size), + nn.LeakyReLU(0.2), + ) + + def forward(self, x): + return self.model(x) + + +class UNetUp(nn.Module): + """Ascending block of the U-Net. + + Args: + in_size: (int) number of channels in the input image. + out_size : (int) number of channels in the output image. + + """ + + def __init__(self, in_size, out_size): + super(UNetUp, self).__init__() + self.model = nn.Sequential( + nn.ConvTranspose3d(in_size, out_size, kernel_size=4, stride=2, padding=1), + nn.InstanceNorm3d(out_size), + nn.ReLU(inplace=True), + ) + + def forward(self, x, skip_input=None): + if skip_input is not None: + x = torch.cat((x, skip_input), 1) # add the skip connection + x = self.model(x) + return x + + +class UNetFinalLayer(nn.Module): + """Final block of the U-Net. + + Args: + in_size: (int) number of channels in the input image. + out_size : (int) number of channels in the output image. 
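UNetDown halves each spatial dimension with a stride-2 convolution, while UNetUp doubles them again with a stride-2 transposed convolution and, when a skip input is given, first concatenates it along the channel axis; this is why the ascending blocks of the full U-Net further down declare twice the channel count of their matching descending blocks. A rough shape check with made-up sizes:

import torch
from clinicadl.nn.blocks import UNetDown, UNetUp

x = torch.randn(1, 1, 16, 16, 16)
d = UNetDown(1, 64)(x)   # (1, 64, 8, 8, 8): spatial dims halved
u = UNetUp(64, 32)(d)    # (1, 32, 16, 16, 16): spatial dims doubled, no skip input here
skip = torch.randn(1, 32, 8, 8, 8)
v = UNetUp(64, 32)(torch.randn(1, 32, 8, 8, 8), skip_input=skip)  # concat makes 64 input channels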
+ + """ + + def __init__(self, in_size, out_size): + super(UNetFinalLayer, self).__init__() + self.model = nn.Sequential( + nn.Upsample(scale_factor=2), + nn.Conv3d(in_size, out_size, kernel_size=3, padding=1), + nn.Tanh(), + ) + + def forward(self, x, skip_input=None): + if skip_input is not None: + x = torch.cat((x, skip_input), 1) # add the skip connection + x = self.model(x) + return x diff --git a/clinicadl/network/pythae/nn/layers/__init__.py b/clinicadl/nn/layers/__init__.py similarity index 63% rename from clinicadl/network/pythae/nn/layers/__init__.py rename to clinicadl/nn/layers/__init__.py index b27ccb5a2..00c3ed1ae 100644 --- a/clinicadl/network/pythae/nn/layers/__init__.py +++ b/clinicadl/nn/layers/__init__.py @@ -1,5 +1,5 @@ from .factory import get_conv_layer, get_norm_layer, get_pool_layer -from .flatten import Flatten from .pool import PadMaxPool2d, PadMaxPool3d -from .unflatten import Unflatten2D, Unflatten3D +from .reverse import GradientReversal +from .unflatten import Reshape, Unflatten2D, Unflatten3D from .unpool import CropMaxUnpool2d, CropMaxUnpool3d diff --git a/clinicadl/network/pythae/nn/layers/factory/__init__.py b/clinicadl/nn/layers/factory/__init__.py similarity index 100% rename from clinicadl/network/pythae/nn/layers/factory/__init__.py rename to clinicadl/nn/layers/factory/__init__.py diff --git a/clinicadl/network/pythae/nn/layers/factory/conv.py b/clinicadl/nn/layers/factory/conv.py similarity index 100% rename from clinicadl/network/pythae/nn/layers/factory/conv.py rename to clinicadl/nn/layers/factory/conv.py diff --git a/clinicadl/network/pythae/nn/layers/factory/norm.py b/clinicadl/nn/layers/factory/norm.py similarity index 100% rename from clinicadl/network/pythae/nn/layers/factory/norm.py rename to clinicadl/nn/layers/factory/norm.py diff --git a/clinicadl/network/pythae/nn/layers/factory/pool.py b/clinicadl/nn/layers/factory/pool.py similarity index 100% rename from clinicadl/network/pythae/nn/layers/factory/pool.py rename to clinicadl/nn/layers/factory/pool.py diff --git a/clinicadl/network/pythae/nn/layers/pool.py b/clinicadl/nn/layers/pool.py similarity index 100% rename from clinicadl/network/pythae/nn/layers/pool.py rename to clinicadl/nn/layers/pool.py diff --git a/clinicadl/nn/layers/reverse.py b/clinicadl/nn/layers/reverse.py new file mode 100644 index 000000000..d433ac47f --- /dev/null +++ b/clinicadl/nn/layers/reverse.py @@ -0,0 +1,30 @@ +import torch +from torch import nn +from torch.autograd import Function + + +class GradientReversalFunction(Function): + @staticmethod + def forward(ctx, x, alpha): + ctx.save_for_backward(x, alpha) + return x + + @staticmethod + def backward(ctx, grad_output): + grad_input = None + _, alpha = ctx.saved_tensors + if ctx.needs_input_grad[0]: + grad_input = -alpha * grad_output + return grad_input, None + + +revgrad = GradientReversalFunction.apply + + +class GradientReversal(nn.Module): + def __init__(self, alpha): + super().__init__() + self.alpha = torch.tensor(alpha, requires_grad=False) + + def forward(self, x): + return revgrad(x, self.alpha) diff --git a/clinicadl/network/pythae/nn/layers/unflatten.py b/clinicadl/nn/layers/unflatten.py similarity index 76% rename from clinicadl/network/pythae/nn/layers/unflatten.py rename to clinicadl/nn/layers/unflatten.py index 7515c35ab..75d4eb92f 100644 --- a/clinicadl/network/pythae/nn/layers/unflatten.py +++ b/clinicadl/nn/layers/unflatten.py @@ -24,3 +24,12 @@ def forward(self, input): return input.view( input.size(0), self.channel, self.height, self.width, 
self.depth ) + + +class Reshape(nn.Module): # TODO : redundant with Unflatten + def __init__(self, size): + super(Reshape, self).__init__() + self.size = size + + def forward(self, input): + return input.view(*self.size) diff --git a/clinicadl/network/pythae/nn/layers/unpool.py b/clinicadl/nn/layers/unpool.py similarity index 100% rename from clinicadl/network/pythae/nn/layers/unpool.py rename to clinicadl/nn/layers/unpool.py diff --git a/clinicadl/nn/networks/__init__.py b/clinicadl/nn/networks/__init__.py new file mode 100644 index 000000000..c77097e60 --- /dev/null +++ b/clinicadl/nn/networks/__init__.py @@ -0,0 +1,21 @@ +from .ae import AE_Conv4_FC3, AE_Conv5_FC3, CAE_half +from .cnn import ( + Conv4_FC3, + Conv5_FC3, + ResNet3D, + SqueezeExcitationCNN, + Stride_Conv5_FC3, + resnet18, +) +from .random import RandomArchitecture +from .ssda import Conv5_FC3_SSDA +from .unet import UNet +from .vae import ( + CVAE_3D, + CVAE_3D_final_conv, + CVAE_3D_half, + VanillaDenseVAE, + VanillaDenseVAE3D, + VanillaSpatialVAE, + VanillaSpatialVAE3D, +) diff --git a/clinicadl/nn/networks/ae.py b/clinicadl/nn/networks/ae.py new file mode 100644 index 000000000..1a8ed283f --- /dev/null +++ b/clinicadl/nn/networks/ae.py @@ -0,0 +1,147 @@ +import numpy as np +from torch import nn + +from clinicadl.nn.blocks import Decoder3D, Encoder3D +from clinicadl.nn.layers import ( + CropMaxUnpool2d, + CropMaxUnpool3d, + PadMaxPool2d, + PadMaxPool3d, + Unflatten3D, +) +from clinicadl.nn.networks.cnn import Conv4_FC3, Conv5_FC3 +from clinicadl.nn.networks.factory import autoencoder_from_cnn +from clinicadl.nn.utils import compute_output_size +from clinicadl.utils.enum import BaseEnum + + +class AE2d(str, BaseEnum): + """AutoEncoders compatible with 2D inputs.""" + + AE_CONV5_FC3 = "AE_Conv5_FC3" + AE_CONV4_FC3 = "AE_Conv4_FC3" + + +class AE3d(str, BaseEnum): + """AutoEncoders compatible with 3D inputs.""" + + AE_CONV5_FC3 = "AE_Conv5_FC3" + AE_CONV4_FC3 = "AE_Conv4_FC3" + CAE_HALF = "CAE_half" + + +class ImplementedAE(str, BaseEnum): + """Implemented AutoEncoders in ClinicaDL.""" + + AE_CONV5_FC3 = "AE_Conv5_FC3" + AE_CONV4_FC3 = "AE_Conv4_FC3" + CAE_HALF = "CAE_half" + + @classmethod + def _missing_(cls, value): + raise ValueError( + f"{value} is not implemented. 
Implemented AutoEncoders are: " + + ", ".join([repr(m.value) for m in cls]) + ) + + +# Networks # +class AE(nn.Module): + """Base class for AutoEncoders.""" + + def __init__(self, encoder: nn.Module, decoder: nn.Module) -> None: + super().__init__() + self.encoder = encoder + self.decoder = decoder + + def encode(self, x): + indices_list = [] + pad_list = [] + for layer in self.encoder: + if ( + (isinstance(layer, PadMaxPool3d) or isinstance(layer, PadMaxPool2d)) + and layer.return_indices + and layer.return_pad + ): + x, indices, pad = layer(x) + indices_list.append(indices) + pad_list.append(pad) + elif ( + isinstance(layer, nn.MaxPool3d) or isinstance(layer, nn.MaxPool2d) + ) and layer.return_indices: + x, indices = layer(x) + indices_list.append(indices) + else: + x = layer(x) + return x, indices_list, pad_list + + def decode(self, x, indices_list=None, pad_list=None): + for layer in self.decoder: + if isinstance(layer, CropMaxUnpool3d) or isinstance(layer, CropMaxUnpool2d): + x = layer(x, indices_list.pop(), pad_list.pop()) + elif isinstance(layer, nn.MaxUnpool3d) or isinstance(layer, nn.MaxUnpool2d): + x = layer(x, indices_list.pop()) + else: + x = layer(x) + return x + + def forward( + self, x + ): # TODO : simplify and remove indices_list and pad_list (it is too complicated, there are lot of cases that can raise an issue) + encoded, indices_list, pad_list = self.encode(x) + return self.decode(encoded, indices_list, pad_list) + + +class AE_Conv5_FC3(AE): + """ + Autoencoder derived from the convolutional part of CNN Conv5_FC3. + """ + + def __init__(self, input_size, dropout): + cnn_model = Conv5_FC3( + input_size=input_size, output_size=1, dropout=dropout + ) # outputsize is not useful as we only take the convolutional part + encoder, decoder = autoencoder_from_cnn(cnn_model) + super().__init__(encoder, decoder) + + +class AE_Conv4_FC3(AE): + """ + Autoencoder derived from the convolutional part of CNN Conv4_FC3. + """ + + def __init__(self, input_size, dropout): + cnn_model = Conv4_FC3( + input_size=input_size, output_size=1, dropout=dropout + ) # outputsize is not useful as we only take the convolutional part + encoder, decoder = autoencoder_from_cnn(cnn_model) + super().__init__(encoder, decoder) + + +class CAE_half(AE): + """ + 3D Autoencoder derived from CVAE. 
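ImplementedAE overrides Enum._missing_, so looking up an unknown architecture name fails immediately with a message listing the valid options rather than the generic Enum error. A hypothetical lookup:

from clinicadl.nn.networks.ae import ImplementedAE

ImplementedAE("AE_Conv5_FC3")  # returns ImplementedAE.AE_CONV5_FC3
ImplementedAE("FooNet")        # raises ValueError listing the implemented autoencoders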
+ """ + + def __init__( + self, input_size, latent_space_size + ): # TODO: doesn't work for even inputs + encoder = nn.Sequential( + Encoder3D(1, 32, kernel_size=3), + Encoder3D(32, 64, kernel_size=3), + Encoder3D(64, 128, kernel_size=3), + ) + conv_output_shape = compute_output_size(input_size, encoder) + flattened_size = np.prod(conv_output_shape) + encoder.append(nn.Flatten()) + encoder.append(nn.Linear(flattened_size, latent_space_size)) + decoder = nn.Sequential( + nn.Linear(latent_space_size, flattened_size * 2), + Unflatten3D( + 256, conv_output_shape[1], conv_output_shape[2], conv_output_shape[3] + ), + Decoder3D(256, 128, kernel_size=3), + Decoder3D(128, 64, kernel_size=3), + Decoder3D(64, 1, kernel_size=3), + ) + super().__init__(encoder, decoder) diff --git a/clinicadl/nn/networks/cnn.py b/clinicadl/nn/networks/cnn.py new file mode 100644 index 000000000..eb2104b1e --- /dev/null +++ b/clinicadl/nn/networks/cnn.py @@ -0,0 +1,288 @@ +import numpy as np +import torch +import torch.utils.model_zoo as model_zoo +from torch import nn +from torchvision.models.resnet import BasicBlock + +from clinicadl.nn.layers.factory import ( + get_conv_layer, + get_norm_layer, + get_pool_layer, +) +from clinicadl.utils.enum import BaseEnum + +from .factory import ResNetDesigner, ResNetDesigner3D, SECNNDesigner3D +from .factory.resnet import model_urls + + +class CNN2d(str, BaseEnum): + """CNNs compatible with 2D inputs.""" + + CONV5_FC3 = "Conv5_FC3" + CONV4_FC3 = "Conv4_FC3" + STRIDE_CONV5_FC3 = "Stride_Conv5_FC3" + RESNET = "resnet18" + + +class CNN3d(str, BaseEnum): + """CNNs compatible with 3D inputs.""" + + CONV5_FC3 = "Conv5_FC3" + CONV4_FC3 = "Conv4_FC3" + STRIDE_CONV5_FC3 = "Stride_Conv5_FC3" + RESNET3D = "ResNet3D" + SECNN = "SqueezeExcitationCNN" + + +class ImplementedCNN(str, BaseEnum): + """Implemented CNNs in ClinicaDL.""" + + CONV5_FC3 = "Conv5_FC3" + CONV4_FC3 = "Conv4_FC3" + STRIDE_CONV5_FC3 = "Stride_Conv5_FC3" + RESNET = "resnet18" + RESNET3D = "ResNet3D" + SECNN = "SqueezeExcitationCNN" + + @classmethod + def _missing_(cls, value): + raise ValueError( + f"{value} is not implemented. 
Implemented CNNs are: " + + ", ".join([repr(m.value) for m in cls]) + ) + + +# Networks # +class CNN(nn.Module): + """Base class for CNN.""" + + def __init__(self, convolution_layers: nn.Module, fc_layers: nn.Module) -> None: + super().__init__() + self.convolutions = convolution_layers + self.fc = fc_layers + + def forward(self, x): + inter = self.convolutions(x) + print(self.convolutions) + print(inter.shape) + return self.fc(inter) + + +class Conv5_FC3(CNN): + """A Convolutional Neural Network with 5 convolution and 3 fully-connected layers.""" + + def __init__(self, input_size, output_size, dropout): + dim = len(input_size) - 1 + in_channels = input_size[0] + + conv = get_conv_layer(dim) + pool = get_pool_layer("PadMaxPool", dim=dim) + norm = get_norm_layer("BatchNorm", dim=dim) + + convolutions = nn.Sequential( + conv(in_channels, 8, 3, padding=1), + norm(8), + nn.ReLU(), + pool(2, 2), + conv(8, 16, 3, padding=1), + norm(16), + nn.ReLU(), + pool(2, 2), + conv(16, 32, 3, padding=1), + norm(32), + nn.ReLU(), + pool(2, 2), + conv(32, 64, 3, padding=1), + norm(64), + nn.ReLU(), + pool(2, 2), + conv(64, 128, 3, padding=1), + norm(128), + nn.ReLU(), + pool(2, 2), + ) + + input_tensor = torch.zeros(input_size).unsqueeze(0) + output_shape = convolutions(input_tensor).shape + + fc = nn.Sequential( + nn.Flatten(), + nn.Dropout(p=dropout), + nn.Linear(np.prod(list(output_shape)).item(), 1300), + nn.ReLU(), + nn.Linear(1300, 50), + nn.ReLU(), + nn.Linear(50, output_size), + ) + super().__init__(convolutions, fc) + + +class Conv4_FC3(CNN): + """A Convolutional Neural Network with 4 convolution and 3 fully-connected layers.""" + + def __init__(self, input_size, output_size, dropout): + dim = len(input_size) - 1 + in_channels = input_size[0] + + conv = get_conv_layer(dim) + pool = get_pool_layer("PadMaxPool", dim=dim) + norm = get_norm_layer("BatchNorm", dim=dim) + + convolutions = nn.Sequential( + conv(in_channels, 8, 3, padding=1), + norm(8), + nn.ReLU(), + pool(2, 2), + conv(8, 16, 3, padding=1), + norm(16), + nn.ReLU(), + pool(2, 2), + conv(16, 32, 3, padding=1), + norm(32), + nn.ReLU(), + pool(2, 2), + conv(32, 64, 3, padding=1), + norm(64), + nn.ReLU(), + pool(2, 2), + conv(64, 128, 3, padding=1), + norm(128), + nn.ReLU(), + pool(2, 2), + ) + + input_tensor = torch.zeros(input_size).unsqueeze(0) + output_shape = convolutions(input_tensor).shape + + fc = nn.Sequential( + nn.Flatten(), + nn.Dropout(p=dropout), + nn.Linear(np.prod(list(output_shape)).item(), 50), + nn.ReLU(), + nn.Linear(50, 40), + nn.ReLU(), + nn.Linear(40, output_size), + ) + super().__init__(convolutions, fc) + + +class Stride_Conv5_FC3(CNN): + """A Convolutional Neural Network with 5 convolution and 3 fully-connected layers and a stride of 2 for each convolutional layer.""" + + def __init__(self, input_size, output_size, dropout): + dim = len(input_size) - 1 + in_channels = input_size[0] + + conv = get_conv_layer(dim) + norm = get_norm_layer("BatchNorm", dim=dim) + + convolutions = nn.Sequential( + conv(in_channels, 8, 3, padding=1, stride=2), + norm(8), + nn.ReLU(), + conv(8, 16, 3, padding=1, stride=2), + norm(16), + nn.ReLU(), + conv(16, 32, 3, padding=1, stride=2), + norm(32), + nn.ReLU(), + conv(32, 64, 3, padding=1, stride=2), + norm(64), + nn.ReLU(), + conv(64, 128, 3, padding=1, stride=2), + norm(128), + nn.ReLU(), + ) + + input_tensor = torch.zeros(input_size).unsqueeze(0) + output_shape = convolutions(input_tensor).shape + + fc = nn.Sequential( + nn.Flatten(), + nn.Dropout(p=dropout), + 
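These CNNs all size their first fully-connected layer by pushing a zero tensor with the unbatched input shape through the convolutional stack once and flattening the result, so no feature-map size has to be computed by hand. A sketch with an illustrative 2D input (len(input_size) - 1 = 2 selects the 2D conv/norm/pool factories):

import torch
from clinicadl.nn.networks import Conv5_FC3

model = Conv5_FC3(input_size=(1, 128, 128), output_size=2, dropout=0.5)
logits = model(torch.randn(4, 1, 128, 128))  # shape (4, 2)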
nn.Linear(np.prod(list(output_shape)).item(), 1300), + nn.ReLU(), + nn.Linear(1300, 50), + nn.ReLU(), + nn.Linear(50, output_size), + ) + super().__init__(convolutions, fc) + + +class resnet18(CNN): + """ + ResNet-18 is a neural network that is 18 layers deep based on residual block. + It uses skip connections or shortcuts to jump over some layers. + It is an image classification pre-trained model. + The model input has 3 channels in RGB order. + + Reference: Kaiming He et al., Deep Residual Learning for Image Recognition. + https://arxiv.org/abs/1512.03385?context=cs + """ + + def __init__(self, input_size, output_size, dropout): + model = ResNetDesigner(input_size, BasicBlock, [2, 2, 2, 2]) + model.load_state_dict(model_zoo.load_url(model_urls["resnet18"])) + + convolutions = nn.Sequential( + model.conv1, + model.bn1, + model.relu, + model.maxpool, + model.layer1, + model.layer2, + model.layer3, + model.layer4, + model.avgpool, + ) + + # add a fc layer on top of the transfer_learning model and a softmax classifier + fc = nn.Sequential(nn.Flatten(), model.fc) + fc.add_module("drop_out", nn.Dropout(p=dropout)) + fc.add_module("fc_out", nn.Linear(1000, output_size)) + + super().__init__(convolutions, fc) + + +class ResNet3D(CNN): + """ + ResNet3D is a 3D neural network composed of 5 residual blocks. Each residual block + is compose of 3D convolutions followed by a batch normalization and an activation function. + It uses skip connections or shortcuts to jump over some layers. It's a 3D version of the + original implementation of Kaiming He et al. + + Reference: Kaiming He et al., Deep Residual Learning for Image Recognition. + https://arxiv.org/abs/1512.03385?context=cs + """ + + def __init__(self, input_size, output_size, dropout): + model = ResNetDesigner3D(input_size, output_size, dropout) + convolutions = nn.Sequential( + model.layer0, model.layer1, model.layer2, model.layer3, model.layer4 + ) + fc_layers = model.fc + super().__init__(convolutions, fc_layers) + + +class SqueezeExcitationCNN(CNN): + """ + SE-CNN is a combination of a ResNet-101 with Squeeze and Excitation blocks which was successfully + tested on brain tumour classification by Ghosal et al. 2019. SE blocks are composed of a squeeze + and an excitation step. The squeeze operation is obtained through an average pooling layer and + provides a global understanding of each channel. + + The excitation part consists of a two-layer feed-forward network that outputs a vector of n values + corresponding to the weights of each channel of the feature maps. + + Reference: Ghosal et al. 
Brain Tumor Classification Using ResNet-101 Based Squeeze and Excitation Deep Neural Network + https://ieeexplore.ieee.org/document/8882973 + + """ + + def __init__(self, input_size, output_size, dropout): + model = SECNNDesigner3D(input_size, output_size, dropout) + convolutions = nn.Sequential( + model.layer0, model.layer1, model.layer2, model.layer3, model.layer4 + ) + fc_layers = model.fc + super().__init__(convolutions, fc_layers) diff --git a/clinicadl/nn/networks/factory/__init__.py b/clinicadl/nn/networks/factory/__init__.py new file mode 100644 index 000000000..85e6303c0 --- /dev/null +++ b/clinicadl/nn/networks/factory/__init__.py @@ -0,0 +1,3 @@ +from .ae import autoencoder_from_cnn +from .resnet import ResNetDesigner, ResNetDesigner3D +from .secnn import SECNNDesigner3D diff --git a/clinicadl/nn/networks/factory/ae.py b/clinicadl/nn/networks/factory/ae.py new file mode 100644 index 000000000..fccb14484 --- /dev/null +++ b/clinicadl/nn/networks/factory/ae.py @@ -0,0 +1,142 @@ +from __future__ import annotations + +from copy import deepcopy +from typing import TYPE_CHECKING, List, Tuple + +from torch import nn + +from clinicadl.nn.layers import ( + CropMaxUnpool2d, + CropMaxUnpool3d, + PadMaxPool2d, + PadMaxPool3d, +) + +if TYPE_CHECKING: + from clinicadl.nn.networks.cnn import CNN + + +def autoencoder_from_cnn(model: CNN) -> Tuple[nn.Module, nn.Module]: + """ + Constructs an autoencoder from a given CNN. + + The encoder part corresponds to the convolutional part of the CNN. + The decoder part is the symmetrical network of the encoder. + + Parameters + ---------- + model : CNN + The input CNN model + + Returns + ------- + Tuple[nn.Module, nn.Module] + The encoder and the decoder. + """ + + encoder = deepcopy(model.convolutions) + decoder = _construct_inv_cnn(encoder) + + for i, layer in enumerate(encoder): + if isinstance(layer, PadMaxPool3d) or isinstance(layer, PadMaxPool2d): + encoder[i].set_new_return() + elif isinstance(layer, nn.MaxPool3d) or isinstance(layer, nn.MaxPool2d): + encoder[i].return_indices = True + + return encoder, decoder + + +def _construct_inv_cnn(model: nn.Module) -> nn.Module: + """ + Implements a decoder from an CNN encoder. + + The decoder part is the symmetrical list of the encoder + in which some layers are replaced by their transpose counterpart. + ConvTranspose and ReLU layers are also inverted. + + Parameters + ---------- + model : nn.Module + The input CNN encoder. + + Returns + ------- + nn.Module + The symmetrical CNN decoder. 
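autoencoder_from_cnn mirrors the convolutional part of a classifier into a decoder: Conv layers become ConvTranspose layers with in/out channels swapped, PadMaxPool layers become CropMaxUnpool layers, and the encoder's pooling layers are switched to return their indices so the decoder can undo them. A sketch of the intended call, with illustrative constructor arguments:

from clinicadl.nn.networks import Conv5_FC3
from clinicadl.nn.networks.factory import autoencoder_from_cnn

cnn = Conv5_FC3(input_size=(1, 169, 208, 179), output_size=1, dropout=0.5)
encoder, decoder = autoencoder_from_cnn(cnn)  # decoder is the mirrored stack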
+ """ + inv_layers = [] + for layer in model: + if isinstance(layer, nn.Conv3d): + inv_layers.append( + nn.ConvTranspose3d( + layer.out_channels, + layer.in_channels, + layer.kernel_size, + stride=layer.stride, + padding=layer.padding, + ) + ) + elif isinstance(layer, nn.Conv2d): + inv_layers.append( + nn.ConvTranspose2d( + layer.out_channels, + layer.in_channels, + layer.kernel_size, + stride=layer.stride, + padding=layer.padding, + ) + ) + elif isinstance(layer, PadMaxPool3d): + inv_layers.append(CropMaxUnpool3d(layer.kernel_size, stride=layer.stride)) + elif isinstance(layer, PadMaxPool2d): + inv_layers.append(CropMaxUnpool2d(layer.kernel_size, stride=layer.stride)) + elif isinstance(layer, nn.LeakyReLU): + inv_layers.append(nn.LeakyReLU(negative_slope=1 / layer.negative_slope)) + else: + inv_layers.append(deepcopy(layer)) + inv_layers = _invert_conv_and_relu(inv_layers) + inv_layers.reverse() + + return nn.Sequential(*inv_layers) + + +def _invert_conv_and_relu(inv_layers: List[nn.Module]) -> List[nn.Module]: + """ + Invert convolutional and ReLU layers (give empirical better results). + + Parameters + ---------- + inv_layers : List[nn.Module] + The list of layers. + + Returns + ------- + List[nn.Module] + The modified list of layers. + """ + idx_relu, idx_conv = -1, -1 + for idx, layer in enumerate(inv_layers): + if isinstance(layer, nn.ConvTranspose3d) or isinstance( + layer, nn.ConvTranspose2d + ): + idx_conv = idx + elif isinstance(layer, nn.ReLU) or isinstance(layer, nn.LeakyReLU): + idx_relu = idx + + if idx_conv != -1 and idx_relu != -1: + inv_layers[idx_relu], inv_layers[idx_conv] = ( + inv_layers[idx_conv], + inv_layers[idx_relu], + ) + idx_conv, idx_relu = -1, -1 + + # Check if number of features of batch normalization layers is still correct + for idx, layer in enumerate(inv_layers): + if isinstance(layer, nn.BatchNorm3d): + conv = inv_layers[idx + 1] + inv_layers[idx] = nn.BatchNorm3d(conv.out_channels) + elif isinstance(layer, nn.BatchNorm2d): + conv = inv_layers[idx + 1] + inv_layers[idx] = nn.BatchNorm2d(conv.out_channels) + + return inv_layers diff --git a/clinicadl/network/pythae/nn/utils/resnet.py b/clinicadl/nn/networks/factory/resnet.py similarity index 56% rename from clinicadl/network/pythae/nn/utils/resnet.py rename to clinicadl/nn/networks/factory/resnet.py index 27f04fd38..251199c92 100644 --- a/clinicadl/network/pythae/nn/utils/resnet.py +++ b/clinicadl/nn/networks/factory/resnet.py @@ -3,6 +3,8 @@ import torch from torch import nn +from clinicadl.nn.blocks import ResBlock + model_urls = {"resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth"} @@ -61,3 +63,57 @@ def _make_layer(self, block, planes, blocks, stride=1): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) + + +class ResNetDesigner3D(nn.Module): + def __init__(self, input_size, output_size, dropout): + super(ResNetDesigner3D, self).__init__() + + assert ( + len(input_size) == 4 + ), "Input must be in 3D with the corresponding number of channels." 
+ + self.layer0 = self._make_block(1, input_size[0]) + self.layer1 = self._make_block(2) + self.layer2 = self._make_block(3) + self.layer3 = self._make_block(4) + self.layer4 = self._make_block(5) + + input_tensor = torch.zeros(input_size).unsqueeze(0) + out = self.layer0(input_tensor) + out = self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + out = self.layer4(out) + + d, h, w = self._maxpool_output_size(input_size[1::], nb_layers=5) + self.fc = nn.Sequential( + nn.Flatten(), + nn.Linear(128 * d * h * w, 256), # t1 image + nn.ELU(), + nn.Dropout(p=dropout), + nn.Linear(256, output_size), + ) + + for layer in self.fc: + out = layer(out) + + def _make_block(self, block_number, input_size=None): + return nn.Sequential( + ResBlock(block_number, input_size), nn.MaxPool3d(3, stride=2) + ) + + def _maxpool_output_size( + self, input_size, kernel_size=(3, 3, 3), stride=(2, 2, 2), nb_layers=1 + ): + import math + + d = math.floor((input_size[0] - kernel_size[0]) / stride[0] + 1) + h = math.floor((input_size[1] - kernel_size[1]) / stride[1] + 1) + w = math.floor((input_size[2] - kernel_size[2]) / stride[2] + 1) + + if nb_layers == 1: + return d, h, w + return self._maxpool_output_size( + (d, h, w), kernel_size=kernel_size, stride=stride, nb_layers=nb_layers - 1 + ) diff --git a/clinicadl/nn/networks/factory/secnn.py b/clinicadl/nn/networks/factory/secnn.py new file mode 100644 index 000000000..270f0a357 --- /dev/null +++ b/clinicadl/nn/networks/factory/secnn.py @@ -0,0 +1,61 @@ +import torch +import torch.nn as nn + +from clinicadl.nn.blocks import ResBlock_SE + + +class SECNNDesigner3D(nn.Module): + def __init__(self, input_size, output_size, dropout): + super(SECNNDesigner3D, self).__init__() + + assert ( + len(input_size) == 4 + ), "input must be in 3d with the corresponding number of channels" + + self.layer0 = self._make_block(1, 8, 8, input_size[0]) + self.layer1 = self._make_block(2, 16) + self.layer2 = self._make_block(3, 32) + self.layer3 = self._make_block(4, 64) + self.layer4 = self._make_block(5, 128) + + input_tensor = torch.zeros(input_size).unsqueeze(0) + out = self.layer0(input_tensor) + out = self.layer1(out) + out = self.layer2(out) + out = self.layer3(out) + out = self.layer4(out) + + d, h, w = self._maxpool_output_size(input_size[1::], nb_layers=5) + self.fc = nn.Sequential( + nn.Flatten(), + nn.Dropout(p=dropout), + nn.Linear(128 * d * h * w, 256), # t1 image + nn.ReLU(), + nn.Linear(256, output_size), + ) + + for layer in self.fc: + out = layer(out) + + def _make_block( + self, block_number, num_channels, ration_channel=8, input_size=None + ): + return nn.Sequential( + ResBlock_SE(block_number, input_size, num_channels, ration_channel), + nn.MaxPool3d(3, stride=2), + ) + + def _maxpool_output_size( + self, input_size, kernel_size=(3, 3, 3), stride=(2, 2, 2), nb_layers=1 + ): + import math + + d = math.floor((input_size[0] - kernel_size[0]) / stride[0] + 1) + h = math.floor((input_size[1] - kernel_size[1]) / stride[1] + 1) + w = math.floor((input_size[2] - kernel_size[2]) / stride[2] + 1) + + if nb_layers == 1: + return d, h, w + return self._maxpool_output_size( + (d, h, w), kernel_size=kernel_size, stride=stride, nb_layers=nb_layers - 1 + ) diff --git a/clinicadl/nn/networks/random.py b/clinicadl/nn/networks/random.py new file mode 100644 index 000000000..50b18dd60 --- /dev/null +++ b/clinicadl/nn/networks/random.py @@ -0,0 +1,222 @@ +import numpy as np +import torch.nn as nn + +from clinicadl.nn.layers import PadMaxPool2d, PadMaxPool3d +from 
clinicadl.nn.networks.cnn import CNN +from clinicadl.utils.exceptions import ClinicaDLNetworksError + + +class RandomArchitecture(CNN): # TODO : unabled to test it + def __init__( + self, + convolutions_dict, + n_fcblocks, + input_size, + dropout=0.5, + network_normalization="BatchNorm", + output_size=2, + ): + """ + Construct the Architecture randomly chosen for Random Search. + + Args: + convolutions_dict: (dict) description of the convolutional blocks. + n_fcblocks: (int) number of FC blocks in the network. + input_size: (list) gives the structure of the input of the network. + dropout: (float) rate of the dropout. + network_normalization: (str) type of normalization layer in the network. + output_size: (int) Number of output neurones of the network. + gpu: (bool) If True the network weights are stored on a CPU, else GPU. + """ + self.dimension = len(input_size) - 1 + self.first_in_channels = input_size[0] + self.layers_dict = self.return_layers_dict() + self.network_normalization = network_normalization + + convolutions = nn.Sequential() + for key, item in convolutions_dict.items(): + convolutional_block = self._define_convolutional_block(item) + convolutions.add_module(key, convolutional_block) + + classifier = nn.Sequential(nn.Flatten(), nn.Dropout(p=dropout)) + + fc, _ = self._fc_dict_design( + n_fcblocks, convolutions_dict, input_size, output_size + ) + for key, item in fc.items(): + n_fc = int(key[2::]) + if n_fc == len(fc) - 1: + fc_block = self._define_fc_layer(item, last_block=True) + else: + fc_block = self._define_fc_layer(item, last_block=False) + classifier.add_module(key, fc_block) + + super().__init__(convolution_layers=convolutions, fc_layers=classifier) + + def _define_convolutional_block(self, conv_dict): + """ + Design a convolutional block from the dictionary conv_dict. + + Args: + conv_dict: (dict) A dictionary with the specifications to build a convolutional block + - n_conv (int) number of convolutional layers in the block + - in_channels (int) number of input channels + - out_channels (int) number of output channels (2 * in_channels or threshold = 512) + - d_reduction (String) "MaxPooling" or "stride" + Returns: + (nn.Module) a list of modules in a nn.Sequential list + """ + in_channels = ( + conv_dict["in_channels"] + if conv_dict["in_channels"] is not None + else self.first_in_channels + ) + out_channels = conv_dict["out_channels"] + + conv_block = [] + for i in range(conv_dict["n_conv"] - 1): + conv_block.append( + self.layers_dict["Conv"]( + in_channels, in_channels, 3, stride=1, padding=1 + ) + ) + conv_block = self._append_normalization_layer(conv_block, in_channels) + conv_block.append(nn.LeakyReLU()) + if conv_dict["d_reduction"] == "MaxPooling": + conv_block.append( + self.layers_dict["Conv"]( + in_channels, out_channels, 3, stride=1, padding=1 + ) + ) + conv_block = self._append_normalization_layer(conv_block, out_channels) + conv_block.append(nn.LeakyReLU()) + conv_block.append(self.layers_dict["Pool"](2, 2)) + elif conv_dict["d_reduction"] == "stride": + conv_block.append( + self.layers_dict["Conv"]( + in_channels, out_channels, 3, stride=2, padding=1 + ) + ) + conv_block = self._append_normalization_layer(conv_block, out_channels) + conv_block.append(nn.LeakyReLU()) + else: + raise ClinicaDLNetworksError( + f"Dimension reduction {conv_dict['d_reduction']} is not supported. Please only include" + "'MaxPooling' or 'stride' in your sampling options." 
+ ) + + return nn.Sequential(*conv_block) + + def _append_normalization_layer(self, conv_block, num_features): + """ + Appends or not a normalization layer to a convolutional block depending on network attributes. + + Args: + conv_block: (list) list of the modules of the convolutional block + num_features: (int) number of features to normalize + Returns: + (list) the updated convolutional block + """ + if self.network_normalization in ["BatchNorm", "InstanceNorm", "GroupNorm"]: + conv_block.append( + self.layers_dict[self.network_normalization](num_features) + ) + elif self.network_normalization is not None: + raise ClinicaDLNetworksError( + f"The network normalization {self.network_normalization} value must be in ['BatchNorm', 'InstanceNorm', 'GroupNorm', None]" + ) + return conv_block + + def return_layers_dict(self): + if self.dimension == 3: + layers = { + "Conv": nn.Conv3d, + "Pool": PadMaxPool3d, + "InstanceNorm": nn.InstanceNorm3d, + "BatchNorm": nn.BatchNorm3d, + "GroupNorm": nn.GroupNorm, + } + elif self.dimension == 2: + layers = { + "Conv": nn.Conv2d, + "Pool": PadMaxPool2d, + "InstanceNorm": nn.InstanceNorm2d, + "BatchNorm": nn.BatchNorm2d, + "GroupNorm": nn.GroupNorm, + } + else: + raise ValueError( + "Cannot construct random network in dimension {self.dimension}." + ) + return layers + + @staticmethod + def _define_fc_layer(fc_dict, last_block=False): + """ + Implement the FC block from the dictionary fc_dict. + + Args: + fc_dict: (dict) A dictionary with the specifications to build a FC block + - in_features (int) number of input neurones + - out_features (int) number of output neurones + last_block: (bool) indicates if the current FC layer is the last one of the network. + Returns: + (nn.Module) a list of modules in a nn.Sequential list + """ + in_features = fc_dict["in_features"] + out_features = fc_dict["out_features"] + + if last_block: + fc_block = [nn.Linear(in_features, out_features)] + else: + fc_block = [nn.Linear(in_features, out_features), nn.LeakyReLU()] + + return nn.Sequential(*fc_block) + + @staticmethod + def recursive_init(layer): + if isinstance(layer, nn.Sequential): + for sub_layer in layer: + RandomArchitecture.recursive_init(sub_layer) + else: + try: + layer.reset_parameters() + except AttributeError: + pass + + @staticmethod + def _fc_dict_design(n_fcblocks, convolutions, initial_shape, n_classes=2): + """ + Sample parameters for a random architecture (FC part). + + Args: + n_fcblocks: (int) number of fully connected blocks in the architecture. + convolutions: (dict) parameters of the convolutional part. + initial_shape: (array_like) shape of the initial input. + n_classes: (int) number of classes in the classification problem. 
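The sampling below shrinks the fully-connected widths geometrically: ratio = (in_features / n_classes) ** (1 / n_fcblocks), and each successive block divides the width by that ratio, so the last block lands exactly on n_classes. A worked example with illustrative numbers:

# in_features = 1024, n_classes = 2, n_fcblocks = 3
# ratio = (1024 / 2) ** (1 / 3) = 8
# widths: 1024 -> 128 -> 16 -> 2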
+ Returns: + (dict) parameters of the architecture + (list) the shape of the flattened layer + """ + n_conv = len(convolutions) + last_conv = convolutions[f"conv{(len(convolutions) - 1)}"] + out_channels = last_conv["out_channels"] + flattened_shape = np.ceil(np.array(initial_shape) / 2**n_conv) + flattened_shape[0] = out_channels + in_features = np.product(flattened_shape) + + # Sample number of FC layers + ratio = (in_features / n_classes) ** (1 / n_fcblocks) + + # Designing the parameters of each FC block + fc = dict() + for i in range(n_fcblocks): + fc_dict = dict() + out_features = in_features / ratio + fc_dict["in_features"] = int(np.round(in_features)) + fc_dict["out_features"] = int(np.round(out_features)) + + in_features = out_features + fc["FC" + str(i)] = fc_dict + + return fc, flattened_shape diff --git a/clinicadl/nn/networks/ssda.py b/clinicadl/nn/networks/ssda.py new file mode 100644 index 000000000..a87cb33b5 --- /dev/null +++ b/clinicadl/nn/networks/ssda.py @@ -0,0 +1,111 @@ +import numpy as np +import torch +import torch.nn as nn + +from clinicadl.nn.layers import ( + GradientReversal, + get_conv_layer, + get_norm_layer, + get_pool_layer, +) + + +class CNN_SSDA(nn.Module): + """Base class for SSDA CNN.""" + + def __init__( + self, + convolutions, + fc_class_source, + fc_class_target, + fc_domain, + alpha=1.0, + ): + super().__init__() + self.convolutions = convolutions + self.fc_class_source = fc_class_source + self.fc_class_target = fc_class_target + self.fc_domain = fc_domain + self.grad_reverse = GradientReversal(alpha=alpha) + + def forward(self, x): + x = self.convolutions(x) + x_class_source = self.fc_class_source(x) + x_class_target = self.fc_class_target(x) + x_reverse = self.grad_reverse(x) + x_domain = self.fc_domain(x_reverse) + return x_class_source, x_class_target, x_domain + + +class Conv5_FC3_SSDA(CNN_SSDA): + """ + Reduce the 2D or 3D input image to an array of size output_size. 
+ """ + + def __init__(self, input_size, output_size=2, dropout=0.5, alpha=1.0): + dim = len(input_size) - 1 + conv = get_conv_layer(dim) + pool = get_pool_layer("PadMaxPool", dim=dim) + norm = get_norm_layer("BatchNorm", dim=dim) + + convolutions = nn.Sequential( + conv(input_size[0], 8, 3, padding=1), + norm(8), + nn.ReLU(), + pool(2, 2), + conv(8, 16, 3, padding=1), + norm(16), + nn.ReLU(), + pool(2, 2), + conv(16, 32, 3, padding=1), + norm(32), + nn.ReLU(), + pool(2, 2), + conv(32, 64, 3, padding=1), + norm(64), + nn.ReLU(), + pool(2, 2), + conv(64, 128, 3, padding=1), + norm(128), + nn.ReLU(), + pool(2, 2), + ) + + # Compute the size of the first FC layer + input_tensor = torch.zeros(input_size).unsqueeze(0) + output_convolutions = convolutions(input_tensor) + + fc_class_source = nn.Sequential( + nn.Flatten(), + nn.Dropout(p=dropout), + nn.Linear(np.prod(list(output_convolutions.shape)).item(), 1300), + nn.ReLU(), + nn.Linear(1300, 50), + nn.ReLU(), + nn.Linear(50, output_size), + ) + fc_class_target = nn.Sequential( + nn.Flatten(), + nn.Dropout(p=dropout), + nn.Linear(np.prod(list(output_convolutions.shape)).item(), 1300), + nn.ReLU(), + nn.Linear(1300, 50), + nn.ReLU(), + nn.Linear(50, output_size), + ) + fc_domain = nn.Sequential( + nn.Flatten(), + nn.Dropout(p=dropout), + nn.Linear(np.prod(list(output_convolutions.shape)).item(), 1300), + nn.ReLU(), + nn.Linear(1300, 50), + nn.ReLU(), + nn.Linear(50, output_size), + ) + super().__init__( + convolutions, + fc_class_source, + fc_class_target, + fc_domain, + alpha, + ) diff --git a/clinicadl/nn/networks/unet.py b/clinicadl/nn/networks/unet.py new file mode 100644 index 000000000..45850de29 --- /dev/null +++ b/clinicadl/nn/networks/unet.py @@ -0,0 +1,39 @@ +from torch import nn + +from clinicadl.nn.blocks import UNetDown, UNetFinalLayer, UNetUp + + +class UNet(nn.Module): + """ + Generator Unet. 
+ """ + + def __init__(self): + super().__init__() + + self.down1 = UNetDown(1, 64) + self.down2 = UNetDown(64, 128) + self.down3 = UNetDown(128, 256) + self.down4 = UNetDown(256, 512) + self.down5 = UNetDown(512, 512) + + self.up1 = UNetUp(512, 512) + self.up2 = UNetUp(1024, 256) + self.up3 = UNetUp(512, 128) + self.up4 = UNetUp(256, 64) + + self.final = UNetFinalLayer(128, 1) + + def forward(self, x): + d1 = self.down1(x) + d2 = self.down2(d1) + d3 = self.down3(d2) + d4 = self.down4(d3) + d5 = self.down5(d4) + + u1 = self.up1(d5) + u2 = self.up2(u1, d4) + u3 = self.up3(u2, d3) + u4 = self.up4(u3, d2) + + return self.final(u4, d1) diff --git a/clinicadl/nn/networks/vae.py b/clinicadl/nn/networks/vae.py new file mode 100644 index 000000000..9e9b3e72f --- /dev/null +++ b/clinicadl/nn/networks/vae.py @@ -0,0 +1,566 @@ +import torch +import torch.nn as nn + +from clinicadl.nn.blocks import ( + Decoder3D, + Encoder3D, + VAE_Decoder2D, + VAE_Encoder2D, +) +from clinicadl.nn.layers import Unflatten3D +from clinicadl.nn.utils import multiply_list +from clinicadl.utils.enum import BaseEnum + + +class VAE2d(str, BaseEnum): + """VAEs compatible with 2D inputs.""" + + VANILLA_DENSE_VAE = "VanillaDenseVAE" + VANILLA_SPATIAL_VAE = "VanillaSpatialVAE" + + +class VAE3d(str, BaseEnum): + """VAEs compatible with 3D inputs.""" + + VANILLA_DENSE_VAE3D = "VanillaSpatialVAE3D" + VANILLA_SPATIAL_VAE3D = "VanillaDenseVAE3D" + CVAE_3D_FINAL_CONV = "CVAE_3D_final_conv" + CVAE_3D = "CVAE_3D" + CVAE_3D_HALF = "CVAE_3D_half" + + +class ImplementedVAE(str, BaseEnum): + """Implemented VAEs in ClinicaDL.""" + + VANILLA_DENSE_VAE = "VanillaDenseVAE" + VANILLA_SPATIAL_VAE = "VanillaSpatialVAE" + VANILLA_DENSE_VAE3D = "VanillaDenseVAE3D" + VANILLA_SPATIAL_VAE3D = "VanillaSpatialVAE3D" + CVAE_3D_FINAL_CONV = "CVAE_3D_final_conv" + CVAE_3D = "CVAE_3D" + CVAE_3D_HALF = "CVAE_3D_half" + + @classmethod + def _missing_(cls, value): + raise ValueError( + f"{value} is not implemented. Implemented VAEs are: " + + ", ".join([repr(m.value) for m in cls]) + ) + + +class VAE(nn.Module): + def __init__(self, encoder, decoder, mu_layers, log_var_layers): + super().__init__() + self.encoder = encoder + self.mu_layers = mu_layers + self.log_var_layers = log_var_layers + self.decoder = decoder + + def encode(self, image): + feature = self.encoder(image) + mu = self.mu_layers(feature) + log_var = self.log_var_layers(feature) + return mu, log_var + + def decode(self, encoded): + reconstructed = self.decoder(encoded) + return reconstructed + + @staticmethod + def _sample(mu, log_var): + std = torch.exp(log_var / 2) + eps = torch.randn_like(std) + return mu + eps * std + + def forward(self, image): + mu, log_var = self.encode(image) + if self.training: + encoded = self._sample(mu, log_var) + else: + encoded = mu + reconstructed = self.decode(encoded) + return mu, log_var, reconstructed + + +class VanillaDenseVAE(VAE): + """ + This network is a 2D convolutional variational autoencoder with a dense latent space. + + reference: Diederik P Kingma et al., Auto-Encoding Variational Bayes. 
+ https://arxiv.org/abs/1312.6114 + """ + + def __init__(self, input_size, latent_space_size, feature_size): + n_conv = 4 + io_layer_channel = 32 + + encoder = VAE_Encoder2D( + input_shape=input_size, + feature_size=feature_size, + latent_dim=1, + n_conv=n_conv, + first_layer_channels=io_layer_channel, + ) + mu_layers = nn.Linear(feature_size, latent_space_size) + log_var_layers = nn.Linear(feature_size, latent_space_size) + decoder = VAE_Decoder2D( + input_shape=input_size, + latent_size=latent_space_size, + feature_size=feature_size, + latent_dim=1, + n_conv=n_conv, + last_layer_channels=io_layer_channel, + padding=encoder.decoder_padding, + ) + super().__init__(encoder, decoder, mu_layers, log_var_layers) + + +class VanillaDenseVAE3D(VAE): + """ + This network is a 3D convolutional variational autoencoder with a dense latent space. + + reference: Diederik P Kingma et al., Auto-Encoding Variational Bayes. + https://arxiv.org/abs/1312.6114 + """ + + def __init__( + self, + size_reduction_factor, + latent_space_size=256, + feature_size=1024, + n_conv=4, + io_layer_channels=8, + ): + first_layer_channels = io_layer_channels + last_layer_channels = io_layer_channels + # automatically compute padding + decoder_output_padding = [] + + if ( + size_reduction_factor == 2 + ): # TODO : specify that it only works with certain images + self.input_size = [1, 80, 96, 80] + elif size_reduction_factor == 3: + self.input_size = [1, 56, 64, 56] + elif size_reduction_factor == 4: + self.input_size = [1, 40, 48, 40] + elif size_reduction_factor == 5: + self.input_size = [1, 32, 40, 32] + + input_c = self.input_size[0] + input_d = self.input_size[1] + input_h = self.input_size[2] + input_w = self.input_size[3] + d, h, w = input_d, input_h, input_w + + # ENCODER + encoder_layers = [] + # Input Layer + encoder_layers.append(Encoder3D(input_c, first_layer_channels)) + decoder_output_padding.append([d % 2, h % 2, w % 2]) + d, h, w = d // 2, h // 2, w // 2 + # Conv Layers + for i in range(n_conv - 1): + encoder_layers.append( + Encoder3D( + first_layer_channels * 2**i, first_layer_channels * 2 ** (i + 1) + ) + ) + # Construct output paddings + decoder_output_padding.append([d % 2, h % 2, w % 2]) + d, h, w = d // 2, h // 2, w // 2 + # Compute size of the feature space + n_pix = ( + first_layer_channels + * 2 ** (n_conv - 1) + * (input_d // (2**n_conv)) + * (input_h // (2**n_conv)) + * (input_w // (2**n_conv)) + ) + # Flatten + encoder_layers.append(nn.Flatten()) + # Intermediate feature space + if feature_size == 0: + feature_space = n_pix + else: + feature_space = feature_size + encoder_layers.append( + nn.Sequential(nn.Linear(n_pix, feature_space), nn.ReLU()) + ) + encoder = nn.Sequential(*encoder_layers) + + # LATENT SPACE + mu_layers = nn.Linear(feature_space, latent_space_size) + log_var_layers = nn.Linear(feature_space, latent_space_size) + + # DECODER + decoder_layers = [] + # Intermediate feature space + if feature_size == 0: + decoder_layers.append( + nn.Sequential( + nn.Linear(latent_space_size, n_pix), + nn.ReLU(), + ) + ) + else: + decoder_layers.append( + nn.Sequential( + nn.Linear(latent_space_size, feature_size), + nn.ReLU(), + nn.Linear(feature_size, n_pix), + nn.ReLU(), + ) + ) + # Unflatten + decoder_layers.append( + Unflatten3D( + last_layer_channels * 2 ** (n_conv - 1), + input_d // (2**n_conv), + input_h // (2**n_conv), + input_w // (2**n_conv), + ) + ) + # Decoder layers + for i in range(n_conv - 1, 0, -1): + decoder_layers.append( + Decoder3D( + last_layer_channels * 2 ** (i), + 
last_layer_channels * 2 ** (i - 1), + output_padding=decoder_output_padding[i], + ) + ) + # Output layer + decoder_layers.append( + nn.Sequential( + nn.ConvTranspose3d( + last_layer_channels, + input_c, + 4, + stride=2, + padding=1, + output_padding=decoder_output_padding[0], + bias=False, + ), + nn.Sigmoid(), + ) + ) + decoder = nn.Sequential(*decoder_layers) + + super().__init__(encoder, decoder, mu_layers, log_var_layers) + + +class VanillaSpatialVAE(VAE): + """ + This network is a 2D convolutional variational autoencoder with a spatial latent space. + + reference: Diederik P Kingma et al., Auto-Encoding Variational Bayes. + https://arxiv.org/abs/1312.6114 + """ + + def __init__( + self, + input_size, + ): + feature_channels = 64 + latent_channels = 1 + n_conv = 4 + io_layer_channel = 32 + + encoder = VAE_Encoder2D( + input_shape=input_size, + feature_size=feature_channels, + latent_dim=2, + n_conv=n_conv, + first_layer_channels=io_layer_channel, + ) + mu_layers = nn.Conv2d( + feature_channels, latent_channels, 3, stride=1, padding=1, bias=False + ) + log_var_layers = nn.Conv2d( + feature_channels, latent_channels, 3, stride=1, padding=1, bias=False + ) + decoder = VAE_Decoder2D( + input_shape=input_size, + latent_size=latent_channels, + feature_size=feature_channels, + latent_dim=2, + n_conv=n_conv, + last_layer_channels=io_layer_channel, + padding=encoder.decoder_padding, + ) + super().__init__(encoder, decoder, mu_layers, log_var_layers) + + +class VanillaSpatialVAE3D(VAE): + """ + This network is a 3D convolutional variational autoencoder with a spatial latent space. + + reference: Diederik P Kingma et al., Auto-Encoding Variational Bayes. + https://arxiv.org/abs/1312.6114 + """ + + def __init__(self, input_size): + n_conv = 4 + first_layer_channels = 32 + last_layer_channels = 32 + feature_channels = 512 + latent_channels = 1 + decoder_output_padding = [ + [1, 0, 0], + [0, 0, 0], + [0, 0, 1], + ] + input_c = input_size[0] + + encoder_layers = [] + encoder_layers.append(Encoder3D(input_c, first_layer_channels)) + for i in range(n_conv - 1): + encoder_layers.append( + Encoder3D( + first_layer_channels * 2**i, first_layer_channels * 2 ** (i + 1) + ) + ) + encoder_layers.append( + nn.Sequential( + nn.Conv3d( + first_layer_channels * 2 ** (n_conv - 1), + feature_channels, + 4, + stride=2, + padding=1, + bias=False, + ), + nn.ReLU(), + ) + ) + encoder = nn.Sequential(*encoder_layers) + mu_layers = nn.Conv3d( + feature_channels, latent_channels, 3, stride=1, padding=1, bias=False + ) + log_var_layers = nn.Conv3d( + feature_channels, latent_channels, 3, stride=1, padding=1, bias=False + ) + decoder_layers = [] + decoder_layers.append( + nn.Sequential( + nn.ConvTranspose3d( + latent_channels, + feature_channels, + 3, + stride=1, + padding=1, + bias=False, + ), + nn.ReLU(), + nn.ConvTranspose3d( + feature_channels, + last_layer_channels * 2 ** (n_conv - 1), + 4, + stride=2, + padding=1, + output_padding=[0, 1, 1], + bias=False, + ), + nn.ReLU(), + ) + ) + for i in range(n_conv - 1, 0, -1): + decoder_layers.append( + Decoder3D( + last_layer_channels * 2 ** (i), + last_layer_channels * 2 ** (i - 1), + output_padding=decoder_output_padding[i], + ) + ) + decoder_layers.append( + nn.Sequential( + nn.ConvTranspose3d( + last_layer_channels, + input_c, + 4, + stride=2, + padding=1, + output_padding=[1, 0, 1], + bias=False, + ), + nn.Sigmoid(), + ) + ) + decoder = nn.Sequential(*decoder_layers) + super().__init__(encoder, decoder, mu_layers, log_var_layers) + + +class CVAE_3D_final_conv(VAE): + """ + 
This is the convolutional autoencoder whose main objective is to project the MRI into a smaller space + with the sole criterion of correctly reconstructing the data. Nothing longitudinal here. + fc = final layer conv + """ + + def __init__(self, size_reduction_factor, latent_space_size): + n_conv = 3 + + if size_reduction_factor == 2: + self.input_size = [1, 80, 96, 80] + elif size_reduction_factor == 3: + self.input_size = [1, 56, 64, 56] + elif size_reduction_factor == 4: + self.input_size = [1, 40, 48, 40] + elif size_reduction_factor == 5: + self.input_size = [1, 32, 40, 32] + feature_size = int(multiply_list(self.input_size[1:], 2**n_conv) * 128) + + encoder = nn.Sequential( + nn.Conv3d(1, 32, 3, stride=2, padding=1), + nn.InstanceNorm3d(32), + nn.LeakyReLU(negative_slope=0.2), + nn.Conv3d(32, 64, 3, stride=2, padding=1), + nn.InstanceNorm3d(64), + nn.LeakyReLU(negative_slope=0.2), + nn.Conv3d(64, 128, 3, stride=2, padding=1), + nn.InstanceNorm3d(128), + nn.LeakyReLU(negative_slope=0.2), + nn.Flatten(start_dim=1), + ) + mu_layers = nn.Sequential( + nn.Linear(feature_size, latent_space_size), + nn.Tanh(), + ) + log_var_layers = nn.Linear(feature_size, latent_space_size) + decoder = nn.Sequential( + nn.Linear(latent_space_size, 2 * feature_size), + nn.LeakyReLU(), + nn.Unflatten( + dim=1, + unflattened_size=( + 256, + self.input_size[1] // 2**n_conv, + self.input_size[2] // 2**n_conv, + self.input_size[3] // 2**n_conv, + ), + ), + nn.ConvTranspose3d(256, 128, 3, stride=2, padding=1, output_padding=1), + nn.InstanceNorm3d(128), + nn.LeakyReLU(), + nn.ConvTranspose3d(128, 64, 3, stride=2, padding=1, output_padding=1), + nn.InstanceNorm3d(64), + nn.LeakyReLU(), + nn.ConvTranspose3d(64, 1, 3, stride=2, padding=1, output_padding=1), + nn.InstanceNorm3d(1), + nn.LeakyReLU(), + nn.Conv3d(1, 1, 3, stride=1, padding=1), + nn.Sigmoid(), + ) + super().__init__(encoder, decoder, mu_layers, log_var_layers) + + +class CVAE_3D(VAE): + """ + This is the convolutional autoencoder whose main objective is to project the MRI into a smaller space + with the sole criterion of correctly reconstructing the data. Nothing longitudinal here. + """ + + def __init__(self, latent_space_size): # TODO : only work with 1-channel input + encoder = nn.Sequential( + nn.Conv3d(1, 32, 3, stride=2, padding=1), + nn.BatchNorm3d(32), + nn.ReLU(), + nn.Conv3d(32, 64, 3, stride=2, padding=1), + nn.BatchNorm3d(64), + nn.ReLU(), + nn.Conv3d(64, 128, 3, stride=2, padding=1), + nn.BatchNorm3d(128), + nn.ReLU(), + nn.Flatten(start_dim=1), + ) + mu_layers = nn.Sequential( + nn.Linear(1683968, latent_space_size), + nn.Tanh(), + ) + log_var_layers = nn.Linear(1683968, latent_space_size) + decoder = nn.Sequential( + nn.Linear(latent_space_size, 3367936), + nn.ReLU(), + nn.Unflatten( + dim=1, + unflattened_size=( + 256, + 22, + 26, + 23, + ), + ), + nn.ConvTranspose3d( + 256, 128, 3, stride=2, padding=1, output_padding=[0, 1, 0] + ), + nn.BatchNorm3d(128), + nn.ReLU(), + nn.ConvTranspose3d( + 128, 64, 3, stride=2, padding=1, output_padding=[0, 1, 1] + ), + nn.BatchNorm3d(64), + nn.ReLU(), + nn.ConvTranspose3d(64, 1, 3, stride=2, padding=1, output_padding=[0, 1, 0]), + nn.ReLU(), + ) + super().__init__(encoder, decoder, mu_layers, log_var_layers) + + +class CVAE_3D_half(VAE): + """ + This is the convolutional autoencoder whose main objective is to project the MRI into a smaller space + with the sole criterion of correctly reconstructing the data. Nothing longitudinal here. 
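In CVAE_3D_final_conv (and CVAE_3D_half below), feature_size is the flattened size of the last encoder feature map: multiply_list divides each spatial dimension by 2**n_conv (one halving per stride-2 convolution) and the product is scaled by the 128 output channels. Worked through for size_reduction_factor = 2:

# input_size = [1, 80, 96, 80], n_conv = 3
# (80 / 8) * (96 / 8) * (80 / 8) = 10 * 12 * 10 = 1200
# feature_size = 1200 * 128 = 153600, the in_features of the mu and log_var layers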
+ """ + + def __init__(self, size_reduction_factor, latent_space_size): + n_conv = 3 + if size_reduction_factor == 2: + self.input_size = [1, 80, 96, 80] + elif size_reduction_factor == 3: + self.input_size = [1, 56, 64, 56] + elif size_reduction_factor == 4: + self.input_size = [1, 40, 48, 40] + elif size_reduction_factor == 5: + self.input_size = [1, 32, 40, 32] + feature_size = int(multiply_list(self.input_size[1:], 2**n_conv) * 128) + + encoder = nn.Sequential( + nn.Conv3d(1, 32, 3, stride=2, padding=1), + nn.InstanceNorm3d(32), + nn.LeakyReLU(negative_slope=0.2), + nn.Conv3d(32, 64, 3, stride=2, padding=1), + nn.InstanceNorm3d(64), + nn.LeakyReLU(negative_slope=0.2), + nn.Conv3d(64, 128, 3, stride=2, padding=1), + nn.InstanceNorm3d(128), + nn.LeakyReLU(negative_slope=0.2), + nn.Flatten(start_dim=1), + ) + mu_layers = nn.Sequential( + nn.Linear(feature_size, latent_space_size), + nn.Tanh(), + ) + log_var_layers = nn.Linear(feature_size, latent_space_size) + decoder = nn.Sequential( + nn.Linear(latent_space_size, 2 * feature_size), + nn.ReLU(), + nn.Unflatten( + dim=1, + unflattened_size=( + 256, + self.input_size[1] // 2**n_conv, + self.input_size[2] // 2**n_conv, + self.input_size[3] // 2**n_conv, + ), + ), + nn.ConvTranspose3d(256, 128, 3, stride=2, padding=1, output_padding=1), + nn.BatchNorm3d(128), + nn.ReLU(), + nn.ConvTranspose3d(128, 64, 3, stride=2, padding=1, output_padding=1), + nn.BatchNorm3d(64), + nn.ReLU(), + nn.ConvTranspose3d(64, 1, 3, stride=2, padding=1, output_padding=1), + nn.Sigmoid(), + ) + super().__init__(encoder, decoder, mu_layers, log_var_layers) diff --git a/clinicadl/nn/utils.py b/clinicadl/nn/utils.py new file mode 100644 index 000000000..dc3afd71c --- /dev/null +++ b/clinicadl/nn/utils.py @@ -0,0 +1,74 @@ +from collections.abc import Iterable +from typing import Any, Callable, Dict, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +from torch.nn.modules.module import _addindent + + +def torch_summarize(model, show_weights=True, show_parameters=True): + """Summarizes torch model by showing trainable parameters and weights.""" + + tmpstr = model.__class__.__name__ + " (\n" + for key, module in model._modules.items(): + # if it contains layers let call it recursively to get params and weights + if type(module) in [ + torch.nn.modules.container.Container, + torch.nn.modules.container.Sequential, + ]: + modstr = torch_summarize(module) + else: + modstr = module.__repr__() + modstr = _addindent(modstr, 2) + + params = sum([np.prod(p.size()) for p in module.parameters()]) + weights = tuple([tuple(p.size()) for p in module.parameters()]) + + tmpstr += " (" + key + "): " + modstr + if show_weights: + tmpstr += ", weights={}".format(weights) + if show_parameters: + tmpstr += ", parameters={}".format(params) + tmpstr += "\n" + + tmpstr = tmpstr + ")" + return tmpstr + + +def multiply_list(L, factor): + product = 1 + for x in L: + product = product * x / factor + return product + + +def compute_output_size( + input_size: Union[torch.Size, Tuple], layer: nn.Module +) -> Tuple: + """ + Computes the output size of a layer. + + Parameters + ---------- + input_size : Union[torch.Size, Tuple] + The unbatched input size (i.e. C, H, W(, D)) + layer : nn.Module + The layer. + + Returns + ------- + Tuple + The unbatched size of the output. 
+    """
+    input_ = torch.randn(input_size).unsqueeze(0)
+    if isinstance(layer, nn.MaxUnpool3d) or isinstance(layer, nn.MaxUnpool2d):
+        indices = torch.zeros_like(input_, dtype=int)
+        output = layer(input_, indices)
+    else:
+        output = layer(input_)
+    if isinstance(layer, nn.MaxPool3d) or isinstance(layer, nn.MaxPool2d):
+        if layer.return_indices:
+            output = output[0]
+    return tuple(output.shape[1:])

From d6b5593ffd906be9a9f6159aa95d33487fb34340 Mon Sep 17 00:00:00 2001
From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com>
Date: Mon, 22 Jul 2024 09:12:30 +0200
Subject: [PATCH 31/43] unittests

---
 .../network/pythae/nn/layers/test_layers.py   |  43 --------
 .../pythae => }/nn/blocks/test_decoder.py     |   2 +-
 .../pythae => }/nn/blocks/test_encoder.py     |   2 +-
 tests/unittests/nn/blocks/test_residual.py    |   9 ++
 tests/unittests/nn/blocks/test_se.py          |  42 ++++++++
 tests/unittests/nn/blocks/test_unet_blocks.py |  36 +++++++
 .../nn/layers/factory/test_factories.py       |  12 +--
 tests/unittests/nn/layers/test_layers.py      | 101 ++++++++++++++++++
 .../nn/networks/factory/test_ae_factory.py    |  68 ++++++++++++
 .../networks/factory/test_resnet_factory.py   |  70 ++++++++++++
 .../nn/networks/factory/test_secnn_factory.py |  30 ++++++
 tests/unittests/nn/networks/test_ae.py        |  25 +++++
 .../pythae => }/nn/networks/test_cnn.py       |   4 +-
 tests/unittests/nn/networks/test_ssda.py      |  11 ++
 tests/unittests/nn/networks/test_unet.py      |   9 ++
 tests/unittests/nn/networks/test_vae.py       |  87 +++++++++++++++
 tests/unittests/nn/test_utils.py              |  49 +++++++++
 17 files changed, 547 insertions(+), 53 deletions(-)
 delete mode 100644 tests/unittests/network/pythae/nn/layers/test_layers.py
 rename tests/unittests/{network/pythae => }/nn/blocks/test_decoder.py (95%)
 rename tests/unittests/{network/pythae => }/nn/blocks/test_encoder.py (94%)
 create mode 100644 tests/unittests/nn/blocks/test_residual.py
 create mode 100644 tests/unittests/nn/blocks/test_se.py
 create mode 100644 tests/unittests/nn/blocks/test_unet_blocks.py
 rename tests/unittests/{network/pythae => }/nn/layers/factory/test_factories.py (60%)
 create mode 100644 tests/unittests/nn/layers/test_layers.py
 create mode 100644 tests/unittests/nn/networks/factory/test_ae_factory.py
 create mode 100644 tests/unittests/nn/networks/factory/test_resnet_factory.py
 create mode 100644 tests/unittests/nn/networks/factory/test_secnn_factory.py
 create mode 100644 tests/unittests/nn/networks/test_ae.py
 rename tests/unittests/{network/pythae => }/nn/networks/test_cnn.py (88%)
 create mode 100644 tests/unittests/nn/networks/test_ssda.py
 create mode 100644 tests/unittests/nn/networks/test_unet.py
 create mode 100644 tests/unittests/nn/networks/test_vae.py
 create mode 100644 tests/unittests/nn/test_utils.py

diff --git a/tests/unittests/network/pythae/nn/layers/test_layers.py b/tests/unittests/network/pythae/nn/layers/test_layers.py
deleted file mode 100644
index dcea8a65c..000000000
--- a/tests/unittests/network/pythae/nn/layers/test_layers.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import pytest
-import torch
-
-import clinicadl.network.pythae.nn.layers as layers
-
-
-@pytest.fixture
-def input_2d():
-    return torch.randn(2, 1, 5, 5)
-
-
-@pytest.fixture
-def input_3d():
-    return torch.randn(2, 1, 5, 5, 5)
-
-
-def test_pool_layers(input_2d, input_3d):  # TODO : test unpool
-    output_3d = layers.PadMaxPool3d(kernel_size=2, stride=1)(input_3d)
-    output_2d = layers.PadMaxPool2d(kernel_size=2, stride=1)(input_2d)
-
-    assert len(output_3d.shape) == 5  # TODO : test more precisely
-    assert output_3d.shape[0] == 2
-    
assert len(output_2d.shape) == 4 - assert output_2d.shape[0] == 2 - - -def test_flatten_layers(input_2d, input_3d): - output_3d = layers.Flatten()(input_3d) - output_2d = layers.Flatten()(input_2d) - - assert output_3d.shape == torch.Size((2, 1 * 5 * 5 * 5)) - assert output_2d.shape == torch.Size((2, 1 * 5 * 5)) - - -def test_unflatten_layers(): - flattened_2d = torch.randn(2, 1 * 5 * 4) - flattened_3d = torch.randn(2, 1 * 5 * 4 * 3) - - output_3d = layers.Unflatten3D(channel=1, height=5, width=4, depth=3)(flattened_3d) - output_2d = layers.Unflatten2D(channel=1, height=5, width=4)(flattened_2d) - - assert output_3d.shape == torch.Size((2, 1, 5, 4, 3)) - assert output_2d.shape == torch.Size((2, 1, 5, 4)) diff --git a/tests/unittests/network/pythae/nn/blocks/test_decoder.py b/tests/unittests/nn/blocks/test_decoder.py similarity index 95% rename from tests/unittests/network/pythae/nn/blocks/test_decoder.py rename to tests/unittests/nn/blocks/test_decoder.py index dc9c05e4a..01bf7aef1 100644 --- a/tests/unittests/network/pythae/nn/blocks/test_decoder.py +++ b/tests/unittests/nn/blocks/test_decoder.py @@ -1,7 +1,7 @@ import pytest import torch -import clinicadl.network.pythae.nn.blocks.decoder as decoder +import clinicadl.nn.blocks.decoder as decoder @pytest.fixture diff --git a/tests/unittests/network/pythae/nn/blocks/test_encoder.py b/tests/unittests/nn/blocks/test_encoder.py similarity index 94% rename from tests/unittests/network/pythae/nn/blocks/test_encoder.py rename to tests/unittests/nn/blocks/test_encoder.py index 34574e8fa..dcb676f96 100644 --- a/tests/unittests/network/pythae/nn/blocks/test_encoder.py +++ b/tests/unittests/nn/blocks/test_encoder.py @@ -1,7 +1,7 @@ import pytest import torch -import clinicadl.network.pythae.nn.blocks.encoder as encoder +import clinicadl.nn.blocks.encoder as encoder @pytest.fixture diff --git a/tests/unittests/nn/blocks/test_residual.py b/tests/unittests/nn/blocks/test_residual.py new file mode 100644 index 000000000..302051ee3 --- /dev/null +++ b/tests/unittests/nn/blocks/test_residual.py @@ -0,0 +1,9 @@ +import torch + +from clinicadl.nn.blocks import ResBlock + + +def test_resblock(): + input_ = torch.randn((2, 4, 5, 5, 5)) + resblock = ResBlock(block_number=1, input_size=4) + assert resblock(input_).shape == torch.Size((2, 8, 5, 5, 5)) diff --git a/tests/unittests/nn/blocks/test_se.py b/tests/unittests/nn/blocks/test_se.py new file mode 100644 index 000000000..b3c3a962a --- /dev/null +++ b/tests/unittests/nn/blocks/test_se.py @@ -0,0 +1,42 @@ +import pytest +import torch + + +@pytest.fixture +def input_3d(): + return torch.randn(2, 6, 10, 10, 10) + + +def test_SE_Block(input_3d): + from clinicadl.nn.blocks import SE_Block + + layer = SE_Block(num_channels=input_3d.shape[1], ratio_channel=4) + out = layer(input_3d) + assert out.shape == input_3d.shape + + +def test_ResBlock_SE(input_3d, helpers): + from clinicadl.nn.blocks import ResBlock_SE + + layer = ResBlock_SE( + num_channels=input_3d.shape[1], + block_number=1, + input_size=input_3d.shape[1], + ratio_channel=4, + ) + out = layer(input_3d) + expected_out_shape = helpers.compute_conv_output_size( + in_size=input_3d.shape[-1], kernel_size=3, stride=1, padding=1 + ) + expected_out_shape = helpers.compute_conv_output_size( + in_size=expected_out_shape, kernel_size=3, stride=1, padding=1 + ) + assert out.shape == torch.Size( + ( + input_3d.shape[0], + 2**3, + expected_out_shape, + expected_out_shape, + expected_out_shape, + ) + ) diff --git a/tests/unittests/nn/blocks/test_unet_blocks.py 
b/tests/unittests/nn/blocks/test_unet_blocks.py new file mode 100644 index 000000000..681688b12 --- /dev/null +++ b/tests/unittests/nn/blocks/test_unet_blocks.py @@ -0,0 +1,36 @@ +import pytest +import torch + + +@pytest.fixture +def input_3d(): + return torch.randn(2, 4, 10, 10, 10) + + +@pytest.fixture +def skip_input(): + return torch.randn(2, 4, 10, 10, 10) + + +def test_UNetDown(input_3d, helpers): + from clinicadl.nn.blocks import UNetDown + + layer = UNetDown(in_size=input_3d.shape[1], out_size=8) + out = layer(input_3d) + assert out.shape[:2] == torch.Size((input_3d.shape[0], 8)) + + +def test_UNetUp(input_3d, skip_input, helpers): + from clinicadl.nn.blocks import UNetUp + + layer = UNetUp(in_size=input_3d.shape[1] * 2, out_size=2) + out = layer(input_3d, skip_input=skip_input) + assert out.shape[:2] == torch.Size((input_3d.shape[0], 2)) + + +def test_UNetFinalLayer(input_3d, skip_input, helpers): + from clinicadl.nn.blocks import UNetFinalLayer + + layer = UNetFinalLayer(in_size=input_3d.shape[1] * 2, out_size=2) + out = layer(input_3d, skip_input=skip_input) + assert out.shape[:2] == torch.Size((input_3d.shape[0], 2)) diff --git a/tests/unittests/network/pythae/nn/layers/factory/test_factories.py b/tests/unittests/nn/layers/factory/test_factories.py similarity index 60% rename from tests/unittests/network/pythae/nn/layers/factory/test_factories.py rename to tests/unittests/nn/layers/factory/test_factories.py index 40eea7b14..7036cc724 100644 --- a/tests/unittests/network/pythae/nn/layers/factory/test_factories.py +++ b/tests/unittests/nn/layers/factory/test_factories.py @@ -3,7 +3,7 @@ def test_get_conv_layer(): - from clinicadl.network.pythae.nn.layers.factory import get_conv_layer + from clinicadl.nn.layers.factory import get_conv_layer assert get_conv_layer(2) == nn.Conv2d assert get_conv_layer(3) == nn.Conv3d @@ -11,17 +11,17 @@ def test_get_conv_layer(): get_conv_layer(1) -def test_get_conv_layer(): - from clinicadl.network.pythae.nn.layers.factory import get_norm_layer +def test_get_norm_layer(): + from clinicadl.nn.layers.factory import get_norm_layer assert get_norm_layer("InstanceNorm", 2) == nn.InstanceNorm2d assert get_norm_layer("BatchNorm", 3) == nn.BatchNorm3d assert get_norm_layer("GroupNorm", 3) == nn.GroupNorm -def test_get_conv_layer(): - from clinicadl.network.pythae.nn.layers import PadMaxPool3d - from clinicadl.network.pythae.nn.layers.factory import get_pool_layer +def test_get_pool_layer(): + from clinicadl.nn.layers import PadMaxPool3d + from clinicadl.nn.layers.factory import get_pool_layer assert get_pool_layer("MaxPool", 2) == nn.MaxPool2d assert get_pool_layer("PadMaxPool", 3) == PadMaxPool3d diff --git a/tests/unittests/nn/layers/test_layers.py b/tests/unittests/nn/layers/test_layers.py new file mode 100644 index 000000000..e07eb1cf6 --- /dev/null +++ b/tests/unittests/nn/layers/test_layers.py @@ -0,0 +1,101 @@ +import pytest +import torch + +import clinicadl.nn.layers as layers + + +@pytest.fixture +def input_2d(): + return torch.randn(2, 1, 5, 5) + + +@pytest.fixture +def input_3d(): + return torch.randn(2, 1, 5, 5, 5) + + +def test_pool_layers(input_2d, input_3d): + output_3d = layers.PadMaxPool3d(kernel_size=2, stride=1)(input_3d) + output_2d = layers.PadMaxPool2d(kernel_size=2, stride=1)(input_2d) + + assert len(output_3d.shape) == 5 # TODO : test more precisely and test padding + assert output_3d.shape[0] == 2 + assert len(output_2d.shape) == 4 + assert output_2d.shape[0] == 2 + + +def test_unpool_layers(): # TODO : test padding + import 
torch.nn as nn
+
+    pool = nn.MaxPool2d(2, stride=2, return_indices=True)
+    unpool = layers.CropMaxUnpool2d(2, stride=2)
+    input_ = torch.tensor(
+        [
+            [
+                [
+                    [1.0, 2.0, 3.0, 4.0],
+                    [5.0, 6.0, 7.0, 8.0],
+                    [9.0, 10.0, 11.0, 12.0],
+                    [13.0, 14.0, 15.0, 16.0],
+                ]
+            ]
+        ]
+    )
+    expected_output = torch.tensor(
+        [
+            [
+                [
+                    [0.0, 0.0, 0.0, 0.0],
+                    [0.0, 6.0, 0.0, 8.0],
+                    [0.0, 0.0, 0.0, 0.0],
+                    [0.0, 14.0, 0.0, 16.0],
+                ]
+            ]
+        ]
+    )
+    output, indices = pool(input_)
+    assert (unpool(output, indices) == expected_output).all()
+
+    pool = nn.MaxPool3d(2, stride=1, return_indices=True)
+    unpool = layers.CropMaxUnpool3d(2, stride=1)
+    input_ = torch.tensor([[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]]])
+    expected_output = torch.tensor(
+        [[[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 8.0]]]]
+    )
+    output, indices = pool(input_)
+    assert (unpool(output, indices) == expected_output).all()
+
+
+def test_unflatten_layers():
+    flattened_2d = torch.randn(2, 1 * 5 * 4)
+    flattened_3d = torch.randn(2, 1 * 5 * 4 * 3)
+
+    output_3d = layers.Unflatten3D(channel=1, height=5, width=4, depth=3)(flattened_3d)
+    output_2d = layers.Unflatten2D(channel=1, height=5, width=4)(flattened_2d)
+
+    assert output_3d.shape == torch.Size((2, 1, 5, 4, 3))
+    assert output_2d.shape == torch.Size((2, 1, 5, 4))
+
+
+def test_reshape_layers(input_2d):
+    reshape = layers.Reshape((2, 1, 25))
+    assert reshape(input_2d).shape == torch.Size((2, 1, 25))
+
+
+def test_gradient_reversal(input_3d):
+    from copy import deepcopy
+
+    import torch.nn as nn
+
+    input_ = torch.randn(2, 5)
+    ref_ = torch.randn(2, 3)
+    layer = nn.Linear(5, 3)
+    reversed_layer = nn.Sequential(deepcopy(layer), layers.GradientReversal(alpha=2.0))
+    criterion = torch.nn.MSELoss()
+
+    criterion(layer(input_), ref_).backward()
+    criterion(reversed_layer(input_), ref_).backward()
+    assert all(
+        (p2.grad == -2.0 * p1.grad).all()
+        for p1, p2 in zip(layer.parameters(), reversed_layer.parameters())
+    )
diff --git a/tests/unittests/nn/networks/factory/test_ae_factory.py b/tests/unittests/nn/networks/factory/test_ae_factory.py
new file mode 100644
index 000000000..a4fe1a762
--- /dev/null
+++ b/tests/unittests/nn/networks/factory/test_ae_factory.py
@@ -0,0 +1,68 @@
+import pytest
+import torch
+import torch.nn as nn
+
+from clinicadl.nn.layers import (
+    PadMaxPool2d,
+    PadMaxPool3d,
+)
+
+
+@pytest.fixture
+def input_3d():
+    return torch.randn(2, 4, 10, 10, 10)
+
+
+@pytest.fixture
+def input_2d():
+    return torch.randn(2, 4, 10, 10)
+
+
+@pytest.fixture
+def cnn3d():
+    class CNN(nn.Module):
+        def __init__(self, input_size):
+            super().__init__()
+            self.convolutions = nn.Sequential(
+                nn.Conv3d(in_channels=input_size[0], out_channels=4, kernel_size=3),
+                nn.BatchNorm3d(num_features=4),
+                nn.LeakyReLU(),
+                PadMaxPool3d(kernel_size=2, stride=1, return_indices=False),
+            )
+            self.fc = nn.Sequential(
+                nn.Flatten(),
+                nn.Linear(42, 2),
+            )
+
+    return CNN
+
+
+@pytest.fixture
+def cnn2d():
+    class CNN(nn.Module):
+        def __init__(self, input_size):
+            super().__init__()
+            self.convolutions = nn.Sequential(
+                nn.Conv2d(in_channels=input_size[0], out_channels=4, kernel_size=3),
+                nn.BatchNorm2d(num_features=4),
+                nn.LeakyReLU(),
+                PadMaxPool2d(kernel_size=2, stride=1, return_indices=False),
+            )
+            self.fc = nn.Sequential(
+                nn.Flatten(),
+                nn.Linear(42, 2),  # should not raise an error
+            )
+
+    return CNN
+
+
+@pytest.mark.parametrize("input, cnn", [("input_3d", "cnn3d"), ("input_2d", "cnn2d")])
+def test_autoencoder_from_cnn(input, cnn, request):
+    from 
clinicadl.nn.networks.ae import AE + from clinicadl.nn.networks.factory import autoencoder_from_cnn + + input_ = request.getfixturevalue(input) + cnn = request.getfixturevalue(cnn)(input_size=input_.shape[1:]) + encoder, decoder = autoencoder_from_cnn(cnn) + autoencoder = AE(encoder, decoder) + assert autoencoder(input_).shape == input_.shape diff --git a/tests/unittests/nn/networks/factory/test_resnet_factory.py b/tests/unittests/nn/networks/factory/test_resnet_factory.py new file mode 100644 index 000000000..1468d37ad --- /dev/null +++ b/tests/unittests/nn/networks/factory/test_resnet_factory.py @@ -0,0 +1,70 @@ +import torch +import torch.nn as nn + + +def test_ResNetDesigner(): + from torchvision.models.resnet import BasicBlock + + from clinicadl.nn.networks.factory import ResNetDesigner + + input_ = torch.randn(2, 3, 100, 100) + + class Model(nn.Module): + def __init__(self): + super().__init__() + model = ResNetDesigner( + input_size=input_.shape[1:], + block=BasicBlock, + layers=[1, 2, 3, 4], + num_classes=2, + ) + self.convolutions = nn.Sequential( + model.conv1, + model.bn1, + model.relu, + model.maxpool, + model.layer1, + model.layer2, + model.layer3, + model.layer4, + model.avgpool, + ) + self.fc = nn.Sequential( + nn.Flatten(), + model.fc, + ) + + def forward(self, x): + return self.fc(self.convolutions(x)) + + model = Model() + + assert model(input_).shape == torch.Size([2, 2]) + + +def test_ResNetDesigner3D(): + from clinicadl.nn.networks.factory import ResNetDesigner3D + + input_ = torch.randn(2, 3, 100, 100, 100) + + class Model(nn.Module): + def __init__(self): + super().__init__() + model = ResNetDesigner3D( + input_size=input_.shape[1:], output_size=2, dropout=0.5 + ) + self.convolutions = nn.Sequential( + model.layer0, + model.layer1, + model.layer2, + model.layer3, + model.layer4, + ) + self.fc = model.fc + + def forward(self, x): + return self.fc(self.convolutions(x)) + + model = Model() + + assert model(input_).shape == torch.Size([2, 2]) diff --git a/tests/unittests/nn/networks/factory/test_secnn_factory.py b/tests/unittests/nn/networks/factory/test_secnn_factory.py new file mode 100644 index 000000000..96be92620 --- /dev/null +++ b/tests/unittests/nn/networks/factory/test_secnn_factory.py @@ -0,0 +1,30 @@ +import torch +import torch.nn as nn + + +def test_SECNNDesigner3D(): + from clinicadl.nn.networks.factory import SECNNDesigner3D + + input_ = torch.randn(2, 3, 100, 100, 100) + + class Model(nn.Module): + def __init__(self): + super().__init__() + model = SECNNDesigner3D( + input_size=input_.shape[1:], output_size=2, dropout=0.5 + ) + self.convolutions = nn.Sequential( + model.layer0, + model.layer1, + model.layer2, + model.layer3, + model.layer4, + ) + self.fc = model.fc + + def forward(self, x): + return self.fc(self.convolutions(x)) + + model = Model() + + assert model(input_).shape == torch.Size([2, 2]) diff --git a/tests/unittests/nn/networks/test_ae.py b/tests/unittests/nn/networks/test_ae.py new file mode 100644 index 000000000..9c6152d35 --- /dev/null +++ b/tests/unittests/nn/networks/test_ae.py @@ -0,0 +1,25 @@ +import pytest +import torch + +import clinicadl.nn.networks.ae as ae + + +@pytest.mark.parametrize("network", [net.value for net in ae.AE2d]) +def test_2d_ae(network): + input_ = torch.randn(2, 3, 100, 100) + network = getattr(ae, network)(input_size=input_.shape[1:], dropout=0.5) + output = network(input_) + assert output.shape == input_.shape + + +@pytest.mark.parametrize("network", [net.value for net in ae.AE3d]) +def test_3d_ae(network): + 
input_ = torch.randn(2, 1, 49, 49, 49)
+    if network == "CAE_half":
+        network = getattr(ae, network)(
+            input_size=input_.shape[1:], latent_space_size=10
+        )
+    else:
+        network = getattr(ae, network)(input_size=input_.shape[1:], dropout=0.5)
+    output = network(input_)
+    assert output.shape == input_.shape
diff --git a/tests/unittests/network/pythae/nn/networks/test_cnn.py b/tests/unittests/nn/networks/test_cnn.py
similarity index 88%
rename from tests/unittests/network/pythae/nn/networks/test_cnn.py
rename to tests/unittests/nn/networks/test_cnn.py
index ff48c9a3a..3f6a0cb87 100644
--- a/tests/unittests/network/pythae/nn/networks/test_cnn.py
+++ b/tests/unittests/nn/networks/test_cnn.py
@@ -1,12 +1,12 @@
 import pytest
 import torch
 
-import clinicadl.network.pythae.nn.networks.cnn as cnn
+import clinicadl.nn.networks.cnn as cnn
 
 
 @pytest.fixture
 def input_2d():
-    return torch.randn(2, 1, 50, 100)
+    return torch.randn(2, 3, 100, 100)
 
 
 @pytest.fixture
diff --git a/tests/unittests/nn/networks/test_ssda.py b/tests/unittests/nn/networks/test_ssda.py
new file mode 100644
index 000000000..06da85ff2
--- /dev/null
+++ b/tests/unittests/nn/networks/test_ssda.py
@@ -0,0 +1,11 @@
+import torch
+
+from clinicadl.nn.networks.ssda import Conv5_FC3_SSDA
+
+
+def test_Conv5_FC3_SSDA():
+    input_ = torch.randn(2, 1, 64, 63, 62)
+    network = Conv5_FC3_SSDA(input_size=(1, 64, 63, 62), output_size=3)
+    output = network(input_)
+    for out in output:
+        assert out.shape == torch.Size((2, 3))
diff --git a/tests/unittests/nn/networks/test_unet.py b/tests/unittests/nn/networks/test_unet.py
new file mode 100644
index 000000000..ba0408cdb
--- /dev/null
+++ b/tests/unittests/nn/networks/test_unet.py
@@ -0,0 +1,9 @@
+import torch
+
+from clinicadl.nn.networks.unet import UNet
+
+
+def test_UNet():
+    input_ = torch.randn(2, 1, 64, 64, 64)  # TODO : specify the size that works
+    network = UNet()
+    assert network(input_).shape == input_.shape
diff --git a/tests/unittests/nn/networks/test_vae.py b/tests/unittests/nn/networks/test_vae.py
new file mode 100644
index 000000000..308a2f185
--- /dev/null
+++ b/tests/unittests/nn/networks/test_vae.py
@@ -0,0 +1,87 @@
+import pytest
+import torch
+
+import clinicadl.nn.networks.vae as vae
+
+
+@pytest.fixture
+def input_2d():
+    return torch.randn(2, 3, 100, 100)
+
+
+@pytest.fixture
+def input_3d():
+    return torch.randn(2, 1, 50, 50, 50)
+
+
+@pytest.mark.parametrize(
+    "input_,network,latent_space_size",
+    [
+        (
+            torch.randn(2, 3, 100, 100),
+            vae.VanillaDenseVAE(
+                input_size=(3, 100, 100), latent_space_size=10, feature_size=100
+            ),
+            10,
+        ),
+        (
+            torch.randn(2, 1, 80, 96, 80),
+            vae.VanillaDenseVAE3D(
+                size_reduction_factor=2,
+                latent_space_size=10,
+                feature_size=100,
+            ),
+            10,
+        ),
+        # (
+        #     torch.randn(2, 1, 50, 50, 50),  # TODO : only works with certain sizes
+        #     vae.CVAE_3D(
+        #         input_size=(3, 50, 50, 50),
+        #         latent_space_size=10,
+        #     ),
+        #     10,
+        # ),
+        (
+            torch.randn(2, 1, 56, 64, 56),
+            vae.CVAE_3D_final_conv(
+                size_reduction_factor=3,
+                latent_space_size=10,
+            ),
+            10,
+        ),
+        (
+            torch.randn(2, 1, 32, 40, 32),
+            vae.CVAE_3D_half(
+                size_reduction_factor=5,
+                latent_space_size=10,
+            ),
+            10,
+        ),
+    ],
+)
+def test_DenseVAEs(input_, network, latent_space_size):
+    output = network(input_)
+
+    assert output[0].shape == torch.Size((input_.shape[0], latent_space_size))
+    assert output[1].shape == torch.Size((input_.shape[0], latent_space_size))
+    assert output[2].shape == input_.shape
+
+
+@pytest.mark.parametrize(
+    "input_,network",
+    [
+        (
+            torch.randn(2, 3, 100, 100),
+            
vae.VanillaSpatialVAE(input_size=(3, 100, 100)),
+        ),
+        # (torch.randn(2, 3, 100, 100, 100), vae.VanillaSpatialVAE3D(input_size=(3, 100, 100, 100))),  # TODO : output doesn't have the same size
+    ],
+)
+def test_SpatialVAEs(input_, network):
+    output = network(input_)
+
+    assert output[0].shape[:2] == torch.Size((input_.shape[0], 1))
+    assert len(output[0].shape) == len(input_.shape)
+    assert output[1].shape[:2] == torch.Size((input_.shape[0], 1))
+    assert len(output[1].shape) == len(input_.shape)
+    assert output[2].shape == input_.shape
diff --git a/tests/unittests/nn/test_utils.py b/tests/unittests/nn/test_utils.py
new file mode 100644
index 000000000..bcd379613
--- /dev/null
+++ b/tests/unittests/nn/test_utils.py
@@ -0,0 +1,49 @@
+import torch
+import torch.nn as nn
+
+
+def test_compute_output_size():
+    from clinicadl.nn.utils import compute_output_size
+
+    input_2d = torch.randn(3, 2, 100, 100)
+    input_3d = torch.randn(3, 1, 100, 100, 100)
+    indices_2d = torch.randint(0, 100, size=(3, 2, 100, 100))
+
+    conv3d = nn.Conv3d(
+        in_channels=1,
+        out_channels=1,
+        kernel_size=7,
+        stride=2,
+        padding=(1, 2, 3),
+        dilation=3,
+    )
+    max_pool3d = nn.MaxPool3d(kernel_size=(9, 8, 7), stride=1, padding=3, dilation=2)
+    conv_transpose2d = nn.ConvTranspose2d(
+        in_channels=2,
+        out_channels=1,
+        kernel_size=7,
+        stride=(4, 3),
+        padding=0,
+        dilation=(2, 1),
+        output_padding=1,
+    )
+    max_unpool2d = nn.MaxUnpool2d(kernel_size=7, stride=(2, 1), padding=(1, 1))
+    sequential = nn.Sequential(
+        conv3d, nn.Dropout(p=0.5), nn.BatchNorm3d(num_features=1), max_pool3d
+    )
+
+    assert compute_output_size(input_3d.shape[1:], conv3d) == tuple(
+        conv3d(input_3d).shape[1:]
+    )
+    assert compute_output_size(input_3d.shape[1:], max_pool3d) == tuple(
+        max_pool3d(input_3d).shape[1:]
+    )
+    assert compute_output_size(input_2d.shape[1:], conv_transpose2d) == tuple(
+        conv_transpose2d(input_2d).shape[1:]
+    )
+    assert compute_output_size(input_2d.shape[1:], max_unpool2d) == tuple(
+        max_unpool2d(input_2d, indices_2d).shape[1:]
+    )
+    assert compute_output_size(tuple(input_3d.shape[1:]), sequential) == tuple(
+        sequential(input_3d).shape[1:]
+    )

From 4b2cb1f48f489335c1db065963fb24b3ab2f38dc Mon Sep 17 00:00:00 2001
From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com>
Date: Mon, 22 Jul 2024 09:23:32 +0200
Subject: [PATCH 32/43] remove losses

---
 clinicadl/losses/__init__.py |  3 +-
 clinicadl/losses/config.py   | 61 ++++++++++++++++++++++++++++++++++++
 clinicadl/losses/factory.py  | 51 ++++++++----------------------
 3 files changed, 76 insertions(+), 39 deletions(-)
 create mode 100644 clinicadl/losses/config.py

diff --git a/clinicadl/losses/__init__.py b/clinicadl/losses/__init__.py
index 4bd74d41e..a7710a00a 100644
--- a/clinicadl/losses/__init__.py
+++ b/clinicadl/losses/__init__.py
@@ -1 +1,2 @@
-from .factory import ImplementedLoss, get_loss_function
+from .config import LossConfig
+from .factory import get_loss_function
diff --git a/clinicadl/losses/config.py b/clinicadl/losses/config.py
new file mode 100644
index 000000000..c3765d67b
--- /dev/null
+++ b/clinicadl/losses/config.py
@@ -0,0 +1,61 @@
+from typing import List, Optional
+
+from pydantic import BaseModel, ConfigDict, NonNegativeFloat, PositiveFloat
+
+from clinicadl.utils.enum import BaseEnum
+
+
+class ClassificationLoss(str, BaseEnum):
+    """Losses that can be used only for classification."""
+
+    CROSS_ENTROPY = "CrossEntropyLoss"
+    MULTI_MARGIN = "MultiMarginLoss"
+
+
+class ImplementedLoss(str, BaseEnum):
+    """Implemented losses in ClinicaDL."""
+
+ CROSS_ENTROPY = "CrossEntropyLoss" + MULTI_MARGIN = "MultiMarginLoss" + L1 = "L1Loss" + MSE = "MSELoss" + HUBER = "HuberLoss" + SMOOTH_L1 = "SmoothL1Loss" + + @classmethod + def _missing_(cls, value): + raise ValueError( + f"{value} is not implemented. Implemented losses are: " + + ", ".join([repr(m.value) for m in cls]) + ) + + +class Reduction(str, BaseEnum): + """Supported reduction method in ClinicaDL.""" + + NONE = "none" + MEAN = "mean" + SUM = "sum" + + +class Order(int, BaseEnum): + """Supported order of L-norm in MultiMarginLoss.""" + + ONE = 1 + TWO = 2 + + +class LossConfig(BaseModel): + """Config class to configure the loss function.""" + + loss: ImplementedLoss = ImplementedLoss.MSE + reduction: Reduction = Reduction.MEAN + delta: PositiveFloat = 1.0 + beta: PositiveFloat = 1.0 + p: Order = Order.ONE + margin: float = 1.0 + weight: Optional[List[NonNegativeFloat]] = None + # pydantic config + model_config = ConfigDict( + validate_assignment=True, use_enum_values=True, validate_default=True + ) diff --git a/clinicadl/losses/factory.py b/clinicadl/losses/factory.py index a6e192b30..940ee371b 100644 --- a/clinicadl/losses/factory.py +++ b/clinicadl/losses/factory.py @@ -1,55 +1,30 @@ -from __future__ import annotations +import inspect -from enum import Enum -from typing import TYPE_CHECKING, Type, Union +import torch.nn as nn -if TYPE_CHECKING: - import torch.nn as nn +from .config import LossConfig -class ClassificationLoss(str, Enum): - """Losses that can be used only for classification.""" - - CrossENTROPY = "CrossEntropyLoss" - MultiMargin = "MultiMarginLoss" - - -class ImplementedLoss(str, Enum): - """Implemented losses in ClinicaDL.""" - - CrossENTROPY = "CrossEntropyLoss" - MultiMargin = "MultiMarginLoss" - L1 = "L1Loss" - MSE = "MSELoss" - HUBER = "HuberLoss" - SmoothL1 = "SmoothL1Loss" - - @classmethod - def _missing_(cls, value): - raise ValueError( - f"{value} is not implemented. Implemented losses are: " - + ", ".join([repr(m.value) for m in cls]) - ) - - -def get_loss_function(loss: Union[str, ImplementedLoss]) -> Type[nn.Module]: +def get_loss_function(config: LossConfig) -> nn.Module: """ Factory function to get a loss function from its name. Parameters ---------- - loss : Union[str, ImplementedLoss] - The name of the loss. + loss : LossConfig + The config class with the parameters of the loss function. Returns ------- - Type[nn.Module] - The loss function object. + nn.Module + The loss function. """ - import torch.nn as nn + loss_class = getattr(nn, config.loss) + expected_args = inspect.getfullargspec(loss_class).args + config = {arg: v for arg, v in config.model_dump().items() if arg in expected_args} + loss = loss_class(**config) - loss = ImplementedLoss(loss) - return getattr(nn, loss.value) + return loss # TODO : what about them? 
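A minimal usage sketch of the config-driven factory introduced by this patch, assuming a clinicadl checkout at this commit; the HuberLoss values below are purely illustrative. Only the arguments accepted by the selected loss are forwarded to PyTorch, so unrelated fields of the config are ignored:

import torch

from clinicadl.losses import LossConfig, get_loss_function

# Illustrative parameters: "HuberLoss", "sum" and delta=2.0 are all valid
# values for the fields declared on LossConfig; margin, p, etc. are dropped
# because they are not arguments of torch.nn.HuberLoss.
config = LossConfig(loss="HuberLoss", reduction="sum", delta=2.0)
criterion = get_loss_function(config)  # at this commit, returns the nn.Module itself

prediction = torch.randn(4, 1)
target = torch.randn(4, 1)
value = criterion(prediction, target)  # scalar tensor: summed Huber loss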
From 391eafd51559d689698a77de596b30f85aa715f0 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Mon, 22 Jul 2024 09:25:14 +0200 Subject: [PATCH 33/43] remove losses --- clinicadl/losses/__init__.py | 2 -- clinicadl/losses/config.py | 61 ------------------------------------ clinicadl/losses/factory.py | 35 --------------------- 3 files changed, 98 deletions(-) delete mode 100644 clinicadl/losses/__init__.py delete mode 100644 clinicadl/losses/config.py delete mode 100644 clinicadl/losses/factory.py diff --git a/clinicadl/losses/__init__.py b/clinicadl/losses/__init__.py deleted file mode 100644 index a7710a00a..000000000 --- a/clinicadl/losses/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .config import LossConfig -from .factory import get_loss_function diff --git a/clinicadl/losses/config.py b/clinicadl/losses/config.py deleted file mode 100644 index c3765d67b..000000000 --- a/clinicadl/losses/config.py +++ /dev/null @@ -1,61 +0,0 @@ -from typing import List, Optional - -from pydantic import BaseModel, ConfigDict, NonNegativeFloat, PositiveFloat - -from clinicadl.utils.enum import BaseEnum - - -class ClassificationLoss(str, BaseEnum): - """Losses that can be used only for classification.""" - - CROSS_ENTROPY = "CrossEntropyLoss" - MULTI_MARGIN = "MultiMarginLoss" - - -class ImplementedLoss(str, BaseEnum): - """Implemented losses in ClinicaDL.""" - - CROSS_ENTROPY = "CrossEntropyLoss" - MULTI_MARGIN = "MultiMarginLoss" - L1 = "L1Loss" - MSE = "MSELoss" - HUBER = "HuberLoss" - SMOOTH_L1 = "SmoothL1Loss" - - @classmethod - def _missing_(cls, value): - raise ValueError( - f"{value} is not implemented. Implemented losses are: " - + ", ".join([repr(m.value) for m in cls]) - ) - - -class Reduction(str, BaseEnum): - """Supported reduction method in ClinicaDL.""" - - NONE = "none" - MEAN = "mean" - SUM = "sum" - - -class Order(int, BaseEnum): - """Supported order of L-norm in MultiMarginLoss.""" - - ONE = 1 - TWO = 2 - - -class LossConfig(BaseModel): - """Config class to configure the loss function.""" - - loss: ImplementedLoss = ImplementedLoss.MSE - reduction: Reduction = Reduction.MEAN - delta: PositiveFloat = 1.0 - beta: PositiveFloat = 1.0 - p: Order = Order.ONE - margin: float = 1.0 - weight: Optional[List[NonNegativeFloat]] = None - # pydantic config - model_config = ConfigDict( - validate_assignment=True, use_enum_values=True, validate_default=True - ) diff --git a/clinicadl/losses/factory.py b/clinicadl/losses/factory.py deleted file mode 100644 index 940ee371b..000000000 --- a/clinicadl/losses/factory.py +++ /dev/null @@ -1,35 +0,0 @@ -import inspect - -import torch.nn as nn - -from .config import LossConfig - - -def get_loss_function(config: LossConfig) -> nn.Module: - """ - Factory function to get a loss function from its name. - - Parameters - ---------- - loss : LossConfig - The config class with the parameters of the loss function. - - Returns - ------- - nn.Module - The loss function. - """ - loss_class = getattr(nn, config.loss) - expected_args = inspect.getfullargspec(loss_class).args - config = {arg: v for arg, v in config.model_dump().items() if arg in expected_args} - loss = loss_class(**config) - - return loss - - -# TODO : what about them? 
-# "KLDivLoss", -# "BCEWithLogitsLoss", -# "VAEGaussianLoss", -# "VAEBernoulliLoss", -# "VAEContinuousBernoulliLoss", From 456431d4d630603a154de2dffbffb9809a37a890 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Mon, 22 Jul 2024 09:27:36 +0200 Subject: [PATCH 34/43] remove losses --- tests/unittests/losses/test_factory.py | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 tests/unittests/losses/test_factory.py diff --git a/tests/unittests/losses/test_factory.py b/tests/unittests/losses/test_factory.py deleted file mode 100644 index 43df2831c..000000000 --- a/tests/unittests/losses/test_factory.py +++ /dev/null @@ -1,10 +0,0 @@ -import pytest - - -def test_get_loss_function(): - from clinicadl.losses import ImplementedLoss, get_loss_function - - for loss in [e.value for e in ImplementedLoss]: - get_loss_function(loss) - with pytest.raises(ValueError): - get_loss_function("abc") From 03ec7b92a283c2a2dedcbed2485239ace41274d2 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Mon, 22 Jul 2024 09:41:55 +0200 Subject: [PATCH 35/43] solve problem in unittests (two files with the same name) --- tests/unittests/nn/__init__.py | 0 tests/unittests/nn/blocks/__init__.py | 0 tests/unittests/nn/blocks/test_se.py | 13 ++----------- .../nn/blocks/{test_unet_blocks.py => test_unet.py} | 6 +++--- tests/unittests/nn/layers/__init__.py | 0 tests/unittests/nn/layers/factory/__init__.py | 0 tests/unittests/nn/networks/__init__.py | 0 tests/unittests/nn/networks/factory/__init__.py | 0 8 files changed, 5 insertions(+), 14 deletions(-) create mode 100644 tests/unittests/nn/__init__.py create mode 100644 tests/unittests/nn/blocks/__init__.py rename tests/unittests/nn/blocks/{test_unet_blocks.py => test_unet.py} (85%) create mode 100644 tests/unittests/nn/layers/__init__.py create mode 100644 tests/unittests/nn/layers/factory/__init__.py create mode 100644 tests/unittests/nn/networks/__init__.py create mode 100644 tests/unittests/nn/networks/factory/__init__.py diff --git a/tests/unittests/nn/__init__.py b/tests/unittests/nn/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unittests/nn/blocks/__init__.py b/tests/unittests/nn/blocks/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unittests/nn/blocks/test_se.py b/tests/unittests/nn/blocks/test_se.py index b3c3a962a..2444bcc3a 100644 --- a/tests/unittests/nn/blocks/test_se.py +++ b/tests/unittests/nn/blocks/test_se.py @@ -15,7 +15,7 @@ def test_SE_Block(input_3d): assert out.shape == input_3d.shape -def test_ResBlock_SE(input_3d, helpers): +def test_ResBlock_SE(input_3d): from clinicadl.nn.blocks import ResBlock_SE layer = ResBlock_SE( @@ -25,18 +25,9 @@ def test_ResBlock_SE(input_3d, helpers): ratio_channel=4, ) out = layer(input_3d) - expected_out_shape = helpers.compute_conv_output_size( - in_size=input_3d.shape[-1], kernel_size=3, stride=1, padding=1 - ) - expected_out_shape = helpers.compute_conv_output_size( - in_size=expected_out_shape, kernel_size=3, stride=1, padding=1 - ) - assert out.shape == torch.Size( + assert out.shape[:2] == torch.Size( ( input_3d.shape[0], 2**3, - expected_out_shape, - expected_out_shape, - expected_out_shape, ) ) diff --git a/tests/unittests/nn/blocks/test_unet_blocks.py b/tests/unittests/nn/blocks/test_unet.py similarity index 85% rename from tests/unittests/nn/blocks/test_unet_blocks.py rename to tests/unittests/nn/blocks/test_unet.py index 681688b12..4e7170d77 
100644 --- a/tests/unittests/nn/blocks/test_unet_blocks.py +++ b/tests/unittests/nn/blocks/test_unet.py @@ -12,7 +12,7 @@ def skip_input(): return torch.randn(2, 4, 10, 10, 10) -def test_UNetDown(input_3d, helpers): +def test_UNetDown(input_3d): from clinicadl.nn.blocks import UNetDown layer = UNetDown(in_size=input_3d.shape[1], out_size=8) @@ -20,7 +20,7 @@ def test_UNetDown(input_3d, helpers): assert out.shape[:2] == torch.Size((input_3d.shape[0], 8)) -def test_UNetUp(input_3d, skip_input, helpers): +def test_UNetUp(input_3d, skip_input): from clinicadl.nn.blocks import UNetUp layer = UNetUp(in_size=input_3d.shape[1] * 2, out_size=2) @@ -28,7 +28,7 @@ def test_UNetUp(input_3d, skip_input, helpers): assert out.shape[:2] == torch.Size((input_3d.shape[0], 2)) -def test_UNetFinalLayer(input_3d, skip_input, helpers): +def test_UNetFinalLayer(input_3d, skip_input): from clinicadl.nn.blocks import UNetFinalLayer layer = UNetFinalLayer(in_size=input_3d.shape[1] * 2, out_size=2) diff --git a/tests/unittests/nn/layers/__init__.py b/tests/unittests/nn/layers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unittests/nn/layers/factory/__init__.py b/tests/unittests/nn/layers/factory/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unittests/nn/networks/__init__.py b/tests/unittests/nn/networks/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unittests/nn/networks/factory/__init__.py b/tests/unittests/nn/networks/factory/__init__.py new file mode 100644 index 000000000..e69de29bb From 77345280347d93c54c5f18b5bdcb788fcec77c9b Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Mon, 22 Jul 2024 13:48:34 +0200 Subject: [PATCH 36/43] add loss module --- clinicadl/losses/__init__.py | 2 ++ clinicadl/losses/config.py | 61 ++++++++++++++++++++++++++++++++++++ clinicadl/losses/factory.py | 48 ++++++++++++++++++++++++++++ 3 files changed, 111 insertions(+) create mode 100644 clinicadl/losses/__init__.py create mode 100644 clinicadl/losses/config.py create mode 100644 clinicadl/losses/factory.py diff --git a/clinicadl/losses/__init__.py b/clinicadl/losses/__init__.py new file mode 100644 index 000000000..5f0998372 --- /dev/null +++ b/clinicadl/losses/__init__.py @@ -0,0 +1,2 @@ +from .config import ClassificationLoss, ImplementedLoss, LossConfig +from .factory import get_loss_function diff --git a/clinicadl/losses/config.py b/clinicadl/losses/config.py new file mode 100644 index 000000000..c3765d67b --- /dev/null +++ b/clinicadl/losses/config.py @@ -0,0 +1,61 @@ +from typing import List, Optional + +from pydantic import BaseModel, ConfigDict, NonNegativeFloat, PositiveFloat + +from clinicadl.utils.enum import BaseEnum + + +class ClassificationLoss(str, BaseEnum): + """Losses that can be used only for classification.""" + + CROSS_ENTROPY = "CrossEntropyLoss" + MULTI_MARGIN = "MultiMarginLoss" + + +class ImplementedLoss(str, BaseEnum): + """Implemented losses in ClinicaDL.""" + + CROSS_ENTROPY = "CrossEntropyLoss" + MULTI_MARGIN = "MultiMarginLoss" + L1 = "L1Loss" + MSE = "MSELoss" + HUBER = "HuberLoss" + SMOOTH_L1 = "SmoothL1Loss" + + @classmethod + def _missing_(cls, value): + raise ValueError( + f"{value} is not implemented. 
Implemented losses are: " + + ", ".join([repr(m.value) for m in cls]) + ) + + +class Reduction(str, BaseEnum): + """Supported reduction method in ClinicaDL.""" + + NONE = "none" + MEAN = "mean" + SUM = "sum" + + +class Order(int, BaseEnum): + """Supported order of L-norm in MultiMarginLoss.""" + + ONE = 1 + TWO = 2 + + +class LossConfig(BaseModel): + """Config class to configure the loss function.""" + + loss: ImplementedLoss = ImplementedLoss.MSE + reduction: Reduction = Reduction.MEAN + delta: PositiveFloat = 1.0 + beta: PositiveFloat = 1.0 + p: Order = Order.ONE + margin: float = 1.0 + weight: Optional[List[NonNegativeFloat]] = None + # pydantic config + model_config = ConfigDict( + validate_assignment=True, use_enum_values=True, validate_default=True + ) diff --git a/clinicadl/losses/factory.py b/clinicadl/losses/factory.py new file mode 100644 index 000000000..dc2204c52 --- /dev/null +++ b/clinicadl/losses/factory.py @@ -0,0 +1,48 @@ +import inspect +from copy import deepcopy +from typing import Any, Dict, Tuple + +import torch + +from .config import LossConfig + + +def get_loss_function(config: LossConfig) -> Tuple[torch.nn.Module, Dict[str, Any]]: + """ + Factory function to get a loss function from its name. + + Parameters + ---------- + loss : LossConfig + The config class with the parameters of the loss function. + + Returns + ------- + nn.Module + The loss function. + Dict[str, Any] + The config dict with only the parameters relevant to the selected + loss function. + """ + loss_class = getattr(torch.nn, config.loss) + expected_args = inspect.getfullargspec(loss_class).args + config_dict = { + arg: v for arg, v in config.model_dump().items() if arg in expected_args + } + + config_dict_ = deepcopy(config_dict) + if config.weight is not None: + config_dict_["weight"] = torch.Tensor(config_dict_["weight"]) + loss = loss_class(**config_dict_) + + config_dict["loss"] = config.loss + + return loss, config_dict + + +# TODO : what about them? 
+# "KLDivLoss", +# "BCEWithLogitsLoss", +# "VAEGaussianLoss", +# "VAEBernoulliLoss", +# "VAEContinuousBernoulliLoss", From ed466b4ac26c232ba85c7cefcd0400be12c8547d Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Mon, 22 Jul 2024 13:48:51 +0200 Subject: [PATCH 37/43] add unittest --- tests/unittests/losses/__init__.py | 0 tests/unittests/losses/test_config.py | 11 +++++++++++ tests/unittests/losses/test_factory.py | 22 ++++++++++++++++++++++ 3 files changed, 33 insertions(+) create mode 100644 tests/unittests/losses/__init__.py create mode 100644 tests/unittests/losses/test_config.py create mode 100644 tests/unittests/losses/test_factory.py diff --git a/tests/unittests/losses/__init__.py b/tests/unittests/losses/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unittests/losses/test_config.py b/tests/unittests/losses/test_config.py new file mode 100644 index 000000000..b18ba3e4c --- /dev/null +++ b/tests/unittests/losses/test_config.py @@ -0,0 +1,11 @@ +import pytest + + +def test_LossConfig(): + from clinicadl.losses import LossConfig + + LossConfig(reduction="none", p=2, weight=[0.1, 0.1, 0.8]) + with pytest.raises(ValueError): + LossConfig(loss="abc") + with pytest.raises(ValueError): + LossConfig(weight=[0.1, -0.1, 0.8]) diff --git a/tests/unittests/losses/test_factory.py b/tests/unittests/losses/test_factory.py new file mode 100644 index 000000000..f76e43199 --- /dev/null +++ b/tests/unittests/losses/test_factory.py @@ -0,0 +1,22 @@ +import pytest + + +def test_get_loss_function(): + from torch.nn import MultiMarginLoss + + from clinicadl.losses import ImplementedLoss, LossConfig, get_loss_function + + for loss in [e.value for e in ImplementedLoss]: + config = LossConfig(loss=loss) + get_loss_function(config) + + config = LossConfig(loss="MultiMarginLoss", reduction="sum", weight=[1, 2, 3]) + loss, config_dict = get_loss_function(config) + assert isinstance(loss, MultiMarginLoss) + assert config_dict == { + "loss": "MultiMarginLoss", + "reduction": "sum", + "p": 1, + "margin": 1.0, + "weight": [1, 2, 3], + } From 4545ac7aff8abb89d49a4ff4d257bc4f572eaec4 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Mon, 22 Jul 2024 13:59:30 +0200 Subject: [PATCH 38/43] modify docstring --- clinicadl/losses/config.py | 2 +- clinicadl/losses/factory.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/clinicadl/losses/config.py b/clinicadl/losses/config.py index c3765d67b..b4ebbb931 100644 --- a/clinicadl/losses/config.py +++ b/clinicadl/losses/config.py @@ -39,7 +39,7 @@ class Reduction(str, BaseEnum): class Order(int, BaseEnum): - """Supported order of L-norm in MultiMarginLoss.""" + """Supported order of L-norm for MultiMarginLoss.""" ONE = 1 TWO = 2 diff --git a/clinicadl/losses/factory.py b/clinicadl/losses/factory.py index dc2204c52..b1a2f3269 100644 --- a/clinicadl/losses/factory.py +++ b/clinicadl/losses/factory.py @@ -9,7 +9,7 @@ def get_loss_function(config: LossConfig) -> Tuple[torch.nn.Module, Dict[str, Any]]: """ - Factory function to get a loss function from its name. + Factory function to get a loss function. 
Parameters ---------- From 8e7f9211d57e449bf0d795df7918ff6f6e139f2d Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Mon, 22 Jul 2024 14:00:35 +0200 Subject: [PATCH 39/43] modify docstring --- clinicadl/losses/factory.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clinicadl/losses/factory.py b/clinicadl/losses/factory.py index b1a2f3269..f918b842e 100644 --- a/clinicadl/losses/factory.py +++ b/clinicadl/losses/factory.py @@ -9,7 +9,7 @@ def get_loss_function(config: LossConfig) -> Tuple[torch.nn.Module, Dict[str, Any]]: """ - Factory function to get a loss function. + Factory function to get a loss function from PyTorch. Parameters ---------- From 032f6ba62b7dc6eaa6bd342ee381110ec8ac72e7 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 23 Jul 2024 15:02:40 +0200 Subject: [PATCH 40/43] update config with package default --- clinicadl/losses/config.py | 22 +++++++----- clinicadl/losses/factory.py | 12 ++++--- clinicadl/utils/factories.py | 34 +++++++++++++++++++ tests/unittests/losses/test_config.py | 1 + tests/unittests/losses/test_factory.py | 19 +++++++---- tests/unittests/utils/test_factories_utils.py | 10 ++++++ 6 files changed, 77 insertions(+), 21 deletions(-) create mode 100644 clinicadl/utils/factories.py create mode 100644 tests/unittests/utils/test_factories_utils.py diff --git a/clinicadl/losses/config.py b/clinicadl/losses/config.py index b4ebbb931..0bd658577 100644 --- a/clinicadl/losses/config.py +++ b/clinicadl/losses/config.py @@ -1,8 +1,10 @@ -from typing import List, Optional +from enum import Enum +from typing import List, Optional, Union from pydantic import BaseModel, ConfigDict, NonNegativeFloat, PositiveFloat from clinicadl.utils.enum import BaseEnum +from clinicadl.utils.factories import DefaultFromLibrary class ClassificationLoss(str, BaseEnum): @@ -30,7 +32,7 @@ def _missing_(cls, value): ) -class Reduction(str, BaseEnum): +class Reduction(str, Enum): """Supported reduction method in ClinicaDL.""" NONE = "none" @@ -38,7 +40,7 @@ class Reduction(str, BaseEnum): SUM = "sum" -class Order(int, BaseEnum): +class Order(int, Enum): """Supported order of L-norm for MultiMarginLoss.""" ONE = 1 @@ -49,12 +51,14 @@ class LossConfig(BaseModel): """Config class to configure the loss function.""" loss: ImplementedLoss = ImplementedLoss.MSE - reduction: Reduction = Reduction.MEAN - delta: PositiveFloat = 1.0 - beta: PositiveFloat = 1.0 - p: Order = Order.ONE - margin: float = 1.0 - weight: Optional[List[NonNegativeFloat]] = None + reduction: Union[Reduction, DefaultFromLibrary] = DefaultFromLibrary.YES + delta: Union[PositiveFloat, DefaultFromLibrary] = DefaultFromLibrary.YES + beta: Union[PositiveFloat, DefaultFromLibrary] = DefaultFromLibrary.YES + p: Union[Order, DefaultFromLibrary] = DefaultFromLibrary.YES + margin: Union[float, DefaultFromLibrary] = DefaultFromLibrary.YES + weight: Union[ + Optional[List[NonNegativeFloat]], DefaultFromLibrary + ] = DefaultFromLibrary.YES # pydantic config model_config = ConfigDict( validate_assignment=True, use_enum_values=True, validate_default=True diff --git a/clinicadl/losses/factory.py b/clinicadl/losses/factory.py index f918b842e..eab95bb4f 100644 --- a/clinicadl/losses/factory.py +++ b/clinicadl/losses/factory.py @@ -4,6 +4,8 @@ import torch +from clinicadl.utils.factories import DefaultFromLibrary, get_args_and_defaults + from .config import LossConfig @@ -25,13 +27,13 @@ def get_loss_function(config: 
LossConfig) -> Tuple[torch.nn.Module, Dict[str, An loss function. """ loss_class = getattr(torch.nn, config.loss) - expected_args = inspect.getfullargspec(loss_class).args - config_dict = { - arg: v for arg, v in config.model_dump().items() if arg in expected_args - } + expected_args, config_dict = get_args_and_defaults(loss_class.__init__) + for arg, value in config.model_dump().items(): + if arg in expected_args and value != DefaultFromLibrary.YES: + config_dict[arg] = value config_dict_ = deepcopy(config_dict) - if config.weight is not None: + if "weight" in config_dict and config_dict["weight"] is not None: config_dict_["weight"] = torch.Tensor(config_dict_["weight"]) loss = loss_class(**config_dict_) diff --git a/clinicadl/utils/factories.py b/clinicadl/utils/factories.py new file mode 100644 index 000000000..055507aaf --- /dev/null +++ b/clinicadl/utils/factories.py @@ -0,0 +1,34 @@ +import inspect +from enum import Enum +from typing import Any, Callable, Dict, List, Tuple + + +class DefaultFromLibrary(str, Enum): + YES = "DefaultFromLibrary" + + +def get_args_and_defaults(func: Callable) -> Tuple[List[str], Dict[str, Any]]: + """ + Gets the arguments of a function, as well as the default + values possibly attached to them. + + Parameters + ---------- + func : Callable + The function. + + Returns + ------- + List[str] + The names of the arguments. + Dict[str, Any] + The default values in a dict. + """ + signature = inspect.signature(func) + args = list(signature.parameters.keys()) + defaults = { + k: v.default + for k, v in signature.parameters.items() + if v.default is not inspect.Parameter.empty + } + return args, defaults diff --git a/tests/unittests/losses/test_config.py b/tests/unittests/losses/test_config.py index b18ba3e4c..6ba2de8dc 100644 --- a/tests/unittests/losses/test_config.py +++ b/tests/unittests/losses/test_config.py @@ -5,6 +5,7 @@ def test_LossConfig(): from clinicadl.losses import LossConfig LossConfig(reduction="none", p=2, weight=[0.1, 0.1, 0.8]) + LossConfig(loss="SmoothL1Loss", margin=10.0, delta=2.0, beta=3.0) with pytest.raises(ValueError): LossConfig(loss="abc") with pytest.raises(ValueError): diff --git a/tests/unittests/losses/test_factory.py b/tests/unittests/losses/test_factory.py index f76e43199..9ddb2b70c 100644 --- a/tests/unittests/losses/test_factory.py +++ b/tests/unittests/losses/test_factory.py @@ -1,22 +1,27 @@ -import pytest +from torch import Tensor +from torch.nn import MultiMarginLoss +from clinicadl.losses import ImplementedLoss, LossConfig, get_loss_function -def test_get_loss_function(): - from torch.nn import MultiMarginLoss - - from clinicadl.losses import ImplementedLoss, LossConfig, get_loss_function +def test_get_loss_function(): for loss in [e.value for e in ImplementedLoss]: config = LossConfig(loss=loss) get_loss_function(config) - config = LossConfig(loss="MultiMarginLoss", reduction="sum", weight=[1, 2, 3]) + config = LossConfig(loss="MultiMarginLoss", reduction="sum", weight=[1, 2, 3], p=2) loss, config_dict = get_loss_function(config) assert isinstance(loss, MultiMarginLoss) + assert loss.reduction == "sum" + assert loss.p == 2 + assert loss.margin == 1.0 + assert (loss.weight == Tensor([1, 2, 3])).all() assert config_dict == { "loss": "MultiMarginLoss", "reduction": "sum", - "p": 1, + "p": 2, "margin": 1.0, "weight": [1, 2, 3], + "size_average": None, + "reduce": None, } diff --git a/tests/unittests/utils/test_factories_utils.py b/tests/unittests/utils/test_factories_utils.py new file mode 100644 index 000000000..4cb864150 
--- /dev/null +++ b/tests/unittests/utils/test_factories_utils.py @@ -0,0 +1,10 @@ +from clinicadl.utils.factories import get_args_and_defaults + + +def test_get_default_args(): + def f(a, b="b", c=0, d=None): + return None + + args, defaults = get_args_and_defaults(f) + assert args == ["a", "b", "c", "d"] + assert defaults == {"b": "b", "c": 0, "d": None} From 3989db52b591d5ea82b5728772c334c2427c6fa9 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Tue, 23 Jul 2024 15:55:34 +0200 Subject: [PATCH 41/43] add last params in config --- clinicadl/losses/config.py | 22 +++++++++++++++++++++- tests/unittests/losses/test_config.py | 7 ++++++- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/clinicadl/losses/config.py b/clinicadl/losses/config.py index 0bd658577..26d5067ab 100644 --- a/clinicadl/losses/config.py +++ b/clinicadl/losses/config.py @@ -1,7 +1,14 @@ from enum import Enum from typing import List, Optional, Union -from pydantic import BaseModel, ConfigDict, NonNegativeFloat, PositiveFloat +from pydantic import ( + BaseModel, + ConfigDict, + NonNegativeFloat, + NonNegativeInt, + PositiveFloat, + field_validator, +) from clinicadl.utils.enum import BaseEnum from clinicadl.utils.factories import DefaultFromLibrary @@ -59,7 +66,20 @@ class LossConfig(BaseModel): weight: Union[ Optional[List[NonNegativeFloat]], DefaultFromLibrary ] = DefaultFromLibrary.YES + ignore_index: Union[NonNegativeInt, DefaultFromLibrary] = DefaultFromLibrary.YES + label_smoothing: Union[ + NonNegativeFloat, DefaultFromLibrary + ] = DefaultFromLibrary.YES # pydantic config model_config = ConfigDict( validate_assignment=True, use_enum_values=True, validate_default=True ) + + @field_validator("label_smoothing") + @classmethod + def validator_label_smoothing(cls, v): + if isinstance(v, float): + assert ( + 0 <= v <= 1 + ), f"label_smoothing must be between 0 and 1 but it has been set to {v}." 
+ return v diff --git a/tests/unittests/losses/test_config.py b/tests/unittests/losses/test_config.py index 6ba2de8dc..d105a0da5 100644 --- a/tests/unittests/losses/test_config.py +++ b/tests/unittests/losses/test_config.py @@ -1,12 +1,17 @@ import pytest +from pydantic import ValidationError def test_LossConfig(): from clinicadl.losses import LossConfig LossConfig(reduction="none", p=2, weight=[0.1, 0.1, 0.8]) - LossConfig(loss="SmoothL1Loss", margin=10.0, delta=2.0, beta=3.0) + LossConfig( + loss="SmoothL1Loss", margin=10.0, delta=2.0, beta=3.0, label_smoothing=0.5 + ) with pytest.raises(ValueError): LossConfig(loss="abc") with pytest.raises(ValueError): LossConfig(weight=[0.1, -0.1, 0.8]) + with pytest.raises(ValidationError): + LossConfig(label_smoothing=1.1) From b8d0204b09cbe2cd0292897a77c1a29bb32c9a4a Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Fri, 2 Aug 2024 12:01:52 +0200 Subject: [PATCH 42/43] change dict to config and add losses --- clinicadl/losses/config.py | 45 ++++++++++++++++++++++---- clinicadl/losses/factory.py | 24 ++++++-------- tests/unittests/losses/test_config.py | 28 +++++++++++++--- tests/unittests/losses/test_factory.py | 17 ++++------ 4 files changed, 78 insertions(+), 36 deletions(-) diff --git a/clinicadl/losses/config.py b/clinicadl/losses/config.py index 26d5067ab..f476f8d35 100644 --- a/clinicadl/losses/config.py +++ b/clinicadl/losses/config.py @@ -5,9 +5,9 @@ BaseModel, ConfigDict, NonNegativeFloat, - NonNegativeInt, PositiveFloat, field_validator, + model_validator, ) from clinicadl.utils.enum import BaseEnum @@ -17,19 +17,24 @@ class ClassificationLoss(str, BaseEnum): """Losses that can be used only for classification.""" - CROSS_ENTROPY = "CrossEntropyLoss" - MULTI_MARGIN = "MultiMarginLoss" + CROSS_ENTROPY = "CrossEntropyLoss" # for multi-class classification, inputs are unormalized logits and targets are int (same dimension without the class channel) + MULTI_MARGIN = "MultiMarginLoss" # no particular restriction on the input, targets are int (same dimension without th class channel) + BCE = "BCELoss" # for binary classification, targets and inputs should be probabilities and have same shape + BCE_LOGITS = "BCEWithLogitsLoss" # for binary classification, targets should be probabilities and inputs logits, and have the same shape. 
More stable numerically -class ImplementedLoss(str, BaseEnum): +class ImplementedLoss(str, Enum): """Implemented losses in ClinicaDL.""" CROSS_ENTROPY = "CrossEntropyLoss" MULTI_MARGIN = "MultiMarginLoss" + BCE = "BCELoss" + BCE_LOGITS = "BCEWithLogitsLoss" L1 = "L1Loss" MSE = "MSELoss" HUBER = "HuberLoss" SMOOTH_L1 = "SmoothL1Loss" + KLDIV = "KLDivLoss" # if log_target=False, target must be positive @classmethod def _missing_(cls, value): @@ -65,11 +70,15 @@ class LossConfig(BaseModel): margin: Union[float, DefaultFromLibrary] = DefaultFromLibrary.YES weight: Union[ Optional[List[NonNegativeFloat]], DefaultFromLibrary - ] = DefaultFromLibrary.YES - ignore_index: Union[NonNegativeInt, DefaultFromLibrary] = DefaultFromLibrary.YES + ] = DefaultFromLibrary.YES # a weight for each class + ignore_index: Union[int, DefaultFromLibrary] = DefaultFromLibrary.YES label_smoothing: Union[ NonNegativeFloat, DefaultFromLibrary ] = DefaultFromLibrary.YES + log_target: Union[bool, DefaultFromLibrary] = DefaultFromLibrary.YES + pos_weight: Union[ + Optional[List[NonNegativeFloat]], DefaultFromLibrary + ] = DefaultFromLibrary.YES # a positive weight for each class # pydantic config model_config = ConfigDict( validate_assignment=True, use_enum_values=True, validate_default=True @@ -83,3 +92,27 @@ def validator_label_smoothing(cls, v): 0 <= v <= 1 ), f"label_smoothing must be between 0 and 1 but it has been set to {v}." return v + + @field_validator("ignore_index") + @classmethod + def validator_ignore_index(cls, v): + if isinstance(v, int): + assert ( + v == -100 or 0 <= v + ), "ignore_index must be a positive int (or -100 when disabled)." + return v + + @model_validator(mode="after") + def model_validator(self): + if ( + self.loss == ImplementedLoss.BCE_LOGITS + and self.weight is not None + and self.weight != DefaultFromLibrary.YES + ): + raise ValueError("Cannot use weight with BCEWithLogitsLoss.") + elif ( + self.loss == ImplementedLoss.BCE + and self.weight is not None + and self.weight != DefaultFromLibrary.YES + ): + raise ValueError("Cannot use weight with BCELoss.") diff --git a/clinicadl/losses/factory.py b/clinicadl/losses/factory.py index eab95bb4f..beec18cac 100644 --- a/clinicadl/losses/factory.py +++ b/clinicadl/losses/factory.py @@ -1,4 +1,3 @@ -import inspect from copy import deepcopy from typing import Any, Dict, Tuple @@ -9,7 +8,7 @@ from .config import LossConfig -def get_loss_function(config: LossConfig) -> Tuple[torch.nn.Module, Dict[str, Any]]: +def get_loss_function(config: LossConfig) -> Tuple[torch.nn.Module, LossConfig]: """ Factory function to get a loss function from PyTorch. @@ -22,9 +21,10 @@ def get_loss_function(config: LossConfig) -> Tuple[torch.nn.Module, Dict[str, An ------- nn.Module The loss function. - Dict[str, Any] - The config dict with only the parameters relevant to the selected - loss function. + LossConfig + The updated config class: the arguments set to default will be updated + with their effective values (the default values from the library). + Useful for reproducibility. 
""" loss_class = getattr(torch.nn, config.loss) expected_args, config_dict = get_args_and_defaults(loss_class.__init__) @@ -35,16 +35,10 @@ def get_loss_function(config: LossConfig) -> Tuple[torch.nn.Module, Dict[str, An config_dict_ = deepcopy(config_dict) if "weight" in config_dict and config_dict["weight"] is not None: config_dict_["weight"] = torch.Tensor(config_dict_["weight"]) + if "pos_weight" in config_dict and config_dict["pos_weight"] is not None: + config_dict_["pos_weight"] = torch.Tensor(config_dict_["pos_weight"]) loss = loss_class(**config_dict_) - config_dict["loss"] = config.loss + updated_config = LossConfig(loss=config.loss, **config_dict) - return loss, config_dict - - -# TODO : what about them? -# "KLDivLoss", -# "BCEWithLogitsLoss", -# "VAEGaussianLoss", -# "VAEBernoulliLoss", -# "VAEContinuousBernoulliLoss", + return loss, updated_config diff --git a/tests/unittests/losses/test_config.py b/tests/unittests/losses/test_config.py index d105a0da5..cb00adbf3 100644 --- a/tests/unittests/losses/test_config.py +++ b/tests/unittests/losses/test_config.py @@ -1,17 +1,35 @@ import pytest from pydantic import ValidationError +from clinicadl.losses import LossConfig -def test_LossConfig(): - from clinicadl.losses import LossConfig - LossConfig(reduction="none", p=2, weight=[0.1, 0.1, 0.8]) - LossConfig( - loss="SmoothL1Loss", margin=10.0, delta=2.0, beta=3.0, label_smoothing=0.5 +def test_LossConfig(): + config = LossConfig( + loss="SmoothL1Loss", margin=10.0, delta=2.0, reduction="none", weight=None ) + assert config.loss == "SmoothL1Loss" + assert config.margin == 10.0 + assert config.delta == 2.0 + assert config.reduction == "none" + assert config.p == "DefaultFromLibrary" + with pytest.raises(ValueError): LossConfig(loss="abc") with pytest.raises(ValueError): LossConfig(weight=[0.1, -0.1, 0.8]) + with pytest.raises(ValueError): + LossConfig(p=3) + with pytest.raises(ValueError): + LossConfig(reduction="abc") with pytest.raises(ValidationError): LossConfig(label_smoothing=1.1) + with pytest.raises(ValidationError): + LossConfig(ignore_index=-1) + with pytest.raises(ValidationError): + LossConfig(loss="BCEWithLogitsLoss", weight=[1, 2, 3]) + with pytest.raises(ValidationError): + LossConfig(loss="BCELoss", weight=[1, 2, 3]) + + LossConfig(loss="BCELoss") + LossConfig(loss="BCEWithLogitsLoss", weight=None) diff --git a/tests/unittests/losses/test_factory.py b/tests/unittests/losses/test_factory.py index 9ddb2b70c..b4a2c1880 100644 --- a/tests/unittests/losses/test_factory.py +++ b/tests/unittests/losses/test_factory.py @@ -10,18 +10,15 @@ def test_get_loss_function(): get_loss_function(config) config = LossConfig(loss="MultiMarginLoss", reduction="sum", weight=[1, 2, 3], p=2) - loss, config_dict = get_loss_function(config) + loss, updated_config = get_loss_function(config) assert isinstance(loss, MultiMarginLoss) assert loss.reduction == "sum" assert loss.p == 2 assert loss.margin == 1.0 assert (loss.weight == Tensor([1, 2, 3])).all() - assert config_dict == { - "loss": "MultiMarginLoss", - "reduction": "sum", - "p": 2, - "margin": 1.0, - "weight": [1, 2, 3], - "size_average": None, - "reduce": None, - } + + assert updated_config.loss == "MultiMarginLoss" + assert updated_config.reduction == "sum" + assert updated_config.p == 2 + assert updated_config.margin == 1.0 + assert updated_config.weight == [1, 2, 3] From 5490ab5239c4814e2ac34a1b49027470409e8239 Mon Sep 17 00:00:00 2001 From: thibaultdvx <154365476+thibaultdvx@users.noreply.github.com> Date: Fri, 2 Aug 2024 12:09:22 
+0200 Subject: [PATCH 43/43] add unit test --- tests/unittests/losses/test_factory.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/unittests/losses/test_factory.py b/tests/unittests/losses/test_factory.py index b4a2c1880..5ac786deb 100644 --- a/tests/unittests/losses/test_factory.py +++ b/tests/unittests/losses/test_factory.py @@ -1,5 +1,5 @@ from torch import Tensor -from torch.nn import MultiMarginLoss +from torch.nn import BCEWithLogitsLoss, MultiMarginLoss from clinicadl.losses import ImplementedLoss, LossConfig, get_loss_function @@ -22,3 +22,9 @@ def test_get_loss_function(): assert updated_config.p == 2 assert updated_config.margin == 1.0 assert updated_config.weight == [1, 2, 3] + + config = LossConfig(loss="BCEWithLogitsLoss", pos_weight=[1, 2, 3]) + loss, updated_config = get_loss_function(config) + assert isinstance(loss, BCEWithLogitsLoss) + assert (loss.pos_weight == Tensor([1, 2, 3])).all() + assert updated_config.pos_weight == [1, 2, 3]
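
Taken together, patches 42 and 43 change the contract of the loss factory: get_loss_function now returns the instantiated torch.nn criterion together with a fully resolved LossConfig instead of a plain dict. A minimal usage sketch of the resulting API (the tensor shapes and the pos_weight value are illustrative, and the resolved-default behaviour is assumed from the docstring and tests above):

    import torch

    from clinicadl.losses import LossConfig, get_loss_function

    # Arguments that are not set explicitly stay at DefaultFromLibrary.YES
    # and are resolved to PyTorch's own defaults by the factory.
    config = LossConfig(loss="BCEWithLogitsLoss", pos_weight=[2.0])
    criterion, resolved = get_loss_function(config)

    # `resolved` records the effective values, e.g. the PyTorch default
    # reduction="mean"; it can be dumped (resolved.model_dump()) and stored
    # alongside an experiment for reproducibility.
    logits = torch.randn(8, 1, requires_grad=True)  # unnormalized model outputs
    targets = torch.randint(0, 2, (8, 1)).float()   # binary targets
    loss = criterion(logits, targets)               # pos_weight broadcasts over the batch
    loss.backward()

Because the second return value is a validated LossConfig rather than a loose dict, downstream code gets the same validation guarantees (e.g. the weight/BCE incompatibility check) on the stored configuration as on the user-supplied one.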