From 7e35b448737bbc18cc4060bdb014e4270cc9d581 Mon Sep 17 00:00:00 2001 From: camillebrianceau <57992134+camillebrianceau@users.noreply.github.com> Date: Mon, 24 Jun 2024 16:48:27 +0200 Subject: [PATCH] Some cleaning (#629) * remove check_gpu from cmdline utils and adapt qc to config --- clinicadl/cmdline.py | 2 +- .../modules_options/computational.py | 4 +- .../pipelines/quality_check/cli.py} | 0 .../quality_check/pet_linear/cli.py | 4 +- .../pipelines}/quality_check/t1_linear/cli.py | 35 +++++++++------- .../pipelines}/quality_check/t1_volume/cli.py | 2 +- clinicadl/config/config/computational.py | 10 +++-- .../quality_check/t1_linear/quality_check.py | 40 ++++++++----------- clinicadl/utils/cmdline_utils.py | 10 ----- clinicadl/utils/maps_manager/maps_manager.py | 7 ---- 10 files changed, 51 insertions(+), 63 deletions(-) rename clinicadl/{quality_check/qc_cli.py => commandline/pipelines/quality_check/cli.py} (100%) rename clinicadl/{ => commandline/pipelines}/quality_check/pet_linear/cli.py (94%) rename clinicadl/{ => commandline/pipelines}/quality_check/t1_linear/cli.py (66%) rename clinicadl/{ => commandline/pipelines}/quality_check/t1_volume/cli.py (89%) delete mode 100644 clinicadl/utils/cmdline_utils.py diff --git a/clinicadl/cmdline.py b/clinicadl/cmdline.py index f38d08ace..279788998 100644 --- a/clinicadl/cmdline.py +++ b/clinicadl/cmdline.py @@ -10,9 +10,9 @@ from clinicadl.commandline.pipelines.prepare_data.prepare_data_from_bids_cli import ( cli as prepare_data_from_bids_cli, ) +from clinicadl.commandline.pipelines.quality_check.cli import cli as qc_cli from clinicadl.commandline.pipelines.train.cli import cli as train_cli from clinicadl.hugging_face.hugging_face_cli import cli as hf_cli -from clinicadl.quality_check.qc_cli import cli as qc_cli from clinicadl.random_search.random_search_cli import cli as random_search_cli from clinicadl.tsvtools.cli import cli as tsvtools_cli from clinicadl.utils.logger import setup_logging diff --git a/clinicadl/commandline/modules_options/computational.py b/clinicadl/commandline/modules_options/computational.py index 82333e9c9..221d25f8a 100644 --- a/clinicadl/commandline/modules_options/computational.py +++ b/clinicadl/commandline/modules_options/computational.py @@ -19,8 +19,8 @@ "this flag is already set to FSDP to that the zero flag is never actually removed.", ) gpu = click.option( - "--gpu/--no-gpu", - default=get_default("gpu", ComputationalConfig), + "--no-gpu", + is_flag=True, help="Use GPU by default. 
Please specify `--no-gpu` to force using CPU.", show_default=True, ) diff --git a/clinicadl/quality_check/qc_cli.py b/clinicadl/commandline/pipelines/quality_check/cli.py similarity index 100% rename from clinicadl/quality_check/qc_cli.py rename to clinicadl/commandline/pipelines/quality_check/cli.py diff --git a/clinicadl/quality_check/pet_linear/cli.py b/clinicadl/commandline/pipelines/quality_check/pet_linear/cli.py similarity index 94% rename from clinicadl/quality_check/pet_linear/cli.py rename to clinicadl/commandline/pipelines/quality_check/pet_linear/cli.py index c47226a20..f6104aebb 100644 --- a/clinicadl/quality_check/pet_linear/cli.py +++ b/clinicadl/commandline/pipelines/quality_check/pet_linear/cli.py @@ -47,7 +47,9 @@ def cli( """ from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig - from .quality_check import quality_check as pet_linear_qc + from .....quality_check.pet_linear.quality_check import ( + quality_check as pet_linear_qc, + ) config = CapsDatasetConfig.from_preprocessing_and_extraction_method( caps_directory=caps_directory, diff --git a/clinicadl/quality_check/t1_linear/cli.py b/clinicadl/commandline/pipelines/quality_check/t1_linear/cli.py similarity index 66% rename from clinicadl/quality_check/t1_linear/cli.py rename to clinicadl/commandline/pipelines/quality_check/t1_linear/cli.py index 32c7b74d5..eff8900f8 100755 --- a/clinicadl/quality_check/t1_linear/cli.py +++ b/clinicadl/commandline/pipelines/quality_check/t1_linear/cli.py @@ -2,8 +2,11 @@ import click +from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig from clinicadl.commandline import arguments from clinicadl.commandline.modules_options import computational, data, dataloader +from clinicadl.config.config.computational import ComputationalConfig +from clinicadl.utils.enum import ExtractionMethod, Preprocessing @click.command(name="t1-linear", no_args_is_help=True) @@ -31,7 +34,6 @@ @click.option( "--use_tensor", type=bool, - default=False, is_flag=True, help="Flag allowing the pipeline to run on the extracted tensors and not on the nifti images", ) @@ -42,7 +44,7 @@ def cli( threshold, batch_size, n_proc, - gpu, + no_gpu, amp, network, use_tensor, @@ -54,23 +56,28 @@ def cli( OUTPUT_TSV is the path to the tsv file where results will be saved. 
""" - from clinicadl.utils.cmdline_utils import check_gpu - - if gpu: - check_gpu() + from clinicadl.quality_check.t1_linear.quality_check import ( + quality_check as linear_qc, + ) - from .quality_check import quality_check as linear_qc + computational_config = ComputationalConfig(amp=amp, gpu=not no_gpu) + config = CapsDatasetConfig.from_preprocessing_and_extraction_method( + caps_directory=caps_directory, + extraction=ExtractionMethod.IMAGE, + preprocessing_type=Preprocessing.T1_LINEAR, + preprocessing=Preprocessing.T1_LINEAR, + use_uncropped_image=use_uncropped_image, + data_tsv=participants_tsv, + n_proc=n_proc, + batch_size=batch_size, + use_tensor=use_tensor, + ) linear_qc( - caps_directory, output_path=results_tsv, - tsv_path=participants_tsv, threshold=threshold, - batch_size=batch_size, - n_proc=n_proc, - gpu=gpu, - amp=amp, network=network, use_tensor=use_tensor, - use_uncropped_image=use_uncropped_image, + config=config, + computational_config=computational_config, ) diff --git a/clinicadl/quality_check/t1_volume/cli.py b/clinicadl/commandline/pipelines/quality_check/t1_volume/cli.py similarity index 89% rename from clinicadl/quality_check/t1_volume/cli.py rename to clinicadl/commandline/pipelines/quality_check/t1_volume/cli.py index 67e064bb2..2e2ea3c18 100644 --- a/clinicadl/quality_check/t1_volume/cli.py +++ b/clinicadl/commandline/pipelines/quality_check/t1_volume/cli.py @@ -23,7 +23,7 @@ def cli( GROUP_LABEL is the group associated to the gray matter DARTEL template in CAPS_DIRECTORY. """ - from .quality_check import quality_check as volume_qc + from .....quality_check.t1_volume.quality_check import quality_check as volume_qc volume_qc( caps_directory, diff --git a/clinicadl/config/config/computational.py b/clinicadl/config/config/computational.py index 5112b48ef..fb765c7ad 100644 --- a/clinicadl/config/config/computational.py +++ b/clinicadl/config/config/computational.py @@ -3,7 +3,6 @@ from pydantic import BaseModel, ConfigDict, model_validator from typing_extensions import Self -from clinicadl.utils.cmdline_utils import check_gpu from clinicadl.utils.exceptions import ClinicaDLArgumentError logger = getLogger("clinicadl.computational_config") @@ -19,9 +18,14 @@ class ComputationalConfig(BaseModel): model_config = ConfigDict(validate_assignment=True) @model_validator(mode="after") - def validator_gpu(self) -> Self: + def check_gpu(self) -> Self: if self.gpu: - check_gpu() + import torch + + if not torch.cuda.is_available(): + raise ClinicaDLArgumentError( + "No GPU is available. To run on CPU, please set gpu to false or add the --no-gpu flag if you use the commandline." + ) elif self.amp: raise ClinicaDLArgumentError( "AMP is designed to work with modern GPUs. Please add the --gpu flag." 
diff --git a/clinicadl/quality_check/t1_linear/quality_check.py b/clinicadl/quality_check/t1_linear/quality_check.py index 598ae6f28..f0d82f980 100755 --- a/clinicadl/quality_check/t1_linear/quality_check.py +++ b/clinicadl/quality_check/t1_linear/quality_check.py @@ -4,6 +4,7 @@ from logging import getLogger from pathlib import Path +from typing import Optional import pandas as pd import torch @@ -11,9 +12,9 @@ from torch.utils.data import DataLoader from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig +from clinicadl.config.config.computational import ComputationalConfig from clinicadl.generate.generate_utils import load_and_check_tsv from clinicadl.utils.clinica_utils import RemoteFileStructure, fetch_file -from clinicadl.utils.enum import ExtractionMethod, Preprocessing from clinicadl.utils.exceptions import ClinicaDLArgumentError from .models import resnet_darq_qc_18 as darq_r18 @@ -25,17 +26,12 @@ def quality_check( - caps_dir: Path, + config: CapsDatasetConfig, output_path: Path, - tsv_path: Path = None, threshold: float = 0.5, - batch_size: int = 1, - n_proc: int = 0, - gpu: bool = True, - amp: bool = False, network: str = "darq", - use_tensor: bool = False, - use_uncropped_image: bool = True, + use_tensor: bool = True, + computational_config: Optional[ComputationalConfig] = None, ): """ Performs t1-linear quality-check @@ -64,16 +60,9 @@ def quality_check( To use uncropped images instead of the cropped ones. """ - + if computational_config is None: + computational_config = ComputationalConfig() logger = getLogger("clinicadl.quality_check") - config = CapsDatasetConfig.from_preprocessing_and_extraction_method( - caps_directory=caps_dir, - extraction=ExtractionMethod.IMAGE, - preprocessing_type=Preprocessing.T1_LINEAR, - preprocessing=Preprocessing.T1_LINEAR, - use_uncropped_image=use_uncropped_image, - data_tsv=tsv_path, - ) if output_path.suffix != ".tsv": raise ValueError("please enter a tsv path") @@ -124,10 +113,10 @@ def quality_check( logger.debug("Loading quality check model.") model.load_state_dict(torch.load(model_file)) model.eval() - if gpu: + if computational_config.gpu: logger.debug("Working on GPU.") model = model.cuda() - elif amp: + elif computational_config.amp: raise ClinicaDLArgumentError( "AMP is designed to work with modern GPUs. Please add the --gpu flag." ) @@ -139,12 +128,15 @@ def quality_check( # Load DataFrame logger.debug("Loading data to check.") config.data.data_df = load_and_check_tsv( - tsv_path, caps_dict, output_path.resolve().parent + config.data.data_tsv, caps_dict, output_path.resolve().parent ) dataset = QCDataset(config, use_extracted_tensors=use_tensor) dataloader = DataLoader( - dataset, num_workers=n_proc, batch_size=batch_size, pin_memory=True + dataset, + num_workers=config.dataloader.n_proc, + batch_size=config.dataloader.batch_size, + pin_memory=True, ) columns = ["participant_id", "session_id", "pass_probability", "pass"] @@ -159,9 +151,9 @@ def quality_check( for data in dataloader: logger.debug(f"Processing subject {data['participant_id']}.") inputs = data["image"] - if gpu: + if computational_config.gpu: inputs = inputs.cuda() - with autocast(enabled=amp): + with autocast(enabled=computational_config.amp): outputs = softmax(model(inputs)) # We cast back to 32bits. It should be a no-op as softmax is not eligible # to fp16 and autocast is forbidden on CPU (output would be bf16 otherwise). 
diff --git a/clinicadl/utils/cmdline_utils.py b/clinicadl/utils/cmdline_utils.py deleted file mode 100644 index 4c31684c1..000000000 --- a/clinicadl/utils/cmdline_utils.py +++ /dev/null @@ -1,10 +0,0 @@ -from clinicadl.utils.exceptions import ClinicaDLArgumentError - - -def check_gpu(): - import torch - - if not torch.cuda.is_available(): - raise ClinicaDLArgumentError( - "No GPU is available. To run on CPU, please set gpu to false or add the --no-gpu flag if you use the commandline." - ) diff --git a/clinicadl/utils/maps_manager/maps_manager.py b/clinicadl/utils/maps_manager/maps_manager.py index 25601aefd..9888af3f8 100644 --- a/clinicadl/utils/maps_manager/maps_manager.py +++ b/clinicadl/utils/maps_manager/maps_manager.py @@ -15,7 +15,6 @@ ) from clinicadl.caps_dataset.extraction.utils import path_encoder from clinicadl.transforms.config import TransformsConfig -from clinicadl.utils.cmdline_utils import check_gpu from clinicadl.utils.exceptions import ( ClinicaDLArgumentError, ClinicaDLConfigurationError, @@ -423,12 +422,6 @@ def _check_args(self, parameters): f"No value was given for {arg}." ) self.parameters = add_default_values(parameters) - if self.parameters["gpu"]: - check_gpu() - elif self.parameters["amp"]: - raise ClinicaDLArgumentError( - "AMP is designed to work with modern GPUs. Please add the --gpu flag." - ) transfo_config = TransformsConfig( normalize=self.normalize,
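
Illustration (not part of the patch): a minimal sketch, assuming only the imports and signatures visible in the diff above, of how a caller uses the t1-linear quality check programmatically after this change. The `--no-gpu` option is now a plain flag, so the CLIs pass `gpu=not no_gpu`, and the CUDA availability check runs inside ComputationalConfig's Pydantic validator at construction time instead of in the removed clinicadl.utils.cmdline_utils.check_gpu. Paths and dataloader values below are hypothetical example values.

    from pathlib import Path

    from clinicadl.caps_dataset.caps_dataset_config import CapsDatasetConfig
    from clinicadl.config.config.computational import ComputationalConfig
    from clinicadl.quality_check.t1_linear.quality_check import quality_check
    from clinicadl.utils.enum import ExtractionMethod, Preprocessing
    from clinicadl.utils.exceptions import ClinicaDLArgumentError

    try:
        # gpu=True on a machine without CUDA now raises here (the mode="after"
        # model_validator); amp=True together with gpu=False raises as well.
        computational_config = ComputationalConfig(gpu=True, amp=False)
    except ClinicaDLArgumentError:
        # Equivalent of passing --no-gpu on the command line.
        computational_config = ComputationalConfig(gpu=False, amp=False)

    # Dataset and dataloader settings travel in a single CapsDatasetConfig,
    # mirroring the reworked commandline/pipelines/quality_check/t1_linear/cli.py.
    config = CapsDatasetConfig.from_preprocessing_and_extraction_method(
        caps_directory=Path("caps"),            # hypothetical CAPS folder
        extraction=ExtractionMethod.IMAGE,
        preprocessing_type=Preprocessing.T1_LINEAR,
        preprocessing=Preprocessing.T1_LINEAR,
        use_uncropped_image=False,
        data_tsv=Path("participants.tsv"),      # hypothetical participants list
        n_proc=2,
        batch_size=8,
        use_tensor=False,
    )

    quality_check(
        config=config,
        output_path=Path("qc_t1_linear.tsv"),   # must end in .tsv
        threshold=0.5,
        network="darq",
        use_tensor=False,
        computational_config=computational_config,
    )

The design choice shown here is the point of the commit: GPU validation is attached to the ComputationalConfig object itself (and re-runs on assignment, since validate_assignment=True), so every pipeline that consumes the config gets the same check without importing a separate cmdline helper.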