Skip to content

Commit

Permalink
MAINT: Use ruff for linting and formatting (aramis-lab#1032)
Browse files · Browse the repository at this point in the history
* MAINT: Replace black, flake8 and isort by ruff

* MAINT: Apply fixes suggested by ruff

* STY: Drop duplicate module import
  • Branch information (not captured in this snapshot)
ghisvail authored Dec 14, 2023
1 parent 1916259 commit f1079e0
Show file tree
Hide file tree
Showing 11 changed files with 47 additions and 68 deletions.
18 changes: 5 additions & 13 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,20 +3,12 @@ repos:
rev: v4.4.0
hooks:
- id: check-yaml
- repo: https://github.com/psf/black-pre-commit-mirror
rev: '23.11.0'
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.1.7
hooks:
- id: black
- repo: https://github.com/PyCQA/isort
rev: '5.12.0'
hooks:
- id: isort
- repo: https://github.com/PyCQA/flake8
rev: '6.1.0'
hooks:
- id: flake8
additional_dependencies:
- flake8-pyproject
- id: ruff
args: [ --fix ]
- id: ruff-format
- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
hooks:
Expand Down
2 changes: 1 addition & 1 deletion clinica/iotools/bids_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -402,7 +402,7 @@ def create_scans_dict(
# Some flutemeta lines contain a non-coded string value at the second-to-last position. This value
# contains a comma which adds an extra column and shifts the remaining values to the right. In this
# case, we just remove the erroneous content and replace it with -4 which AIBL uses as n/a value.
on_bad_lines = (
on_bad_lines = ( # noqa: E731
lambda bad_line: bad_line[:-3] + [-4, bad_line[-1]]
if "flutemeta" in file_path and study_name == "AIBL"
else "error"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -245,11 +245,11 @@ def build_core_nodes(self):
import nipype.interfaces.mrtrix3 as mrtrix3
import nipype.interfaces.utility as niu
import nipype.pipeline.engine as npe
from nipype.interfaces.mrtrix.preprocess import MRTransform
from nipype.interfaces.mrtrix3 import (
ConstrainedSphericalDeconvolution,
Tractography,
)
from nipype.interfaces.mrtrix.preprocess import MRTransform

import clinica.pipelines.dwi_connectome.dwi_connectome_utils as utils
from clinica.utils.exceptions import ClinicaCAPSError
Expand Down
2 changes: 1 addition & 1 deletion clinica/pipelines/dwi_dti/dwi_dti_pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -213,8 +213,8 @@ def build_core_nodes(self):
import nipype.interfaces.utility as nutil
import nipype.pipeline.engine as npe
from nipype.interfaces.ants import ApplyTransforms, RegistrationSynQuick
from nipype.interfaces.mrtrix3 import TensorMetrics
from nipype.interfaces.mrtrix.preprocess import DWI2Tensor
from nipype.interfaces.mrtrix3 import TensorMetrics

from clinica.utils.check_dependency import check_environment_variable
from clinica.utils.dwi import extract_bids_identifier_from_filename
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -279,7 +279,6 @@ def build_core_nodes(self):
import nipype.interfaces.fsl as fsl
import nipype.interfaces.mrtrix3 as mrtrix3
import nipype.interfaces.utility as nutil
import nipype.interfaces.utility as niu
import nipype.pipeline.engine as npe

from clinica.pipelines.dwi_preprocessing_using_t1.dwi_preprocessing_using_t1_workflows import (
Expand Down Expand Up @@ -341,7 +340,7 @@ def build_core_nodes(self):
# =======================
# Compute average b0 on corrected dataset (for brain mask extraction)
compute_avg_b0 = npe.Node(
niu.Function(
nutil.Function(
input_names=["in_dwi", "in_bval"],
output_names=["out_b0_average"],
function=compute_average_b0,
Expand Down
4 changes: 2 additions & 2 deletions clinica/pipelines/machine_learning/algorithm.py
Original file line number Diff line number Diff line change
Expand Up @@ -457,9 +457,9 @@ def _select_best_parameter(self, async_result):
best_min_samples_split = int(round(np.mean([x[2] for x in params_list])))

def max_feature_to_float(m):
if type(m) is float:
if isinstance(m, float):
return m
if type(m) is int:
if isinstance(m, int):
return float(m) / float(self._x.shape[1])
if m == "auto" or m == "sqrt":
return np.sqrt(self._x.shape[1]) / float(self._x.shape[1])
Expand Down
4 changes: 1 addition & 3 deletions clinica/pipelines/machine_learning/vertex_based_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,7 @@ def load_data(mgh_list):

# Construct 0-matrix with the good size, based on the size of the surfaces
# provided by the first subject
N_vertex = (
[]
) # array containing the surface size of the different surfaces of a subject
N_vertex = [] # array containing the surface size of the different surfaces of a subject
sample = mgh_list[0]
for i in range(len(sample)):
N_vertex.append(np.max(nib.load(sample[i]).header.get_data_shape()))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -262,8 +262,8 @@ def roots_poly(C):
]
)
# two roots
rts1 = (-C[1, :] + delta) * (1 / ((2 * C[0, :])))
rts2 = (-C[1, :] - delta) * (1 / ((2 * C[0, :])))
rts1 = (-C[1, :] + delta) * (1 / (2 * C[0, :]))
rts2 = (-C[1, :] - delta) * (1 / (2 * C[0, :]))
rts = np.array([rts1, rts2])

elif C.shape[0] < 5:
Expand Down
39 changes: 13 additions & 26 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -93,35 +93,22 @@ clinica = "clinica.cmdline:main"
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

[tool.black]
[tool.ruff]
target-version = "py38"
line-length = 88
target-version = ['py36', 'py37', 'py38']
include = '\.pyi?$'
force-exclude = '''
/(
\.eggs
| \.git
| \.hg
| \.mypy_cache
| \.tox
| \.venv
| \.pytest_cache
| _build
| buck-out
| build
| dist
| docs
| clinica/lib
)/
'''

[tool.isort]
profile = "black"
[tool.ruff.lint]
select = ["E", "W", "I001"]
ignore = ["E203", "E501"]

[tool.flake8]
max-line-length = 88
select = ["E", "W"]
extend-ignore = ["E203", "E501", "W503"]
[tool.ruff.lint.isort]
known-first-party = ["clinica"]

[tool.ruff.format]
quote-style = "double"
indent-style = "space"
skip-magic-trailing-comma = false
line-ending = "auto"

[tool.codespell]
summary = ''
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -360,22 +360,25 @@ def test_prepare_reference_b0(tmp_path, mocker):
assert reference_dataset.dwi == tmp_path / "sub-foo_ses-bar_dwi_merged.nii.gz"
assert reference_dataset.b_values == tmp_path / "sub-foo_ses-bar_dwi_merged.bval"
assert reference_dataset.b_vectors == tmp_path / "sub-foo_ses-bar_dwi_merged.bvec"
assert sorted([p.name for p in tmp_path.iterdir()]) == [
"reference_b0_volume.nii.gz", # This is the 3D volume corresponding to the co-registered volumes for b<=low_b
"sub-foo_ses-bar_dwi.bval", # Initial bvalue file
"sub-foo_ses-bar_dwi.bvec", # Initial bvectors file
"sub-foo_ses-bar_dwi.nii.gz", # Initial dwi image file
"sub-foo_ses-bar_dwi_large_b.bval", # bvalue file corresponding to DWI volumes with b>low_b
"sub-foo_ses-bar_dwi_large_b.bvec", # bvectors file corresponding to DWI volumes with b>low_b
"sub-foo_ses-bar_dwi_large_b.nii.gz", # DWI image file holding volumes for which b>low_b
"sub-foo_ses-bar_dwi_merged.bval", # bvalue file corresponding to the merged DWI dataset
"sub-foo_ses-bar_dwi_merged.bvec", # bvectors file corresponding to the merged DWI dataset
"sub-foo_ses-bar_dwi_merged.nii.gz", # image file holding the merged DWI volumes
"sub-foo_ses-bar_dwi_small_b.bval", # bvalue file corresponding to the volumes for which b<=low_b
"sub-foo_ses-bar_dwi_small_b.bvec", # bvectors file corresponding to the volumes for which b<=low_b
"sub-foo_ses-bar_dwi_small_b.nii.gz", # DWI image file holding volumes for which b<=low_b
"tmp", # Working directory containing all the stuff generated by b0_flirt_pipeline
]
assert (
sorted([p.name for p in tmp_path.iterdir()])
== [
"reference_b0_volume.nii.gz", # This is the 3D volume corresponding to the co-registered volumes for b<=low_b
"sub-foo_ses-bar_dwi.bval", # Initial bvalue file
"sub-foo_ses-bar_dwi.bvec", # Initial bvectors file
"sub-foo_ses-bar_dwi.nii.gz", # Initial dwi image file
"sub-foo_ses-bar_dwi_large_b.bval", # bvalue file corresponding to DWI volumes with b>low_b
"sub-foo_ses-bar_dwi_large_b.bvec", # bvectors file corresponding to DWI volumes with b>low_b
"sub-foo_ses-bar_dwi_large_b.nii.gz", # DWI image file holding volumes for which b>low_b
"sub-foo_ses-bar_dwi_merged.bval", # bvalue file corresponding to the merged DWI dataset
"sub-foo_ses-bar_dwi_merged.bvec", # bvectors file corresponding to the merged DWI dataset
"sub-foo_ses-bar_dwi_merged.nii.gz", # image file holding the merged DWI volumes
"sub-foo_ses-bar_dwi_small_b.bval", # bvalue file corresponding to the volumes for which b<=low_b
"sub-foo_ses-bar_dwi_small_b.bvec", # bvectors file corresponding to the volumes for which b<=low_b
"sub-foo_ses-bar_dwi_small_b.nii.gz", # DWI image file holding volumes for which b<=low_b
"tmp", # Working directory containing all the stuff generated by b0_flirt_pipeline
]
)
ref_b0_volume = nib.load(tmp_path / "reference_b0_volume.nii.gz")
assert_array_equal(ref_b0_volume.get_fdata(), 5.0 * np.ones((5, 5, 5, 1)))
large_b_image = nib.load(tmp_path / "sub-foo_ses-bar_dwi_large_b.nii.gz")
Expand Down
2 changes: 1 addition & 1 deletion test/unittests/utils/test_freesurfer.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,10 @@
import pandas as pd
import pytest

from clinica.utils.freesurfer import _get_prefix # noqa
from clinica.utils.freesurfer import (
ColumnType,
InfoType,
_get_prefix, # noqa
extract_image_id_from_longitudinal_segmentation,
)

Expand Down

0 comments on commit f1079e0

Please sign in to comment.