From 4a4df326d032807d53bd16443504251a8ae2f6de Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Wed, 17 Apr 2024 21:34:57 +0200
Subject: [PATCH 1/7] MNT: Fix deprecation error in CI (#866)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

matplotlib.cm.get_cmap → matplotlib.colormaps

https://github.com/matplotlib/matplotlib/pull/23668

Fixes #865.
---
 niworkflows/viz/plots.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/niworkflows/viz/plots.py b/niworkflows/viz/plots.py
index b1ffb219097..c194307d1ad 100644
--- a/niworkflows/viz/plots.py
+++ b/niworkflows/viz/plots.py
@@ -27,6 +27,7 @@
 import pandas as pd
 
 import matplotlib.pyplot as plt
+from matplotlib import colormaps
 from matplotlib import gridspec as mgs
 import matplotlib.cm as cm
 from matplotlib.colors import Normalize
@@ -200,9 +201,9 @@ def plot_carpet(
         legend = False
 
     if cmap is None:
-        colors = cm.get_cmap("tab10").colors
+        colors = colormaps["tab10"].colors
     elif cmap == "paired":
-        colors = list(cm.get_cmap("Paired").colors)
+        colors = list(colormaps["Paired"].colors)
         colors[0], colors[1] = colors[1], colors[0]
         colors[2], colors[7] = colors[7], colors[2]
@@ -397,7 +398,7 @@ def spikesplot(
     ntsteps = ts_z.shape[1]
 
     # Load a colormap
-    my_cmap = cm.get_cmap(cmap)
+    my_cmap = colormaps[cmap]
     norm = Normalize(vmin=0, vmax=float(nslices - 1))
     colors = [my_cmap(norm(sl)) for sl in range(nslices)]
@@ -525,7 +526,7 @@ def spikesplot_cb(position, cmap="viridis", fig=None):
    cax = fig.add_axes(position)
    cb = ColorbarBase(
        cax,
-        cmap=cm.get_cmap(cmap),
+        cmap=colormaps[cmap],
        spacing="proportional",
        orientation="horizontal",
        drawedges=False,

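A quick gloss on the migration above (a minimal sketch, not part of the patch series): `matplotlib.colormaps` is a registry with dict-style lookup that replaces `matplotlib.cm.get_cmap`, which was deprecated in Matplotlib 3.7 and removed in 3.9.

    from matplotlib import colormaps

    # Deprecated (removed in Matplotlib 3.9):
    #   cmap = matplotlib.cm.get_cmap("viridis")
    # Replacement: dict-style lookup in the global colormap registry
    cmap = colormaps["viridis"]
    rgba = cmap(0.5)  # sample the colormap at a normalized position in [0, 1]
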
From 37b53d2c857a05e38e36fbc5f8c0335e7767bd15 Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Wed, 17 Apr 2024 21:35:30 +0200
Subject: [PATCH 2/7] STY: Apply assorted ruff/refurb rules (#867)

* STY: Apply ruff/refurb rule (FURB118)

  FURB118 Use `operator.or_` instead of defining a lambda
  FURB118 Use `operator.add` instead of defining a lambda

* STY: Apply ruff/refurb rule (FURB148)

  FURB148 `enumerate` index is unused, use `for x in y` instead

* STY: Apply ruff/refurb rule (FURB168)

  FURB168 Prefer `is` operator over `isinstance` to check if an object is `None`
---
 niworkflows/interfaces/confounds.py | 11 ++++++-----
 niworkflows/utils/connections.py    |  2 +-
 niworkflows/viz/plots.py            |  2 +-
 3 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/niworkflows/interfaces/confounds.py b/niworkflows/interfaces/confounds.py
index 38df70f8fb2..6fa60c86f1c 100644
--- a/niworkflows/interfaces/confounds.py
+++ b/niworkflows/interfaces/confounds.py
@@ -24,6 +24,7 @@
 import os
 import re
 import numpy as np
+import operator
 import pandas as pd
 from functools import reduce
 from collections import deque, OrderedDict
@@ -295,7 +296,7 @@ def spike_regressors(
             mask[metric] = set(np.where(data[metric] < threshold)[0])
         elif criterion == ">":
             mask[metric] = set(np.where(data[metric] > threshold)[0])
-    mask = reduce((lambda x, y: x | y), mask.values())
+    mask = reduce(operator.or_, mask.values())
 
     for lag in lags:
         mask = set([m + lag for m in mask]) | mask
@@ -362,7 +363,7 @@ def temporal_derivatives(order, variables, data):
         variables_deriv[o] = ["{}_derivative{}".format(v, o) for v in variables]
         data_deriv[o] = np.tile(np.nan, data[variables].shape)
         data_deriv[o][o:, :] = np.diff(data[variables], n=o, axis=0)
-    variables_deriv = reduce((lambda x, y: x + y), variables_deriv.values())
+    variables_deriv = reduce(operator.add, variables_deriv.values())
     data_deriv = pd.DataFrame(
         columns=variables_deriv, data=np.concatenate([*data_deriv.values()], axis=1)
     )
@@ -404,7 +405,7 @@ def exponential_terms(order, variables, data):
     for o in order:
         variables_exp[o] = ["{}_power{}".format(v, o) for v in variables]
         data_exp[o] = data[variables] ** o
-    variables_exp = reduce((lambda x, y: x + y), variables_exp.values())
+    variables_exp = reduce(operator.add, variables_exp.values())
     data_exp = pd.DataFrame(
         columns=variables_exp, data=np.concatenate([*data_exp.values()], axis=1)
     )
@@ -570,7 +571,7 @@ def _unscramble_regressor_columns(parent_data, data):
                 var[col].appendleft(c)
             else:
                 var[col].append(c)
-    unscrambled = reduce((lambda x, y: x + y), var.values())
+    unscrambled = reduce(operator.add, var.values())
     return data[[*unscrambled]]
@@ -649,7 +650,7 @@ def parse_formula(model_formula, parent_data, unscramble=False):
         (variables[expression], data[expression]) = parse_expression(
             expression, parent_data
         )
-    variables = list(set(reduce((lambda x, y: x + y), variables.values())))
+    variables = list(set(reduce(operator.add, variables.values())))
     data = pd.concat((data.values()), axis=1)
 
     if unscramble:
diff --git a/niworkflows/utils/connections.py b/niworkflows/utils/connections.py
index ba98ef0aa46..f579ae83607 100644
--- a/niworkflows/utils/connections.py
+++ b/niworkflows/utils/connections.py
@@ -71,7 +71,7 @@ def listify(value):
     """
     from pathlib import Path
     from nipype.interfaces.base import isdefined
-    if not isdefined(value) or isinstance(value, type(None)):
+    if not isdefined(value) or value is None:
         return value
     if isinstance(value, (str, bytes, Path)):
         return [str(value)]
diff --git a/niworkflows/viz/plots.py b/niworkflows/viz/plots.py
index c194307d1ad..a4659ed5b58 100644
--- a/niworkflows/viz/plots.py
+++ b/niworkflows/viz/plots.py
@@ -692,7 +692,7 @@ def confoundplot(
     if cutoff is None:
         cutoff = []
 
-    for i, thr in enumerate(cutoff):
+    for thr in cutoff:
         ax_ts.plot((0, ntsteps - 1), [thr] * 2, linewidth=0.2, color="dimgray")
 
         ax_ts.annotate(

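The lambda-to-`operator` rewrites above are behavior-preserving. A standalone sketch with hypothetical data (not taken from the codebase):

    import operator
    from functools import reduce

    masks = [{0, 3}, {3, 7}, {12}]

    # Equivalent to reduce((lambda x, y: x | y), masks), without
    # allocating a throwaway function object:
    assert reduce(operator.or_, masks) == {0, 3, 7, 12}

    # The same pattern covers list concatenation via operator.add:
    assert reduce(operator.add, [["a"], ["b", "c"]]) == ["a", "b", "c"]
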
From fd29274589a7590a16a5ea25ffb5a35c5fd9a650 Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Wed, 17 Apr 2024 21:52:15 +0200
Subject: [PATCH 3/7] STY: Remove extraneous quotes (#859)

Left over by black when folding multiple lines into a single line.

See #518.
---
 niworkflows/interfaces/confounds.py               | 4 ++--
 niworkflows/interfaces/header.py                  | 2 +-
 niworkflows/interfaces/images.py                  | 2 +-
 niworkflows/interfaces/plotting.py                | 4 ++--
 niworkflows/interfaces/reportlets/segmentation.py | 4 ++--
 niworkflows/interfaces/utility.py                 | 4 ++--
 niworkflows/interfaces/workbench.py               | 4 ++--
 7 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/niworkflows/interfaces/confounds.py b/niworkflows/interfaces/confounds.py
index 6fa60c86f1c..0a7e7c1ac7b 100644
--- a/niworkflows/interfaces/confounds.py
+++ b/niworkflows/interfaces/confounds.py
@@ -155,7 +155,7 @@ class _SpikeRegressorsInputSpec(BaseInterfaceInputSpec):
     dvars_thresh = traits.Float(
         1.5,
         usedefault=True,
-        desc="Minimum standardised DVARS threshold for flagging a frame as " "a spike.",
+        desc="Minimum standardised DVARS threshold for flagging a frame as a spike.",
     )
     header_prefix = traits.Str(
         "motion_outlier",
@@ -166,7 +166,7 @@ class _SpikeRegressorsInputSpec(BaseInterfaceInputSpec):
         traits.Int,
         value=[0],
         usedefault=True,
-        desc="Relative indices of lagging frames to flag for " "each flagged frame",
+        desc="Relative indices of lagging frames to flag for each flagged frame",
     )
     minimum_contiguous = traits.Either(
         None,
diff --git a/niworkflows/interfaces/header.py b/niworkflows/interfaces/header.py
index 1be07bd0495..18808ed6a5b 100644
--- a/niworkflows/interfaces/header.py
+++ b/niworkflows/interfaces/header.py
@@ -382,7 +382,7 @@ class _SanitizeImageInputSpec(BaseInterfaceInputSpec):
     max_32bit = traits.Bool(
         False,
         usedefault=True,
-        desc="cast data to float32 if higher " "precision is encountered",
+        desc="cast data to float32 if higher precision is encountered",
     )
diff --git a/niworkflows/interfaces/images.py b/niworkflows/interfaces/images.py
index aafb791fb4b..c1b2eaee76c 100644
--- a/niworkflows/interfaces/images.py
+++ b/niworkflows/interfaces/images.py
@@ -672,7 +672,7 @@ class _SignalExtractionInputSpec(BaseInterfaceInputSpec):
         "signals.tsv",
         usedefault=True,
         exists=False,
-        desc="The name of the file to output to. " "signals.tsv by default",
+        desc="The name of the file to output to. signals.tsv by default",
     )
diff --git a/niworkflows/interfaces/plotting.py b/niworkflows/interfaces/plotting.py
index 79fe0a29042..0961f3cb5fb 100644
--- a/niworkflows/interfaces/plotting.py
+++ b/niworkflows/interfaces/plotting.py
@@ -120,7 +120,7 @@ class _CompCorVariancePlotInputSpec(BaseInterfaceInputSpec):
     metadata_files = traits.List(
         File(exists=True),
         mandatory=True,
-        desc="List of files containing component " "metadata",
+        desc="List of files containing component metadata",
     )
     metadata_sources = traits.List(
         traits.Str,
@@ -133,7 +133,7 @@ class _CompCorVariancePlotInputSpec(BaseInterfaceInputSpec):
         traits.Float(0.7),
         traits.Float(0.9),
         usedefault=True,
-        desc="Levels of explained variance to include in " "plot",
+        desc="Levels of explained variance to include in plot",
     )
     out_file = traits.Either(
         None, File, value=None, usedefault=True, desc="Path to save plot"
diff --git a/niworkflows/interfaces/reportlets/segmentation.py b/niworkflows/interfaces/reportlets/segmentation.py
index 072c0874497..918372771e0 100644
--- a/niworkflows/interfaces/reportlets/segmentation.py
+++ b/niworkflows/interfaces/reportlets/segmentation.py
@@ -113,7 +113,7 @@ class _MELODICInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.model.MELODICInpu
     out_report = File(
         "melodic_reportlet.svg",
         usedefault=True,
-        desc="Filename for the visual" " report generated " "by Nipype.",
+        desc="Filename for the visual report generated by Nipype.",
     )
     report_mask = File(
         desc="Mask used to draw the outline on the reportlet. "
@@ -197,7 +197,7 @@ class _ICA_AROMAInputSpecRPT(
     out_report = File(
         "ica_aroma_reportlet.svg",
         usedefault=True,
-        desc="Filename for the visual" " report generated " "by Nipype.",
+        desc="Filename for the visual report generated by Nipype.",
     )
     report_mask = File(
         desc="Mask used to draw the outline on the reportlet. "
" diff --git a/niworkflows/interfaces/utility.py b/niworkflows/interfaces/utility.py index fdca1f23a5b..b0c6684dd5d 100644 --- a/niworkflows/interfaces/utility.py +++ b/niworkflows/interfaces/utility.py @@ -445,12 +445,12 @@ class _TSV2JSONInputSpec(BaseInterfaceInputSpec): None, traits.List(), usedefault=True, - desc="List of columns in the TSV to be " "dropped from the JSON.", + desc="List of columns in the TSV to be dropped from the JSON.", ) enforce_case = traits.Bool( True, usedefault=True, - desc="Enforce snake case for top-level keys " "and camel case for nested keys", + desc="Enforce snake case for top-level keys and camel case for nested keys", ) diff --git a/niworkflows/interfaces/workbench.py b/niworkflows/interfaces/workbench.py index fb33e2e602c..a2d3a8639e0 100644 --- a/niworkflows/interfaces/workbench.py +++ b/niworkflows/interfaces/workbench.py @@ -282,7 +282,7 @@ def _format_arg(self, opt, spec, val): if opt in ["current_area", "new_area"]: if not self.inputs.area_surfs and not self.inputs.area_metrics: raise ValueError( - "{} was set but neither area_surfs or" " area_metrics were set".format(opt) + "{} was set but neither area_surfs or area_metrics were set".format(opt) ) if opt == "method": if ( @@ -290,7 +290,7 @@ def _format_arg(self, opt, spec, val): and not self.inputs.area_surfs and not self.inputs.area_metrics ): - raise ValueError("Exactly one of area_surfs or area_metrics" " must be specified") + raise ValueError("Exactly one of area_surfs or area_metrics must be specified") if opt == "valid_roi_out" and val: # generate a filename and add it to argstr roi_out = self._gen_filename(self.inputs.in_file, suffix="_roi") From 36775f2f5dcd866a8cf71e57cf53fa31a713954f Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Wed, 17 Apr 2024 21:52:49 +0200 Subject: [PATCH 4/7] MNT: update vendored docs files (#834) * MNT: update vendored docs files Copied from numpydoc 1.7.0. * MNT: Ignore vendored files in CI spellchecking --- .codespellrc | 2 +- docs/sphinxext/docscrape.py | 41 ++++++++++++++++++-------- docs/sphinxext/docscrape_sphinx.py | 38 ++++++++++-------------- docs/sphinxext/numpydoc.py | 47 ++++++++++++++++++++---------- 4 files changed, 77 insertions(+), 51 deletions(-) diff --git a/.codespellrc b/.codespellrc index 0aac70a604c..c3df6c4ef1d 100644 --- a/.codespellrc +++ b/.codespellrc @@ -1,5 +1,5 @@ [codespell] -skip = .git,*.pdf,*.svg,viz-report.html +skip = .git,*.pdf,*.svg,numpydoc.py,viz-report.html # objekt - used in the code purposefully different from object # nd - import scipy.ndimage as nd ignore-words-list = objekt,nd diff --git a/docs/sphinxext/docscrape.py b/docs/sphinxext/docscrape.py index e5c07f59ded..fb3a0b6347e 100644 --- a/docs/sphinxext/docscrape.py +++ b/docs/sphinxext/docscrape.py @@ -1,6 +1,7 @@ """Extract reference documentation from the NumPy source tree. """ + import inspect import textwrap import re @@ -11,12 +12,7 @@ import copy import sys - -# TODO: Remove try-except when support for Python 3.7 is dropped -try: - from functools import cached_property -except ImportError: # cached_property added in Python 3.8 - cached_property = property +from functools import cached_property def strip_blank_lines(l): @@ -408,7 +404,7 @@ def _parse(self): msg = "Docstring contains a Receives section but not Yields." 
From 36775f2f5dcd866a8cf71e57cf53fa31a713954f Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Wed, 17 Apr 2024 21:52:49 +0200
Subject: [PATCH 4/7] MNT: update vendored docs files (#834)

* MNT: update vendored docs files

  Copied from numpydoc 1.7.0.

* MNT: Ignore vendored files in CI spellchecking
---
 .codespellrc                       |  2 +-
 docs/sphinxext/docscrape.py        | 41 ++++++++++++++++++--------
 docs/sphinxext/docscrape_sphinx.py | 38 ++++++++++--------------
 docs/sphinxext/numpydoc.py         | 47 ++++++++++++++++++++----------
 4 files changed, 77 insertions(+), 51 deletions(-)

diff --git a/.codespellrc b/.codespellrc
index 0aac70a604c..c3df6c4ef1d 100644
--- a/.codespellrc
+++ b/.codespellrc
@@ -1,5 +1,5 @@
 [codespell]
-skip = .git,*.pdf,*.svg,viz-report.html
+skip = .git,*.pdf,*.svg,numpydoc.py,viz-report.html
 # objekt - used in the code purposefully different from object
 # nd - import scipy.ndimage as nd
 ignore-words-list = objekt,nd
diff --git a/docs/sphinxext/docscrape.py b/docs/sphinxext/docscrape.py
index e5c07f59ded..fb3a0b6347e 100644
--- a/docs/sphinxext/docscrape.py
+++ b/docs/sphinxext/docscrape.py
@@ -1,6 +1,7 @@
 """Extract reference documentation from the NumPy source tree.
 
 """
+
 import inspect
 import textwrap
 import re
@@ -11,12 +12,7 @@
 import copy
 import sys
 
-
-# TODO: Remove try-except when support for Python 3.7 is dropped
-try:
-    from functools import cached_property
-except ImportError:  # cached_property added in Python 3.8
-    cached_property = property
+from functools import cached_property
 
 
 def strip_blank_lines(l):
@@ -408,7 +404,7 @@ def _parse(self):
             msg = "Docstring contains a Receives section but not Yields."
             raise ValueError(msg)
 
-        for (section, content) in sections:
+        for section, content in sections:
             if not section.startswith(".."):
                 section = (s.capitalize() for s in section.split(" "))
                 section = " ".join(section)
@@ -631,7 +627,6 @@ def __init__(self, obj, doc=None, config=None):
 
 
 class ClassDoc(NumpyDocString):
-
     extra_public_methods = ["__call__"]
 
     def __init__(self, cls, doc=None, modulename="", func_doc=FunctionDoc, config=None):
@@ -711,6 +706,7 @@ def properties(self):
             for name, func in inspect.getmembers(self._cls)
             if (
                 not name.startswith("_")
+                and not self._should_skip_member(name, self._cls)
                 and (
                     func is None
                     or isinstance(func, (property, cached_property))
@@ -720,6 +716,19 @@ def properties(self):
             )
         ]
 
+    @staticmethod
+    def _should_skip_member(name, klass):
+        if (
+            # Namedtuples should skip everything in their ._fields as the
+            # docstrings for each of the members is: "Alias for field number X"
+            issubclass(klass, tuple)
+            and hasattr(klass, "_asdict")
+            and hasattr(klass, "_fields")
+            and name in klass._fields
+        ):
+            return True
+        return False
+
     def _is_show_member(self, name):
         if self.show_inherited_members:
             return True  # show all class members
@@ -728,7 +737,15 @@ def _is_show_member(self, name):
         return True
 
 
-def get_doc_object(obj, what=None, doc=None, config=None):
+def get_doc_object(
+    obj,
+    what=None,
+    doc=None,
+    config=None,
+    class_doc=ClassDoc,
+    func_doc=FunctionDoc,
+    obj_doc=ObjDoc,
+):
     if what is None:
         if inspect.isclass(obj):
             what = "class"
@@ -742,10 +759,10 @@ def get_doc_object(obj, what=None, doc=None, config=None):
         config = {}
 
     if what == "class":
-        return ClassDoc(obj, func_doc=FunctionDoc, doc=doc, config=config)
+        return class_doc(obj, func_doc=func_doc, doc=doc, config=config)
     elif what in ("function", "method"):
-        return FunctionDoc(obj, doc=doc, config=config)
+        return func_doc(obj, doc=doc, config=config)
     else:
         if doc is None:
             doc = pydoc.getdoc(obj)
-        return ObjDoc(obj, doc, config=config)
+        return obj_doc(obj, doc, config=config)
"templates")] if builder is not None: template_loader = BuiltinTemplateLoader() @@ -430,11 +423,12 @@ def get_doc_object(obj, what=None, doc=None, config=None, builder=None): template_env = SandboxedEnvironment(loader=template_loader) config["template"] = template_env.get_template("numpydoc_docstring.rst") - if what == "class": - return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) - elif what in ("function", "method"): - return SphinxFunctionDoc(obj, doc=doc, config=config) - else: - if doc is None: - doc = pydoc.getdoc(obj) - return SphinxObjDoc(obj, doc, config=config) + return get_doc_object_orig( + obj, + what=what, + doc=doc, + config=config, + class_doc=SphinxClassDoc, + func_doc=SphinxFunctionDoc, + obj_doc=SphinxObjDoc, + ) diff --git a/docs/sphinxext/numpydoc.py b/docs/sphinxext/numpydoc.py index e5bc563d444..3513f95c987 100644 --- a/docs/sphinxext/numpydoc.py +++ b/docs/sphinxext/numpydoc.py @@ -16,6 +16,7 @@ .. [1] https://github.com/numpy/numpydoc """ + from copy import deepcopy import re import pydoc @@ -24,17 +25,17 @@ import hashlib import itertools -from docutils.nodes import citation, Text, section, comment, reference +from docutils.nodes import citation, Text, section, comment, reference, inline import sphinx from sphinx.addnodes import pending_xref, desc_content from sphinx.util import logging from sphinx.errors import ExtensionError -if sphinx.__version__ < "4.2": - raise RuntimeError("Sphinx 4.2 or newer is required") +if sphinx.__version__ < "5": + raise RuntimeError("Sphinx 5 or newer is required") from .docscrape_sphinx import get_doc_object -from .validate import validate, ERROR_MSGS +from .validate import validate, ERROR_MSGS, get_validation_checks from .xref import DEFAULT_LINKS from . import __version__ @@ -149,6 +150,10 @@ def clean_backrefs(app, doc, docname): for ref in _traverse_or_findall(doc, reference, descend=True): for id_ in ref["ids"]: known_ref_ids.add(id_) + # some extensions produce backrefs to inline elements + for ref in _traverse_or_findall(doc, inline, descend=True): + for id_ in ref["ids"]: + known_ref_ids.add(id_) for citation_node in _traverse_or_findall(doc, citation, descend=True): # remove backrefs to non-existent refs citation_node["backrefs"] = [ @@ -207,7 +212,19 @@ def mangle_docstrings(app, what, name, obj, options, lines): # TODO: Currently, all validation checks are run and only those # selected via config are reported. It would be more efficient to # only run the selected checks. 
diff --git a/docs/sphinxext/numpydoc.py b/docs/sphinxext/numpydoc.py
index e5bc563d444..3513f95c987 100644
--- a/docs/sphinxext/numpydoc.py
+++ b/docs/sphinxext/numpydoc.py
@@ -16,6 +16,7 @@
 .. [1] https://github.com/numpy/numpydoc
 
 """
+
 from copy import deepcopy
 import re
 import pydoc
@@ -24,17 +25,17 @@
 import hashlib
 import itertools
 
-from docutils.nodes import citation, Text, section, comment, reference
+from docutils.nodes import citation, Text, section, comment, reference, inline
 import sphinx
 from sphinx.addnodes import pending_xref, desc_content
 from sphinx.util import logging
 from sphinx.errors import ExtensionError
 
-if sphinx.__version__ < "4.2":
-    raise RuntimeError("Sphinx 4.2 or newer is required")
+if sphinx.__version__ < "5":
+    raise RuntimeError("Sphinx 5 or newer is required")
 
 from .docscrape_sphinx import get_doc_object
-from .validate import validate, ERROR_MSGS
+from .validate import validate, ERROR_MSGS, get_validation_checks
 from .xref import DEFAULT_LINKS
 from . import __version__
@@ -149,6 +150,10 @@ def clean_backrefs(app, doc, docname):
     for ref in _traverse_or_findall(doc, reference, descend=True):
         for id_ in ref["ids"]:
             known_ref_ids.add(id_)
+    # some extensions produce backrefs to inline elements
+    for ref in _traverse_or_findall(doc, inline, descend=True):
+        for id_ in ref["ids"]:
+            known_ref_ids.add(id_)
     for citation_node in _traverse_or_findall(doc, citation, descend=True):
         # remove backrefs to non-existent refs
         citation_node["backrefs"] = [
@@ -207,7 +212,19 @@ def mangle_docstrings(app, what, name, obj, options, lines):
         # TODO: Currently, all validation checks are run and only those
         # selected via config are reported. It would be more efficient to
         # only run the selected checks.
-        errors = validate(doc)["errors"]
+        report = validate(doc)
+        errors = [
+            err
+            for err in report["errors"]
+            if not (
+                (
+                    overrides := app.config.numpydoc_validation_overrides.get(
+                        err[0]
+                    )
+                )
+                and re.search(overrides, report["docstring"])
+            )
+        ]
         if {err[0] for err in errors} & app.config.numpydoc_validation_checks:
             msg = (
                 f"[numpydoc] Validation warnings while processing "
@@ -285,6 +302,7 @@ def setup(app, get_doc_object_=get_doc_object):
     app.add_config_value("numpydoc_xref_ignore", set(), True)
     app.add_config_value("numpydoc_validation_checks", set(), True)
     app.add_config_value("numpydoc_validation_exclude", set(), False)
+    app.add_config_value("numpydoc_validation_overrides", dict(), False)
 
     # Extra mangling domains
     app.add_domain(NumpyPythonDomain)
@@ -310,17 +328,9 @@ def update_config(app, config=None):
 
     # Processing to determine whether numpydoc_validation_checks is treated
    # as a blocklist or allowlist
-    valid_error_codes = set(ERROR_MSGS.keys())
-    if "all" in config.numpydoc_validation_checks:
-        block = deepcopy(config.numpydoc_validation_checks)
-        config.numpydoc_validation_checks = valid_error_codes - block
-    # Ensure that the validation check set contains only valid error codes
-    invalid_error_codes = config.numpydoc_validation_checks - valid_error_codes
-    if invalid_error_codes:
-        raise ValueError(
-            f"Unrecognized validation code(s) in numpydoc_validation_checks "
-            f"config value: {invalid_error_codes}"
-        )
+    config.numpydoc_validation_checks = get_validation_checks(
+        config.numpydoc_validation_checks
+    )
 
     # Generate the regexp for docstrings to ignore during validation
     if isinstance(config.numpydoc_validation_exclude, str):
@@ -335,6 +345,11 @@ def update_config(app, config=None):
         )
         config.numpydoc_validation_excluder = exclude_expr
 
+    for check, patterns in config.numpydoc_validation_overrides.items():
+        config.numpydoc_validation_overrides[check] = re.compile(
+            r"|".join(exp for exp in patterns)
+        )
+
 
 # ------------------------------------------------------------------------------
 # Docstring-mangling domains

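The `numpydoc_validation_overrides` option added above maps a validation check code to regex patterns; `update_config` OR-joins and compiles the patterns, and `mangle_docstrings` then drops that check for any docstring the compiled pattern matches. A hypothetical Sphinx `conf.py` fragment (the check code and pattern are illustrative, not taken from this repository):

    # conf.py -- hypothetical numpydoc validation setup
    numpydoc_validation_checks = {"all"}  # opt in to every validation check
    numpydoc_validation_overrides = {
        # Waive SA01 ("See Also section not found") for docstrings
        # that mark themselves as deprecated:
        "SA01": [r"\.\. deprecated::"],
    }
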
From 82d9a27cba6abd82a3b491ad2b22c91fe5d0668f Mon Sep 17 00:00:00 2001
From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
Date: Sat, 4 May 2024 22:38:42 +0200
Subject: [PATCH 5/7] STY: Apply ruff/flake8-implicit-str-concat rule ISC001 (#870)

ISC001 Implicitly concatenated string literals on one line

This rule is currently disabled because it conflicts with the formatter:
https://github.com/astral-sh/ruff/issues/8272
---
 niworkflows/interfaces/nibabel.py | 2 +-
 niworkflows/viz/utils.py          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/niworkflows/interfaces/nibabel.py b/niworkflows/interfaces/nibabel.py
index d7d7546b22d..37668703329 100644
--- a/niworkflows/interfaces/nibabel.py
+++ b/niworkflows/interfaces/nibabel.py
@@ -234,7 +234,7 @@ def _run_interface(self, runtime):
                 continue
             else:
                 raise ValueError(
-                    "Input image has an incorrect number of dimensions" f" ({ndim})."
+                    f"Input image has an incorrect number of dimensions ({ndim})."
                 )
 
         img_4d = nb.concat_images(
diff --git a/niworkflows/viz/utils.py b/niworkflows/viz/utils.py
index c3ba7e668d9..cc932b9e2d0 100644
--- a/niworkflows/viz/utils.py
+++ b/niworkflows/viz/utils.py
@@ -566,7 +566,7 @@ def plot_melodic_components(
             tr = tr / 1000000.0
     elif units[-1] != "sec":
         NIWORKFLOWS_LOG.warning(
-            "Unknown repetition time units " "specified - assuming seconds"
+            "Unknown repetition time units specified - assuming seconds"
         )
     else:
         NIWORKFLOWS_LOG.warning(

From efd29c6096ca34c9a6d75790e53beb9ea2d1811b Mon Sep 17 00:00:00 2001
From: Mathias Goncalves
Date: Wed, 5 Jun 2024 16:10:07 -0400
Subject: [PATCH 6/7] PATCH: Modify FSSource to output T2 (#868)

---
 niworkflows/interfaces/patches.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/niworkflows/interfaces/patches.py b/niworkflows/interfaces/patches.py
index 32dcde711b7..d72764a2335 100644
--- a/niworkflows/interfaces/patches.py
+++ b/niworkflows/interfaces/patches.py
@@ -27,6 +27,8 @@
 from numpy.linalg.linalg import LinAlgError
 from nipype.algorithms import confounds as nac
+from nipype.interfaces import io as nio
+from nipype.interfaces.base import File
 
 
 class RobustACompCor(nac.ACompCor):
@@ -73,3 +75,15 @@ def _run_interface(self, runtime):
                 sleep(randint(start + 4, start + 10))
 
         return runtime
+
+
+class _FSSourceOutputSpec(nio.FSSourceOutputSpec):
+    T2 = File(desc='Intensity normalized whole-head volume', loc='mri')
+
+
+class FreeSurferSource(nio.FreeSurferSource):
+    """
+    Patch to allow grabbing the T2 volume, if available
+    """
+
+    output_spec = _FSSourceOutputSpec

From 6ff29bab4e8adb04dae237d050d0fee2a8afcf04 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Mon, 10 Jun 2024 17:27:01 -0400
Subject: [PATCH 7/7] REL: 1.10.2

---
 CHANGES.rst | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/CHANGES.rst b/CHANGES.rst
index fa295110dea..ff4d69ae78e 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,14 @@
+1.10.2 (June 10, 2024)
+======================
+Bug-fix release in the 1.10.x series

+* ENH: Modify FSSource to output T2 (#868)
+* STY: Apply ruff/flake8-implicit-str-concat rule ISC001 (#870)
+* STY: Remove extraneous quotes (#859)
+* STY: Apply assorted ruff/refurb rules (#867)
+* MNT: update vendored docs files (#834)
+* MNT: Fix deprecation error in CI (#866)
+
 1.10.1 (March 06, 2024)
 =======================
 Bug-fix release in the 1.10.x series
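
To close, a usage sketch for the patched `FreeSurferSource` from PATCH 6/7; the subjects directory and subject ID are hypothetical, and the patch only adds a `T2` output located, like the stock Nipype outputs, under the subject's `mri/` directory:

    from niworkflows.interfaces.patches import FreeSurferSource

    fs_source = FreeSurferSource()
    fs_source.inputs.subjects_dir = "/data/freesurfer"  # hypothetical path
    fs_source.inputs.subject_id = "sub-01"              # hypothetical subject
    result = fs_source.run()
    # With the patch applied, the T2 volume is exposed when present:
    print(result.outputs.T2)  # e.g. /data/freesurfer/sub-01/mri/T2.mgz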