diff --git a/.cruft.json b/.cruft.json
index c0037708..82367dad 100644
--- a/.cruft.json
+++ b/.cruft.json
@@ -1,6 +1,6 @@
 {
   "template": "https://github.com/sunpy/package-template",
-  "commit": "aec53b81aed2e7e534045e59303d82712fe82fb1",
+  "commit": "4ca8e60aac805d5f736de80c45ae0aba96b4cb85",
   "checkout": null,
   "context": {
     "cookiecutter": {
@@ -16,7 +16,8 @@
       "enable_dynamic_dev_versions": "y",
       "include_example_code": "n",
       "include_cruft_update_github_workflow": "y",
-      "_sphinx_theme": "alabaster",
+      "use_extended_ruff_linting": "y",
+      "_sphinx_theme": "sunpy",
       "_parent_project": "",
       "_install_requires": "",
       "_copy_without_render": [
diff --git a/.github/workflows/sub_package_update.yml b/.github/workflows/sub_package_update.yml
index 74558476..0b657f24 100644
--- a/.github/workflows/sub_package_update.yml
+++ b/.github/workflows/sub_package_update.yml
@@ -21,14 +21,6 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       fail-fast: true
-      matrix:
-        include:
-          - add-paths: .
-            body: apply the changes to this repo.
-            branch: cruft/update
-            commit-message: "Automatic package template update"
-            title: Updates from the package template
-
     steps:
       - uses: actions/checkout@v4
@@ -55,25 +47,47 @@ jobs:
           echo "has_changes=$CHANGES" >> "$GITHUB_OUTPUT"
 
       - name: Run update if available
+        id: cruft_update
         if: steps.check.outputs.has_changes == '1'
         run: |
           git config --global user.email "${{ github.actor }}@users.noreply.github.com"
           git config --global user.name "${{ github.actor }}"
 
-          cruft update --skip-apply-ask --refresh-private-variables
+          cruft_output=$(cruft update --skip-apply-ask --refresh-private-variables)
+          echo "$cruft_output"
           git restore --staged .
 
-      - name: Create pull request
+          if [[ "$cruft_output" == *"Failed to cleanly apply the update, there may be merge conflicts."* ]]; then
+            echo "merge_conflicts=1" >> "$GITHUB_OUTPUT"
+          else
+            echo "merge_conflicts=0" >> "$GITHUB_OUTPUT"
+          fi
+
+      - name: Check if only .cruft.json is modified
+        id: cruft_json
         if: steps.check.outputs.has_changes == '1'
+        run: |
+          git status --porcelain=1
+          if [[ "$(git status --porcelain=1)" == " M .cruft.json" ]]; then
+            echo "Only .cruft.json is modified. Exiting workflow early."
+            echo "has_changes=0" >> "$GITHUB_OUTPUT"
+          else
+            echo "has_changes=1" >> "$GITHUB_OUTPUT"
+          fi
+
+      - name: Create pull request
+        if: steps.cruft_json.outputs.has_changes == '1'
         uses: peter-evans/create-pull-request@v7
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
-          add-paths: ${{ matrix.add-paths }}
-          commit-message: ${{ matrix.commit-message }}
-          branch: ${{ matrix.branch }}
+          add-paths: "."
+          commit-message: "Automatic package template update"
+          branch: "cruft/update"
           delete-branch: true
-          branch-suffix: timestamp
-          title: ${{ matrix.title }}
+          draft: ${{ steps.cruft_update.outputs.merge_conflicts == '1' }}
+          title: "Updates from the package template"
           body: |
-            This is an autogenerated PR, which will ${{ matrix.body }}.
-            [Cruft](https://cruft.github.io/cruft/) has detected updates from the Package Template
+            This is an autogenerated PR, which will apply the latest changes from the [SunPy Package Template](https://github.com/sunpy/package-template).
+            If this pull request has been opened as a draft, there are merge conflicts that need fixing.
+
+            **To run the CI on this pull request, you will need to close it and reopen it.**
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 17f78d5f..87f55668 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,4 +1,10 @@
 repos:
+  # This should be before any formatting hooks like isort
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: "v0.7.1"
+    hooks:
+      - id: ruff
+        args: ["--fix"]
   - repo: https://github.com/PyCQA/autoflake
     rev: v2.3.1
     hooks:
diff --git a/.ruff.toml b/.ruff.toml
index b7350f70..17e4216d 100644
--- a/.ruff.toml
+++ b/.ruff.toml
@@ -1,5 +1,5 @@
 target-version = "py310"
-line-length = 110
+line-length = 120
 exclude = [
     ".git,",
     "__pycache__",
@@ -8,30 +8,75 @@ exclude = [
 ]
 
 [lint]
-select = ["E", "F", "W", "UP", "PT"]
+select = [
+    "E",
+    "F",
+    "W",
+    "UP",
+    "PT",
+    "BLE",
+    "A",
+    "C4",
+    "INP",
+    "PIE",
+    "T20",
+    "RET",
+    "TID",
+    "PTH",
+    "PD",
+    "PLC",
+    "PLE",
+    "FLY",
+    "NPY",
+    "PERF",
+    "RUF",
+]
 extend-ignore = [
     # pycodestyle (E, W)
-    "E501",  # LineTooLong # TODO! fix
+    "E501",  # Ignore line length; a formatter will be used instead
+    # pyupgrade (UP)
+    "UP038",  # Use | in isinstance - not compatible with models and is slower
    # pytest (PT)
     "PT001",  # Always use pytest.fixture()
     "PT004",  # Fixtures which don't return anything should have leading _
-    "PT007",  # Parametrize should be lists of tuples  # TODO! fix
-    "PT011",  # Too broad exception assert  # TODO! fix
     "PT023",  # Always use () on pytest decorators
+    # flake8-pie (PIE)
+    "PIE808",  # Disallow passing 0 as the first argument to range
+    # flake8-use-pathlib (PTH)
+    "PTH123",  # open() should be replaced by Path.open()
+    # Ruff (RUF)
+    "RUF003",  # Ignore ambiguous quote marks, doesn't allow ' in comments
+    "RUF012",  # Mutable class attributes should be annotated with `typing.ClassVar`
+    "RUF013",  # PEP 484 prohibits implicit `Optional`
+    "RUF015",  # Prefer `next(iter(...))` over single element slice
 ]
 
 [lint.per-file-ignores]
-# Part of configuration, not a package.
-"setup.py" = ["INP001"]
-"conftest.py" = ["INP001"]
+"setup.py" = [
+    "INP001",  # File is part of an implicit namespace package.
+]
+"conftest.py" = [
+    "INP001",  # File is part of an implicit namespace package.
+]
 "docs/conf.py" = [
-    "E402"  # Module imports not at top of file
+    "E402"  # Module imports not at top of file
 ]
 "docs/*.py" = [
-    "INP001",  # Implicit-namespace-package. The examples are not a package.
+    "INP001",  # File is part of an implicit namespace package.
+]
+"examples/**.py" = [
+    "T201",  # allow use of print in examples
+    "INP001",  # File is part of an implicit namespace package.
+]
+"__init__.py" = [
+    "E402",  # Module level import not at top of file
+    "F401",  # Unused import
+    "F403",  # from {name} import * used; unable to detect undefined names
+    "F405",  # {name} may be undefined, or defined from star imports
+]
+"test_*.py" = [
+    "E402",  # Module level import not at top of file
 ]
-"__init__.py" = ["E402", "F401", "F403"]
-"test_*.py" = ["B011", "D", "E402", "PGH001", "S101"]
 
 [lint.pydocstyle]
 convention = "numpy"
diff --git a/docs/conf.py b/docs/conf.py
index 1f2f2532..bff4b00b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -53,7 +53,7 @@ automodapi_toctreedirnm = "generated/api"
 
 # Add any paths that contain templates here, relative to this directory.
-# templates_path = ["_templates"]  # NOQA: ERA001
+# templates_path = ["_templates"]
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
@@ -103,7 +103,7 @@
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-# html_static_path = ["_static"]  # NOQA: ERA001
+# html_static_path = ["_static"]
 
 # By default, when rendering docstrings for classes, sphinx.ext.autodoc will
 # make docs with the class-level docstring and the class-method docstrings,
diff --git a/pyproject.toml b/pyproject.toml
index a7b7d95e..d2ee7cc1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -36,6 +36,8 @@ tests = [
 ]
 docs = [
   "sphinx",
-  "sphinx_automodapi",
+  "sphinx-automodapi",
+  "sunpy-sphinx-theme",
+  "packaging",
   "sphinx-changelog",
   "sphinx-gallery",
diff --git a/pytest.ini b/pytest.ini
index 997f496b..2dee6a5c 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -48,3 +48,4 @@ filterwarnings =
     ignore: invalid value encountered in true_divide
     # https://github.com/pytest-dev/pytest-cov/issues/557
     ignore:The --rsyncdir command line argument and rsyncdirs config variable are deprecated.:DeprecationWarning
+    ignore:Please use astropy.wcs.wcsapi.high_level_api.values_to_high_level_objects:DeprecationWarning
diff --git a/sunraster/_dev/scm_version.py b/sunraster/_dev/scm_version.py
index b9afb1d9..d0c4c1f1 100644
--- a/sunraster/_dev/scm_version.py
+++ b/sunraster/_dev/scm_version.py
@@ -1,11 +1,11 @@
 # Try to use setuptools_scm to get the current version; this is only used
 # in development installations from the git repository.
-import os.path
+from pathlib import Path
 
 try:
     from setuptools_scm import get_version
 
-    version = get_version(root=os.path.join("..", ".."), relative_to=__file__)
+    version = get_version(root=Path("../.."), relative_to=__file__)
 except ImportError:
     raise
 except Exception as e:
diff --git a/sunraster/extern/meta.py b/sunraster/extern/meta.py
index abb89fac..fd64c7d3 100644
--- a/sunraster/extern/meta.py
+++ b/sunraster/extern/meta.py
@@ -78,7 +78,7 @@ def __init__(self, header=None, comments=None, axes=None, data_shape=None):
             axes = dict(axes)
             if not set(axes.keys()).issubset(set(header_keys)):
                 raise ValueError("All axes must correspond to a value in header under the same key.")
-            self._axes = dict([(key, self._sanitize_axis_value(axis, header[key], key)) for key, axis in axes.items()])
+            self._axes = {key: self._sanitize_axis_value(axis, header[key], key) for key, axis in axes.items()}
 
     def _sanitize_axis_value(self, axis, value, key):
         if axis is None:
@@ -100,7 +100,7 @@ def _sanitize_axis_value(self, axis, value, key):
         shape_error_msg = f"{key} must have shape {tuple(self.shape[axis])} as it is associated with axes {axis}"
         if len(axis) == 1 and not hasattr(value, "__len__") or len(axis) != 1 and not hasattr(value, "shape"):
             raise TypeError(shape_error_msg)
-        elif len(axis) == 1:
+        if len(axis) == 1:
             meta_shape = (len(value),)
         else:
             meta_shape = value.shape
@@ -189,51 +189,50 @@ def __getitem__(self, item):
         # If item is single string, slicing is simple.
         if isinstance(item, str):
             return super().__getitem__(item)
-        elif self.shape is None:
+        if self.shape is None:
             raise TypeError("Meta object does not have a shape and so cannot be sliced.")
-        else:
-            new_meta = copy.deepcopy(self)
-            # Convert item to array of ints and slices for consistent behaviour.
-            if isinstance(item, (numbers.Integral, slice)):
-                item = [item]
-            item = np.array(list(item) + [slice(None)] * (len(self.shape) - len(item)), dtype=object)
-
-            # Edit data shape and calculate which axis will be dropped.
-            dropped_axes = np.zeros(len(self.shape), dtype=bool)
-            new_shape = new_meta.shape
-            for i, axis_item in enumerate(item):
-                if isinstance(axis_item, numbers.Integral):
-                    dropped_axes[i] = True
-                elif isinstance(axis_item, slice):
-                    start = axis_item.start
-                    if start is None:
-                        start = 0
-                    if start < 0:
-                        start = self.shape[i] - start
-                    stop = axis_item.stop
-                    if stop is None:
-                        stop = self.shape[i]
-                    if stop < 0:
-                        stop = self.shape[i] - stop
-                    new_shape[i] = stop - start
-                else:
-                    raise TypeError("Unrecognized slice type. " "Must be an int, slice and tuple of the same.")
-            new_meta._data_shape = new_shape[np.invert(dropped_axes)]
-
-            # Calculate the cumulative number of dropped axes.
-            cumul_dropped_axes = np.cumsum(dropped_axes)
-
-            # Slice all metadata associated with axes.
-            for key, value in self.items():
-                axis = self.axes.get(key, None)
-                if axis is not None:
-                    new_item = tuple(item[axis])
-                    new_value = value[new_item[0]] if len(new_item) == 1 else value[new_item]
-                    new_axis = np.array([-1 if isinstance(i, numbers.Integral) else a for i, a in zip(new_item, axis)])
-                    new_axis -= cumul_dropped_axes[axis]
-                    new_axis = new_axis[new_axis >= 0]
-                    if len(new_axis) == 0:
-                        new_axis = None
-                    new_meta.add(key, new_value, self.comments.get(key, None), new_axis, overwrite=True)
-
-            return new_meta
+        new_meta = copy.deepcopy(self)
+        # Convert item to array of ints and slices for consistent behaviour.
+        if isinstance(item, (numbers.Integral, slice)):
+            item = [item]
+        item = np.array(list(item) + [slice(None)] * (len(self.shape) - len(item)), dtype=object)
+
+        # Edit data shape and calculate which axis will be dropped.
+        dropped_axes = np.zeros(len(self.shape), dtype=bool)
+        new_shape = new_meta.shape
+        for i, axis_item in enumerate(item):
+            if isinstance(axis_item, numbers.Integral):
+                dropped_axes[i] = True
+            elif isinstance(axis_item, slice):
+                start = axis_item.start
+                if start is None:
+                    start = 0
+                if start < 0:
+                    start = self.shape[i] - start
+                stop = axis_item.stop
+                if stop is None:
+                    stop = self.shape[i]
+                if stop < 0:
+                    stop = self.shape[i] - stop
+                new_shape[i] = stop - start
+            else:
+                raise TypeError("Unrecognized slice type. Must be an int, slice, or tuple of the same.")
+        new_meta._data_shape = new_shape[np.invert(dropped_axes)]
+
+        # Calculate the cumulative number of dropped axes.
+        cumul_dropped_axes = np.cumsum(dropped_axes)
+
+        # Slice all metadata associated with axes.
+        for key, value in self.items():
+            axis = self.axes.get(key, None)
+            if axis is not None:
+                new_item = tuple(item[axis])
+                new_value = value[new_item[0]] if len(new_item) == 1 else value[new_item]
+                new_axis = np.array([-1 if isinstance(i, numbers.Integral) else a for i, a in zip(new_item, axis)])
+                new_axis -= cumul_dropped_axes[axis]
+                new_axis = new_axis[new_axis >= 0]
+                if len(new_axis) == 0:
+                    new_axis = None
+                new_meta.add(key, new_value, self.comments.get(key, None), new_axis, overwrite=True)
+
+        return new_meta
diff --git a/sunraster/extern/tests/test_meta.py b/sunraster/extern/tests/test_meta.py
index c6054af0..8a3a420e 100644
--- a/sunraster/extern/tests/test_meta.py
+++ b/sunraster/extern/tests/test_meta.py
@@ -26,7 +26,7 @@ def assert_metas_equal(test_input, expected_output):
     for test_value, expected_value in zip(test_input.values(), expected_output.values()):
         try:
             assert test_value == expected_value
-        except ValueError as err:
+        except ValueError as err:  # NOQA: PERF203
             if multi_element_msg in err.args[0]:
                 assert np.allclose(test_value, expected_value)
     # Check axes are the same.
@@ -109,7 +109,6 @@ def test_slice_away_independent_axis(basic_meta): axes["c"] -= 1 axes["d"] -= 1 shape = meta.shape[1:] - print(values, comments, axes, shape) expected = Meta(values, comments, axes, shape) # Compare output and expected. assert_metas_equal(output, expected) @@ -117,10 +116,8 @@ def test_slice_away_independent_axis(basic_meta): def test_slice_dependent_axes(basic_meta): meta = basic_meta - print(meta["a"]) # Get output output = meta[:, 1:3, 1] - print(meta["a"]) # Build expected result. values = dict(list(meta.items())) values["c"] = values["c"][1:3, 1] @@ -175,13 +172,13 @@ def test_add_overwrite(basic_meta): def test_add_overwrite_error(basic_meta): meta = basic_meta - with pytest.raises(KeyError): + with pytest.raises(KeyError, match="A"): meta.add("a", "world", None, None) def test_add_axis_without_shape(no_shape_meta): meta = no_shape_meta - with pytest.raises(TypeError): + with pytest.raises(TypeError, match="A"): meta.add("z", [100], axis=0) diff --git a/sunraster/instr/spice.py b/sunraster/instr/spice.py index b974592a..2ef6bbd2 100644 --- a/sunraster/instr/spice.py +++ b/sunraster/instr/spice.py @@ -62,7 +62,7 @@ def read_spice_l2_fits(filenames, windows=None, memmap=True, read_dumbbells=Fals if len(filenames) > 1: # Wrap windows from first file in lists # so windows from other files can be appended. - cube_lists = dict([(key, [value]) for key, value in first_cubes.items()]) + cube_lists = {key: [value] for key, value in first_cubes.items()} # Get info from first file for consistency checks between files. first_meta = _get_meta_from_last_added(cube_lists) first_obs_id = _get_obsid(first_meta) @@ -79,7 +79,7 @@ def read_spice_l2_fits(filenames, windows=None, memmap=True, read_dumbbells=Fals output=cube_lists, spice_id=first_obs_id, ) - except ValueError as err: + except ValueError as err: # NOQA: PERF203 err_message = err.args[0] if INCORRECT_OBSID_MESSAGE in err_message: this_obs_id = err_message.split()[-1] @@ -291,7 +291,7 @@ def __str__(self): ) def __repr__(self): - return f"{object.__repr__(self)}\n{str(self)}" + return f"{object.__repr__(self)}\n{self!s}" # ---------- Inherited ABC properties ---------- @property diff --git a/sunraster/instr/tests/test_spice.py b/sunraster/instr/tests/test_spice.py index e8d740a8..5533e342 100644 --- a/sunraster/instr/tests/test_spice.py +++ b/sunraster/instr/tests/test_spice.py @@ -1,5 +1,3 @@ -import os.path - import numpy as np import pytest @@ -13,7 +11,7 @@ from sunraster import RasterSequence, SpectrogramCube, SpectrogramSequence from sunraster.instr.spice import SPICEMeta, read_spice_l2_fits -from sunraster.tests import test_data_dir +from sunraster.tests import TEST_DATA_PATH READ_SPICE_L2_FITS_RETURN_TYPE = NDCollection SPECTRAL_WINDOW = ("WINDOW0_74.73", "Extension name") @@ -53,35 +51,35 @@ @pytest.fixture def spice_fits_header(): hdr = fits.Header() - hdr.append(tuple(["EXTNAME"] + list(SPECTRAL_WINDOW))) - hdr.append(tuple(["DETECTOR"] + list(DETECTOR))) - hdr.append(tuple(["INSTRUME"] + list(INSTRUMENT))) - hdr.append(tuple(["OBSRVTRY"] + list(OBSERVATORY))) - hdr.append(tuple(["LEVEL"] + list(PROCESSING_LEVEL))) - hdr.append(tuple(["RSUN_REF"] + list(RSUN_METERS))) - hdr.append(tuple(["RSUN_ARC"] + list(RSUN_ANGULAR))) - hdr.append(tuple(["OBS_ID"] + list(OBSERVING_MODE_ID))) - hdr.append(tuple(["OBS_VR"] + list(OBSERVATORY_RADIAL_VELOCITY))) - hdr.append(tuple(["DSUN_OBS"] + list(DISTANCE_TO_SUN))) - hdr.append(tuple(["DATE-OBS"] + list(DATE_REFERENCE))) - hdr.append(tuple(["DATE-BEG"] + list(DATE_START))) - 
hdr.append(tuple(["DATE-END"] + list(DATE_END))) - hdr.append(tuple(["HGLN_OBS"] + list(HGLN_OBS))) - hdr.append(tuple(["HGLT_OBS"] + list(HGLT_OBS))) - hdr.append(tuple(["SPIOBSID"] + list(SPICE_OBSERVING_MODE_ID))) - hdr.append(tuple(["DARKMAP"] + list(DARKMAP))) - hdr.append(tuple(["BLACKLEV"] + list(BLACKLEV))) - hdr.append(tuple(["WIN_TYPE"] + list(WINDOW_TYPE))) - hdr.append(tuple(["WINTABID"] + list(WINDOW_TABLE_ID))) - hdr.append(tuple(["SLIT_ID"] + list(SLIT_ID))) - hdr.append(tuple(["SLIT_WID"] + list(SLIT_WIDTH))) - hdr.append(tuple(["DUMBBELL"] + list(DUMBBELL))) - hdr.append(tuple(["SOLAR_B0"] + list(SOLAR_B0))) - hdr.append(tuple(["SOLAR_P0"] + list(SOLAR_P0))) - hdr.append(tuple(["SOLAR_EP"] + list(SOLAR_EP))) - hdr.append(tuple(["CAR_ROT"] + list(CARRINGTON_ROTATION_NUMBER))) - hdr.append(tuple(["DATE_EAR"] + list(DATE_START_EARTH))) - hdr.append(tuple(["DATE_SUN"] + list(DATE_START_SUN))) + hdr.append(("EXTNAME", *list(SPECTRAL_WINDOW))) + hdr.append(("DETECTOR", *list(DETECTOR))) + hdr.append(("INSTRUME", *list(INSTRUMENT))) + hdr.append(("OBSRVTRY", *list(OBSERVATORY))) + hdr.append(("LEVEL", *list(PROCESSING_LEVEL))) + hdr.append(("RSUN_REF", *list(RSUN_METERS))) + hdr.append(("RSUN_ARC", *list(RSUN_ANGULAR))) + hdr.append(("OBS_ID", *list(OBSERVING_MODE_ID))) + hdr.append(("OBS_VR", *list(OBSERVATORY_RADIAL_VELOCITY))) + hdr.append(("DSUN_OBS", *list(DISTANCE_TO_SUN))) + hdr.append(("DATE-OBS", *list(DATE_REFERENCE))) + hdr.append(("DATE-BEG", *list(DATE_START))) + hdr.append(("DATE-END", *list(DATE_END))) + hdr.append(("HGLN_OBS", *list(HGLN_OBS))) + hdr.append(("HGLT_OBS", *list(HGLT_OBS))) + hdr.append(("SPIOBSID", *list(SPICE_OBSERVING_MODE_ID))) + hdr.append(("DARKMAP", *list(DARKMAP))) + hdr.append(("BLACKLEV", *list(BLACKLEV))) + hdr.append(("WIN_TYPE", *list(WINDOW_TYPE))) + hdr.append(("WINTABID", *list(WINDOW_TABLE_ID))) + hdr.append(("SLIT_ID", *list(SLIT_ID))) + hdr.append(("SLIT_WID", *list(SLIT_WIDTH))) + hdr.append(("DUMBBELL", *list(DUMBBELL))) + hdr.append(("SOLAR_B0", *list(SOLAR_B0))) + hdr.append(("SOLAR_P0", *list(SOLAR_P0))) + hdr.append(("SOLAR_EP", *list(SOLAR_EP))) + hdr.append(("CAR_ROT", *list(CARRINGTON_ROTATION_NUMBER))) + hdr.append(("DATE_EAR", *list(DATE_START_EARTH))) + hdr.append(("DATE_SUN", *list(DATE_START_SUN))) return hdr @@ -101,20 +99,21 @@ def spice_rasdb_filename(tmp_path): A new FITS file is saved in a tmp file path. 
""" + rng_gen = np.random.default_rng() filename = "solo_L2_spice-n-ras-db_20200602T081733_V01_12583760-000.fits" - with fits.open(os.path.join(test_data_dir, filename)) as hdulist: + with fits.open(TEST_DATA_PATH / filename) as hdulist: new_hdulist = fits.HDUList() - new_hdulist.append(fits.PrimaryHDU(np.random.rand(1, 48, 832, 30), header=hdulist[0].header)) - new_hdulist.append(fits.ImageHDU(np.random.rand(1, 48, 832, 30), header=hdulist[1].header)) - new_hdulist.append(fits.ImageHDU(np.random.rand(1, 56, 64, 30), header=hdulist[2].header)) - new_hdulist.append(fits.ImageHDU(np.random.rand(1, 56, 64, 30), header=hdulist[3].header)) + new_hdulist.append(fits.PrimaryHDU(rng_gen.random((1, 48, 832, 30)), header=hdulist[0].header)) + new_hdulist.append(fits.ImageHDU(rng_gen.random((1, 48, 832, 30)), header=hdulist[1].header)) + new_hdulist.append(fits.ImageHDU(rng_gen.random((1, 56, 64, 30)), header=hdulist[2].header)) + new_hdulist.append(fits.ImageHDU(rng_gen.random((1, 56, 64, 30)), header=hdulist[3].header)) new_hdulist.append(hdulist[-1]) tmp_spice_path = tmp_path / "spice" - if not os.path.exists(tmp_spice_path): + if not tmp_spice_path.exists(): tmp_spice_path.mkdir() - new_filename = os.path.join(tmp_spice_path, filename) + new_filename = tmp_spice_path / filename new_hdulist.writeto(new_filename, overwrite=True) - return new_filename + return str(new_filename) @pytest.fixture @@ -124,16 +123,17 @@ def spice_sns_filename(tmp_path): A new FITS file is saved in a tmp file path. """ + rng_gen = np.random.default_rng() filename = "solo_L2_spice-n-sit_20200620T235901_V01_16777431-000.fits" - with fits.open(os.path.join(test_data_dir, filename)) as hdulist: + with fits.open(TEST_DATA_PATH / filename) as hdulist: new_hdulist = fits.HDUList() - new_hdulist.append(fits.PrimaryHDU(np.random.rand(32, 48, 1024, 1), header=hdulist[0].header)) - new_hdulist.append(fits.ImageHDU(np.random.rand(32, 48, 1024, 1), header=hdulist[1].header)) + new_hdulist.append(fits.PrimaryHDU(rng_gen.random((32, 48, 1024, 1)), header=hdulist[0].header)) + new_hdulist.append(fits.ImageHDU(rng_gen.random((32, 48, 1024, 1)), header=hdulist[1].header)) new_hdulist.append(hdulist[-1]) tmp_spice_path = tmp_path / "spice" - if not os.path.exists(tmp_spice_path): + if not tmp_spice_path.exists(): tmp_spice_path.mkdir() - new_filename = os.path.join(tmp_spice_path, filename) + new_filename = tmp_spice_path / filename new_hdulist.writeto(new_filename, output_verify="fix+ignore", overwrite=True) return new_filename @@ -216,11 +216,11 @@ def test_meta_observing_mode_id_solar_orbiter(spice_meta): def test_meta_darkmap_subtracted_onboard(spice_meta): - assert spice_meta.darkmap_subtracted_onboard == False + assert spice_meta.darkmap_subtracted_onboard is False def test_meta_bias_frame_subtracted_onboard(spice_meta): - assert spice_meta.bias_frame_subtracted_onboard == False + assert spice_meta.bias_frame_subtracted_onboard is False def test_meta_window_type(spice_meta): @@ -345,6 +345,6 @@ def test_read_spice_l2_fits_multiple_files_dumbbells(spice_rasdb_filename): def test_read_spice_l2_fits_incompatible_files(spice_rasdb_filename, spice_sns_filename): - with pytest.raises(ValueError): - filenames = [spice_rasdb_filename, spice_sns_filename] + filenames = [spice_rasdb_filename, spice_sns_filename] + with pytest.raises(ValueError, match="A"): read_spice_l2_fits(filenames) diff --git a/sunraster/spectrogram.py b/sunraster/spectrogram.py index d7e5d373..4f41b4b2 100644 --- a/sunraster/spectrogram.py +++ b/sunraster/spectrogram.py @@ 
-268,7 +268,7 @@ def __str__(self): raise err try: sc = self.celestial - component_names = dict([(item, key) for key, item in sc.representation_component_names.items()]) + component_names = {item: key for key, item in sc.representation_component_names.items()} lon = getattr(sc, component_names["lon"]) lat = getattr(sc, component_names["lat"]) if sc.isscalar: @@ -309,7 +309,7 @@ def __str__(self): ) def __repr__(self): - return f"{object.__repr__(self)}\n{str(self)}" + return f"{object.__repr__(self)}\n{self!s}" def __getitem__(self, item): # Slice SpectrogramCube using parent slicing. @@ -445,36 +445,33 @@ def apply_exposure_time_correction(self, undo=False, force=False): def _get_axis_coord(self, axis_name, coord_loc): if coord_loc == "wcs": return self.axis_world_coords(axis_name)[0] - elif coord_loc == "extra_coords": + if coord_loc == "extra_coords": return self.axis_world_coords(wcs=self.extra_coords[axis_name])[0] - elif coord_loc == "global_coords": + if coord_loc == "global_coords": return self.global_coords[axis_name] - elif coord_loc == "meta": + if coord_loc == "meta": return self.meta[axis_name] - else: - raise ValueError(f"{coord_loc} is not a valid coordinate location.") + raise ValueError(f"{coord_loc} is not a valid coordinate location.") def _get_axis_coord_index(self, axis_name, coord_loc): if coord_loc == "wcs": coord_pix_axes = nuw.physical_type_to_pixel_axes(axis_name, self.wcs) coord_array_axes = nuw.convert_between_array_and_pixel_axes(coord_pix_axes, len(self.dimensions)) return coord_array_axes.tolist()[0] - elif coord_loc == "extra_coords": + if coord_loc == "extra_coords": return self.extra_coords[axis_name].mapping[0] - elif coord_loc == "meta": + if coord_loc == "meta": return self.meta.axes[axis_name] - else: - raise ValueError(f"{coord_loc} is not a valid coordinate location.") + raise ValueError(f"{coord_loc} is not a valid coordinate location.") def _get_axis_coord_values(self, axis_name, coord_loc): if coord_loc == "wcs": return self.axis_world_coords_values(axis_name)[0] - elif coord_loc == "extra_coords": + if coord_loc == "extra_coords": return self.axis_world_coords_values(wcs=self.extra_coords[axis_name])[0] - elif coord_loc == "global_coords": + if coord_loc == "global_coords": return self.global_coords[axis_name] - else: - raise ValueError(f"{coord_loc} is not a valid coordinate location.") + raise ValueError(f"{coord_loc} is not a valid coordinate location.") def _find_axis_name(supported_names, world_axis_physical_types, extra_coords, meta): diff --git a/sunraster/spectrogram_sequence.py b/sunraster/spectrogram_sequence.py index 5c08a42d..7d959cca 100644 --- a/sunraster/spectrogram_sequence.py +++ b/sunraster/spectrogram_sequence.py @@ -66,7 +66,7 @@ def exposure_time(self): exposure_time = np.concatenate([raster.exposure_time for raster in self.data]) try: return exposure_type(exposure_time) - except Exception: + except Exception: # NOQA: BLE001 return exposure_time @property @@ -113,8 +113,8 @@ def apply_exposure_time_correction(self, undo=False, copy=False, force=False): converted_data_list = [cube.apply_exposure_time_correction(undo=undo, force=force) for cube in self.data] if copy is True: return self.__class__(converted_data_list, meta=self.meta, common_axis=self._common_axis) - else: - self.data = converted_data_list + self.data = converted_data_list + return None def __str__(self): data0 = self.data[0] @@ -163,7 +163,7 @@ def __str__(self): time_period = None if data0._longitude_name or data0._latitude_name: sc = self.celestial - 
component_names = dict([(item, key) for key, item in sc.representation_component_names.items()]) + component_names = {item: key for key, item in sc.representation_component_names.items()} lon = getattr(sc, component_names["lon"]) lat = getattr(sc, component_names["lat"]) if sc.isscalar: @@ -199,7 +199,7 @@ def __str__(self): ) def __repr__(self): - return f"{object.__repr__(self)}\n{str(self)}" + return f"{object.__repr__(self)}\n{self!s}" class RasterSequence(SpectrogramSequence): @@ -270,7 +270,7 @@ def _set_single_scan_instrument_axes_types(self): if len(spectral_raster_index) == 1: self._single_scan_instrument_axes_types[spectral_raster_index] = self._spectral_axis_name # Slit axis name. - w = self._single_scan_instrument_axes_types == None + w = self._single_scan_instrument_axes_types == None # NOQA: E711 if w.sum() > 1: raise ValueError( "Unable to parse the WCS or common_axis to work out either or both the slit-step axis nor the spectral (aka the slit) axis." @@ -312,17 +312,17 @@ def __getitem__(self, item): @property def raster_instrument_axes_types(self): - return tuple([self._raster_axis_name] + list(self._single_scan_instrument_axes_types)) + return (self._raster_axis_name, *list(self._single_scan_instrument_axes_types)) @property def sns_instrument_axes_types(self): - return tuple( - [self._sns_axis_name] - + list( + return ( + self._sns_axis_name, + *list( self._single_scan_instrument_axes_types[ self._single_scan_instrument_axes_types != self._slit_step_axis_name ] - ) + ), ) diff --git a/sunraster/tests/__init__.py b/sunraster/tests/__init__.py index 81d3d64c..a94b5bd4 100644 --- a/sunraster/tests/__init__.py +++ b/sunraster/tests/__init__.py @@ -2,10 +2,11 @@ This module contains package tests. """ -import os.path +from pathlib import Path import sunraster -__all__ = ["test_data_path"] +__all__ = ["TEST_DATA_PATH"] -test_data_dir = os.path.join(os.path.dirname(sunraster.__file__), "tests", "data") + +TEST_DATA_PATH = Path(sunraster.__file__).parent / "tests" / "data" diff --git a/sunraster/tests/test_spectrogram.py b/sunraster/tests/test_spectrogram.py index f9e29d4e..114b7318 100644 --- a/sunraster/tests/test_spectrogram.py +++ b/sunraster/tests/test_spectrogram.py @@ -153,7 +153,7 @@ def test_spectral_axis(): def test_spectral_axis_error(): - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="A"): spectrogram_NO_COORDS.spectral_axis @@ -162,7 +162,7 @@ def test_time(): def test_time_error(): - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Time axis not found. 
If in extra_coords, axis"): spectrogram_NO_COORDS.time @@ -171,12 +171,12 @@ def test_exposure_time(): def test_exposure_time_error(): - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Exposure time axis not found."): spectrogram_NO_COORDS.exposure_time @pytest.mark.parametrize( - "input_cube, undo, force, expected_cube", + ("input_cube", "undo", "force", "expected_cube"), [ (spectrogram_DN0, False, False, spectrogram_DN_per_s0), (spectrogram_DN_per_s0, True, False, spectrogram_DN0), @@ -190,17 +190,17 @@ def test_apply_exposure_time_correction(input_cube, undo, force, expected_cube): def test_calculate_exposure_time_correction_error(): - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Exposure time correction has probably already been "): sunraster.spectrogram._calculate_exposure_time_correction(SOURCE_DATA_DN, None, u.s, EXPOSURE_TIME) def test_uncalculate_exposure_time_correction_error(): - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="Exposure time correction has probably already been undone since"): sunraster.spectrogram._uncalculate_exposure_time_correction(SOURCE_DATA_DN, None, u.ct, EXPOSURE_TIME) @pytest.mark.parametrize( - "item,expected", + ("item", "expected"), [ (0, np.array(["b", "c"])), (slice(0, 1), np.array(["a", "b", "c"])), diff --git a/sunraster/tests/test_spectrogramsequence.py b/sunraster/tests/test_spectrogramsequence.py index 21ddaae3..5c4f6ca9 100644 --- a/sunraster/tests/test_spectrogramsequence.py +++ b/sunraster/tests/test_spectrogramsequence.py @@ -244,7 +244,7 @@ def test_exposure_time(): @pytest.mark.parametrize( - "input_sequence, undo, force, expected_sequence", + ("input_sequence", "undo", "force", "expected_sequence"), [ (sequence_DN, False, False, sequence_DN_per_s), (sequence_DN_per_s, True, False, sequence_DN), @@ -258,7 +258,7 @@ def test_apply_exposure_time_correction(input_sequence, undo, force, expected_se @pytest.mark.parametrize( - "input_sequence, expected_raster_axes_types", + ("input_sequence", "expected_raster_axes_types"), [ ( sequence_DN0, @@ -300,12 +300,11 @@ def test_apply_exposure_time_correction(input_sequence, undo, force, expected_se ], ) def test_raster_instrument_axes_types(input_sequence, expected_raster_axes_types): - print(input_sequence.raster_instrument_axes_types, expected_raster_axes_types) assert input_sequence.raster_instrument_axes_types == expected_raster_axes_types @pytest.mark.parametrize( - "input_sequence, expected_sns_axes_types", + ("input_sequence", "expected_sns_axes_types"), [ ( sequence_DN0, diff --git a/sunraster/version.py b/sunraster/version.py index 523a97ab..0bffe24a 100644 --- a/sunraster/version.py +++ b/sunraster/version.py @@ -6,7 +6,7 @@ from ._dev.scm_version import version except ImportError: from ._version import version -except Exception: +except Exception: # NOQA: BLE001 import warnings warnings.warn(f'could not determine {__name__.split(".")[0]} package version; this indicates a broken installation')