diff --git a/.github/workflows/actions-versions-updater.yml b/.github/deactivated/actions-versions-updater.yml
similarity index 86%
rename from .github/workflows/actions-versions-updater.yml
rename to .github/deactivated/actions-versions-updater.yml
index 8dd02f117..b71df10c2 100644
--- a/.github/workflows/actions-versions-updater.yml
+++ b/.github/deactivated/actions-versions-updater.yml
@@ -27,13 +27,13 @@ jobs:
            github.com:443
       - name: Checkout
-        uses: actions/checkout@v4.1.1
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
         with:
           token: ${{ secrets.ACTIONS_VERSION_UPDATER_TOKEN }}
           persist-credentials: true
       - name: Run GitHub Actions Version Updater
-        uses: saadmk11/github-actions-version-updater@v0.8.1
+        uses: saadmk11/github-actions-version-updater@64be81ba69383f81f2be476703ea6570c4c8686e # v0.8.1
         with:
           token: ${{ secrets.ACTIONS_VERSION_UPDATER_TOKEN }}
           committer_email: 'bumpversion[bot]@ouranos.ca'
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 000000000..826840927
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,15 @@
+version: 2
+updates:
+  - package-ecosystem: github-actions
+    directory: /
+    schedule:
+      interval: daily
+      time: '12:00'
+    open-pull-requests-limit: 5
+
+  - package-ecosystem: pip
+    directory: /
+    schedule:
+      interval: daily
+      time: '12:00'
+    open-pull-requests-limit: 5
diff --git a/.github/workflows/add-to-project.yml b/.github/workflows/add-to-project.yml
index f0cfbc6f0..62c0f51d6 100644
--- a/.github/workflows/add-to-project.yml
+++ b/.github/workflows/add-to-project.yml
@@ -5,7 +5,7 @@ on:
     types:
       - opened

-permissions: # added using https://github.com/step-security/secure-repo
+permissions:
   contents: read

 jobs:
@@ -24,7 +24,7 @@ jobs:
           allowed-endpoints: >
             api.github.com:443

-      - uses: actions/add-to-project@v0.5.0
+      - uses: actions/add-to-project@31b3f3ccdc584546fc445612dec3f38ff5edb41c # v0.5.0
         with:
           project-url: https://github.com/orgs/Ouranosinc/projects/6
           github-token: ${{ secrets.ADD_TO_PROJECT_TOKEN }}
diff --git a/.github/workflows/bump-version.yml b/.github/workflows/bump-version.yml
index 679216e7d..7bddd3feb 100644
--- a/.github/workflows/bump-version.yml
+++ b/.github/workflows/bump-version.yml
@@ -23,7 +23,7 @@ on:
       - tox.ini
       - xclim/__init__.py

-permissions: # added using https://github.com/step-security/secure-repo
+permissions:
   contents: read

 jobs:
@@ -43,10 +43,10 @@ jobs:
             files.pythonhosted.org:443
             github.com:443
             pypi.org:443
-      - uses: actions/checkout@v4.1.1
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
         with:
           persist-credentials: false
-      - uses: actions/setup-python@v5.0.0
+      - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
         with:
           python-version: "3.x"
       - name: Config Commit Bot
diff --git a/.github/workflows/cache-cleaner.yml b/.github/workflows/cache-cleaner.yml
index 96d1993fb..0ce283114 100644
--- a/.github/workflows/cache-cleaner.yml
+++ b/.github/workflows/cache-cleaner.yml
@@ -5,7 +5,7 @@ on:
     types:
      - closed

-permissions: # added using https://github.com/step-security/secure-repo
+permissions:
   contents: read

 jobs:
@@ -23,7 +23,7 @@ jobs:
             objects.githubusercontent.com:443

       - name: Check out code
-        uses: actions/checkout@v4.1.1
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1

       - name: Cleanup
         run: |
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 7de9ab54a..b9fe1ac1a 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -49,7 +49,7 @@ jobs:
             pypi.org:443
             uploads.github.com:443
       - name: Checkout repository
-        uses: actions/checkout@v4.1.1
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
         uses: github/codeql-action/init@1245696032ecf7d39f87d54daa406e22ddf769a8
diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
index ba457fc32..c977388df 100644
--- a/.github/workflows/dependency-review.yml
+++ b/.github/workflows/dependency-review.yml
@@ -25,7 +25,7 @@ jobs:
             github.com:443

       - name: 'Checkout Repository'
-        uses: actions/checkout@v4.1.1
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1

       - name: 'Dependency Review'
-        uses: actions/dependency-review-action@c74b580d73376b7750d3d2a50bfb8adc2c937507
+        uses: actions/dependency-review-action@4901385134134e04cec5fbe5ddfe3b2c5bd5d976
diff --git a/.github/workflows/first_pull_request.yml b/.github/workflows/first-pull-request.yml
similarity index 94%
rename from .github/workflows/first_pull_request.yml
rename to .github/workflows/first-pull-request.yml
index 73a474813..a958bcd33 100644
--- a/.github/workflows/first_pull_request.yml
+++ b/.github/workflows/first-pull-request.yml
@@ -5,7 +5,7 @@ on:
     types:
       - opened

-permissions: # added using https://github.com/step-security/secure-repo
+permissions:
   contents: read

 jobs:
@@ -24,7 +24,7 @@ jobs:
           allowed-endpoints: >
             api.github.com:443

-      - uses: actions/github-script@v7.0.1
+      - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
         with:
           script: |
             // Get a list of all issues created by the PR opener
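A note for reviewers: the commit hash behind any of the tags pinned above can be audited (or refreshed by hand, where Dependabot does not cover it) with a plain git ls-remote against the action's repository. A minimal sketch, assuming only that git is installed:

# Resolve the commit that a release tag points to; the printed hash should match
# the pin used in the workflows above. For annotated tags, the trailing '^{}'
# entry in the output gives the underlying commit.
git ls-remote --tags https://github.com/actions/checkout v4.1.1
# b4ffde65f46336ab88eb53be808477a3936bae11  refs/tags/v4.1.1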
diff --git a/.github/workflows/label_on_approval.yml b/.github/workflows/label-on-approval.yml
similarity index 85%
rename from .github/workflows/label_on_approval.yml
rename to .github/workflows/label-on-approval.yml
index 89e5da8e6..e6bcfe0db 100644
--- a/.github/workflows/label_on_approval.yml
+++ b/.github/workflows/label-on-approval.yml
@@ -34,7 +34,7 @@ jobs:
             api.github.com:443

       - name: Label Approved
-        uses: actions/github-script@v7.0.1
+        uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
         with:
           script: |
             github.rest.issues.addLabels({
@@ -58,20 +58,23 @@ jobs:
       - name: Harden Runner
         uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1
         with:
-          egress-policy: audit
+          disable-sudo: true
+          egress-policy: block
+          allowed-endpoints: >
+            api.github.com:443
       - name: Find comment
-        uses: peter-evans/find-comment@v2.4.0
+        uses: peter-evans/find-comment@a54c31d7fa095754bfef525c0c8e5e5674c4b4b1 # v2.4.0
         id: fc
         with:
           issue-number: ${{ github.event.pull_request.number }}
           comment-author: 'github-actions[bot]'
-          body-includes: This Pull Request is coming from a fork and must be manually tagged `approved` in order to perform additional testing
+          body-includes: This Pull Request is coming from a fork and must be manually tagged `approved` in order to perform additional testing.
       - name: Create comment
         if: |
           (steps.fc.outputs.comment-id == '') &&
           (!contains(github.event.pull_request.labels.*.name, 'approved')) &&
           (github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)
-        uses: peter-evans/create-or-update-comment@v3.1.0
+        uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # v3.1.0
         with:
           comment-id: ${{ steps.fc.outputs.comment-id }}
           issue-number: ${{ github.event.pull_request.number }}
@@ -82,7 +85,7 @@ jobs:
       - name: Update comment
         if: |
           contains(github.event.pull_request.labels.*.name, 'approved')
-        uses: peter-evans/create-or-update-comment@v3.1.0
+        uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # v3.1.0
         with:
           comment-id: ${{ steps.fc.outputs.comment-id }}
           issue-number: ${{ github.event.pull_request.number }}
diff --git a/.github/workflows/label.yml b/.github/workflows/label.yml
index bdec57e9d..c1753bc68 100644
--- a/.github/workflows/label.yml
+++ b/.github/workflows/label.yml
@@ -6,10 +6,15 @@
 # https://github.com/actions/labeler/blob/master/README.md

 name: Labeler
-on: [pull_request_target]
+on:
 # Note: potential security risk from this action using pull_request_target.
 # Do not add actions in here which need a checkout of the repo, and do not use any caching in here.
 # See: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target
+  pull_request_target:
+    types:
+      - opened
+      - reopened
+      - synchronize

 permissions:
   contents: read
@@ -30,6 +35,7 @@ jobs:
           egress-policy: block
           allowed-endpoints: >
             api.github.com:443
-      - uses: actions/labeler@v5.0.0
+
+      - uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5.0.0
         with:
           repo-token: "${{ secrets.GITHUB_TOKEN }}"
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 406014fb5..d03342841 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -52,16 +52,16 @@ jobs:
             files.pythonhosted.org:443
             github.com:443
             pypi.org:443
-      - uses: actions/checkout@v4.1.1
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - name: Set up Python${{ matrix.python-version }}
-        uses: actions/setup-python@v5.0.0
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install pylint and tox
         run: pip install pylint tox~=4.0
       - name: Run pylint
         run: |
-          python -m pylint --rcfile=pylintrc --disable=import-error --exit-zero xclim
+          python -m pylint --rcfile=.pylintrc.toml --disable=import-error --exit-zero xclim
       - name: Run linting suite
         run: |
           python -m tox -e lint
@@ -88,9 +88,9 @@ jobs:
             github.com:443
             pypi.org:443
             raw.githubusercontent.com:443
-      - uses: actions/checkout@v4.1.1
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - name: Set up Python${{ matrix.python-version }}
-        uses: actions/setup-python@v5.0.0
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install tox
@@ -126,6 +126,9 @@ jobs:
           - tox-env: py311-coverage-sbck
             python-version: "3.11"
             markers: -m 'not slow'
+          - tox-env: py312-coverage-numba
+            python-version: "3.12"
+            markers: -m 'not slow'
           - tox-env: notebooks_doctests
             python-version: "3.10"
           - tox-env: offline-prefetch
@@ -148,14 +151,14 @@ jobs:
             ppa.launchpadcontent.net:443
             pypi.org:443
             raw.githubusercontent.com:443
-      - uses: actions/checkout@v4.1.1
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - name: Install Eigen3
         if: contains(matrix.tox-env, 'sbck')
         run: |
           sudo apt-get update
           sudo apt-get install libeigen3-dev
       - name: Set up Python${{ matrix.python-version }}
-        uses: actions/setup-python@v5.0.0
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install tox
@@ -172,7 +175,7 @@ jobs:

   test-conda:
     needs: lint
-    name: test-conda-${{ matrix.tox-env }} (Python${{ matrix.python-version }})
+    name: test-conda-Python${{ matrix.python-version }}
     if: |
       contains(github.event.pull_request.labels.*.name, 'approved') ||
       (github.event.review.state == 'approved') ||
@@ -181,8 +184,8 @@ jobs:
     strategy:
       matrix:
         include:
-          - tox-env: py310
-            python-version: "3.10"
+          - python-version: "3.9"
+          - python-version: "3.12"
     defaults:
       run:
         shell: bash -l {0}
@@ -203,9 +206,9 @@ jobs:
             pypi.org:443
             raw.githubusercontent.com:443
             repo.anaconda.com:443
-      - uses: actions/checkout@v4.1.1
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - name: Setup Conda (Micromamba) with Python${{ matrix.python-version }}
-        uses: mamba-org/setup-micromamba@v1.7.3
+        uses: mamba-org/setup-micromamba@8767fb704bd78032e9392f0386bf46950bdd1194 # v1.8.0
         with:
           cache-downloads: true
           cache-environment: true
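The new py312-coverage-numba matrix entry maps onto a tox environment that can also be exercised locally. A rough sketch, assuming a Python 3.12 interpreter is available and that the tox environments forward positional arguments to pytest (as the matrix markers field suggests):

# Reproduce the new CI matrix entry on a local machine:
python -m pip install "tox~=4.0"
python -m tox -e py312-coverage-numba -- -m 'not slow'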
diff --git a/.github/workflows/publish-mastodon.yml b/.github/workflows/publish-mastodon.yml
index c1e0ddf84..37e14233b 100644
--- a/.github/workflows/publish-mastodon.yml
+++ b/.github/workflows/publish-mastodon.yml
@@ -15,7 +15,7 @@ on:
         default: true
         type: boolean

-permissions: # added using https://github.com/step-security/secure-repo
+permissions:
   contents: read

 jobs:
@@ -30,7 +30,7 @@ jobs:
           egress-policy: audit

       - name: Checkout
-        uses: actions/checkout@v4.1.1
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1

       - name: Current Version
         if: ${{ !github.event.inputs.version-tag }}
@@ -63,7 +63,7 @@ jobs:

       - name: Prepare Message
         id: render_template
-        uses: chuhlomin/render-template@v1.9
+        uses: chuhlomin/render-template@a473db625a96c98e519d188812dc22bcaf54ffba # v1.9
         with:
           template: .github/publish-mastodon.template.md
           vars: |
@@ -75,7 +75,7 @@ jobs:

       - name: Send toot to Mastodon
         if: ${{ github.event.inputs.dry-run != 'true' }} || ${{ github.event_name == 'release' }}
-        uses: cbrgm/mastodon-github-action@v1.0.3
+        uses: cbrgm/mastodon-github-action@d98ab3376f941df14d37d5737961de431c0838c6 # v1.0.3
         with:
           message: "${{ steps.render_template.outputs.result }}${{ env.contributors }}"
           visibility: "public"
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
index 2e6d4b891..d34fe5e37 100644
--- a/.github/workflows/publish-pypi.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -5,7 +5,7 @@ on:
     types:
       - published

-permissions: # added using https://github.com/step-security/secure-repo
+permissions:
   contents: read

 jobs:
@@ -21,9 +21,9 @@ jobs:
         uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1
         with:
           egress-policy: audit
-      - uses: actions/checkout@v4.1.1
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - name: Set up Python3
-        uses: actions/setup-python@v5.0.0
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
         with:
           python-version: "3.x"
       - name: Install packaging libraries
@@ -33,4 +33,4 @@ jobs:
         run: |
           python -m flit build
       - name: Publish distribution 📦 to PyPI
-        uses: pypa/gh-action-pypi-publish@v1.8.11
+        uses: pypa/gh-action-pypi-publish@2f6f737ca5f74c637829c0f5c3acd0e29ea5e8bf # v1.8.11
diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml
index c50b2ca22..c0dd56256 100644
--- a/.github/workflows/scorecard.yml
+++ b/.github/workflows/scorecard.yml
@@ -12,7 +12,8 @@ on:
   schedule:
     - cron: '41 8 * * 4'
   push:
-    branches: [ "master" ]
+    branches:
+      - master

 # Declare default permissions as read only.
 permissions: read-all
@@ -26,9 +27,6 @@ jobs:
       security-events: write
       # Needed to publish results and get a badge (see publish_results below).
       id-token: write
-      # Uncomment the permissions below if installing in a private repository.
-      # contents: read
-      # actions: read

     steps:
       - name: Harden Runner
@@ -56,15 +54,12 @@ jobs:
           # - Publish results to OpenSSF REST API for easy access by consumers
           # - Allows the repository to include the Scorecard badge.
           # - See https://github.com/ossf/scorecard-action#publishing-results.
-          # For private repositories:
-          #   - `publish_results` will always be set to `false`, regardless
-          #     of the value entered here.
           publish_results: true

       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@1eb3cb2b3e0f29609092a73eb033bb759a334595
+        uses: actions/upload-artifact@694cdabd8bdb0f10b2cea11669e1bf5453eed0a6
         with:
           name: SARIF file
           path: results.sarif
diff --git a/.github/workflows/tag-testpypi.yml b/.github/workflows/tag-testpypi.yml
index 910648ed8..734706ebb 100644
--- a/.github/workflows/tag-testpypi.yml
+++ b/.github/workflows/tag-testpypi.yml
@@ -5,7 +5,7 @@ on:
     tags:
       - 'v*'

-permissions: # added using https://github.com/step-security/secure-repo
+permissions:
   contents: read

 jobs:
@@ -21,9 +21,9 @@ jobs:
         uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1
         with:
           egress-policy: audit
-      - uses: actions/checkout@v4.1.1
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - name: Set up Python3
-        uses: actions/setup-python@v5.0.0
+        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
         with:
           python-version: "3.x"
       - name: Install packaging libraries
@@ -33,7 +33,7 @@ jobs:
         run: |
           python -m flit build
       - name: Publish distribution 📦 to Test PyPI
-        uses: pypa/gh-action-pypi-publish@v1.8.11
+        uses: pypa/gh-action-pypi-publish@2f6f737ca5f74c637829c0f5c3acd0e29ea5e8bf # v1.8.11
         with:
           repository-url: https://test.pypi.org/legacy/
           skip-existing: true
diff --git a/.github/workflows/testdata_version.yml b/.github/workflows/testdata-version.yml
similarity index 85%
rename from .github/workflows/testdata_version.yml
rename to .github/workflows/testdata-version.yml
index cff8527e0..b8a02da4f 100644
--- a/.github/workflows/testdata_version.yml
+++ b/.github/workflows/testdata-version.yml
@@ -9,13 +9,15 @@ on:
     paths:
       - .github/workflows/main.yml

-permissions: # added using https://github.com/step-security/secure-repo
+permissions:
   contents: read

 jobs:
   use-latest-tag:
     name: Check Latest xclim-testdata Tag
     runs-on: ubuntu-latest
+    if: |
+      (github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name)
     permissions:
       contents: read
       pull-requests: write
@@ -28,7 +30,7 @@ jobs:
           allowed-endpoints: >
             api.github.com:443
             github.com:443
-      - uses: actions/checkout@v4.1.1
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - name: Find xclim-testdata Tag and CI Testing Branch
         run: |
           XCLIM_TESTDATA_TAG="$( \
@@ -44,7 +46,7 @@ jobs:
           echo "Latest xclim-testdata tag: ${{ env.XCLIM_TESTDATA_TAG }}"
           echo "Tag for xclim-testdata in CI: ${{ env.XCLIM_TESTDATA_BRANCH }}"
       - name: Find Comment
-        uses: peter-evans/find-comment@v2.4.0
+        uses: peter-evans/find-comment@a54c31d7fa095754bfef525c0c8e5e5674c4b4b1 # v2.4.0
         id: fc
         with:
           issue-number: ${{ github.event.pull_request.number }}
@@ -52,13 +54,13 @@ jobs:
           body-includes: It appears that this Pull Request modifies the `main.yml` workflow.
       - name: Compare Versions
         if: ${{( env.XCLIM_TESTDATA_TAG != env.XCLIM_TESTDATA_BRANCH )}}
-        uses: actions/github-script@v7.0.1
+        uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
         with:
           script: |
             core.setFailed('Configured `xclim-testdata` tag is not `latest`.')
       - name: Update Failure Comment
         if: ${{ failure() }}
-        uses: peter-evans/create-or-update-comment@v3.1.0
+        uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # v3.1.0
         with:
           comment-id: ${{ steps.fc.outputs.comment-id }}
           issue-number: ${{ github.event.pull_request.number }}
@@ -74,7 +76,7 @@ jobs:
           edit-mode: replace
       - name: Update Success Comment
         if: ${{ success() }}
-        uses: peter-evans/create-or-update-comment@v3.1.0
+        uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # v3.1.0
         with:
           comment-id: ${{ steps.fc.outputs.comment-id }}
           issue-number: ${{ github.event.pull_request.number }}
diff --git a/.github/workflows/upstream.yml b/.github/workflows/upstream.yml
index 2c6a39b10..7ebf57655 100644
--- a/.github/workflows/upstream.yml
+++ b/.github/workflows/upstream.yml
@@ -16,7 +16,7 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true

-permissions: # added using https://github.com/step-security/secure-repo
+permissions:
   contents: read

 jobs:
@@ -54,11 +54,11 @@ jobs:
             pypi.org:443
             raw.githubusercontent.com:443
             repo.anaconda.com:443
-      - uses: actions/checkout@v4.1.1
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
       - name: Setup Conda (Micromamba) with Python${{ matrix.python-version }}
-        uses: mamba-org/setup-micromamba@v1.7.3
+        uses: mamba-org/setup-micromamba@8767fb704bd78032e9392f0386bf46950bdd1194 # v1.8.0
         with:
           cache-downloads: true
           cache-environment: true
@@ -96,6 +96,6 @@ jobs:
           && steps.status.outcome == 'failure'
           && github.event_name == 'schedule'
           && github.repository_owner == 'Ouranosinc'
-        uses: xarray-contrib/issue-from-pytest-log@v1.2.8
+        uses: xarray-contrib/issue-from-pytest-log@138db94bfe4b12ac11fc1aff307ee0835feab403 # v1.2.8
         with:
          log-path: output-${{ matrix.python-version }}-log.jsonl
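The fork guards added in testdata-version.yml above and workflow-warning.yml below both compare the head and base repository names. The same question can be answered for any pull request from the command line; this sketch assumes the GitHub CLI (gh) is installed and uses PR 1606 purely as an example:

# 'isCrossRepository' is true exactly when a pull request's head repository
# differs from its base repository, i.e. when the PR comes from a fork.
gh pr view 1606 --repo Ouranosinc/xclim --json isCrossRepository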
diff --git a/.github/workflows/workflow-warning.yml b/.github/workflows/workflow-warning.yml
new file mode 100644
index 000000000..433881bba
--- /dev/null
+++ b/.github/workflows/workflow-warning.yml
@@ -0,0 +1,69 @@
+name: Workflow Changes Warnings
+
+on:
+  # Note: potential security risk from this action using pull_request_target.
+  # Do not add actions in here which need a checkout of the repo, and do not use any caching in here.
+  # See: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target
+  pull_request_target:
+    types:
+      - opened
+      - reopened
+      - synchronize
+    paths:
+      - .github/workflows/*.yml
+
+permissions:
+  contents: read
+
+jobs:
+  comment-concerning-workflow-changes:
+    name: Comment Concerning Workflow Changes
+    runs-on: ubuntu-latest
+    if: |
+      (github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)
+    permissions:
+      contents: read
+      pull-requests: write
+    steps:
+      - name: Harden Runner
+        uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1
+        with:
+          disable-sudo: true
+          egress-policy: block
+          allowed-endpoints: >
+            api.github.com:443
+      - name: Find comment
+        uses: peter-evans/find-comment@a54c31d7fa095754bfef525c0c8e5e5674c4b4b1 # v2.4.0
+        id: fc
+        with:
+          issue-number: ${{ github.event.pull_request.number }}
+          comment-author: 'github-actions[bot]'
+          body-includes: |
+            This Pull Request modifies GitHub workflows and is coming from a fork.
+      - name: Create comment
+        if: |
+          (steps.fc.outputs.comment-id == '') &&
+          (!contains(github.event.pull_request.labels.*.name, 'approved')) &&
+          (github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)
+        uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # v3.1.0
+        with:
+          comment-id: ${{ steps.fc.outputs.comment-id }}
+          issue-number: ${{ github.event.pull_request.number }}
+          body: |
+            > **Warning**
+            > This Pull Request modifies GitHub Workflows and is coming from a fork.
+            **It is very important for the reviewer to ensure that the workflow changes are appropriate.**
+          edit-mode: replace
+      - name: Update comment
+        if: |
+          contains(github.event.pull_request.labels.*.name, 'approved')
+        uses: peter-evans/create-or-update-comment@23ff15729ef2fc348714a3bb66d2f655ca9066f2 # v3.1.0
+        with:
+          comment-id: ${{ steps.fc.outputs.comment-id }}
+          issue-number: ${{ github.event.pull_request.number }}
+          body: |
+            > **Note**
+            > Changes have been approved by a maintainer.
+          reactions: |
+            hooray
+          edit-mode: append
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 2d331d8da..bbc5cde4f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -26,6 +26,7 @@ repos:
     rev: v0.23.1
     hooks:
       - id: toml-sort-fix
+        exclude: '.pylintrc.toml'
   - repo: https://github.com/adrienverge/yamllint.git
     rev: v1.33.0
     hooks:
@@ -40,9 +41,14 @@ repos:
     hooks:
       - id: isort
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.1.11
+    rev: v0.1.13
     hooks:
       - id: ruff
+  - repo: https://github.com/pylint-dev/pylint
+    rev: v3.0.3
+    hooks:
+      - id: pylint
+        args: [ '--rcfile=.pylintrc.toml', '--errors-only', '--jobs=0', '--disable=import-error' ]
   - repo: https://github.com/pycqa/flake8
     rev: 7.0.0
     hooks:
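The new hook mirrors what CI runs, so the pylintrc migration below can be validated locally before pushing. A sketch, assuming the development environment (pre-commit and pylint installed) is active:

# Run only the new pylint hook across the whole repository, as pre-commit would:
pre-commit run pylint --all-files
# Or call pylint directly with the same flags the hook passes:
python -m pylint --rcfile=.pylintrc.toml --errors-only --jobs=0 --disable=import-error xclim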
diff --git a/pylintrc b/.pylintrc.toml
similarity index 56%
rename from pylintrc
rename to .pylintrc.toml
index cdac8c797..0dccb5086 100644
--- a/pylintrc
+++ b/.pylintrc.toml
@@ -1,642 +1,554 @@
-[MAIN]
+[tool.pylint.main]
+# Analyse import fallback blocks. This can be used to support both Python 2 and 3
+# compatible code, which means that the block might have code that exists only in
+# one or another interpreter, leading to false positives when analysed.
+# analyse-fallback-blocks =

-# Analyse import fallback blocks. This can be used to support both Python 2 and
-# 3 compatible code, which means that the block might have code that exists
-# only in one or another interpreter, leading to false positives when analysed.
-analyse-fallback-blocks=no
+# Clear in-memory caches upon conclusion of linting. Useful if running pylint in
+# a server-like mode.
+# clear-cache-post-run =

-# Load and enable all available extensions. Use --list-extensions to see a list
-# all available extensions.
-#enable-all-extensions=

-# In error mode, messages with a category besides ERROR or FATAL are
-# suppressed, and no reports are done by default. Error mode is compatible with
-# disabling specific errors.
-#errors-only=

-# Always return a 0 (non-error) status code, even if lint errors are found.
-# This is primarily useful in continuous integration scripts.
-#exit-zero=
+# Always return a 0 (non-error) status code, even if lint errors are found. This
+# is primarily useful in continuous integration scripts.
+# exit-zero = false

 # A comma-separated list of package or module names from where C extensions may
 # be loaded. Extensions are loading into the active Python interpreter and may
 # run arbitrary code.
-extension-pkg-allow-list=
+# extension-pkg-allow-list =

 # A comma-separated list of package or module names from where C extensions may
 # be loaded. Extensions are loading into the active Python interpreter and may
 # run arbitrary code. (This is an alternative name to extension-pkg-allow-list
 # for backward compatibility.)
-extension-pkg-whitelist=
+# extension-pkg-whitelist =

 # Return non-zero exit code if any of these messages/categories are detected,
 # even if score is above --fail-under value. Syntax same as enable. Messages
 # specified are enabled, while categories only check already-enabled messages.
-fail-on=
+# fail-on =

 # Specify a score threshold under which the program will exit with error.
-fail-under=10
+fail-under = 10

 # Interpret the stdin as a python script, whose filename needs to be passed as
 # the module_or_package argument.
-#from-stdin=
+# from-stdin =

 # Files or directories to be skipped. They should be base names, not paths.
-ignore=
+ignore = ["CVS"]

 # Add files or directories matching the regular expressions patterns to the
 # ignore-list. The regex matches against paths and can be in Posix or Windows
-# format. Because '\' represents the directory delimiter on Windows systems, it
+# format. Because '\\' represents the directory delimiter on Windows systems, it
 # can't be used as an escape character.
-ignore-paths=
-    docs,
-    xclim/testing/tests,
+# ignore-paths =

-# Files or directories matching the regular expression patterns are skipped.
-# The regex matches against base names, not paths. The default value ignores
-# Emacs file locks
-ignore-patterns=^\.#
+# Files or directories matching the regular expression patterns are skipped. The
+# regex matches against base names, not paths. The default value ignores Emacs
+# file locks
+ignore-patterns = ["^\\.#"]

-# List of module names for which member attributes should not be checked
-# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis). It
-# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=
+# List of module names for which member attributes should not be checked (useful
+# for modules/projects where namespaces are manipulated during runtime and thus
+# existing member attributes cannot be deduced by static analysis). It supports
+# qualified module names, as well as Unix pattern matching.
+ignored-modules = ["xclim.indicators"]

 # Python code to execute, usually for sys.path manipulation such as
 # pygtk.require().
-#init-hook=
+# init-hook =

 # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
 # number of processors available to use, and will cap the count on Windows to
 # avoid hangs.
-jobs=0
+jobs = 1

-# Control the amount of potential inferred values when inferring a single
-# object. This can help the performance when dealing with large functions or
-# complex, nested conditions.
-limit-inference-results=100
+# Control the amount of potential inferred values when inferring a single object.
+# This can help the performance when dealing with large functions or complex,
+# nested conditions.
+limit-inference-results = 100

 # List of plugins (as comma separated values of python module names) to load,
 # usually to register additional checkers.
-load-plugins=
+# load-plugins =

 # Pickle collected data for later comparisons.
-persistent=yes
+persistent = true

-# Minimum Python version to use for version dependent checks. Will default to
-# the version used to run pylint.
-py-version=3.8
+# Minimum Python version to use for version dependent checks. Will default to the
+# version used to run pylint.
+py-version = "3.8"

 # Discover python modules and packages in the file system subtree.
-recursive=no
+# recursive =
+
+# Add paths to the list of the source roots. Supports globbing patterns. The
+# source root is an absolute path or a path relative to the current working
+# directory used to determine a package namespace for modules located under the
+# source root.
+# source-roots =

 # When enabled, pylint would attempt to guess common misconfiguration and emit
 # user-friendly hints instead of false-positive error messages.
-suggestion-mode=yes
+suggestion-mode = true

 # Allow loading of arbitrary C extensions. Extensions are imported into the
 # active Python interpreter and may run arbitrary code.
-unsafe-load-any-extension=no
+# unsafe-load-any-extension =

-# In verbose mode, extra non-checker-related info will be displayed.
-#verbose=
-
-
-[REPORTS]
-
-# Python expression which should return a score less than or equal to 10. You
-# have access to the variables 'fatal', 'error', 'warning', 'refactor',
-# 'convention', and 'info' which contain the number of messages in each
-# category, as well as 'statement' which is the total number of statements
-# analyzed. This score is used by the global evaluation report (RP0004).
-evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
-
-# Template used to display messages. This is a python new-style format string
-# used to format the message information. See doc for all details.
-msg-template=
-
-# Set the output format. Available formats are text, parseable, colorized, json
-# and msvs (visual studio). You can also give a reporter class, e.g.
-# mypackage.mymodule.MyReporterClass.
-#output-format=
-
-# Tells whether to display a full report or only the messages.
-reports=no
-
-# Activate the evaluation score.
-score=yes
-
-
-[MESSAGES CONTROL]
-
-# Only show warnings with the listed confidence levels. Leave empty to show
-# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
-# UNDEFINED.
-confidence=HIGH,
-    CONTROL_FLOW,
-    INFERENCE,
-    INFERENCE_FAILURE,
-    UNDEFINED
-
-# Disable the message, report, category or checker with the given id(s). You
-# can either give multiple identifiers separated by comma (,) or put this
-# option multiple times (only on the command line, not in the configuration
-# file where it should appear only once). You can also use "--disable=all" to
-# disable everything first and then re-enable specific checks. For example, if
-# you want to run only the similarities checker, you can use "--disable=all
-# --enable=similarities". If you want to run only the classes checker, but have
-# no Warning level messages displayed, use "--disable=all --enable=classes
-# --disable=W".
-disable=arguments-differ,
-    arguments-out-of-order,
-    bad-inline-option,
-    deprecated-pragma,
-    file-ignored,
-    invalid-name,
-    invalid-unary-operand-type,
-    line-too-long,
-    locally-disabled,
-    missing-function-docstring,
-    missing-module-docstring,
-    non-ascii-name,
-    pointless-string-statement,
-    protected-access,
-    raw-checker-failed,
-    suppressed-message,
-    too-few-public-methods,
-    too-many-arguments,
-    too-many-branches,
-    too-many-lines,
-    too-many-locals,
-    too-many-nested-blocks,
-    too-many-statements,
-    unspecified-encoding,
-    unused-argument,
-    use-symbolic-message-instead,
-    useless-suppression,
-    wrong-import-order,
-
-# Enable the message, report, category or checker with the given id(s). You can
-# either give multiple identifier separated by comma (,) or put this option
-# multiple time (only on the command line, not in the configuration file where
-# it should appear only once). See also the "--disable" option for examples.
-enable=c-extension-no-member
-
-
-[METHOD_ARGS]
-
-# List of qualified names (i.e., library.method) which require a timeout
-# parameter e.g. 'requests.api.get,requests.api.post'
-timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request
-
-
-[EXCEPTIONS]
-
-# Exceptions that will emit a warning when caught.
-overgeneral-exceptions=builtins.BaseException,
-    builtins.Exception
-
-
-[REFACTORING]
-
-# Maximum number of nested blocks for function / method body
-max-nested-blocks=5
-
-# Complete name of functions that never returns. When checking for
-# inconsistent-return-statements if a never returning function is called then
-# it will be considered as an explicit return statement and no message will be
-# printed.
-never-returning-functions=sys.exit,argparse.parse_error
-
-
-[DESIGN]
-
-# List of regular expressions of class ancestor names to ignore when counting
-# public methods (see R0903)
-exclude-too-few-public-methods=
-
-# List of qualified class names to ignore when counting class parents (see
-# R0901)
-ignored-parents=
-
-# Maximum number of arguments for function / method.
-max-args=5
-
-# Maximum number of attributes for a class (see R0902).
-max-attributes=7
-
-# Maximum number of boolean expressions in an if statement (see R0916).
-max-bool-expr=5
-
-# Maximum number of branch for function / method body.
-max-branches=12
-
-# Maximum number of locals for function / method body.
-max-locals=15
-
-# Maximum number of parents for a class (see R0901).
-max-parents=7
-
-# Maximum number of public methods for a class (see R0904).
-max-public-methods=20
-
-# Maximum number of return / yield for function / method body.
-max-returns=6
-
-# Maximum number of statements in function / method body.
-max-statements=50
-
-# Minimum number of public methods for a class (see R0903).
-min-public-methods=2
-
-
-[IMPORTS]
-
-# List of modules that can be imported at any level, not just the top level
-# one.
-allow-any-import-level=
-
-# Allow wildcard imports from modules that define __all__.
-allow-wildcard-with-all=yes
-
-# Deprecated modules which should not be used, separated by a comma.
-deprecated-modules=
-
-# Output a graph (.gv or any supported image format) of external dependencies
-# to the given file (report RP0402 must not be disabled).
-ext-import-graph=
-
-# Output a graph (.gv or any supported image format) of all (i.e. internal and
-# external) dependencies to the given file (report RP0402 must not be
-# disabled).
-import-graph=
-
-# Output a graph (.gv or any supported image format) of internal dependencies
-# to the given file (report RP0402 must not be disabled).
-int-import-graph=
-
-# Force import order to recognize a module as part of the standard
-# compatibility libraries.
-known-standard-library=
-
-# Force import order to recognize a module as part of a third party library.
-known-third-party=enchant
-
-# Couples of modules and preferred modules, separated by a comma.
-preferred-modules=
-
-
-[CLASSES]
-
-# Warn about protected attribute access inside special methods
-check-protected-access-in-special-methods=no
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,
-    __new__,
-    setUp,
-    __post_init__
-
-# List of member names, which should be excluded from the protected access
-# warning.
-exclude-protected=_asdict,
-    _fields,
-    _replace,
-    _source,
-    _make
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=cls
-
-
-[BASIC]
+[tool.pylint.basic]

 # Naming style matching correct argument names.
-argument-naming-style=snake_case
+argument-naming-style = "snake_case"

-# Regular expression matching correct argument names. Overrides argument-
-# naming-style. If left empty, argument names will be checked with the set
-# naming style.
-#argument-rgx=
+# Regular expression matching correct argument names. Overrides argument-naming-
+# style. If left empty, argument names will be checked with the set naming style.
+# argument-rgx =

 # Naming style matching correct attribute names.
-attr-naming-style=snake_case
+attr-naming-style = "snake_case"

 # Regular expression matching correct attribute names. Overrides attr-naming-
 # style. If left empty, attribute names will be checked with the set naming
 # style.
-#attr-rgx=
+# attr-rgx =

 # Bad variable names which should always be refused, separated by a comma.
-bad-names=foo,
-    bar,
-    baz,
-    toto,
-    tutu,
-    tata
+bad-names = ["foo", "bar", "baz", "toto", "tutu", "tata"]

 # Bad variable names regexes, separated by a comma. If names match any regex,
 # they will always be refused
-bad-names-rgxs=
+# bad-names-rgxs =

 # Naming style matching correct class attribute names.
-class-attribute-naming-style=any
+class-attribute-naming-style = "any"

 # Regular expression matching correct class attribute names. Overrides class-
 # attribute-naming-style. If left empty, class attribute names will be checked
 # with the set naming style.
-#class-attribute-rgx=
+# class-attribute-rgx =

 # Naming style matching correct class constant names.
-class-const-naming-style=UPPER_CASE
+class-const-naming-style = "UPPER_CASE"

 # Regular expression matching correct class constant names. Overrides class-
 # const-naming-style. If left empty, class constant names will be checked with
 # the set naming style.
-#class-const-rgx=
+# class-const-rgx =

 # Naming style matching correct class names.
-class-naming-style=PascalCase
+class-naming-style = "PascalCase"

-# Regular expression matching correct class names. Overrides class-naming-
-# style. If left empty, class names will be checked with the set naming style.
-#class-rgx=
+# Regular expression matching correct class names. Overrides class-naming-style.
+# If left empty, class names will be checked with the set naming style.
+# class-rgx =

 # Naming style matching correct constant names.
-const-naming-style=UPPER_CASE
+const-naming-style = "UPPER_CASE"

 # Regular expression matching correct constant names. Overrides const-naming-
-# style. If left empty, constant names will be checked with the set naming
-# style.
-#const-rgx=
+# style. If left empty, constant names will be checked with the set naming style.
+# const-rgx =

-# Minimum line length for functions/classes that require docstrings, shorter
-# ones are exempt.
-docstring-min-length=-1
+# Minimum line length for functions/classes that require docstrings, shorter ones
+# are exempt.
+docstring-min-length = -1

 # Naming style matching correct function names.
-function-naming-style=snake_case
+function-naming-style = "snake_case"

-# Regular expression matching correct function names. Overrides function-
-# naming-style. If left empty, function names will be checked with the set
-# naming style.
-#function-rgx=
+# Regular expression matching correct function names. Overrides function-naming-
+# style. If left empty, function names will be checked with the set naming style.
+# function-rgx =

 # Good variable names which should always be accepted, separated by a comma.
-good-names=i,
-    j,
-    k,
-    _,
-    da,
-    ds,
-
+good-names = ["i", "j", "k", "ex", "Run", "_"]

 # Good variable names regexes, separated by a comma. If names match any regex,
 # they will always be accepted
-good-names-rgxs=
+# good-names-rgxs =

 # Include a hint for the correct naming format with invalid-name.
-include-naming-hint=no
+# include-naming-hint =

 # Naming style matching correct inline iteration names.
-inlinevar-naming-style=any
+inlinevar-naming-style = "any"

 # Regular expression matching correct inline iteration names. Overrides
 # inlinevar-naming-style. If left empty, inline iteration names will be checked
 # with the set naming style.
-#inlinevar-rgx=
+# inlinevar-rgx =

 # Naming style matching correct method names.
-method-naming-style=snake_case
+method-naming-style = "snake_case"

 # Regular expression matching correct method names. Overrides method-naming-
 # style. If left empty, method names will be checked with the set naming style.
-#method-rgx=
+# method-rgx =

 # Naming style matching correct module names.
-module-naming-style=snake_case
+module-naming-style = "snake_case"

 # Regular expression matching correct module names. Overrides module-naming-
 # style. If left empty, module names will be checked with the set naming style.
-#module-rgx=
+# module-rgx =

-# Colon-delimited sets of names that determine each other's naming style when
-# the name regexes allow several styles.
-name-group=
+# Colon-delimited sets of names that determine each other's naming style when the
+# name regexes allow several styles.
+# name-group =

-# Regular expression which should only match function or class names that do
-# not require a docstring.
-no-docstring-rgx=^_
+# Regular expression which should only match function or class names that do not
+# require a docstring.
+no-docstring-rgx = "^_"

 # List of decorators that produce properties, such as abc.abstractproperty. Add
-# to this list to register other decorators that produce valid properties.
-# These decorators are taken in consideration only for invalid-name.
-property-classes=abc.abstractproperty
+# to this list to register other decorators that produce valid properties. These
+# decorators are taken in consideration only for invalid-name.
+property-classes = ["abc.abstractproperty"]
+
+# Regular expression matching correct type alias names. If left empty, type alias
+# names will be checked with the set naming style.
+# typealias-rgx =

 # Regular expression matching correct type variable names. If left empty, type
 # variable names will be checked with the set naming style.
-#typevar-rgx=
+# typevar-rgx =

 # Naming style matching correct variable names.
-variable-naming-style=snake_case
+variable-naming-style = "snake_case"

-# Regular expression matching correct variable names. Overrides variable-
-# naming-style. If left empty, variable names will be checked with the set
-# naming style.
-#variable-rgx=
+# Regular expression matching correct variable names. Overrides variable-naming-
+# style. If left empty, variable names will be checked with the set naming style.
+# variable-rgx =

-
-[SIMILARITIES]
-
-# Comments are removed from the similarity computation
-ignore-comments=yes
-
-# Docstrings are removed from the similarity computation
-ignore-docstrings=yes
-
-# Imports are removed from the similarity computation
-ignore-imports=yes
-
-# Signatures are removed from the similarity computation
-ignore-signatures=yes
-
-# Minimum lines number of a similarity.
-min-similarity-lines=4
+[tool.pylint.classes]
+# Warn about protected attribute access inside special methods
+# check-protected-access-in-special-methods =
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods = ["__init__", "__new__", "setUp", "asyncSetUp", "__post_init__"]
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected = ["_asdict", "_fields", "_replace", "_source", "_make", "os._exit"]
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg = ["cls"]
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg = ["mcs"]

-
-[LOGGING]
-
-# The type of string formatting that logging methods do. `old` means using %
-# formatting, `new` is for `{}` formatting.
-logging-format-style=old
-
-# Logging modules to check that the string format arguments are in logging
-# function parameter format.
-logging-modules=logging
-
-
-[VARIABLES]
-
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid defining new builtins when possible.
-additional-builtins=
-
-# Tells whether unused global variables should be treated as a violation.
-allow-global-unused-variables=yes
-
-# List of names allowed to shadow builtins
-allowed-redefined-builtins=
-
-# List of strings which can identify a callback function by name. A callback
-# name must start or end with one of those strings.
-callbacks=cb_,
-    _cb
-
-# A regular expression matching the name of dummy variables (i.e. expected to
-# not be used).
-dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
-
-# Argument names that match this expression will be ignored.
-ignored-argument-names=_.*|^ignored_|^unused_
-
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
-
-# List of qualified module names which can have objects that can redefine
-# builtins.
-redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
-
-
-[SPELLING]
-
-# Limits count of emitted suggestions for spelling mistakes.
-max-spelling-suggestions=4
-
-# Spelling dictionary name. Available dictionaries: en (aspell), en_AU
-# (aspell), en_CA (aspell), en_GB (aspell), en_US (aspell).
-spelling-dict=
-
-# List of comma separated words that should be considered directives if they
-# appear at the beginning of a comment and should not be checked.
-spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
-
-# List of comma separated words that should not be checked.
-spelling-ignore-words=
-
-# A path to a file that contains the private dictionary; one word per line.
-spelling-private-dict-file=
-
-# Tells whether to store unknown words to the private dictionary (see the
-# --spelling-private-dict-file option) instead of raising a message.
-spelling-store-unknown-words=no
-
-
-[FORMAT]
-
-# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
-expected-line-ending-format=
-
-# Regexp for a line that is allowed to be longer than the limit.
-ignore-long-lines=^\s*(# )?<?https?://\S+>?$
-
-# Number of spaces of indent required inside a hanging or continued line.
-indent-after-paren=4
-
-# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
-# tab).
-indent-string='    '
-
-# Maximum number of characters on a single line.
-max-line-length=100
-
-# Maximum number of lines in a module.
-max-module-lines=1000
-
-# Allow the body of a class to be on the same line as the declaration if body
-# contains single statement.
-single-line-class-stmt=no
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-single-line-if-stmt=no
-
-
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,
-    XXX,
-    TODO
-
-# Regular expression of note tags to take in consideration.
-notes-rgx=
-
-
-[TYPECHECK]
-
-# List of decorators that produce context managers, such as
-# contextlib.contextmanager. Add to this list to register other decorators that
-# produce valid context managers.
-contextmanager-decorators=contextlib.contextmanager
-
-# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E1101 when accessed. Python regular
-# expressions are accepted.
-generated-members=
-
-# Tells whether to warn about missing members when the owner of the attribute
-# is inferred to be None.
-ignore-none=yes
-
-# This flag controls whether pylint should warn about no-member and similar
-# checks whenever an opaque object is returned when inferring. The inference
-# can return multiple potential results while evaluating a Python object, but
-# some branches might not be evaluated, which results in partial inference. In
-# that case, it might be useful to still emit no-member and other checks for
-# the rest of the inferred objects.
-ignore-on-opaque-inference=yes
-
-# List of symbolic message names to ignore for Mixin members.
-ignored-checks-for-mixins=no-member,
-    not-async-context-manager,
-    not-context-manager,
-    attribute-defined-outside-init
-
-# List of class names for which member attributes should not be checked (useful
-# for classes with dynamically set attributes). This supports the use of
-# qualified names.
-ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace
-
-# Show a hint with possible names when a member name was not found. The aspect
-# of finding the hint is based on edit distance.
-missing-member-hint=yes
-
-# The minimum edit distance a name should have in order to be considered a
-# similar match for a missing member name.
-missing-member-hint-distance=1
-
-# The total number of similar names that should be taken in consideration when
-# showing a hint for a missing member.
-missing-member-max-choices=1
-
-# Regex pattern to define which classes are considered mixins.
-mixin-class-rgx=.*[Mm]ixin
-
-# List of decorators that change the signature of a decorated function.
-signature-mutators=
-
-
-[STRING]
-
-# This flag controls whether inconsistent-quotes generates a warning when the
-# character used as a quote delimiter is used inconsistently within a module.
-check-quote-consistency=no
-
-# This flag controls whether the implicit-str-concat should generate a warning
-# on implicit string concatenation in sequences defined over several lines.
-check-str-concat-over-line-jumps=no
+[tool.pylint.design]
+# List of regular expressions of class ancestor names to ignore when counting
+# public methods (see R0903)
+# exclude-too-few-public-methods =
+
+# List of qualified class names to ignore when counting class parents (see R0901)
+# ignored-parents =
+
+# Maximum number of arguments for function / method.
+max-args = 15
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes = 7
+
+# Maximum number of boolean expressions in an if statement (see R0916).
+max-bool-expr = 5
+
+# Maximum number of branch for function / method body.
+max-branches = 30
+
+# Maximum number of locals for function / method body.
+max-locals = 50
+
+# Maximum number of parents for a class (see R0901).
+max-parents = 7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods = 20
+
+# Maximum number of return / yield for function / method body.
+max-returns = 13
+
+# Maximum number of statements in function / method body.
+max-statements = 100
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods = 2
+
+[tool.pylint.exceptions]
+# Exceptions that will emit a warning when caught.
+overgeneral-exceptions = ["builtins.BaseException", "builtins.Exception"]
+
+[tool.pylint.format]
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+# expected-line-ending-format =
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines = "^\\s*(# )?<?https?://\\S+>?$"
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren = 4
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string = "    "
+
+# Maximum number of characters on a single line.
+max-line-length = 150
+
+# Maximum number of lines in a module.
+max-module-lines = 1500
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+# single-line-class-stmt =
+
+# Allow the body of an if to be on the same line as the test if there is no else.
+# single-line-if-stmt =
+
+[tool.pylint.imports]
+# List of modules that can be imported at any level, not just the top level one.
+# allow-any-import-level =
+
+# Allow explicit reexports by alias from a package __init__.
+# allow-reexport-from-package =
+
+# Allow wildcard imports from modules that define __all__.
+# allow-wildcard-with-all =
+
+# Deprecated modules which should not be used, separated by a comma.
+# deprecated-modules =
+
+# Output a graph (.gv or any supported image format) of external dependencies to
+# the given file (report RP0402 must not be disabled).
+# ext-import-graph =
+
+# Output a graph (.gv or any supported image format) of all (i.e. internal and
+# external) dependencies to the given file (report RP0402 must not be disabled).
+# import-graph =
+
+# Output a graph (.gv or any supported image format) of internal dependencies to
+# the given file (report RP0402 must not be disabled).
+# int-import-graph =
+
+# Force import order to recognize a module as part of the standard compatibility
+# libraries.
+# known-standard-library =
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party = ["enchant"]
+
+# Couples of modules and preferred modules, separated by a comma.
+# preferred-modules =
+
+[tool.pylint.logging]
+# The type of string formatting that logging methods do. `old` means using %
+# formatting, `new` is for `{}` formatting.
+logging-format-style = "old"
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format.
+logging-modules = ["logging"]
+
+[tool.pylint."messages control"]
+# Only show warnings with the listed confidence levels. Leave empty to show all.
+# Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
+confidence = ["HIGH", "CONTROL_FLOW", "INFERENCE", "INFERENCE_FAILURE", "UNDEFINED"]
+
+# Disable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times (only on the command line, not in the configuration file where
+# it should appear only once). You can also use "--disable=all" to disable
+# everything first and then re-enable specific checks. For example, if you want
+# to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W".
+disable = [
+    "arguments-differ",
+    "bad-inline-option",
+    "deprecated-pragma",
+    "file-ignored",
+    "invalid-name",
+    "invalid-unary-operand-type",
+    "locally-disabled",
+    "missing-module-docstring",
+    "no-member",
+    "protected-access",
+    "raw-checker-failed",
+    "redefined-outer-name",
+    "superfluous-parens",
+    "suppressed-message",
+    "unused-argument",
+    "use-symbolic-message-instead",
+    "useless-suppression"
+]
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time (only on the command line, not in the configuration file where it
+# should appear only once). See also the "--disable" option for examples.
+enable = ["c-extension-no-member"]
+
+[tool.pylint.method_args]
+# List of qualified names (i.e., library.method) which require a timeout
+# parameter e.g. 'requests.api.get,requests.api.post'
+timeout-methods = ["requests.api.delete", "requests.api.get", "requests.api.head", "requests.api.options", "requests.api.patch", "requests.api.post", "requests.api.put", "requests.api.request"]
+
+[tool.pylint.miscellaneous]
+# List of note tags to take in consideration, separated by a comma.
+notes = ["FIXME", "XXX", "TODO"]
+
+# Regular expression of note tags to take in consideration.
+# notes-rgx =
+
+[tool.pylint.refactoring]
+# Maximum number of nested blocks for function / method body
+max-nested-blocks = 10
+
+# Complete name of functions that never returns. When checking for inconsistent-
+# return-statements if a never returning function is called then it will be
+# considered as an explicit return statement and no message will be printed.
+never-returning-functions = ["sys.exit", "argparse.parse_error"]
+
+[tool.pylint.reports]
+# Python expression which should return a score less than or equal to 10. You
+# have access to the variables 'fatal', 'error', 'warning', 'refactor',
+# 'convention', and 'info' which contain the number of messages in each category,
+# as well as 'statement' which is the total number of statements analyzed. This
+# score is used by the global evaluation report (RP0004).
+evaluation = "max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))"
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details.
+# msg-template =
+
+# Set the output format. Available formats are text, parseable, colorized, json
+# and msvs (visual studio). You can also give a reporter class, e.g.
+# mypackage.mymodule.MyReporterClass.
+# output-format =
+
+# Tells whether to display a full report or only the messages.
+# reports =
+
+# Activate the evaluation score.
+score = true
+
+[tool.pylint.similarities]
+# Comments are removed from the similarity computation
+ignore-comments = true
+
+# Docstrings are removed from the similarity computation
+ignore-docstrings = true
+
+# Imports are removed from the similarity computation
+ignore-imports = true
+
+# Signatures are removed from the similarity computation
+ignore-signatures = true
+
+# Minimum lines number of a similarity.
+min-similarity-lines = 10
+
+[tool.pylint.spelling]
+# Limits count of emitted suggestions for spelling mistakes.
+max-spelling-suggestions = 4
+
+# Spelling dictionary name. No available dictionaries: you need to install both
+# the python package and the system dependency for enchant to work.
+# spelling-dict =
+
+# List of comma separated words that should be considered directives if they
+# appear at the beginning of a comment and should not be checked.
+spelling-ignore-comment-directives = "fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:"
+
+# List of comma separated words that should not be checked.
+# spelling-ignore-words =
+
+# A path to a file that contains the private dictionary; one word per line.
+# spelling-private-dict-file =
+
+# Tells whether to store unknown words to the private dictionary (see the
+# --spelling-private-dict-file option) instead of raising a message.
+# spelling-store-unknown-words =
+
+[tool.pylint.typecheck]
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators = ["contextlib.contextmanager"]
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+# generated-members =
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# class is considered mixin if its name matches the mixin-class-rgx option.
+# Tells whether to warn about missing members when the owner of the attribute is
+# inferred to be None.
+ignore-none = true
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference can
+# return multiple potential results while evaluating a Python object, but some
+# branches might not be evaluated, which results in partial inference. In that
+# case, it might be useful to still emit no-member and other checks for the rest
+# of the inferred objects.
+ignore-on-opaque-inference = true
+
+# List of symbolic message names to ignore for Mixin members.
+ignored-checks-for-mixins = ["no-member", "not-async-context-manager", "not-context-manager", "attribute-defined-outside-init"]
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes = ["optparse.Values", "thread._local", "_thread._local", "argparse.Namespace"]
+
+# Show a hint with possible names when a member name was not found. The aspect of
+# finding the hint is based on edit distance.
+missing-member-hint = true
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance = 1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices = 1
+
+# Regex pattern to define which classes are considered mixins.
+mixin-class-rgx = ".*[Mm]ixin"
+
+# List of decorators that change the signature of a decorated function.
+signature-mutators = ["xclim.sdba.base.map_groups"]
+
+[tool.pylint.variables]
+# List of additional names supposed to be defined in builtins. Remember that you
+# should avoid defining new builtins when possible.
+# additional-builtins =
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables = true
+
+# List of names allowed to shadow builtins
+# allowed-redefined-builtins =
+
+# List of strings which can identify a callback function by name. A callback name
+# must start or end with one of those strings.
+callbacks = ["cb_", "_cb"]
+
+# A regular expression matching the name of dummy variables (i.e. expected to not
+# be used).
+dummy-variables-rgx = "_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_"
+
+# Argument names that match this expression will be ignored.
+ignored-argument-names = "_.*|^ignored_|^unused_"
+
+# Tells whether we should check for unused import in __init__ files.
+# init-import =
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules = ["six.moves", "past.builtins", "future.builtins", "builtins", "io"]
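Since pylint reads TOML natively, a quick local check that the relocated configuration parses and behaves as intended might look like this (a sketch; assumes pylint 3.x is installed):

# Confirm the TOML file is accepted and list which messages end up
# enabled or disabled under it:
python -m pylint --rcfile=.pylintrc.toml --list-msgs-enabled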
-missing-member-hint-distance=1 +missing-member-hint-distance = 1 # The total number of similar names that should be taken into consideration when # showing a hint for a missing member. -missing-member-max-choices=1 +missing-member-max-choices = 1 # Regex pattern to define which classes are considered mixins. -mixin-class-rgx=.*[Mm]ixin +mixin-class-rgx = ".*[Mm]ixin" # List of decorators that change the signature of a decorated function. -signature-mutators= +signature-mutators = ["xclim.sdba.base.map_groups"] + +[tool.pylint.variables] +# List of additional names supposed to be defined in builtins. Remember that you +# should avoid defining new builtins when possible. +# additional-builtins = + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables = true +# List of names allowed to shadow builtins. +# allowed-redefined-builtins = + +# List of strings which can identify a callback function by name. A callback name +# must start or end with one of those strings. +callbacks = ["cb_", "_cb"] -[STRING] +# A regular expression matching the name of dummy variables (i.e., expected not to +# be used). +dummy-variables-rgx = "_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_" -# This flag controls whether inconsistent-quotes generates a warning when the -# character used as a quote delimiter is used inconsistently within a module. -check-quote-consistency=no +# Argument names that match this expression will be ignored. +ignored-argument-names = "_.*|^ignored_|^unused_" -# This flag controls whether the implicit-str-concat should generate a warning -# on implicit string concatenation in sequences defined over several lines. -check-str-concat-over-line-jumps=no +# Tells whether we should check for unused imports in __init__ files. +# init-import = + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules = ["six.moves", "past.builtins", "future.builtins", "builtins", "io"] diff --git a/CHANGES.rst b/CHANGES.rst index b68e341bf..4ccad83a6 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -8,6 +8,7 @@ Contributors to this version: Juliette Lavoie (:user:`juliettelavoie`), Pascal B Announcements ^^^^^^^^^^^^^ +* `xclim` now officially supports Python3.12 (requires `numba>=0.59.0`). (:pull:`1613`). * `xclim` now adheres to the `Semantic Versioning 2.0.0 `_ specification. (:issue:`1556`, :pull:`1569`). * The `xclim` repository now uses `GitHub Discussions `_ to offer help for users, coordinate translation efforts, and support general Q&A for the `xclim` community. The `xclim` `Gitter` room has been deprecated in favour of GitHub Discussions. (:issue:`1571`, :pull:`1572`). * For secure correspondence, `xclim` now offers a PGP key for users to encrypt sensitive communications. For more information, see the ``SECURITY.md``. (:issue:`1181`, :pull:`1604`). @@ -29,6 +30,7 @@ Bug fixes ^^^^^^^^^ * Fixed passing ``missing=0`` to ``xclim.core.calendar.convert_calendar``. (:issue:`1562`, :pull:`1563`). * Fix wrong `window` attributes in ``xclim.indices.standardized_precipitation_index``, ``xclim.indices.standardized_precipitation_evapotranspiration_index``. (:issue:`1552`, :pull:`1554`). +* Fix the daily case `freq='D'` of ``xclim.stats.preprocess_standardized_index`` (see the usage sketch after this list). (:issue:`1602`, :pull:`1607`). * Several spelling mistakes have been corrected within the documentation and codebase. (:pull:`1576`).
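A minimal sketch of the daily (`freq='D'`) standardized-index path exercised by the fix above, mirroring the new tests added to ``tests/test_indices.py`` further down in this patch. The dataset, time slices, and distribution/method choices come from those tests; the exact keyword signatures of ``standardized_index_fit_params`` and ``standardized_precipitation_index`` are inferred from how the tests call them and should be treated as assumptions, not documented API.

    # Sketch only: mirrors the new daily SPI tests in this patch.
    import numpy as np
    import xarray as xr

    from xclim import indices as xci
    from xclim.core.calendar import convert_calendar

    ds = xr.open_dataset("sdba/CanESM2_1950-2100.nc").isel(location=1)  # test dataset
    # The daily tests convert to a 366_day calendar so results can be compared
    # against the `monocongo/climate_indices` reference implementation.
    ds = convert_calendar(ds, "366_day", missing=np.NaN)
    pr = ds.pr.sel(time=slice("1998", "2000"))
    pr_cal = ds.pr.sel(time=slice("1950", "1980"))
    params = xci.stats.standardized_index_fit_params(
        pr_cal, freq="D", window=1, dist="gamma", method="APP"  # assumed keywords
    )
    spi = xci.standardized_precipitation_index(pr, params=params)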
Internal changes @@ -39,7 +41,12 @@ Internal changes * Updated the CONTRIBUTING.rst directions to showcase the new versioning system. (:issue:`1557`, :pull:`1573`). * The `codespell` library is now a development dependency for the `dev` installation recipe with configurations found within `pyproject.toml`. This is also now a linting step and integrated as a `pre-commit` hook. For more information, see the `codespell documentation `_ (:pull:`1576`). * Climate indicators search page now prioritizes the "official" indicators (atmos, land, seaIce and generic); virtual submodules can be added to the search through a checkbox option. (:issue:`1559`, :pull:`1593`). - +* The OpenSSF StepSecurity bot has contributed some changes to the workflows and pre-commit. (:issue:`1181`, :pull:`1606`): + * Dependabot has been configured to monitor the `xclim` repository for dependency updates. The ``actions-version-updater.yml`` workflow has been deprecated. + * GitHub Actions are now pinned to their commit hashes to prevent unexpected changes in the future. + * A new GitHub Workflow (``workflow-warning.yml``) has been added to warn maintainers when a forked repository has been used to open a Pull Request that modifies GitHub Workflows. + * `pylint` has been configured to provide additional static-analysis checks of the `xclim` codebase, and now also runs as part of `xclim`'s `pre-commit` hooks. + * Some small adjustments have been made to code organization to address `pylint` errors. v0.47.0 (2023-12-01) -------------------- diff --git a/environment.yml b/environment.yml index 04ae0e482..3ce0cefa9 100644 --- a/environment.yml +++ b/environment.yml @@ -1,78 +1,79 @@ name: xclim channels: - - conda-forge - - defaults + - numba # Added to gain access to Python3.12-compatible numba release candidates. + - conda-forge + - defaults dependencies: - - python >=3.8 - - astroid - - boltons >=20.1 - - bottleneck >=1.3.1 - - cf_xarray >=0.6.1 - - cftime >=1.4.1 - - Click >=8.1 - - dask >=2.6.0 - - importlib-resources # For Python3.8 - - jsonpickle - - lmoments3 - - numba - - numpy >=1.16 - - pandas >=0.23,<2.2 - - pint >=0.9 - - poppler >=0.67 - - pyyaml - - scikit-learn >=0.21.3 - - scipy >=1.2 - - statsmodels - - xarray >=2022.06.0,<2023.11.0 - - yamale - # Extras - - eofs - - flox + - python >=3.8 + - astroid + - boltons >=20.1 + - bottleneck >=1.3.1 + - cf_xarray >=0.6.1 + - cftime >=1.4.1 + - Click >=8.1 + - dask >=2.6.0 + - importlib-resources # For Python3.8 + - jsonpickle + - lmoments3 + - numba + - numpy >=1.16 + - pandas >=0.23,<2.2 + - pint >=0.9 + - poppler >=0.67 + - pyyaml + - scikit-learn >=0.21.3 + - scipy >=1.2 + - statsmodels + - xarray >=2022.06.0,<2023.11.0 + - yamale + # Extras + - eofs + - flox # Testing and development dependencies - - black >=22.12 - - blackdoc - - bump-my-version - - cairosvg - - codespell - - coverage - - distributed >=2.0 - - filelock - - flake8 - - flake8-rst-docstrings - - flit - - h5netcdf - - ipykernel - - ipython - - matplotlib - - mypy - - nbqa - - nbsphinx - - nbval - - nc-time-axis - - netCDF4 >=1.4 - - notebook - - platformdirs - - pooch - - pre-commit - - pybtex - - pylint - - pytest - - pytest-cov - - pytest-socket - - pytest-xdist >=3.2 - - ruff >=0.1.0 - - sphinx - - sphinx-autodoc-typehints - - sphinx-codeautolink - - sphinx-copybutton - - sphinx_rtd_theme >=1.0 - - sphinxcontrib-bibtex - - tokenize-rt - - tox -# - tox-conda # Will be added when a tox@v4.0+ compatible plugin is released.
- - xdoctest - - yamllint - - pip - - pip: - - flake8-alphabetize - - sphinxcontrib-svg2pdfconverter + - black >=22.12 + - blackdoc + - bump-my-version + - cairosvg + - codespell + - coverage + - distributed >=2.0 + - filelock + - flake8 + - flake8-rst-docstrings + - flit + - h5netcdf + - ipykernel + - ipython + - matplotlib + - mypy + - nbqa + - nbsphinx + - nbval + - nc-time-axis + - netCDF4 >=1.4 + - notebook + - platformdirs + - pooch + - pre-commit + - pybtex + - pylint + - pytest + - pytest-cov + - pytest-socket + - pytest-xdist >=3.2 + - ruff >=0.1.0 + - sphinx + - sphinx-autodoc-typehints + - sphinx-codeautolink + - sphinx-copybutton + - sphinx-rtd-theme >=1.0 + - sphinxcontrib-bibtex + - tokenize-rt + - tox +# - tox-conda # Will be added when a tox@v4.0+ compatible plugin is released. + - xdoctest + - yamllint + - pip + - pip: + - flake8-alphabetize + - sphinxcontrib-svg2pdfconverter diff --git a/pyproject.toml b/pyproject.toml index 631017ff5..855f13943 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,6 +27,7 @@ classifiers = [ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Topic :: Scientific/Engineering :: Atmospheric Science" ] dynamic = ["description", "version"] @@ -292,7 +293,7 @@ lines-after-imports = 1 no-lines-before = ["future", "standard-library"] [tool.ruff.mccabe] -max-complexity = 15 +max-complexity = 20 [tool.ruff.per-file-ignores] "docs/*.py" = ["D100", "D101", "D102", "D103"] diff --git a/tests/test_indicators.py b/tests/test_indicators.py index ee11a86ca..80ee3901f 100644 --- a/tests/test_indicators.py +++ b/tests/test_indicators.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +# pylint: disable=unsubscriptable-object,function-redefined # Tests for the Indicator objects from __future__ import annotations @@ -685,7 +685,7 @@ def test_indicator_from_dict(): # Wrap a multi-output ind d = dict(base="wind_speed_from_vector") - ind = Indicator.from_dict(d, identifier="wsfv", module="test") + Indicator.from_dict(d, identifier="wsfv", module="test") def test_indicator_errors(): diff --git a/tests/test_indices.py b/tests/test_indices.py index 5a7e0eb5d..51fea3eac 100644 --- a/tests/test_indices.py +++ b/tests/test_indices.py @@ -21,7 +21,7 @@ import xarray as xr from xclim import indices as xci -from xclim.core.calendar import date_range, percentile_doy +from xclim.core.calendar import convert_calendar, date_range, percentile_doy from xclim.core.options import set_options from xclim.core.units import ValidationError, convert_units_to, units @@ -456,13 +456,13 @@ def test_effective_growing_degree_days( np.testing.assert_array_equal(out, np.array([np.NaN, expected])) - # gamma reference results: Obtained with `monocongo/climate_indices` library - # fisk reference results: Obtained with R package `SPEI` + # gamma/APP reference results: Obtained with `monocongo/climate_indices` library + # MS/fisk/ML reference results: Obtained with R package `SPEI` # Using the method `APP` in XClim matches the method from monocongo, hence the very low # tolerance possible. # Repeated tests with lower tolerance means we want a more precise comparison, so we compare # the current version of XClim with the version where the test was implemented - # TODO : Add tests for SPI_daily. 
+ @pytest.mark.slow @pytest.mark.parametrize( "freq, window, dist, method, values, diff_tol", [ @@ -518,12 +518,96 @@ def test_effective_growing_degree_days( [0.683273, 1.51189, 1.61597, 1.03875, 0.72531], 2e-2, ), + ( + "D", + 1, + "gamma", + "APP", + [-0.18618353, 1.44582971, 0.95985043, 0.15779587, -0.37801587], + 2e-2, + ), + ( + "D", + 12, + "gamma", + "APP", + [-0.24417774, -0.11404418, 0.64997039, 1.07670517, 0.6462852], + 2e-2, + ), + ( + "D", + 1, + "gamma", + "ML", + [-0.03577971, 1.30589409, 0.8863447, 0.23906544, -0.05185997], + 2e-2, + ), + ( + "D", + 12, + "gamma", + "ML", + [-0.15846245, -0.04924534, 0.66299367, 1.09938471, 0.66095752], + 2e-2, + ), + ( + "D", + 1, + "fisk", + "APP", + [-1.26216389, 1.03096183, 0.62985354, -0.50335153, -1.32788296], + 2e-2, + ), + ( + "D", + 12, + "fisk", + "APP", + [-0.57109258, -0.40657737, 0.55163493, 0.97381067, 0.55580649], + 2e-2, + ), + ( + "D", + 1, + "fisk", + "ML", + [-0.05562691, 1.30809152, 0.6954986, 0.33018744, -0.50258979], + 2e-2, + ), + ( + "D", + 12, + "fisk", + "ML", + [-0.14151269, -0.01914608, 0.7080277, 1.01510279, 0.6954002], + 2e-2, + ), + ( + None, + 1, + "gamma", + "APP", + [-0.18618353, 1.44582971, 0.95985043, 0.15779587, -0.37801587], + 2e-2, + ), + ( + None, + 12, + "gamma", + "APP", + [-0.24417774, -0.11404418, 0.64997039, 1.07670517, 0.6462852], + 2e-2, + ), ], ) def test_standardized_precipitation_index( self, open_dataset, freq, window, dist, method, values, diff_tol ): ds = open_dataset("sdba/CanESM2_1950-2100.nc").isel(location=1) + if freq == "D": + ds = convert_calendar( + ds, "366_day", missing=np.NaN + ) # to compare with ``climate_indices`` pr = ds.pr.sel(time=slice("1998", "2000")) pr_cal = ds.pr.sel(time=slice("1950", "1980")) params = xci.stats.standardized_index_fit_params( @@ -545,6 +629,7 @@ def test_standardized_precipitation_index( np.testing.assert_allclose(spi.values, values, rtol=0, atol=diff_tol) # See SPI version + @pytest.mark.slow @pytest.mark.parametrize( "freq, window, dist, method, values, diff_tol", [ diff --git a/tests/test_locales.py b/tests/test_locales.py index 4d7e12fdb..6f63977a3 100644 --- a/tests/test_locales.py +++ b/tests/test_locales.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +# pylint: disable=unsubscriptable-object # Tests for `xclim.locales` from __future__ import annotations @@ -159,7 +159,11 @@ def test_xclim_translations(locale, official_indicators): @pytest.mark.parametrize( - "initeng,expected", [(False, ""), (True, atmos.tg_mean.cf_attrs[0]["long_name"])] + "initeng,expected", + [ + (False, ""), + (True, atmos.tg_mean.cf_attrs[0]["long_name"]), + ], ) def test_local_dict_generation(initeng, expected): dic = generate_local_dict("tlh", init_english=initeng) diff --git a/tests/test_modules.py b/tests/test_modules.py index b6b3abb47..1927fa94f 100644 --- a/tests/test_modules.py +++ b/tests/test_modules.py @@ -28,7 +28,9 @@ def virtual_indicator(request): def test_default_modules_exist(): - from xclim.indicators import anuclim, cf, icclim # noqa + from xclim.indicators import anuclim # noqa + from xclim.indicators import cf # noqa + from xclim.indicators import icclim # noqa assert hasattr(icclim, "TG") diff --git a/tests/test_sdba/test_adjustment.py b/tests/test_sdba/test_adjustment.py index 953bd5ded..7543ae9d7 100644 --- a/tests/test_sdba/test_adjustment.py +++ b/tests/test_sdba/test_adjustment.py @@ -1,3 +1,4 @@ +# pylint: disable=no-member from __future__ import annotations import numpy as np @@ -710,7 +711,8 @@ def test_default_grouper_understood(tas_series): 
class TestSBCKutils: @pytest.mark.slow @pytest.mark.parametrize( - "method", [m for m in dir(adjustment) if m.startswith("SBCK_")] + "method", + [m for m in dir(adjustment) if m.startswith("SBCK_")], ) @pytest.mark.parametrize("use_dask", [True]) # do we gain testing both? def test_sbck(self, method, use_dask, random): diff --git a/tests/test_sdba/test_base.py b/tests/test_sdba/test_base.py index ffba45c1f..028c06d8e 100644 --- a/tests/test_sdba/test_base.py +++ b/tests/test_sdba/test_base.py @@ -1,3 +1,4 @@ +# pylint: disable=missing-kwoa from __future__ import annotations import jsonpickle @@ -159,60 +160,74 @@ def normalize_from_precomputed(grpds, dim=None): np.testing.assert_allclose(out, exp, rtol=1e-10) -def test_map_blocks(tas_series): - tas = tas_series(np.arange(366), start="2000-01-01") - tas = tas.expand_dims(lat=[1, 2, 3, 4]).chunk() - - # Test dim parsing - @map_blocks(reduces=["lat"], data=["lon"]) - def func(ds, *, group, lon=None): - assert group.window == 5 - data = ds.tas.rename(lat="lon") - return data.rename("data").to_dataset() - - # Raises on missing coords - with pytest.raises(ValueError, match="This function adds the lon dimension*"): - data = func(xr.Dataset(dict(tas=tas)), group="time.dayofyear", window=5) - - data = func( - xr.Dataset(dict(tas=tas)), group="time.dayofyear", window=5, lon=[1, 2, 3, 4] - ).load() - assert set(data.data.dims) == {"time", "lon"} - - @map_groups(data=[Grouper.PROP]) - def func(ds, *, dim): - assert isinstance(dim, list) - data = ds.tas.mean(dim) - return data.rename("data").to_dataset() - - data = func( - xr.Dataset(dict(tas=tas)), group="time.dayofyear", window=5, add_dims=["lat"] - ).load() - assert set(data.data.dims) == {"dayofyear"} - - @map_groups(data=[Grouper.PROP], main_only=True) - def func(ds, *, dim): - assert isinstance(dim, str) - data = ds.tas.mean(dim) - return data.rename("data").to_dataset() - - # with a scalar aux coord - data = func( - xr.Dataset(dict(tas=tas.isel(lat=0, drop=True)), coords=dict(leftover=1)), - group="time.dayofyear", - ).load() - assert set(data.data.dims) == {"dayofyear"} - assert "leftover" in data - - -def test_map_blocks_error(tas_series): - tas = tas_series(np.arange(366), start="2000-01-01") - tas = tas.expand_dims(lat=[1, 2, 3, 4]).chunk(lat=1) - - # Test dim parsing - @map_blocks(reduces=["lat"], data=[]) - def func(ds, *, group, lon=None): - return ds.tas.rename("data").to_dataset() - - with pytest.raises(ValueError, match="cannot be chunked"): - func(xr.Dataset(dict(tas=tas)), group="time") +class TestMapBlocks: + def test_lat_lon(self, tas_series): + tas = tas_series(np.arange(366), start="2000-01-01") + tas = tas.expand_dims(lat=[1, 2, 3, 4]).chunk() + + # Test dim parsing + @map_blocks(reduces=["lat"], data=["lon"]) + def func(ds, *, group, lon=None): + assert group.window == 5 + d = ds.tas.rename(lat="lon") + return d.rename("data").to_dataset() + + # Raises on missing coords + with pytest.raises(ValueError, match="This function adds the lon dimension*"): + data = func(xr.Dataset(dict(tas=tas)), group="time.dayofyear", window=5) + + data = func( + xr.Dataset(dict(tas=tas)), + group="time.dayofyear", + window=5, + lon=[1, 2, 3, 4], + ).load() + assert set(data.data.dims) == {"time", "lon"} + + def test_grouper_prop(self, tas_series): + tas = tas_series(np.arange(366), start="2000-01-01") + tas = tas.expand_dims(lat=[1, 2, 3, 4]).chunk() + + @map_groups(data=[Grouper.PROP]) + def func(ds, *, dim): + assert isinstance(dim, list) + d = ds.tas.mean(dim) + return 
d.rename("data").to_dataset() + + data = func( + xr.Dataset(dict(tas=tas)), + group="time.dayofyear", + window=5, + add_dims=["lat"], + ).load() + assert set(data.data.dims) == {"dayofyear"} + + def test_grouper_prop_main_only(self, tas_series): + tas = tas_series(np.arange(366), start="2000-01-01") + tas = tas.expand_dims(lat=[1, 2, 3, 4]).chunk() + + @map_groups(data=[Grouper.PROP], main_only=True) + def func(ds, *, dim): + assert isinstance(dim, str) + data = ds.tas.mean(dim) + return data.rename("data").to_dataset() + + # with a scalar aux coord + data = func( + xr.Dataset(dict(tas=tas.isel(lat=0, drop=True)), coords=dict(leftover=1)), + group="time.dayofyear", + ).load() + assert set(data.data.dims) == {"dayofyear"} + assert "leftover" in data + + def test_raises_error(self, tas_series): + tas = tas_series(np.arange(366), start="2000-01-01") + tas = tas.expand_dims(lat=[1, 2, 3, 4]).chunk(lat=1) + + # Test dim parsing + @map_blocks(reduces=["lat"], data=[]) + def func(ds, *, group, lon=None): + return ds.tas.rename("data").to_dataset() + + with pytest.raises(ValueError, match="cannot be chunked"): + func(xr.Dataset(dict(tas=tas)), group="time") diff --git a/tests/test_utils.py b/tests/test_utils.py index 45ec02004..f679cd1d9 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -38,9 +38,9 @@ def func(a, b=1, c=1): assert newf(1) == (1, 2, 2) assert newf.__doc__ == func.__doc__ - def func(a, b=1, c=1, **kws): + def func(a, b=1, c=1, **kws): # pylint: disable=function-redefined """Docstring""" - return (a, b, c) + return a, b, c newf = wrapped_partial(func, suggested=dict(c=2), a=2, b=2) assert list(signature(newf).parameters.keys()) == ["c", "kws"] diff --git a/tox.ini b/tox.ini index 3017f8de9..1c0692cf5 100644 --- a/tox.ini +++ b/tox.ini @@ -10,6 +10,7 @@ env_list = py39-upstream-doctest py310 py311 + py312-numba labels = test = py38, py39-upstream-doctest, py310, py311, notebooks_doctests, offline-prefetch requires = @@ -103,6 +104,12 @@ passenv = extras = dev deps = py38: scipy<1.9 + # FIXME: Remove when Python3.8 is dropped + py38: numba<0.59.0 + py38: llvmlite<0.42.0 + # FIXME: Remove when numba 0.59.0 is released + numba: numba==0.59.0rc1 + numba: llvmlite==0.42.0rc1 coverage: coveralls upstream: -rrequirements_upstream.txt eofs: eofs diff --git a/xclim/cli.py b/xclim/cli.py index 42dcd1f63..601292c6b 100644 --- a/xclim/cli.py +++ b/xclim/cli.py @@ -283,7 +283,7 @@ def dataflags(ctx, variables, raise_flags, append, dims, freq): @click.option( "-i", "--info", is_flag=True, help="Prints more details for each indicator." 
) -def indices(info): +def indices(info): # noqa """List all indicators.""" formatter = click.HelpFormatter() formatter.write_heading("Listing all available indicators for computation.") @@ -464,9 +464,9 @@ def cli(ctx, **kwargs): ctx.obj = kwargs -@cli.result_callback() # noqa +@cli.result_callback() @click.pass_context -def write_file(ctx, *args, **kwargs): # noqa +def write_file(ctx, *args, **kwargs): """Write the output dataset to file.""" if ctx.obj["output"] is not None: if ctx.obj["verbose"]: diff --git a/xclim/core/calendar.py b/xclim/core/calendar.py index 1e72fcc45..e63d4ac21 100644 --- a/xclim/core/calendar.py +++ b/xclim/core/calendar.py @@ -1598,18 +1598,18 @@ def get_doys(start, end, inclusive): def _month_is_first_period_month(time, freq): """Returns True if the given time is from the first month of freq.""" if isinstance(time, cftime.datetime): - frqM = xr.coding.cftime_offsets.to_offset("MS") + frq_monthly = xr.coding.cftime_offsets.to_offset("MS") frq = xr.coding.cftime_offsets.to_offset(freq) - if frqM.onOffset(time): + if frq_monthly.onOffset(time): return frq.onOffset(time) - return frq.onOffset(frqM.rollback(time)) + return frq.onOffset(frq_monthly.rollback(time)) # Pandas time = pd.Timestamp(time) - frqM = pd.tseries.frequencies.to_offset("MS") + frq_monthly = pd.tseries.frequencies.to_offset("MS") frq = pd.tseries.frequencies.to_offset(freq) - if frqM.is_on_offset(time): + if frq_monthly.is_on_offset(time): return frq.is_on_offset(time) - return frq.is_on_offset(frqM.rollback(time)) + return frq.is_on_offset(frq_monthly.rollback(time)) def stack_periods( @@ -1635,8 +1635,8 @@ def stack_periods( ---------- da : xr.Dataset or xr.DataArray An xarray object with a `time` dimension. - Must have an uniform timestep length. - Output might be strange if this does not use an uniform calendar (noleap, 360_day, all_leap). + Must have a uniform timestep length. + Output might be strange if this does not use a uniform calendar (noleap, 360_day, all_leap). window : int The length of the moving window as a multiple of ``freq``. stride : int, optional @@ -1652,7 +1652,7 @@ def stack_periods( freq : str Units of ``window``, ``stride`` and ``min_length``, as a frequency string. Must be larger than or equal to the data's sampling frequency. - Note that this function offers an easier interface for non uniform period (like years or months) + Note that this function offers an easier interface for non-uniform periods (like years or months) but is much slower than a rolling-construct method. dim : str The new dimension name. @@ -1662,7 +1662,8 @@ def stack_periods( align_days : bool When True (default), an error is raised if the output would have unaligned days across periods. If `freq = 'YS'`, day-of-year alignment is checked and if `freq` is "MS" or "QS", we check day-in-month. - Only uniform-calendar will pass the test for `freq='YS'`. For other frequencies, only the `360_day` calendar will work. + Only uniform calendars will pass the test for `freq='YS'`. + For other frequencies, only the `360_day` calendar will work. This check is ignored if the sampling rate of the data is coarser than "D". pad_value: Any When some periods are shorter than others, this value is used to pad them at the end. @@ -1677,7 +1678,7 @@ def stack_periods( That coordinate is the same for all periods; depending on the choice of ``window`` and ``freq``, it might make sense. But for unequal periods or non-uniform calendars, it will certainly not.
If ``stride`` is a divisor of ``window``, the correct timeseries can be reconstructed with :py:func:`unstack_periods`. - The coordinate of `period` is the first timestep of each windows. + The coordinate of `period` is the first timestep of each window. """ from xclim.core.units import ( # Import in function to avoid cyclical imports ensure_cf_units, @@ -1734,9 +1735,9 @@ def stack_periods( ) periods = [] - longest = 0 + # longest = 0 # Iterate over strides, but recompute the full window for each stride start - for begin, strd_slc in da.resample(time=strd_frq).groups.items(): + for _, strd_slc in da.resample(time=strd_frq).groups.items(): win_resamp = time2.isel(time=slice(strd_slc.start, None)).resample(time=win_frq) # Get slice for first group win_slc = win_resamp._group_indices[0] @@ -1749,7 +1750,7 @@ def stack_periods( open_ended = min_slc.stop is None else: # The end of the group slice is None if no outside-group value was found after the last element - # As we added an extra step to time2, we avoid the case where a group ends exactly on the last element of ds. + # As we added an extra step to time2, we avoid the case where a group ends exactly on the last element of ds open_ended = win_slc.stop is None if open_ended: # Too short, we got to the end @@ -1760,7 +1761,8 @@ def stack_periods( and min_length == window and not _month_is_first_period_month(da.time[0].item(), freq) ): - # For annual or quartely frequencies (which can be anchor-based), if the first time is not in the first month of the first period, + # For annual or quarterly frequencies (which can be anchor-based), + # if the first time is not in the first month of the first period, # then the first period is incomplete but by a fractional amount. continue periods.append( @@ -1783,7 +1785,7 @@ def stack_periods( m, u = infer_sampling_units(da) lengths = lengths * m lengths.attrs["units"] = ensure_cf_units(u) - # Start points for each periods + remember parameters for unstacking + # Start points for each period, plus parameters to remember for unstacking starts = xr.DataArray( [da.time[slc.start].item() for slc in periods], dims=(dim,), @@ -1873,7 +1875,7 @@ def unstack_periods(da: xr.DataArray | xr.Dataset, dim: str = "period"): f"`unstack_periods` can't find the `{dim}_length` coordinate." ) from err # Get length as number of points - m, u = infer_sampling_units(da.time) + m, _ = infer_sampling_units(da.time) lengths = lengths // m else: # It is acceptable to lose "{dim}_length" if they were all equal diff --git a/xclim/core/indicator.py b/xclim/core/indicator.py index 6590ab9a4..1a8c26255 100644 --- a/xclim/core/indicator.py +++ b/xclim/core/indicator.py @@ -211,8 +211,8 @@ def update(self, other: dict) -> None: def is_parameter_dict(cls, other: dict) -> bool: """Return whether indicator has a parameter dictionary.""" return set(other.keys()).issubset( - cls.__dataclass_fields__.keys() - ) # pylint disable=no-member + cls.__dataclass_fields__.keys() # pylint: disable=no-member + ) def __getitem__(self, key) -> str: """Return an item in retro-compatible fashion.""" @@ -1502,15 +1502,13 @@ def _preprocess_and_checks(self, das, params): das, params = super()._preprocess_and_checks(das, params) # Check if the period is allowed: - if ( - self.allowed_periods is not None - and parse_offset(params["freq"])[1] not in self.allowed_periods - ): - raise ValueError( - f"Resampling frequency {params['freq']} is not allowed for indicator " - f"{self.identifier} (needs something equivalent to one " - f"of {self.allowed_periods})."
- ) + if self.allowed_periods is not None: + if parse_offset(params["freq"])[1] not in self.allowed_periods: + raise ValueError( + f"Resampling frequency {params['freq']} is not allowed for indicator " + f"{self.identifier} (needs something equivalent to one " + f"of {self.allowed_periods})." + ) return das, params @@ -1623,12 +1621,12 @@ def build_indicator_module( ) out = getattr(indicators, name) if reload: - for name, ind in list(out.iter_indicators()): - if name not in objs: + for n, ind in list(out.iter_indicators()): + if n not in objs: # Remove the indicator from the registries and the module del registry[ind._registry_id] # noqa del _indicators_registry[ind.__class__] - del out.__dict__[name] + del out.__dict__[n] else: doc = doc or f"{name.capitalize()} indicators\n" + "=" * (len(name) + 11) try: diff --git a/xclim/core/units.py b/xclim/core/units.py index 1ffc9d567..185f6726a 100644 --- a/xclim/core/units.py +++ b/xclim/core/units.py @@ -1139,10 +1139,10 @@ def dec(func): # Raised when it is not understood, we assume it was a dimensionality try: units.get_dimensionality(dim.replace("dimensionless", "")) - except Exception: + except Exception as e: raise ValueError( f"Relative units for {name} are invalid. Got {dim}. (See stacktrace for more information)." - ) + ) from e @wraps(func) def wrapper(*args, **kwargs): diff --git a/xclim/core/utils.py b/xclim/core/utils.py index 89304b5ce..d582b5be2 100644 --- a/xclim/core/utils.py +++ b/xclim/core/utils.py @@ -136,10 +136,11 @@ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): msg = ( - f"`{func.__name__}` is deprecated{' from version {}'.format(from_version) if from_version else ''} " + f"`{func.__name__}` is deprecated" + f"{' from version {}'.format(from_version) if from_version else ''} " "and will be removed in a future version of xclim" - f"{'. Use `{}` instead'.format(suggested if suggested else '')}. " - f"Please update your scripts accordingly." + f"{'. Use `{}` instead'.format(suggested) if suggested else ''}. " + "Please update your scripts accordingly." 
) warnings.warn( msg, diff --git a/xclim/ensembles/_filters.py b/xclim/ensembles/_filters.py index ef0e8f969..6923173da 100644 --- a/xclim/ensembles/_filters.py +++ b/xclim/ensembles/_filters.py @@ -47,7 +47,7 @@ def _concat_hist(da, **hist): raise ValueError("Too many values in hist scenario.") # Scenario dimension, and name of the historical scenario - ((dim, name),) = hist.items() + ((dim, _),) = hist.items() # Select historical scenario and drop it from the data h = da.sel(**hist).dropna("time", how="all") diff --git a/xclim/indices/_agro.py b/xclim/indices/_agro.py index a3ac3eebb..7183a75ac 100644 --- a/xclim/indices/_agro.py +++ b/xclim/indices/_agro.py @@ -1238,7 +1238,7 @@ def standardized_precipitation_index( spi = standardized_index(pr, params) spi.attrs = params.attrs - spi.attrs["freq"] = freq or xarray.infer_freq(spi.time) + spi.attrs["freq"] = (freq or xarray.infer_freq(spi.time)) or "undefined" spi.attrs["window"] = window spi.attrs["units"] = "" return spi diff --git a/xclim/indices/fire/_ffdi.py b/xclim/indices/fire/_ffdi.py index f811b32d4..b986fe2e3 100644 --- a/xclim/indices/fire/_ffdi.py +++ b/xclim/indices/fire/_ffdi.py @@ -1,3 +1,4 @@ +# pylint: disable=no-value-for-parameter r""" McArthur Forest Fire Danger (Mark 5) System =========================================== @@ -97,7 +98,7 @@ def _keetch_byram_drought_index(p, t, pa, kbdi0, kbdi: float): # pragma: no cov nopython=True, cache=True, ) -def _griffiths_drought_factor(p, smd, lim, df): # pragma: no cover # noqa: C901 +def _griffiths_drought_factor(p, smd, lim, df): # pragma: no cover """Compute the Griffiths drought factor. Parameters @@ -239,9 +240,15 @@ def keetch_byram_drought_index( """ def _keetch_byram_drought_index_pass(pr, tasmax, pr_annual, kbdi0): - """Pass inputs on to guvectorized function `_keetch_byram_drought_index`. DO NOT CALL DIRECTLY, use `keetch_byram_drought_index` instead.""" - # This function is actually only required as xr.apply_ufunc will not receive - # a guvectorized function which has the output(s) in its function signature + """Pass inputs on to guvectorized function `_keetch_byram_drought_index`. + + This function is actually only required as `xr.apply_ufunc` will not receive + a guvectorized function which has the output(s) in its function signature. + + Warnings + -------- + DO NOT CALL DIRECTLY, use `keetch_byram_drought_index` instead. + """ return _keetch_byram_drought_index(pr, tasmax, pr_annual, kbdi0) pr = convert_units_to(pr, "mm/day", context="hydro") @@ -311,9 +318,15 @@ def griffiths_drought_factor( """ def _griffiths_drought_factor_pass(pr, smd, lim): - """Pass inputs on to guvectorized function `_griffiths_drought_factor`. DO NOT CALL DIRECTLY, use `griffiths_drought_factor` instead.""" - # This function is actually only required as xr.apply_ufunc will not receive - # a guvectorized function which has the output(s) in its function signature + """Pass inputs on to guvectorized function `_griffiths_drought_factor`. + + This function is actually only required as `xr.apply_ufunc` will not receive + a guvectorized function which has the output(s) in its function signature. + + Warnings + -------- + DO NOT CALL DIRECTLY, use `griffiths_drought_factor` instead.
+ """ return _griffiths_drought_factor(pr, smd, lim) pr = convert_units_to(pr, "mm/day", context="hydro") diff --git a/xclim/indices/stats.py b/xclim/indices/stats.py index 7a91e89bc..c8a9396b4 100644 --- a/xclim/indices/stats.py +++ b/xclim/indices/stats.py @@ -8,7 +8,7 @@ import numpy as np import xarray as xr -from xclim.core.calendar import resample_doy, select_time +from xclim.core.calendar import compare_offsets, resample_doy, select_time from xclim.core.formatting import prefix_attrs, unprefix_attrs, update_history from xclim.core.units import convert_units_to from xclim.core.utils import Quantified, uses_dask @@ -625,13 +625,21 @@ def preprocess_standardized_index( # We could allow a more general frequency in this function and move # the constraint {"D", "MS"} in specific indices such as SPI / SPEI. final_freq = freq or xr.infer_freq(da.time) - try: - group = {"D": "time.dayofyear", "MS": "time.month"}[final_freq] - except KeyError(): - raise ValueError( - f"The input (following resampling if applicable) has a frequency `{final_freq}`" - "which is not supported for standardized indices." + if final_freq: + if final_freq == "D": + group = "time.dayofyear" + elif compare_offsets(final_freq, "==", "MS"): + group = "time.month" + else: + raise ValueError( + f"The input (following resampling if applicable) has a frequency `{final_freq}` " + "which is not supported for standardized indices." + ) + else: + warnings.warn( + "No resampling frequency was specified and a frequency for the dataset could not be identified with ``xr.infer_freq``" ) + group = "time.dayofyear" if freq is not None: da = da.resample(time=freq).mean(keep_attrs=True) @@ -732,10 +740,7 @@ def standardized_index_fit_params( "units": "", "offset": offset or "", } - if indexer != {}: - method, args = indexer.popitem() - else: - method, args = "", [] + method, args = ("", []) if indexer == {} else indexer.popitem() params.attrs["time_indexer"] = (method, *args) return params @@ -762,8 +767,6 @@ def standardized_index(da: xr.DataArray, params: xr.DataArray): def reindex_time(da, da_ref): if group == "time.dayofyear": - da = da.rename(day="time").reindex(time=da_ref.time.dt.dayofyear) - da["time"] = da_ref.time da = resample_doy(da, da_ref) elif group == "time.month": da = da.rename(month="time").reindex(time=da_ref.time.dt.month) diff --git a/xclim/sdba/adjustment.py b/xclim/sdba/adjustment.py index c56e19c64..17d0d2068 100644 --- a/xclim/sdba/adjustment.py +++ b/xclim/sdba/adjustment.py @@ -1,3 +1,4 @@ +# pylint: disable=missing-kwoa """ Adjustment Methods ================== diff --git a/xclim/sdba/measures.py b/xclim/sdba/measures.py index ede225c31..b1fa8a5d0 100644 --- a/xclim/sdba/measures.py +++ b/xclim/sdba/measures.py @@ -120,15 +120,13 @@ def _preprocess_and_checks(self, das, params): if isinstance(params["group"], str): params["group"] = Grouper(params["group"]) - if ( - self.allowed_groups is not None - and params["group"].prop not in self.allowed_groups - ): - raise ValueError( - f"Grouping period {params['group'].prop_name} is not allowed for property " - f"{self.identifier} (needs something in " - f"{list(map(lambda g: '.' + g.replace('group', ''), self.allowed_groups))})." - ) + if self.allowed_groups is not None: + if params["group"].prop not in self.allowed_groups: + raise ValueError( + f"Grouping period {params['group'].prop_name} is not allowed for property " + f"{self.identifier} (needs something in " + f"{list(map(lambda g: '.' + g.replace('group', ''), self.allowed_groups))})." 
+ ) # Convert grouping and check if allowed: sim = das["sim"] diff --git a/xclim/sdba/nbutils.py b/xclim/sdba/nbutils.py index fcd4b5444..9fd245d20 100644 --- a/xclim/sdba/nbutils.py +++ b/xclim/sdba/nbutils.py @@ -1,3 +1,4 @@ +# pylint: disable=no-value-for-parameter """ Numba-accelerated Utilities =========================== @@ -23,7 +24,7 @@ def _vecquantiles(arr, rnk, res): res[0] = np.nanquantile(arr, rnk) -def vecquantiles(da, rnk, dim): +def vecquantiles(da: DataArray, rnk: DataArray, dim: str | DataArray.dims) -> DataArray: """For when the quantile (rnk) is different for each point. da and rnk must share all dimensions but dim. @@ -54,7 +55,7 @@ def _quantile(arr, q): return out -def quantile(da, q, dim): +def quantile(da: DataArray, q, dim: str | DataArray.dims) -> DataArray: """Compute the quantiles from a fixed list `q`.""" # We have two cases : # - When all dims are processed : we stack them and use _quantile1d @@ -68,7 +69,7 @@ def quantile(da, q, dim): da = da.stack({tem: dims}) # So we cut in half the definitions to declare in numba - # We still use q as the coords so it corresponds to what was done upstream + # We still use q as the coords, so it corresponds to what was done upstream if not hasattr(q, "dtype") or q.dtype != da.dtype: qc = np.array(q, dtype=da.dtype) else: @@ -90,7 +91,7 @@ def quantile(da, q, dim): # All dims are processed res = DataArray( _quantile(da.values, qc), - dims=("quantiles"), + dims="quantiles", coords={"quantiles": q}, attrs=da.attrs, ) @@ -184,9 +185,7 @@ def _first_and_last_nonnull(arr): @njit -def _extrapolate_on_quantiles( - interp, oldx, oldg, oldy, newx, newg, method="constant" -): # noqa +def _extrapolate_on_quantiles(interp, oldx, oldg, oldy, newx, newg, method="constant"): """Apply extrapolation to the output of interpolation on quantiles with a given grouping. Arguments are the same as _interp_on_quantiles_2D. diff --git a/xclim/sdba/processing.py b/xclim/sdba/processing.py index f6d833ae8..742877ed8 100644 --- a/xclim/sdba/processing.py +++ b/xclim/sdba/processing.py @@ -1,3 +1,4 @@ +# pylint: disable=missing-kwoa """ Pre- and Post-Processing Submodule ================================== @@ -46,10 +47,10 @@ def adapt_freq( Parameters ---------- - ds : xr.Dataset - With variables : "ref", Target/reference data, usually observed data, and "sim", Simulated data. - dim : str - Dimension name. + ref : xr.Dataset + Target/reference data, usually observed data, with a "time" dimension. + sim : xr.Dataset + Simulated data, with a "time" dimension. group : str or Grouper Grouping information, see base.Grouper thresh : str diff --git a/xclim/sdba/properties.py b/xclim/sdba/properties.py index e729a13ee..694ae9d6c 100644 --- a/xclim/sdba/properties.py +++ b/xclim/sdba/properties.py @@ -1,3 +1,4 @@ +# pylint: disable=missing-kwoa """ Properties Submodule ==================== @@ -77,15 +78,13 @@ def _preprocess_and_checks(self, das, params): if isinstance(params["group"], str): params["group"] = Grouper(params["group"]) - if ( - self.allowed_groups is not None - and params["group"].prop not in self.allowed_groups - ): - raise ValueError( - f"Grouping period {params['group'].prop_name} is not allowed for property " - f"{self.identifier} (needs something in " - f"{map(lambda g: '.' + g.replace('group', ''), self.allowed_groups)})." 
- ) + if self.allowed_groups is not None: + if params["group"].prop not in self.allowed_groups: + raise ValueError( + f"Grouping period {params['group'].prop_name} is not allowed for property " + f"{self.identifier} (needs something in " + f"{map(lambda g: '.' + g.replace('group', ''), self.allowed_groups)})." + ) return das, params @@ -1112,7 +1111,7 @@ def _decorrelation_length( corr = _pairwise_spearman(da, dims) - dists, mn, mx = _pairwise_haversine_and_bins( + dists, _, _ = _pairwise_haversine_and_bins( corr.cf["longitude"].values, corr.cf["latitude"].values, transpose=True ) diff --git a/xclim/testing/diagnostics.py b/xclim/testing/diagnostics.py index a7f8a6c21..8aede8e47 100644 --- a/xclim/testing/diagnostics.py +++ b/xclim/testing/diagnostics.py @@ -1,3 +1,4 @@ +# pylint: disable=no-member,missing-kwoa """ SDBA Diagnostic Testing Module ============================== @@ -128,7 +129,7 @@ def adapt_freq_graph(): x = series(synth_rainfall(2, 2, wet_freq=0.25, size=n), "pr") # sim y = series(synth_rainfall(2, 2, wet_freq=0.5, size=n), "pr") # ref - xp = adapt_freq(x, y, thresh=0).sim_ad + xp = adapt_freq(x, y, thresh=0).sim_ad # noqa fig, (ax1, ax2) = plt.subplots(2, 1) sx = x.sortby(x) diff --git a/xclim/testing/utils.py b/xclim/testing/utils.py index c15a92054..c4bb93211 100644 --- a/xclim/testing/utils.py +++ b/xclim/testing/utils.py @@ -202,7 +202,8 @@ def _get( local_md5 = file_md5_checksum(local_file) try: url = "/".join((github_url, "raw", branch, md5_name.as_posix())) - logger.info(f"Attempting to fetch remote file md5: {md5_name.as_posix()}") + msg = f"Attempting to fetch remote file md5: {md5_name.as_posix()}" + logger.info(msg) urlretrieve(url, md5_file) # nosec with open(md5_file) as f: remote_md5 = f.read() @@ -235,7 +236,8 @@ def _get( local_file.parent.mkdir(exist_ok=True, parents=True) url = "/".join((github_url, "raw", branch, fullname.as_posix())) - logger.info(f"Fetching remote file: {fullname.as_posix()}") + msg = f"Fetching remote file: {fullname.as_posix()}" + logger.info(msg) try: urlretrieve(url, local_file) # nosec except HTTPError as e: @@ -256,7 +258,8 @@ def _get( raise FileNotFoundError(msg) from e try: url = "/".join((github_url, "raw", branch, md5_name.as_posix())) - logger.info(f"Fetching remote file md5: {md5_name.as_posix()}") + msg = f"Fetching remote file md5: {md5_name.as_posix()}" + logger.info(msg) urlretrieve(url, md5_file) # nosec except (HTTPError, URLError) as e: msg = ( @@ -531,7 +534,9 @@ def publish_release_notes( if not file: return changes if isinstance(file, (Path, os.PathLike)): - file = Path(file).open("w") + with Path(file).open("w") as f: + print(changes, file=f) + return print(changes, file=file) @@ -588,5 +593,7 @@ def show_versions( if not file: return message if isinstance(file, (Path, os.PathLike)): - file = Path(file).open("w") + with Path(file).open("w") as f: + print(message, file=f) + return print(message, file=file)