diff --git a/.cruft.json b/.cruft.json
index 273c1b23..32ee8e97 100644
--- a/.cruft.json
+++ b/.cruft.json
@@ -1,6 +1,6 @@
{
- "template": "/home/tjs/git/cookiecutter-pypackage",
- "commit": "f9e0b049711af2023e9a3f5df594f4dbc25b07c1",
+ "template": "https://github.com/Ouranosinc/cookiecutter-pypackage",
+ "commit": "14556700478b0afdb158d61dd35db26a77c2b83d",
"checkout": null,
"context": {
"cookiecutter": {
@@ -20,10 +20,10 @@
"add_translations": "y",
"command_line_interface": "Click",
"create_author_file": "y",
- "open_source_license": "Not open source",
+ "open_source_license": "Apache Software License 2.0",
"generated_with_cruft": "y",
"__gh_slug": "https://github.com/Ouranosinc/xscen",
- "_template": "/home/tjs/git/cookiecutter-pypackage"
+ "_template": "https://github.com/Ouranosinc/cookiecutter-pypackage"
}
},
"directory": null
diff --git a/.flake8 b/.flake8
index 12028909..3a5dd7c2 100644
--- a/.flake8
+++ b/.flake8
@@ -6,9 +6,6 @@ exclude =
docs/conf.py,
tests
ignore =
- AZ100,
- AZ200,
- AZ300,
C,
D,
E,
diff --git a/.github/deactivated/actions-versions-updater.yml b/.github/deactivated/actions-versions-updater.yml
deleted file mode 100644
index 88c0a129..00000000
--- a/.github/deactivated/actions-versions-updater.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-name: GitHub Actions Version Updater
-
-on:
- schedule:
- # 12:00 AM on the first of every month
- - cron: '0 0 1 * *'
- workflow_dispatch:
-
-permissions:
- contents: read
-
-jobs:
- build:
- runs-on: ubuntu-latest
- permissions:
- actions: write
- contents: write
- pull-requests: write
- steps:
- - name: Harden Runner
- uses: step-security/harden-runner@eb238b55efaa70779f274895e782ed17c84f2895 # v2.6.1
- with:
- disable-sudo: true
- egress-policy: block
- allowed-endpoints: >
- api.github.com:443
- github.com:443
-
- - uses: actions/checkout@v4.1.1
- with:
- # This requires a personal access token with the privileges to push directly to `main`
- token: ${{ secrets.ACTIONS_VERSION_UPDATER_TOKEN }}
- persist-credentials: true
-
- - name: Run GitHub Actions Version Updater
- uses: saadmk11/github-actions-version-updater@v0.8.1
- with:
- token: ${{ secrets.ACTIONS_VERSION_UPDATER_TOKEN }}
- committer_email: 'bumpversion[bot]@ouranos.ca'
- committer_username: 'update-github-actions[bot]'
- pull_request_title: '[bot] Update GitHub Action Versions'
diff --git a/.github/deactivated/conda-build.yml b/.github/deactivated/conda-build.yml
index b30dc882..d8d7bc22 100644
--- a/.github/deactivated/conda-build.yml
+++ b/.github/deactivated/conda-build.yml
@@ -25,7 +25,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ["3.9"]
+ python-version: [ "3.9" ]
steps:
- uses: actions/checkout@v4.1.1
if: ${{ github.event.inputs.tag == '' }}
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index d86e88c8..053707cf 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,13 +1,22 @@
+# Please see the documentation for all configuration options:
+# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
+
version: 2
updates:
- package-ecosystem: github-actions
- directory: /
+ directory: /.github/workflows
schedule:
interval: monthly
- open-pull-requests-limit: 10
+ groups:
+ actions:
+ patterns:
+ - "*"
- package-ecosystem: pip
directory: /
schedule:
interval: monthly
- open-pull-requests-limit: 10
+ groups:
+ python:
+ patterns:
+ - "*"
diff --git a/.github/labeler.yml b/.github/labeler.yml
index 6d75ce34..cd74ab70 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -18,17 +18,22 @@
- '.readthedocs.yml'
- '.secrets.baseline'
- '.yamllint.yaml'
+ - 'CI/**/*'
- 'Makefile'
- 'docs/Makefile'
- 'tox.ini'
+# label 'docs' for all documentation-related steps and files
'docs':
- changed_files:
- any-glob-to-any-file:
+ - '.github/DISCUSSION_TEMPLATE/**/*'
- '.github/ISSUE_TEMPLATE.md'
- '.github/ISSUE_TEMPLATE/**/*'
- '.github/PULL_REQUEST_TEMPLATE.md'
- '.readthedocs.yml'
+ - 'AUTHORS.rst'
+ - 'CODE_OF_CONDUCT.md'
- 'CONTRIBUTING.rst'
- 'README.rst'
- 'docs/**/*'
diff --git a/.github/workflows/bump-version.yml b/.github/workflows/bump-version.yml
index 411712a6..65720805 100644
--- a/.github/workflows/bump-version.yml
+++ b/.github/workflows/bump-version.yml
@@ -22,6 +22,10 @@ on:
- .yamllint.yaml
- AUTHORS.rst
- CHANGELOG.rst
+ - CI/**/*.in
+ - CI/**/*.py
+ - CI/**/*.txt
+ - CODE_OF_CONDUCT.md
- CONTRIBUTING.rst
- MANIFEST.in
- Makefile
@@ -70,14 +74,13 @@ jobs:
run: |
git config --local user.email "bumpversion[bot]@ouranos.ca"
git config --local user.name "bumpversion[bot]"
- - name: Install bump-my-version
- run: |
- python -m pip install "bump-my-version>=0.18.3"
- name: Current Version
run: |
- bump-my-version show current_version
CURRENT_VERSION="$(grep -E '__version__' src/xscen/__init__.py | cut -d ' ' -f3)"
echo "CURRENT_VERSION=${CURRENT_VERSION}" >> $GITHUB_ENV
+ - name: Install CI libraries
+ run: |
+ python -m pip install --require-hashes -r CI/requirements_ci.txt
- name: Conditional Bump Version
run: |
if [[ ${{ env.CURRENT_VERSION }} =~ -dev(\.\d+)? ]]; then
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index e550dbb4..0203ce6d 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -4,6 +4,7 @@ on:
push:
branches:
- main
+ pull_request:
schedule:
- cron: '30 23 * * 5'
diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
index 088cf174..bafdd25f 100644
--- a/.github/workflows/dependency-review.yml
+++ b/.github/workflows/dependency-review.yml
@@ -1,6 +1,7 @@
# Dependency Review Action
#
-# This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging.
+# This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR.
+# Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
diff --git a/.github/workflows/first-pull-request.yml b/.github/workflows/first-pull-request.yml
index d040e1d2..1c602015 100644
--- a/.github/workflows/first-pull-request.yml
+++ b/.github/workflows/first-pull-request.yml
@@ -13,7 +13,6 @@ jobs:
name: Welcome
runs-on: ubuntu-latest
permissions:
- contents: read
pull-requests: write
steps:
- name: Harden Runner
@@ -57,5 +56,5 @@ jobs:
It appears that this is your first Pull Request. To give credit where it's due, we ask that you add your information to the \`AUTHORS.rst\` and \`.zenodo.json\`:
- [ ] The relevant author information has been added to \`AUTHORS.rst\` and \`.zenodo.json\`.
- Please make sure you've read our [contributing guide](CONTRIBUTING.rst). We look forward to reviewing your Pull Request shortly ✨`
+ Please make sure you've read our [contributing guide](https://github.com/Ouranosinc/xscen/blob/main/CONTRIBUTING.rst). We look forward to reviewing your Pull Request shortly ✨`
})
diff --git a/.github/workflows/label.yml b/.github/workflows/label.yml
index f2c6eea2..595ff5b9 100644
--- a/.github/workflows/label.yml
+++ b/.github/workflows/label.yml
@@ -6,7 +6,7 @@
# https://github.com/actions/labeler/blob/master/README.md
name: Labeler
-on: [pull_request_target]
+on: [ pull_request_target ]
# Note: potential security risk from this action using pull_request_target.
# Do not add actions in here which need a checkout of the repo, and do not use any caching in here.
# See: https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target
@@ -19,8 +19,6 @@ jobs:
name: Label
runs-on: ubuntu-latest
permissions:
- checks: write
- contents: read
pull-requests: write
steps:
- name: Harden Runner
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index db91c90a..05be90c8 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -9,7 +9,7 @@ on:
- CHANGELOG.rst
- README.rst
- pyproject.toml
- - xscen/__init__.py
+ - src/xscen/__init__.py
pull_request:
concurrency:
@@ -44,32 +44,26 @@ jobs:
uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
with:
python-version: ${{ matrix.python-version }}
- - name: Install tox
+ cache: pip
+ - name: Install CI libraries
run: |
- python -m pip install tox
+ python -m pip install --require-hashes -r CI/requirements_ci.txt
- name: Run linting suite
run: |
python -m tox -e lint
test-pypi:
- name: ${{ matrix.tox-build }} (Python${{ matrix.python-version }})
+ name: Test with Python${{ matrix.python-version }} (tox, ${{ matrix.os }})
needs: lint
- runs-on: ubuntu-latest
+ runs-on: ${{ matrix.os }}
env:
COVERALLS_PARALLEL: true
COVERALLS_SERVICE_NAME: github
esmf-version: 8.4.2
strategy:
matrix:
- include:
- - python-version: "3.9"
- tox-build: "py39-coveralls"
- - python-version: "3.10"
- tox-build: "py310-coveralls"
- - python-version: "3.11"
- tox-build: "py311-coveralls"
- - python-version: "3.12"
- tox-build: "py312-esmpy-coveralls"
+ os: [ 'ubuntu-latest' ]
+ python-version: [ "3.10", "3.11", "3.12" ] # "3.13"
defaults:
run:
shell: bash -l {0}
@@ -87,16 +81,23 @@ jobs:
environment-name: xscen-pypi
create-args: >-
esmf=${{ env.esmf-version }}
- mamba
python=${{ matrix.python-version }}
- tox
+ tox>=4.17.1
+ tox-gh>=1.3.2
+ - name: Environment Caching
+ uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
+ with:
+ path: .tox
+ key: ${{ matrix.os }}-Python${{ matrix.python-version }}-${{ hashFiles('pyproject.toml', 'tox.ini') }}
- name: Test with tox
run: |
- python -m tox -e ${{ matrix.tox-build }}
+ python -m tox
env:
ESMF_VERSION: ${{ env.esmf-version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_FLAG_NAME: run-Python${{ matrix.python-version }}
+ COVERALLS_PARALLEL: true
+ COVERALLS_SERVICE_NAME: github
# - name: Install esmpy
# run: |
@@ -122,16 +123,13 @@ jobs:
# COVERALLS_SERVICE_NAME: github
test-conda:
- name: Python${{ matrix.python-version }} (conda)
+ name: Test with Python${{ matrix.python-version }} (Anaconda, ${{ matrix.os }})
needs: lint
- runs-on: ubuntu-latest
+ runs-on: ${{ matrix.os }}
strategy:
matrix:
- include:
- - python-version: "3.9"
- - python-version: "3.10"
- - python-version: "3.11"
- - python-version: "3.12"
+ os: [ 'ubuntu-latest' ]
+ python-version: [ "3.10", "3.11", "3.12" ] # "3.13"
defaults:
run:
shell: bash -l {0}
@@ -156,7 +154,6 @@ jobs:
micromamba install -y -c conda-forge intake-esm=2023.11.10
- name: Conda and Mamba versions
run: |
- micromamba list
echo "micromamba $(micromamba --version)"
- name: Compile catalogs and install xscen
run: |
@@ -164,12 +161,12 @@ jobs:
python -m pip install --no-user --no-deps .
- name: Check versions
run: |
- conda list
+ micromamba list
python -m pip check || true
- name: Test with pytest
run: |
python -m pytest --cov xscen
- - name: Report coverage
+ - name: Report Coverage
run: |
python -m coveralls
env:
@@ -183,12 +180,13 @@ jobs:
- test-pypi
- test-conda
runs-on: ubuntu-latest
- container: python:3-slim
steps:
+ - name: Harden Runner
+ uses: step-security/harden-runner@5c7944e73c4c2a096b17a9cb74d65b6c2bbafbde # v2.9.1
+ with:
+ disable-sudo: true
+ egress-policy: audit
- name: Coveralls Finished
- run: |
- python -m pip install --upgrade coveralls
- python -m coveralls --finish
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- COVERALLS_SERVICE_NAME: github
+ uses: coverallsapp/github-action@643bc377ffa44ace6394b2b5d0d3950076de9f63 # v2.3.0
+ with:
+ parallel-finished: true
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
index 01f233a0..b93ac61c 100644
--- a/.github/workflows/publish-pypi.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -33,11 +33,11 @@ jobs:
uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
with:
python-version: "3.x"
- - name: Install packaging libraries
+ - name: Install CI libraries
run: |
- python -m pip install build setuptools wheel
+ python -m pip install --require-hashes -r CI/requirements_ci.txt
- name: Build a binary wheel and a source tarball
run: |
python -m build --sdist --wheel
- name: Publish distribution 📦 to PyPI
- uses: pypa/gh-action-pypi-publish@8a08d616893759ef8e1aa1f2785787c0b97e20d6 # v1.10.0
+ uses: pypa/gh-action-pypi-publish@0ab0b79471669eb3a4d647e625009c62f9f3b241 # v1.10.1
diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml
index 35afe070..1705352c 100644
--- a/.github/workflows/scorecard.yml
+++ b/.github/workflows/scorecard.yml
@@ -80,6 +80,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: Upload to code-scanning
- uses: github/codeql-action/upload-sarif@b7cec7526559c32f1616476ff32d17ba4c59b2d6 # 3.25.5
+ uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # 3.26.6
with:
sarif_file: results.sarif
diff --git a/.github/workflows/tag-testpypi.yml b/.github/workflows/tag-testpypi.yml
index fea99960..a42aa6e0 100644
--- a/.github/workflows/tag-testpypi.yml
+++ b/.github/workflows/tag-testpypi.yml
@@ -21,7 +21,7 @@ jobs:
with:
egress-policy: audit
- name: Checkout code
- uses: actions/checkout@v4.1.7
+ uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Create Release
uses: softprops/action-gh-release@c062e08bd532815e2082a85e87e3ef29c3e6d191 # 2.0.8
env:
@@ -57,14 +57,14 @@ jobs:
uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
with:
python-version: "3.x"
- - name: Install packaging libraries
+ - name: Install CI libraries
run: |
- python -m pip install build setuptools wheel
+ python -m pip install --require-hashes -r CI/requirements_ci.txt
- name: Build a binary wheel and a source tarball
run: |
python -m build --sdist --wheel
- name: Publish distribution 📦 to Test PyPI
- uses: pypa/gh-action-pypi-publish@8a08d616893759ef8e1aa1f2785787c0b97e20d6 # v1.10.0
+ uses: pypa/gh-action-pypi-publish@0ab0b79471669eb3a4d647e625009c62f9f3b241 # v1.10.1
with:
repository-url: https://test.pypi.org/legacy/
skip-existing: true
diff --git a/.github/workflows/upstream.yml b/.github/workflows/upstream.yml
index 458a77c2..77918ca4 100644
--- a/.github/workflows/upstream.yml
+++ b/.github/workflows/upstream.yml
@@ -8,7 +8,7 @@ on:
- CHANGELOG.rst
- README.rst
- pyproject.toml
- - xscen/__init__.py
+ - src/xscen/__init__.py
schedule:
- cron: "0 0 * * *" # Daily βAt 00:00β UTC
workflow_dispatch: # allows you to trigger the workflow run manually
@@ -25,7 +25,6 @@ jobs:
name: Python${{ matrix.python-version }}-upstream
runs-on: ubuntu-latest
permissions:
- contents: read
issues: write
if: |
(github.event_name == 'schedule') ||
diff --git a/.github/workflows/workflow-warning.yml b/.github/workflows/workflow-warning.yml
index 03768b37..3e998058 100644
--- a/.github/workflows/workflow-warning.yml
+++ b/.github/workflows/workflow-warning.yml
@@ -22,7 +22,6 @@ jobs:
if: |
(github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)
permissions:
- contents: read
pull-requests: write
steps:
- name: Harden Runner
@@ -32,38 +31,45 @@ jobs:
egress-policy: block
allowed-endpoints: >
api.github.com:443
- - name: Find comment
+ - name: Find Warning Comment
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
- id: fc
+ id: fc_warning
with:
issue-number: ${{ github.event.pull_request.number }}
comment-author: 'github-actions[bot]'
body-includes: |
This Pull Request modifies GitHub workflows and is coming from a fork.
- - name: Create comment
+ - name: Create Warning Comment
if: |
- (steps.fc.outputs.comment-id == '') &&
+ (steps.fc_warning.outputs.comment-id == '') &&
(!contains(github.event.pull_request.labels.*.name, 'approved')) &&
(github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0
with:
- comment-id: ${{ steps.fc.outputs.comment-id }}
+ comment-id: ${{ steps.fc_warning.outputs.comment-id }}
issue-number: ${{ github.event.pull_request.number }}
body: |
- > **Warning**
+ > [!WARNING]
> This Pull Request modifies GitHub Workflows and is coming from a fork.
**It is very important for the reviewer to ensure that the workflow changes are appropriate.**
edit-mode: replace
- - name: Update comment
+ - name: Find Note Comment
+ uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
+ id: fc_note
+ with:
+ issue-number: ${{ github.event.pull_request.number }}
+ comment-author: 'github-actions[bot]'
+ body-includes: Workflow changes in this Pull Request have been approved!
+ - name: Update Comment
if: |
contains(github.event.pull_request.labels.*.name, 'approved')
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0
with:
- comment-id: ${{ steps.fc.outputs.comment-id }}
+ comment-id: ${{ steps.fc_note.outputs.comment-id }}
issue-number: ${{ github.event.pull_request.number }}
body: |
- > **Note**
- > Changes have been approved by a maintainer.
+ > [!NOTE]
+ > Workflow changes in this Pull Request have been approved!
reactions: |
hooray
edit-mode: append
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a8de3b16..ef7e837d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,11 +1,11 @@
default_language_version:
- python: python3
+ python: python3
repos:
- repo: https://github.com/asottile/pyupgrade
- rev: v3.16.0
+ rev: v3.17.0
hooks:
- - id: pyupgrade
+ - id: pyupgrade
args: [ '--py39-plus' ]
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
@@ -36,7 +36,7 @@ repos:
hooks:
- id: toml-sort-fix
- repo: https://github.com/psf/black-pre-commit-mirror
- rev: 24.4.2
+ rev: 24.8.0
hooks:
- id: black
exclude: ^docs/
@@ -46,14 +46,16 @@ repos:
- id: isort
exclude: ^docs/
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.5.0
+ rev: v0.5.7
hooks:
- id: ruff
+ args: [ '--fix', '--show-fixes' ]
+ # - id: ruff-format
- repo: https://github.com/pycqa/flake8
- rev: 7.1.0
+ rev: 7.1.1
hooks:
- id: flake8
- additional_dependencies: [ 'flake8-alphabetize', 'flake8-rst-docstrings' ]
+ additional_dependencies: [ 'flake8-rst-docstrings' ]
args: [ '--config=.flake8' ]
- repo: https://github.com/keewis/blackdoc
rev: v0.3.9
@@ -66,15 +68,20 @@ repos:
hooks:
- id: yamllint
args: [ '--config-file=.yamllint.yaml' ]
+# - repo: https://github.com/numpy/numpydoc
+# rev: v1.8.0
+# hooks:
+# - id: numpydoc-validation
+# exclude: ^docs/|^tests/
- repo: https://github.com/nbQA-dev/nbQA
rev: 1.8.5
hooks:
- id: nbqa-pyupgrade
args: [ '--py39-plus' ]
- additional_dependencies: [ 'pyupgrade==3.15.2' ]
+ additional_dependencies: [ 'pyupgrade==3.17.0' ]
- id: nbqa-black
args: [ '--target-version=py39' ]
- additional_dependencies: [ 'black==24.4.2' ]
+ additional_dependencies: [ 'black==24.8.0' ]
- id: nbqa-isort
additional_dependencies: [ 'isort==5.13.2' ]
- repo: https://github.com/kynan/nbstripout
@@ -90,7 +97,7 @@ repos:
exclude: .cruft.json|docs/notebooks
args: [ '--baseline=.secrets.baseline' ]
- repo: https://github.com/python-jsonschema/check-jsonschema
- rev: 0.28.6
+ rev: 0.29.1
hooks:
- id: check-github-workflows
- id: check-readthedocs
@@ -100,13 +107,13 @@ repos:
- id: check-useless-excludes
ci:
- autofix_commit_msg: |
- [pre-commit.ci] auto fixes from pre-commit.com hooks
+ autofix_commit_msg: |
+ [pre-commit.ci] auto fixes from pre-commit.com hooks
- for more information, see https://pre-commit.ci
- autofix_prs: true
- autoupdate_branch: ''
- autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate'
- autoupdate_schedule: quarterly
- skip: []
- submodules: false
+ for more information, see https://pre-commit.ci
+ autofix_prs: true
+ autoupdate_branch: ''
+ autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate'
+ autoupdate_schedule: monthly
+ skip: [ ]
+ submodules: false
diff --git a/.yamllint.yaml b/.yamllint.yaml
index f01a5d64..92c0cacb 100644
--- a/.yamllint.yaml
+++ b/.yamllint.yaml
@@ -1,10 +1,41 @@
---
rules:
+
+ brackets:
+ forbid: false
+ min-spaces-inside: 1
+ max-spaces-inside: 1
+
+ commas:
+ min-spaces-after: 1
+
document-start: disable
+
+ float-values:
+ require-numeral-before-decimal: true
+
+ hyphens:
+ max-spaces-after: 1
+
+ indentation:
+ indent-sequences: whatever
+ spaces: consistent
+
+ key-duplicates:
+ forbid-duplicated-merge-keys: true
+
line-length:
- max: 120
+ allow-non-breakable-words: true
+ allow-non-breakable-inline-mappings: true
+ max: 225
level: warning
+
+ new-lines:
+ type: unix
+
+ trailing-spaces: {}
+
truthy: disable
ignore: |
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index ec4154a8..f6f0768e 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -4,7 +4,7 @@ Changelog
v0.9.2 (unreleased)
-------------------
-Contributors to this version: Juliette Lavoie (:user:`juliettelavoie`), Pascal Bourgault (:user:`aulemahal`), Gabriel Rondeau-Genesse (:user:`RondeauG`).
+Contributors to this version: Juliette Lavoie (:user:`juliettelavoie`), Pascal Bourgault (:user:`aulemahal`), Gabriel Rondeau-Genesse (:user:`RondeauG`), Trevor James Smith (:user:`Zeitsperre`).
New features and enhancements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -37,6 +37,11 @@ Bug fixes
* Fix ``xs.catalog.concat_data_catalogs`` for catalogs that have not been searched yet. (:pull:`431`).
* Fix indicator computation using ``freq=2Q*`` by assuming this means a semiannual frequency anchored at the given month (pandas assumes 2 quarter steps, any of them anchored at the given month). (:pull:`431`).
+Breaking changes
+^^^^^^^^^^^^^^^^
+* `convert_calendar` in ``clean_up`` now uses `xarray` instead of `xclim`. Keywords aren't compatible between the two, but given that `xclim` will abandon its function, no backwards compatibility was sought. (:pull:`450`).
+* `attrs_to_remove` and `remove_all_attrs_except` in ``clean_up`` now use real regex. It should not be too breaking since a `fullmatch()` is used, but `*` is now `.*`. (:pull:`450`).
+
Internal changes
^^^^^^^^^^^^^^^^
* ``DataCatalog.to_dataset`` can now accept a ``preprocess`` argument even if ``create_ensemble_on`` is given. The user assumes calendar handling. (:pull:`431`).
@@ -46,11 +51,16 @@ Internal changes
* Explicitly assign coords of multiindex in `xs.unstack_fill_nan`. (:pull:`427`).
* French translations are compiled offline. A new check ensures no PRs are merged with missing messages. (:issue:`342`, :pull:`443`).
* Continued work to add tests. (:pull:`450`).
-
-Breaking changes
-^^^^^^^^^^^^^^^^
-* `convert_calendar` in ``clean_up`` now uses `xarray` instead of `xclim`. Keywords aren't compatible between the two, but given that `xclim` will abandon its function, no backwards compatibility was sought. (:pull:`450`).
-* `attrs_to_remove` and `remove_all_attrs_except` in ``clean_up`` now use real regex. It should not be too breaking since a `fullmatch()` is used, but `*` is now `.*`. (:pull:`450`).
+* Updated the cookiecutter template via `cruft`: (:pull:`452`)
+ * GitHub Workflows that rely on `PyPI`-based dependencies now use commit hashes.
+ * `Dependabot` will now group updates by type.
+ * Dependencies have been updated and synchronized.
+ * Contributor guidance documentation has been adjusted.
+ * `numpydoc-validate` has been added to the linting tools.
+ * Linting checks now rely more on `ruff` suggestions and are stricter.
+ * `flake8-alphabetize` has been replaced by `ruff`.
+ * License information has been updated in the library top-level `__init__.py`.
+* Docstrings have been adjusted to meet the `numpydoc` standard. (:pull:`452`).
v0.9.1 (2024-06-04)
-------------------
diff --git a/CI/requirements_ci.in b/CI/requirements_ci.in
new file mode 100644
index 00000000..e2d596fd
--- /dev/null
+++ b/CI/requirements_ci.in
@@ -0,0 +1,7 @@
+bump-my-version==0.26.0
+coveralls==4.0.1
+pip==24.2.0
+setuptools==74.1.2
+setuptools-scm==8.1.0
+tox==4.18.0
+tox-gh==1.3.2
diff --git a/CI/requirements_ci.txt b/CI/requirements_ci.txt
new file mode 100644
index 00000000..9ef0b488
--- /dev/null
+++ b/CI/requirements_ci.txt
@@ -0,0 +1,429 @@
+#
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
+#
+# pip-compile --allow-unsafe --generate-hashes --output-file=CI/requirements_ci.txt CI/requirements_ci.in
+#
+annotated-types==0.7.0 \
+ --hash=sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 \
+ --hash=sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89
+ # via pydantic
+bracex==2.4 \
+ --hash=sha256:a27eaf1df42cf561fed58b7a8f3fdf129d1ea16a81e1fadd1d17989bc6384beb \
+ --hash=sha256:efdc71eff95eaff5e0f8cfebe7d01adf2c8637c8c92edaf63ef348c241a82418
+ # via wcmatch
+bump-my-version==0.26.0 \
+ --hash=sha256:9e2c01b7639960379440c4a371b3c8c0aa66cf6979985f1c9ba2e7c2fb4a185f \
+ --hash=sha256:fe35ebae91e92deebe809ce06bfa37303e45e4f087ad4a371f605702e767623f
+ # via -r CI/requirements_ci.in
+cachetools==5.4.0 \
+ --hash=sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474 \
+ --hash=sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827
+ # via tox
+certifi==2024.7.4 \
+ --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \
+ --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90
+ # via requests
+chardet==5.2.0 \
+ --hash=sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7 \
+ --hash=sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970
+ # via tox
+charset-normalizer==3.3.2 \
+ --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \
+ --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \
+ --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \
+ --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \
+ --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \
+ --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \
+ --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \
+ --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \
+ --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \
+ --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \
+ --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \
+ --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \
+ --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \
+ --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \
+ --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \
+ --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \
+ --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \
+ --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \
+ --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \
+ --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \
+ --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \
+ --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \
+ --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \
+ --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \
+ --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \
+ --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \
+ --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \
+ --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \
+ --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \
+ --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \
+ --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \
+ --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \
+ --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \
+ --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \
+ --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \
+ --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \
+ --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \
+ --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \
+ --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \
+ --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \
+ --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \
+ --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \
+ --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \
+ --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \
+ --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \
+ --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \
+ --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \
+ --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \
+ --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \
+ --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \
+ --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \
+ --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \
+ --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \
+ --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \
+ --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \
+ --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \
+ --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \
+ --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \
+ --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \
+ --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \
+ --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \
+ --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \
+ --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \
+ --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \
+ --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \
+ --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \
+ --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \
+ --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \
+ --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \
+ --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \
+ --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \
+ --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \
+ --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \
+ --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \
+ --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \
+ --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \
+ --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \
+ --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \
+ --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \
+ --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \
+ --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \
+ --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \
+ --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \
+ --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \
+ --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \
+ --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \
+ --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \
+ --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \
+ --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \
+ --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561
+ # via requests
+click==8.1.7 \
+ --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \
+ --hash=sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de
+ # via
+ # bump-my-version
+ # rich-click
+colorama==0.4.6 \
+ --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \
+ --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6
+ # via tox
+coverage[toml]==7.6.0 \
+ --hash=sha256:0086cd4fc71b7d485ac93ca4239c8f75732c2ae3ba83f6be1c9be59d9e2c6382 \
+ --hash=sha256:01c322ef2bbe15057bc4bf132b525b7e3f7206f071799eb8aa6ad1940bcf5fb1 \
+ --hash=sha256:03cafe82c1b32b770a29fd6de923625ccac3185a54a5e66606da26d105f37dac \
+ --hash=sha256:044a0985a4f25b335882b0966625270a8d9db3d3409ddc49a4eb00b0ef5e8cee \
+ --hash=sha256:07ed352205574aad067482e53dd606926afebcb5590653121063fbf4e2175166 \
+ --hash=sha256:0d1b923fc4a40c5832be4f35a5dab0e5ff89cddf83bb4174499e02ea089daf57 \
+ --hash=sha256:0e7b27d04131c46e6894f23a4ae186a6a2207209a05df5b6ad4caee6d54a222c \
+ --hash=sha256:1fad32ee9b27350687035cb5fdf9145bc9cf0a094a9577d43e909948ebcfa27b \
+ --hash=sha256:289cc803fa1dc901f84701ac10c9ee873619320f2f9aff38794db4a4a0268d51 \
+ --hash=sha256:3c59105f8d58ce500f348c5b56163a4113a440dad6daa2294b5052a10db866da \
+ --hash=sha256:46c3d091059ad0b9c59d1034de74a7f36dcfa7f6d3bde782c49deb42438f2450 \
+ --hash=sha256:482855914928c8175735a2a59c8dc5806cf7d8f032e4820d52e845d1f731dca2 \
+ --hash=sha256:49c76cdfa13015c4560702574bad67f0e15ca5a2872c6a125f6327ead2b731dd \
+ --hash=sha256:4b03741e70fb811d1a9a1d75355cf391f274ed85847f4b78e35459899f57af4d \
+ --hash=sha256:4bea27c4269234e06f621f3fac3925f56ff34bc14521484b8f66a580aacc2e7d \
+ --hash=sha256:4d5fae0a22dc86259dee66f2cc6c1d3e490c4a1214d7daa2a93d07491c5c04b6 \
+ --hash=sha256:543ef9179bc55edfd895154a51792b01c017c87af0ebaae092720152e19e42ca \
+ --hash=sha256:54dece71673b3187c86226c3ca793c5f891f9fc3d8aa183f2e3653da18566169 \
+ --hash=sha256:6379688fb4cfa921ae349c76eb1a9ab26b65f32b03d46bb0eed841fd4cb6afb1 \
+ --hash=sha256:65fa405b837060db569a61ec368b74688f429b32fa47a8929a7a2f9b47183713 \
+ --hash=sha256:6616d1c9bf1e3faea78711ee42a8b972367d82ceae233ec0ac61cc7fec09fa6b \
+ --hash=sha256:6fe885135c8a479d3e37a7aae61cbd3a0fb2deccb4dda3c25f92a49189f766d6 \
+ --hash=sha256:7221f9ac9dad9492cecab6f676b3eaf9185141539d5c9689d13fd6b0d7de840c \
+ --hash=sha256:76d5f82213aa78098b9b964ea89de4617e70e0d43e97900c2778a50856dac605 \
+ --hash=sha256:7792f0ab20df8071d669d929c75c97fecfa6bcab82c10ee4adb91c7a54055463 \
+ --hash=sha256:831b476d79408ab6ccfadaaf199906c833f02fdb32c9ab907b1d4aa0713cfa3b \
+ --hash=sha256:9146579352d7b5f6412735d0f203bbd8d00113a680b66565e205bc605ef81bc6 \
+ --hash=sha256:9cc44bf0315268e253bf563f3560e6c004efe38f76db03a1558274a6e04bf5d5 \
+ --hash=sha256:a73d18625f6a8a1cbb11eadc1d03929f9510f4131879288e3f7922097a429f63 \
+ --hash=sha256:a8659fd33ee9e6ca03950cfdcdf271d645cf681609153f218826dd9805ab585c \
+ --hash=sha256:a94925102c89247530ae1dab7dc02c690942566f22e189cbd53579b0693c0783 \
+ --hash=sha256:ad4567d6c334c46046d1c4c20024de2a1c3abc626817ae21ae3da600f5779b44 \
+ --hash=sha256:b2e16f4cd2bc4d88ba30ca2d3bbf2f21f00f382cf4e1ce3b1ddc96c634bc48ca \
+ --hash=sha256:bbdf9a72403110a3bdae77948b8011f644571311c2fb35ee15f0f10a8fc082e8 \
+ --hash=sha256:beb08e8508e53a568811016e59f3234d29c2583f6b6e28572f0954a6b4f7e03d \
+ --hash=sha256:c4cbe651f3904e28f3a55d6f371203049034b4ddbce65a54527a3f189ca3b390 \
+ --hash=sha256:c7b525ab52ce18c57ae232ba6f7010297a87ced82a2383b1afd238849c1ff933 \
+ --hash=sha256:ca5d79cfdae420a1d52bf177de4bc2289c321d6c961ae321503b2ca59c17ae67 \
+ --hash=sha256:cdab02a0a941af190df8782aafc591ef3ad08824f97850b015c8c6a8b3877b0b \
+ --hash=sha256:d17c6a415d68cfe1091d3296ba5749d3d8696e42c37fca5d4860c5bf7b729f03 \
+ --hash=sha256:d39bd10f0ae453554798b125d2f39884290c480f56e8a02ba7a6ed552005243b \
+ --hash=sha256:d4b3cd1ca7cd73d229487fa5caca9e4bc1f0bca96526b922d61053ea751fe791 \
+ --hash=sha256:d50a252b23b9b4dfeefc1f663c568a221092cbaded20a05a11665d0dbec9b8fb \
+ --hash=sha256:da8549d17489cd52f85a9829d0e1d91059359b3c54a26f28bec2c5d369524807 \
+ --hash=sha256:dcd070b5b585b50e6617e8972f3fbbee786afca71b1936ac06257f7e178f00f6 \
+ --hash=sha256:ddaaa91bfc4477d2871442bbf30a125e8fe6b05da8a0015507bfbf4718228ab2 \
+ --hash=sha256:df423f351b162a702c053d5dddc0fc0ef9a9e27ea3f449781ace5f906b664428 \
+ --hash=sha256:dff044f661f59dace805eedb4a7404c573b6ff0cdba4a524141bc63d7be5c7fd \
+ --hash=sha256:e7e128f85c0b419907d1f38e616c4f1e9f1d1b37a7949f44df9a73d5da5cd53c \
+ --hash=sha256:ed8d1d1821ba5fc88d4a4f45387b65de52382fa3ef1f0115a4f7a20cdfab0e94 \
+ --hash=sha256:f2501d60d7497fd55e391f423f965bbe9e650e9ffc3c627d5f0ac516026000b8 \
+ --hash=sha256:f7db0b6ae1f96ae41afe626095149ecd1b212b424626175a6633c2999eaad45b
+ # via coveralls
+coveralls==4.0.1 \
+ --hash=sha256:7a6b1fa9848332c7b2221afb20f3df90272ac0167060f41b5fe90429b30b1809 \
+ --hash=sha256:7b2a0a2bcef94f295e3cf28dcc55ca40b71c77d1c2446b538e85f0f7bc21aa69
+ # via -r CI/requirements_ci.in
+distlib==0.3.8 \
+ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \
+ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64
+ # via virtualenv
+docopt==0.6.2 \
+ --hash=sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491
+ # via coveralls
+filelock==3.15.4 \
+ --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \
+ --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7
+ # via
+ # tox
+ # virtualenv
+idna==3.7 \
+ --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \
+ --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0
+ # via requests
+markdown-it-py==3.0.0 \
+ --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \
+ --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb
+ # via rich
+mdurl==0.1.2 \
+ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \
+ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba
+ # via markdown-it-py
+packaging==24.1 \
+ --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \
+ --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124
+ # via
+ # pyproject-api
+ # setuptools-scm
+ # tox
+platformdirs==4.2.2 \
+ --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \
+ --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3
+ # via
+ # tox
+ # virtualenv
+pluggy==1.5.0 \
+ --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \
+ --hash=sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669
+ # via tox
+prompt-toolkit==3.0.36 \
+ --hash=sha256:3e163f254bef5a03b146397d7c1963bd3e2812f0964bb9a24e6ec761fd28db63 \
+ --hash=sha256:aa64ad242a462c5ff0363a7b9cfe696c20d55d9fc60c11fd8e632d064804d305
+ # via questionary
+pydantic==2.8.2 \
+ --hash=sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a \
+ --hash=sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8
+ # via
+ # bump-my-version
+ # pydantic-settings
+pydantic-core==2.20.1 \
+ --hash=sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d \
+ --hash=sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f \
+ --hash=sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686 \
+ --hash=sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482 \
+ --hash=sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006 \
+ --hash=sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83 \
+ --hash=sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6 \
+ --hash=sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88 \
+ --hash=sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86 \
+ --hash=sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a \
+ --hash=sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6 \
+ --hash=sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a \
+ --hash=sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6 \
+ --hash=sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6 \
+ --hash=sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43 \
+ --hash=sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c \
+ --hash=sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4 \
+ --hash=sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e \
+ --hash=sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203 \
+ --hash=sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd \
+ --hash=sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1 \
+ --hash=sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24 \
+ --hash=sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc \
+ --hash=sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc \
+ --hash=sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3 \
+ --hash=sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598 \
+ --hash=sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98 \
+ --hash=sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331 \
+ --hash=sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2 \
+ --hash=sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a \
+ --hash=sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6 \
+ --hash=sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688 \
+ --hash=sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91 \
+ --hash=sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa \
+ --hash=sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b \
+ --hash=sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0 \
+ --hash=sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840 \
+ --hash=sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c \
+ --hash=sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd \
+ --hash=sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3 \
+ --hash=sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231 \
+ --hash=sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1 \
+ --hash=sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953 \
+ --hash=sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250 \
+ --hash=sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a \
+ --hash=sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2 \
+ --hash=sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20 \
+ --hash=sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434 \
+ --hash=sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab \
+ --hash=sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703 \
+ --hash=sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a \
+ --hash=sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2 \
+ --hash=sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac \
+ --hash=sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611 \
+ --hash=sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121 \
+ --hash=sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e \
+ --hash=sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b \
+ --hash=sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09 \
+ --hash=sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906 \
+ --hash=sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9 \
+ --hash=sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7 \
+ --hash=sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b \
+ --hash=sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987 \
+ --hash=sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c \
+ --hash=sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b \
+ --hash=sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e \
+ --hash=sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237 \
+ --hash=sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1 \
+ --hash=sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19 \
+ --hash=sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b \
+ --hash=sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad \
+ --hash=sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0 \
+ --hash=sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94 \
+ --hash=sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312 \
+ --hash=sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f \
+ --hash=sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669 \
+ --hash=sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1 \
+ --hash=sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe \
+ --hash=sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99 \
+ --hash=sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a \
+ --hash=sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a \
+ --hash=sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52 \
+ --hash=sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c \
+ --hash=sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad \
+ --hash=sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1 \
+ --hash=sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a \
+ --hash=sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f \
+ --hash=sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a \
+ --hash=sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27
+ # via pydantic
+pydantic-settings==2.3.4 \
+ --hash=sha256:11ad8bacb68a045f00e4f862c7a718c8a9ec766aa8fd4c32e39a0594b207b53a \
+ --hash=sha256:c5802e3d62b78e82522319bbc9b8f8ffb28ad1c988a99311d04f2a6051fca0a7
+ # via bump-my-version
+pygments==2.18.0 \
+ --hash=sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199 \
+ --hash=sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a
+ # via rich
+pyproject-api==1.7.1 \
+ --hash=sha256:2dc1654062c2b27733d8fd4cdda672b22fe8741ef1dde8e3a998a9547b071eeb \
+ --hash=sha256:7ebc6cd10710f89f4cf2a2731710a98abce37ebff19427116ff2174c9236a827
+ # via tox
+python-dotenv==1.0.1 \
+ --hash=sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca \
+ --hash=sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a
+ # via pydantic-settings
+questionary==2.0.1 \
+ --hash=sha256:8ab9a01d0b91b68444dff7f6652c1e754105533f083cbe27597c8110ecc230a2 \
+ --hash=sha256:bcce898bf3dbb446ff62830c86c5c6fb9a22a54146f0f5597d3da43b10d8fc8b
+ # via bump-my-version
+requests==2.32.3 \
+ --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \
+ --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6
+ # via coveralls
+rich==13.7.1 \
+ --hash=sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222 \
+ --hash=sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432
+ # via
+ # bump-my-version
+ # rich-click
+rich-click==1.8.3 \
+ --hash=sha256:636d9c040d31c5eee242201b5bf4f2d358bfae4db14bb22ec1cafa717cfd02cd \
+ --hash=sha256:6d75bdfa7aa9ed2c467789a0688bc6da23fbe3a143e19aa6ad3f8bac113d2ab3
+ # via bump-my-version
+setuptools-scm==8.1.0 \
+ --hash=sha256:42dea1b65771cba93b7a515d65a65d8246e560768a66b9106a592c8e7f26c8a7 \
+ --hash=sha256:897a3226a6fd4a6eb2f068745e49733261a21f70b1bb28fce0339feb978d9af3
+ # via -r CI/requirements_ci.in
+tomli==2.0.1 \
+ --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \
+ --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f
+ # via
+ # coverage
+ # pyproject-api
+ # setuptools-scm
+ # tox
+tomlkit==0.13.0 \
+ --hash=sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72 \
+ --hash=sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264
+ # via bump-my-version
+tox==4.18.0 \
+ --hash=sha256:0a457400cf70615dc0627eb70d293e80cd95d8ce174bb40ac011011f0c03a249 \
+ --hash=sha256:5dfa1cab9f146becd6e351333a82f9e0ade374451630ba65ee54584624c27b58
+ # via
+ # -r CI/requirements_ci.in
+ # tox-gh
+tox-gh==1.3.2 \
+ --hash=sha256:beb8d277d5d7c1a1f09c107e4ef80bd7dd2f8f5d020edfaf4c1e3ae8fd45bf6f \
+ --hash=sha256:c2d6e977f66712e7cd5e5d1b655a1bd4c91ebaf3be104befdb53c81587292d7e
+ # via -r CI/requirements_ci.in
+typing-extensions==4.12.2 \
+ --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \
+ --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8
+ # via
+ # pydantic
+ # pydantic-core
+ # rich-click
+ # setuptools-scm
+urllib3==2.2.2 \
+ --hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \
+ --hash=sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168
+ # via requests
+virtualenv==20.26.3 \
+ --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \
+ --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589
+ # via tox
+wcmatch==8.5.2 \
+ --hash=sha256:17d3ad3758f9d0b5b4dedc770b65420d4dac62e680229c287bf24c9db856a478 \
+ --hash=sha256:a70222b86dea82fb382dd87b73278c10756c138bd6f8f714e2183128887b9eb2
+ # via bump-my-version
+wcwidth==0.2.13 \
+ --hash=sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859 \
+ --hash=sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5
+ # via prompt-toolkit
+
+# The following packages are considered to be unsafe in a requirements file:
+pip==24.2 \
+ --hash=sha256:2cd581cf58ab7fcfca4ce8efa6dcacd0de5bf8d0a3eb9ec927e07405f4d9e2a2 \
+ --hash=sha256:5b5e490b5e9cb275c879595064adce9ebd31b854e3e803740b72f9ccf34a45b8
+ # via -r CI/requirements_ci.in
+setuptools==74.1.2 \
+ --hash=sha256:5f4c08aa4d3ebcb57a50c33b1b07e94315d7fc7230f7115e47fc99776c8ce308 \
+ --hash=sha256:95b40ed940a1c67eb70fc099094bd6e99c6ee7c23aa2306f4d2697ba7916f9c6
+ # via
+ # -r CI/requirements_ci.in
+ # setuptools-scm
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 6c0280dc..52c2d478 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -44,15 +44,14 @@ If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
-* Remember that this is a volunteer-driven project, and that contributions
- are welcome. :)
+* Remember that this is a volunteer-driven project, and that contributions are welcome. :)
Get Started!
------------
.. note::
- If you are new to using GitHub and `git`, please read `this guide `_ first.
+ If you are new to using `GitHub `_ and ``git``, please read `this guide `_ first.
.. warning::
@@ -69,11 +68,19 @@ Get Started!
Ready to contribute? Here's how to set up ``xscen`` for local development.
-#. Clone the repo locally:
+#. First, clone the ``xscen`` repo locally.
- .. code-block:: console
+ * If you are not a ``xscen`` collaborator, first fork the ``xscen`` repo on GitHub, then clone your fork locally.
+
+ .. code-block:: console
+
+ git clone git@github.com:your_name_here/xscen.git
- git clone git@github.com:Ouranosinc/xscen.git
+ * If you are a ``xscen`` collaborator, clone the ``xscen`` repo directly.
+
+ .. code-block:: console
+
+ git clone git@github.com:Ouranosinc/xscen.git
#. Install your local copy into a development environment. You can create a new Anaconda development environment with:
@@ -83,6 +90,13 @@ Ready to contribute? Here's how to set up ``xscen`` for local development.
conda activate xscen-dev
make dev
+ If you are on Windows, replace the ``make dev`` command with the following:
+
+ .. code-block:: console
+
+ python -m pip install -e .[dev]
+ pre-commit install
+
This installs ``xscen`` in an "editable" state, meaning that changes to the code are immediately seen by the environment. To ensure a consistent coding style, `make dev` also installs the ``pre-commit`` hooks to your local clone.
On commit, ``pre-commit`` will check that ``black``, ``blackdoc``, ``isort``, ``flake8``, and ``ruff`` checks are passing, perform automatic fixes if possible, and warn of violations that require intervention. If your commit fails the checks initially, simply fix the errors, re-add the files, and re-commit.
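+
+ If you would rather see the results before committing, the hooks can also be run manually over the whole repository (a quick example, assuming the hooks were installed with ``make dev``):
+
+ .. code-block:: console
+
+ pre-commit run --all-files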
@@ -107,7 +121,6 @@ Ready to contribute? Here's how to set up ``xscen`` for local development.
git checkout -b name-of-your-bugfix-or-feature
-
Now you can make your changes locally.
#. When you're done making changes, we **strongly** suggest running the tests in your environment or with the help of ``tox``:
@@ -140,10 +153,32 @@ Ready to contribute? Here's how to set up ``xscen`` for local development.
git commit -m "Your detailed description of your changes."
git push origin name-of-your-bugfix-or-feature
- If ``pre-commit`` hooks fail, try re-committing your changes (or, if need be, you can skip them with `$ git commit --no-verify`).
+ If ``pre-commit`` hooks fail, try fixing the issues, re-staging the files to be committed, and re-committing your changes (or, if need be, you can skip them with `git commit --no-verify`).
+
#. Submit a `Pull Request `_ through the GitHub website.
+#. If changes to your branch are made on GitHub, you can update your local branch with:
+
+ .. code-block:: console
+
+ git checkout name-of-your-bugfix-or-feature
+ git fetch
+ git pull origin name-of-your-bugfix-or-feature
+
+ If you have merge conflicts, you might need to replace `git pull` with `git merge` and resolve the conflicts manually.
+ Resolving conflicts from the command line can be tricky. If you are not comfortable with this, you can ignore the last command and instead use a GUI like PyCharm or Visual Studio Code to merge the remote changes and resolve the conflicts.
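+
+ For example, a merge-based update might look like this (a sketch; replace the branch name with your own):
+
+ .. code-block:: console
+
+ git fetch origin
+ git merge origin/name-of-your-bugfix-or-feature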
+
+#. Before merging, your Pull Request will need to be based on the `main` branch of the ``xscen`` repository. If your branch is not up-to-date with the `main` branch, you can perform steps similar to those above to update it:
+
+ .. code-block:: console
+
+ git checkout name-of-your-bugfix-or-feature
+ git fetch
+ git pull origin main
+
+ See the previous step for more information on resolving conflicts.
+
#. When pushing your changes to your branch on GitHub, the documentation will automatically be tested to reflect the changes in your Pull Request. This build process can take several minutes at times. If you are actively making changes that affect the documentation and wish to save time, you can compile and test your changes beforehand locally with:
.. code-block:: console
@@ -168,7 +203,7 @@ Ready to contribute? Here's how to set up ``xscen`` for local development.
- `ReadTheDocs` will automatically build the documentation and publish it to the `latest` branch of `xscen` documentation website.
- If your branch is not a fork (ie: you are a maintainer), your branch will be automatically deleted.
- You will have contributed your first changes to ``xscen``!
+You will have contributed to ``xscen``!
.. _translating-xscen:
@@ -216,11 +251,13 @@ Before you submit a pull request, check that it meets these guidelines:
#. The pull request should include tests and should aim to provide `code coverage `_ for all new lines of code. You can use the ``--cov-report html --cov xscen`` flags during the call to ``pytest`` to generate an HTML report and analyse the current test coverage.
-#. If the pull request adds functionality, the docs should also be updated. Put your new functionality into a function with a docstring, and add the feature to the list in ``README.rst``.
+#. All functions should be documented with `docstrings` following the `numpydoc `_ format.
+
+#. If the pull request adds functionality, either update the documentation or create a new notebook that demonstrates the feature. Library-defining features should also be listed in ``README.rst``.
#. The pull request should not break the templates.
-#. The pull request should work for Python 3.9, 3.10, 3.11, and 3.12. Check that the tests pass for all supported Python versions.
+#. The pull request should work for all currently supported Python versions. Check the `pyproject.toml` or `tox.ini` files for the list of supported versions.
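+
+ For example, one quick way to list the versions declared in ``pyproject.toml`` from the repository root is:
+
+ .. code-block:: console
+
+ grep "Programming Language :: Python ::" pyproject.toml
+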
Tips
----
@@ -243,13 +280,14 @@ To run specific code style checks:
.. code-block:: console
- python -m black --check xscen tests
- python -m isort --check xscen tests
- python -m blackdoc --check xscen docs
- python -m ruff check xscen tests
- python -m flake8 xscen tests
+ python -m black --check src/xscen tests
+ python -m isort --check src/xscen tests
+ python -m blackdoc --check src/xscen docs
+ python -m ruff check src/xscen tests
+ python -m flake8 src/xscen tests
+ validate-docstrings src/xscen/**.py
-To get ``black``, ``isort``, ``blackdoc``, ``ruff``, and ``flake8`` (with plugins ``flake8-alphabetize`` and ``flake8-rst-docstrings``) simply install them with ``pip`` (or ``conda``) into your environment.
+To get ``black``, ``isort``, ``blackdoc``, ``ruff``, ``flake8`` (with the ``flake8-rst-docstrings`` plugin), and ``numpydoc`` (for ``validate-docstrings``), simply install them with ``pip`` (or ``conda``) into your environment.
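+
+For example, installing them with ``pip`` would look something like:
+
+.. code-block:: console
+
+ python -m pip install black blackdoc isort flake8 flake8-rst-docstrings ruff numpydoc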
Versioning/Tagging
------------------
diff --git a/Makefile b/Makefile
index ee70daaf..4acee60c 100644
--- a/Makefile
+++ b/Makefile
@@ -58,6 +58,7 @@ clean-test: ## remove test and coverage artifacts
lint/flake8: ## check style with flake8
python -m ruff check src/xscen tests
python -m flake8 --config=.flake8 src/xscen tests
+ # python -m numpydoc lint src/xscen/**.py # FIXME: disabled until the codebase is fully numpydoc compliant
lint/black: ## check style with black
python -m black --check src/xscen tests
@@ -70,7 +71,7 @@ test: ## run tests quickly with the default Python
python -m pytest
test-all: ## run tests on every Python version with tox
- tox
+ python -m tox
initialize-translations: clean-docs ## initialize translations, ignoring autodoc-generated files
${MAKE} -C docs gettext
diff --git a/docs/conf.py b/docs/conf.py
index 4284498c..20e10c3b 100755
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -141,10 +141,8 @@
# templates_path = ['_templates']
# The suffix(es) of source filenames.
-# You can specify multiple suffix as a list of string:
-#
-# source_suffix = ['.rst', '.md']
-source_suffix = [".rst"]
+# You can specify multiple suffixes as a dictionary of suffix: filetype
+source_suffix = {'.rst': 'restructuredtext'}
# The master toctree document.
master_doc = "index"
@@ -213,15 +211,15 @@
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
+if not os.path.exists("_static"):
+ os.makedirs("_static")
html_static_path = ["_static"]
-
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "xscendoc"
-
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
@@ -252,14 +250,12 @@
),
]
-
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "xscen", "xscen Documentation", [author], 1)]
-
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
diff --git a/docs/installation.rst b/docs/installation.rst
index 8860ee9c..9dee7071 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -2,6 +2,20 @@
Installation
============
+We strongly recommend installing xscen in an Anaconda Python environment.
+Furthermore, due to the complexity of some packages, the default dependency solver can take a long time to resolve the environment.
+If `libmamba` is not already your default solver, consider running the following commands in order to speed up the process:
+
+ .. code-block:: console
+
+ conda install -n base conda-libmamba-solver
+ conda config --set solver libmamba
+
+If you don't have `pip`_ installed, this `Python installation guide`_ can guide you through the process.
+
+.. _pip: https://pip.pypa.io
+.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
+
Official Sources
----------------
@@ -13,6 +27,8 @@ Because of some packages being absent from PyPI (such as `xESMF`), we strongly r
conda install -c conda-forge xscen
+This is the preferred method to install xscen, as it will always install the most recent stable release.
+
.. note::
If you are unable to install the package due to missing dependencies, ensure that `conda-forge` is listed as a source in your `conda` configuration: `conda config --add channels conda-forge`!
@@ -28,28 +44,57 @@ Development Installation (Anaconda + pip)
For development purposes, we provide the means for generating a conda environment with the latest dependencies in an `environment.yml` file at the top-level of the `Github repo `_.
-In order to get started, first clone the repo locally:
+The sources for xscen can be downloaded from the `Github repo`_.
-.. code-block:: console
+#. Download the source code from the `Github repo`_ using one of the following methods:
- git clone git@github.com:Ouranosinc/xscen.git
+ * Clone the public repository:
-Or download the `tarball `_:
+ .. code-block:: console
- .. code-block:: console
+ git clone git@github.com:Ouranosinc/xscen.git
- curl -OJL https://github.com/Ouranosinc/xscen/tarball/main
+ * Download the `tarball `_:
-Then you can create the environment and install the package:
+ .. code-block:: console
-.. code-block:: console
+ curl -OJL https://github.com/Ouranosinc/xscen/tarball/main
- cd xscen
- conda env create -f environment.yml
+#. Once you have a copy of the source, you can install it with:
-Finally, perform an `--editable` install of xscen and compile the translation catalogs:
+ .. code-block:: console
-.. code-block:: console
+ conda env create -f environment-dev.yml
+ conda activate xscen-dev
+ make dev
+
+ If you are on Windows, replace the ``make dev`` command with the following:
+
+ .. code-block:: console
+
+ python -m pip install -e .[dev]
+
+ Even if you do not intend to contribute to `xscen`, we favor using `environment-dev.yml` over `environment.yml` because it includes additional packages that are used to run all the examples provided in the documentation.
+ If for some reason you wish to install the `PyPI` version of `xscen` into an existing Anaconda environment (*not recommended if requirements are not met*), only run the last command above.
+
+#. When new changes are made to the `Github repo`_, if using a clone, you can update your local copy using the following commands from the root of the repository:
+
+ .. code-block:: console
+
+ git fetch
+ git checkout main
+ git pull origin main
+ conda env update -n xscen-dev -f environment-dev.yml
+ conda activate xscen-dev
+ make dev
+
+ These commands should work most of the time, but if big changes are made to the repository, you might need to remove the environment and create it again.
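+
+ For instance, a full rebuild of the environment (only needed if the update above fails) would look something like:
+
+ .. code-block:: console
+
+ conda env remove -n xscen-dev
+ conda env create -f environment-dev.yml
+ conda activate xscen-dev
+ make dev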
+
+#. Finally, in order to compile the translation catalogs, run the following commands from the root of the repository:
+
+ .. code-block:: console
+
+ python -m pip install -e .
+ make translate
- python -m pip install -e .
- make translate
+.. _Github repo: https://github.com/Ouranosinc/xscen
diff --git a/environment-dev.yml b/environment-dev.yml
index 75a4cc05..7d266d10 100644
--- a/environment-dev.yml
+++ b/environment-dev.yml
@@ -29,7 +29,7 @@ dependencies:
- shapely >=2.0
- sparse
- toolz
- - xarray >=2023.11.0, !=2024.6.0
+ - xarray >=2023.11.0, !=2024.6.0, <2024.09.0
- xclim >=0.50, <0.51
- xesmf >=0.7
- zarr
@@ -38,11 +38,11 @@ dependencies:
- pyarrow >=10.0.1
# Dev
- babel
- - black ==24.2.0
+ - black ==24.8.0
- blackdoc ==0.3.9
- - bump-my-version >=0.18.3
- - coverage>=7.0.0
- - coveralls>=3.3.1
+ - bump-my-version >=0.25.1
+ - coverage>=7.5.0
+ - coveralls>=4.0.1
- flake8 >=6.1.0
- flake8-rst-docstrings>=0.3.0
- ipykernel
@@ -51,12 +51,13 @@ dependencies:
- jupyter_client
- nbsphinx
- nbval
+ - numpydoc >=1.8.0
- pandoc
- pooch
- - pre-commit >=3.3.2
- - pytest >=7.3.1
- - pytest-cov >=4.0.0
- - ruff >=0.3.0
+ - pre-commit >=3.5.0
+ - pytest >=8.3.2
+ - pytest-cov >=5.0.0
+ - ruff >=0.5.7
- setuptools >=65.0.0
- setuptools-scm >=8.0.0
- sphinx
@@ -66,13 +67,11 @@ dependencies:
- sphinx-codeautolink
- sphinx-copybutton
- sphinx-mdinclude
- - watchdog >=3.0.0
+ - watchdog >=4.0.0
- xdoctest
# Testing
- - tox >=4.5.1
+ - tox >=4.17.1
+ - tox-gh >=1.3.2
# packaging
- conda-build
- wheel
- - pip >=23.3.0
- - pip:
- - flake8-alphabetize
diff --git a/environment.yml b/environment.yml
index 94cb7c0b..7192487c 100644
--- a/environment.yml
+++ b/environment.yml
@@ -29,7 +29,7 @@ dependencies:
- shapely >=2.0
- sparse
- toolz
- - xarray >=2023.11.0, !=2024.6.0
+ - xarray >=2023.11.0, !=2024.6.0, <2024.09.0
- xclim >=0.50, <0.51
- xesmf >=0.7
- zarr
diff --git a/pyproject.toml b/pyproject.toml
index 45313665..bf2f3e7a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,9 +9,14 @@ build-backend = "setuptools.build_meta"
[project]
name = "xscen"
authors = [
- {name = "Gabriel Rondeau-Genesse", email = "rondeau-genesse.gabriel@ouranos.ca"}
+ {name = "Gabriel Rondeau-Genesse", email = "rondeau-genesse.gabriel@ouranos.ca"},
+ {name = "Pascal Bourgault", email = "bourgault.pascal@ouranos.ca"},
+ {name = "Juliette Lavoie", email = "lavoie.juliette@ouranos.ca"}
+]
+maintainers = [
+ {name = "Gabriel Rondeau-Genesse", email = "rondeau-genesse.gabriel@ouranos.ca"},
+ {name = "Trevor James Smith", email = "smith.trevorj@ouranos.ca"}
]
-maintainers = []
description = "A climate change scenario-building analysis framework, built with xclim/xarray."
readme = "README.rst"
requires-python = ">=3.9.0"
@@ -23,12 +28,13 @@ classifiers = [
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: OS Independent",
- "Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
+ "Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
+ # "Programming Language :: Python :: 3.13",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering :: Atmospheric Science"
]
@@ -60,7 +66,7 @@ dependencies = [
"shapely >=2.0",
"sparse",
"toolz",
- "xarray >=2023.11.0, !=2024.6.0",
+ "xarray >=2023.11.0, !=2024.6.0, <2024.09.0",
"xclim >=0.50, <0.51",
"zarr"
]
@@ -68,25 +74,25 @@ dependencies = [
[project.optional-dependencies]
dev = [
# Dev tools and testing
- "pip >=23.3.0",
+ "pip >=24.2.0",
"babel",
"black[jupyter] ==24.8.0",
"blackdoc ==0.3.9",
- "bump-my-version >=0.18.3",
- "coverage >=7.0.0",
- "coveralls >=3.3.1",
- "flake8-alphabetize >=0.0.21",
+ "bump-my-version >=0.26.0",
+ "coverage >=7.5.0",
+ "coveralls >=4.0.1",
+ "flake8 >=7.1.1",
"flake8-rst-docstrings >=0.3.0",
- "flake8 >=6.1.0",
"isort ==5.13.2",
"mypy",
+ "numpydoc >=1.8.0",
"pooch",
"pre-commit >=3.3.2",
- "pytest-cov >=4.0.0",
- "pytest >=7.3.1",
- "ruff >=0.3.0",
- "tox >=4.5.1",
- "watchdog >=3.0.0",
+ "pytest-cov >=5.0.0",
+ "pytest >=8.3.2",
+ "ruff >=0.5.7",
+ "tox >=4.18.0",
+ "watchdog >=4.0.0",
"xdoctest"
]
docs = [
@@ -111,9 +117,9 @@ extra = [
all = ["xscen[dev]", "xscen[docs]", "xscen[extra]"]
[project.urls]
-"About Ouranos" = "https://www.ouranos.ca/en/"
-"Changelog" = "https://xscen.readthedocs.io/en/stable/changes.html"
"Homepage" = "https://xscen.readthedocs.io/"
+"Changelog" = "https://xscen.readthedocs.io/en/stable/changelog.html"
+"About Ouranos" = "https://ouranos.ca/en/"
"Issue tracker" = "https://github.com/Ouranosinc/xscen/issues"
"Source" = "https://github.com/Ouranosinc/xscen"
@@ -140,6 +146,33 @@ serialize = [
"{major}.{minor}.{patch}"
]
+[[tool.bumpversion.files]]
+filename = "CHANGELOG.rst"
+include_bumps = ["release"]
+search = """\
+`Unreleased `_ (latest)
+------------------------------------------------------------
+"""
+replace = """\
+`Unreleased `_ (latest)
+------------------------------------------------------------
+
+Contributors:
+
+Changes
+^^^^^^^
+* No change.
+
+Fixes
+^^^^^
+* No change.
+
+.. _changes_{new_version}:
+
+`v{new_version} `_
+----------------------------------------------------------------------
+"""
+
[[tool.bumpversion.files]]
filename = "src/xscen/__init__.py"
search = "__version__ = \"{current_version}\""
@@ -160,10 +193,13 @@ values = [
"release"
]
+[tool.coverage.paths]
+source = ["src/xscen/", "*/site-packages/xscen/"]
+
[tool.coverage.run]
-relative_files = true
-include = ["src/xscen/*"]
omit = ["docs/notebooks/*.ipynb", "tests/*.py", "src/xscen/reduce.py"] # FIXME: Remove xscen/reduce.py when it's fully deleted.
+relative_files = true
+source = ["xscen"]
[tool.isort]
append_only = true
@@ -190,6 +226,26 @@ allow_untyped_defs = true
disable_error_code = "attr-defined"
ignore_missing_imports = true
+[tool.numpydoc_validation]
+checks = [
+ "all", # report on all checks, except the below
+ "EX01",
+ "SA01",
+ "ES01"
+]
+# remember to use single quotes for regex in TOML
+exclude = [
+ # don't report on objects that match any of these regex
+ '\.undocumented_method$',
+ '\.__repr__$'
+]
+override_SS05 = [
+ # override SS05 to allow docstrings starting with these words
+ '^Process ',
+ '^Assess ',
+ '^Access '
+]
+
[tool.pytest.ini_options]
addopts = [
"--color=yes",
@@ -214,20 +270,43 @@ exclude = [
]
[tool.ruff.format]
+quote-style = "double"
+indent-style = "space"
+skip-magic-trailing-comma = false
line-ending = "auto"
[tool.ruff.lint]
+extend-select = [
+ "RUF022" # unsorted-dunder-all
+]
ignore = [
- "D205",
- "D400",
- "D401"
+ "COM", # commas
+ "D205", # blank-line-after-summary
+ "D400", # ends-in-period
+ "D401", # non-imperative-mood
+ # The following are disabled because the codebase is not yet compliant.
+ "N801", # invalid-class-name
+ "N806", # non-lowercase-variable-in-function
+ "PERF203", # try-except-in-loop
+ "PERF401", # manual-list-comprehension
+ "S110" # try-except-pass
]
+preview = true
select = [
- "C9",
- "D",
- "E",
- "F",
- "W"
+ "BLE", # blind-except
+ "C90", # mccabe-complexity
+ "D", # docstrings
+ "E", # pycodestyle errors
+ "FLY002", # static-join-to-fstring
+ "G", # logging-format
+ "N", # naming conventions
+ "PERF", # iterator performance
+ "PTH", # pathlib
+ "RUF010", # explicit-f-string-type-conversion
+ "RUF013", # implicit-optional
+ "S", # bandit
+ "UP", # python version conventions
+ "W" # pycodestyle warnings
]
[tool.ruff.lint.flake8-bandit]
@@ -237,7 +316,7 @@ check-typed-exception = true
known-first-party = ["xscen"]
case-sensitive = true
detect-same-package = false
-lines-after-imports = 1
+lines-after-imports = 2
no-lines-before = ["future", "standard-library"]
[tool.ruff.lint.mccabe]
@@ -246,7 +325,7 @@ max-complexity = 15
[tool.ruff.lint.per-file-ignores]
"docs/**/*.py" = ["E402"]
"src/xscen/**/__init__.py" = ["F401", "F403"]
-"tests/**/*.py" = ["D100", "D101", "D102", "D103"]
+"tests/**/*.py" = ["D100", "D101", "D102", "D103", "S101"]
[tool.ruff.lint.pycodestyle]
max-doc-length = 180
diff --git a/src/xscen/__init__.py b/src/xscen/__init__.py
index 4582f0d7..95d3b7be 100644
--- a/src/xscen/__init__.py
+++ b/src/xscen/__init__.py
@@ -1,5 +1,23 @@
"""A climate change scenario-building analysis framework, built with xclim/xarray."""
+###################################################################################
+# Apache Software License 2.0
+#
+# Copyright (c) 2024, Gabriel Rondeau-Genesse, Pascal Bourgault, Juliette Lavoie, Trevor James Smith
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+###################################################################################
+
import warnings
# Import the submodules
@@ -57,7 +75,29 @@
def warning_on_one_line(
message: str, category: Warning, filename: str, lineno: int, file=None, line=None
):
- """Monkeypatch Reformat warning so that `warnings.warn` doesn't mention itself."""
+ """
+    Monkeypatch the warning formatting so that `warnings.warn` doesn't mention itself.
+
+ Parameters
+ ----------
+ message : str
+ The warning message.
+ category : Warning
+ The warning category.
+ filename : str
+ The filename where the warning was raised.
+ lineno : int
+ The line number where the warning was raised.
+ file : file
+ The file where the warning was raised.
+ line : str
+ The line where the warning was raised.
+
+ Returns
+ -------
+ str
+ The reformatted warning message.
+ """
return f"{filename}:{lineno}: {category.__name__}: {message}\n"
diff --git a/src/xscen/aggregate.py b/src/xscen/aggregate.py
index 7bf04dee..3110e883 100644
--- a/src/xscen/aggregate.py
+++ b/src/xscen/aggregate.py
@@ -833,14 +833,14 @@ def spatial_mean( # noqa: C901
)
if "units" not in ds.cf["latitude"].attrs:
- logger.warning(
- f"{ds.attrs.get('cat:id', '')}: Latitude does not appear to have units. Make sure that the computation is right."
- )
+ msg = f"{ds.attrs.get('cat:id', '')}: Latitude does not appear to have units. Make sure that the computation is right."
+ logger.warning(msg)
elif ds.cf["latitude"].attrs["units"] != "degrees_north":
- logger.warning(
+ msg = (
f"{ds.attrs.get('cat:id', '')}: Latitude units is '{ds.cf['latitude'].attrs['units']}', expected 'degrees_north'. "
- f"Make sure that the computation is right."
+ "Make sure that the computation is right."
)
+ logger.warning(msg)
if ((ds.cf["longitude"].min() < -160) & (ds.cf["longitude"].max() > 160)) or (
(ds.cf["longitude"].min() < 20) & (ds.cf["longitude"].max() > 340)
diff --git a/src/xscen/biasadjust.py b/src/xscen/biasadjust.py
index 63306489..5a7362a5 100644
--- a/src/xscen/biasadjust.py
+++ b/src/xscen/biasadjust.py
@@ -240,7 +240,8 @@ def adjust(
# evaluate the dict that was stored as a string
if not isinstance(dtrain.attrs["train_params"], dict):
- dtrain.attrs["train_params"] = eval(dtrain.attrs["train_params"])
+        # FIXME: eval is bad. There has to be a better way!
+ dtrain.attrs["train_params"] = eval(dtrain.attrs["train_params"]) # noqa: S307
var = dtrain.attrs["train_params"]["var"]
if len(var) != 1:
diff --git a/src/xscen/catalog.py b/src/xscen/catalog.py
index 841f12d3..788e4f79 100644
--- a/src/xscen/catalog.py
+++ b/src/xscen/catalog.py
@@ -37,8 +37,8 @@
__all__ = [
"COLUMNS",
- "DataCatalog",
"ID_COLUMNS",
+ "DataCatalog",
"ProjectCatalog",
"concat_data_catalogs",
"generate_id",
@@ -236,7 +236,7 @@ def from_df(
).reset_index(drop=True)
if isinstance(esmdata, os.PathLike):
- with open(esmdata) as f:
+ with Path(esmdata).open(encoding="utf-8") as f:
esmdata = json.load(f)
elif esmdata is None:
esmdata = deepcopy(esm_col_data)
@@ -356,9 +356,8 @@ def check_existing(row):
path = Path(row.path)
exists = (path.is_dir() and path.suffix == ".zarr") or (path.is_file())
if not exists:
- logger.info(
- f"File {path} was not found on disk, removing from catalog."
- )
+ msg = f"File {path} was not found on disk, removing from catalog."
+ logger.info(msg)
return exists
# In case variables were deleted manually in a Zarr, double-check that they still exist
@@ -399,7 +398,8 @@ def exists_in_cat(self, **columns) -> bool:
"""
exists = bool(len(self.search(**columns)))
if exists:
- logger.info(f"An entry exists for: {columns}")
+ msg = f"An entry exists for: {columns}"
+ logger.info(msg)
return exists
def to_dataset(
@@ -596,21 +596,26 @@ def copy_files(
else:
data = build_path(data, root=dest).drop(columns=["new_path_type"])
- logger.debug(f"Will copy {len(data)} files.")
+ msg = f"Will copy {len(data)} files."
+ logger.debug(msg)
for i, row in data.iterrows():
old = Path(row.path)
new = Path(row.new_path)
if unzip and old.suffix == ".zip":
- logger.info(f"Unzipping {old} to {new}.")
+ msg = f"Unzipping {old} to {new}."
+ logger.info(msg)
unzip_directory(old, new)
elif zipzarr and old.suffix == ".zarr":
- logger.info(f"Zipping {old} to {new}.")
+ msg = f"Zipping {old} to {new}."
+ logger.info(msg)
zip_directory(old, new)
elif old.is_dir():
- logger.info(f"Copying directory tree {old} to {new}.")
+ msg = f"Copying directory tree {old} to {new}."
+ logger.info(msg)
sh.copytree(old, new)
else:
- logger.info(f"Copying file {old} to {new}.")
+ msg = f"Copying file {old} to {new}."
+ logger.info(msg)
sh.copy(old, new)
if inplace:
self.esmcat._df["path"] = data["new_path"]
@@ -643,26 +648,26 @@ def create(
Parameters
----------
filename : os.PathLike or str
- A path to the json file (with or without suffix).
+ A path to the json file (with or without suffix).
project : dict, optional
- Metadata to create the catalog. If None, `CONFIG['project']` will be used.
- Valid fields are:
+ Metadata to create the catalog. If None, `CONFIG['project']` will be used.
+ Valid fields are:
- - title : Name of the project, given as the catalog's "title".
- - id : slug-like version of the name, given as the catalog's id (should be url-proof)
- Defaults to a modified name.
- - version : Version of the project (and thus the catalog), string like "x.y.z".
- - description : Detailed description of the project, given to the catalog's "description".
- - Any other entry defined in :py:data:`esm_col_data`.
+ - title : Name of the project, given as the catalog's "title".
+ - id : slug-like version of the name, given as the catalog's id (should be url-proof)
+ Defaults to a modified name.
+ - version : Version of the project (and thus the catalog), string like "x.y.z".
+ - description : Detailed description of the project, given to the catalog's "description".
+ - Any other entry defined in :py:data:`esm_col_data`.
- At least one of `id` and `title` must be given, the rest is optional.
+ At least one of `id` and `title` must be given, the rest is optional.
overwrite : bool
- If True, will overwrite any existing JSON and CSV file.
+ If True, will overwrite any existing JSON and CSV file.
Returns
-------
ProjectCatalog
- An empty intake_esm catalog.
+ An empty intake_esm catalog.
"""
path = Path(filename)
meta_path = path.with_suffix(".json")
@@ -701,10 +706,10 @@ def create(
)
# Change catalog_file to a relative path
- with open(meta_path) as f:
+ with Path(meta_path).open(encoding="utf-8") as f:
meta = json.load(f)
meta["catalog_file"] = data_path.name
- with open(meta_path, "w") as f:
+ with Path(meta_path).open("w", encoding="utf-8") as f:
json.dump(meta, f, indent=2)
return cls(str(meta_path))
@@ -718,7 +723,8 @@ def __init__(
project: Optional[dict] = None,
**kwargs,
):
- """Open or create a project catalog.
+ """
+ Open or create a project catalog.
Parameters
----------
@@ -740,9 +746,7 @@ def __init__(
             The `df` key must be a Pandas DataFrame containing content that would otherwise be in the CSV file.
"""
if create:
- if isinstance(df, (str, Path)) and (
- not os.path.isfile(Path(df)) or overwrite
- ):
+ if isinstance(df, (str, Path)) and (not Path(df).is_file() or overwrite):
self.create(df, project=project, overwrite=overwrite)
super().__init__(df, *args, **kwargs)
self.check_valid()
@@ -895,9 +899,8 @@ def update_from_ds(
if "format" not in d:
d["format"] = Path(d["path"]).suffix.split(".")[1]
- logger.info(
- f"File format not specified. Adding it as '{d['format']}' based on file name."
- )
+ msg = f"File format not specified. Adding it as '{d['format']}' based on file name."
+ logger.info(msg)
self.update(pd.Series(d))
@@ -1018,7 +1021,7 @@ def unstack_id(df: Union[pd.DataFrame, ProjectCatalog, DataCatalog]) -> dict:
[
col
for col in subset.columns
- if bool(re.search(f"((_)|(^)){str(subset[col].iloc[0])}((_)|($))", ids))
+ if bool(re.search(f"((_)|(^)){subset[col].iloc[0]!s}((_)|($))", ids))
]
].drop("id", axis=1)
@@ -1070,9 +1073,8 @@ def subset_file_coverage(
# Check for duplicated Intervals
if duplicates_ok is False and intervals.is_overlapping:
- logging.warning(
- f"{df['id'].iloc[0] + ': ' if 'id' in df.columns else ''}Time periods are overlapping."
- )
+ msg = f"{df['id'].iloc[0] + ': ' if 'id' in df.columns else ''}Time periods are overlapping."
+ logging.warning(msg)
return pd.DataFrame(columns=df.columns)
# Create an array of True/False
@@ -1086,9 +1088,8 @@ def subset_file_coverage(
files_in_range = intervals.overlaps(period_interval)
if not files_in_range.any():
- logging.warning(
- f"{df['id'].iloc[0] + ': ' if 'id' in df.columns else ''}Insufficient coverage (no files in range {period})."
- )
+ msg = f"{df['id'].iloc[0] + ': ' if 'id' in df.columns else ''}Insufficient coverage (no files in range {period})."
+ logging.warning(msg)
return pd.DataFrame(columns=df.columns)
# Very rough guess of the coverage relative to the requested period,
@@ -1107,10 +1108,11 @@ def subset_file_coverage(
).length.sum()
if guessed_length / period_length < coverage:
- logging.warning(
+ msg = (
f"{df['id'].iloc[0] + ': ' if 'id' in df.columns else ''}Insufficient coverage "
f"(guessed at {guessed_length / period_length:.1%})."
)
+ logging.warning(msg)
return pd.DataFrame(columns=df.columns)
files_to_keep.append(files_in_range)
diff --git a/src/xscen/catutils.py b/src/xscen/catutils.py
index 86d24c63..12e875be 100644
--- a/src/xscen/catutils.py
+++ b/src/xscen/catutils.py
@@ -129,13 +129,13 @@ def _find_assets(
Parameters
----------
- root: str or Pathlike
+ root : str or Pathlike
Path of the directory to walk through.
- exts: set of strings
+ exts : set of strings
Set of file extensions to look for.
- lengths: set of ints
+ lengths : set of ints
Set of path depths to look for.
- dirglob: str, optional
+ dirglob : str, optional
A glob pattern. If given, only parent folders matching this pattern are walked through.
This pattern can not include the asset's basename.
"""
@@ -159,11 +159,11 @@ def _find_assets(
if ".zarr" in exts:
for zr in zarrs:
- yield os.path.join(top, zr)
+ yield Path(top).joinpath(zr).as_posix()
if exts - {".zarr"}: # There are more exts than
for file in files:
- if os.path.splitext(file)[-1] in exts:
- yield os.path.join(top, file)
+ if Path(file).suffix in exts:
+ yield Path(top).joinpath(file).as_posix()
def _compile_pattern(pattern: str) -> parse.Parser:
@@ -280,13 +280,13 @@ def _parse_dir( # noqa: C901
Parameters
----------
- root: os.PathLike or str
+ root : os.PathLike or str
Path to walk through.
- patterns: list of strings or compiled parsers
+ patterns : list of strings or compiled parsers
Patterns that the files will be checked against.
The extensions of the patterns are extracted and only paths with these are returned.
Also, the depths of the patterns are calculated and only paths of this depth under the root are returned.
- dirglob: str
+ dirglob : str
A glob pattern. If given, only parent folders matching this pattern are walked through.
This pattern can not include the asset's basename.
checks: list of strings, optional
@@ -302,7 +302,7 @@ def _parse_dir( # noqa: C901
If `read_from_file` is not None, passed directly to :py:func:`parse_from_ds`.
xr_open_kwargs : dict, optional
If `read_from_file` is not None, passed directly to :py:func:`parse_from_ds`.
- progress: bool
+ progress : bool
If True, the number of found files is printed to stdout.
Return
@@ -311,7 +311,7 @@ def _parse_dir( # noqa: C901
Metadata parsed from each found asset.
"""
lengths = {patt.count(os.path.sep) for patt in patterns}
- exts = {os.path.splitext(patt)[-1] for patt in patterns}
+ exts = {Path(patt).suffix for patt in patterns}
comp_patterns = list(map(_compile_pattern, patterns))
checks = checks or []
@@ -320,7 +320,7 @@ def _parse_dir( # noqa: C901
# Another thread runs the checks
# Another thread parses the path and file.
# In theory, for a local disk, walking a directory cannot be parallelized. This is not as true for network-mounted drives.
- # Thus we parallelize the parsing steps.
+ # Thus, we parallelize the parsing steps.
# If the name-parsing step becomes blocking, we could try to increase the number of threads (but netCDF4 can't multithread...)
# Usually, the walking is the bottleneck.
q_found = queue.Queue()
@@ -343,7 +343,8 @@ def check_worker():
# TODO: testing for zarr validity is not implemented
with netCDF4.Dataset(path):
pass
- except Exception:
+ # FIXME: This is a catch-all, we should catch the specific exception raised by netCDF4.
+ except Exception: # noqa: BLE001
valid = False
if valid:
q_checked.put(path)
@@ -362,8 +363,10 @@ def parse_worker():
attrs_map=attrs_map,
xr_open_kwargs=xr_open_kwargs,
)
- except Exception as err:
- logger.error(f"Parsing file {path} failed with {err}.")
+ # FIXME: This is not specific enough, we should catch the specific exception raised by _name_parser.
+ except Exception as err: # noqa: BLE001
+ msg = f"Parsing file {path} failed with {err}."
+ logger.error(msg)
else:
if d is not None:
parsed.append(d)
@@ -374,7 +377,8 @@ def parse_worker():
):
print(f"Found {n:7d} files", end="\r")
else:
- logger.debug(f"File {path} didn't match any pattern.")
+ msg = f"File {path} didn't match any pattern."
+ logger.debug(msg)
q_checked.task_done()
CW = threading.Thread(target=check_worker, daemon=True)
@@ -437,7 +441,8 @@ def _parse_first_ds(
"""Parse attributes from one file per group, apply them to the whole group."""
fromfile = parse_from_ds(grp.path.iloc[0], cols, attrs_map, **xr_open_kwargs)
- logger.info(f"Got {len(fromfile)} fields, applying to {len(grp)} entries.")
+ msg = f"Got {len(fromfile)} fields, applying to {len(grp)} entries."
+ logger.info(msg)
out = grp.copy()
for col, val in fromfile.items():
for i in grp.index: # If val is an iterable we can't use loc.
@@ -583,7 +588,7 @@ def parse_directory( # noqa: C901
if cvs is not None:
if not isinstance(cvs, dict):
- with open(cvs) as f:
+ with Path(cvs).open(encoding="utf-8") as f:
cvs = yaml.safe_load(f)
attrs_map = cvs.pop("attributes", {})
else:
@@ -617,8 +622,9 @@ def parse_directory( # noqa: C901
raise ValueError("No files found.")
else:
if progress:
- print()
- logger.info(f"Found and parsed {len(parsed)} files.")
+ print() # This is because of the \r outputted in the _parse_dir call.
+ msg = f"Found and parsed {len(parsed)} files."
+ logger.info(msg)
# Path has become NaN when some paths didn't fit any passed pattern
df = pd.DataFrame(parsed).dropna(axis=0, subset=["path"])
@@ -691,7 +697,8 @@ def parse_directory( # noqa: C901
warnings.warn(
f"{n} invalid entries where the start and end dates are Null but the frequency is not 'fx'."
)
- logger.debug(f"Paths: {df.path[invalid].values}")
+ msg = f"Paths: {df.path[invalid].values}"
+ logger.debug(msg)
df = df[~invalid]
# Exact opposite
invalid = df.date_start.notnull() & df.date_end.notnull() & (df.xrfreq == "fx")
@@ -700,7 +707,8 @@ def parse_directory( # noqa: C901
warnings.warn(
f"{n} invalid entries where the start and end dates are given but the frequency is 'fx'."
)
- logger.debug(f"Paths: {df.path[invalid].values}")
+ msg = f"Paths: {df.path[invalid].values}"
+ logger.debug(msg)
df = df[~invalid]
# Create id from user specifications
@@ -753,18 +761,21 @@ def parse_from_ds( # noqa: C901
obj = Path(obj)
if isinstance(obj, Path) and obj.suffixes[-1] == ".zarr":
- logger.info(f"Parsing attributes from Zarr {obj}.")
+ msg = f"Parsing attributes from Zarr {obj}."
+ logger.info(msg)
ds_attrs, variables, time = _parse_from_zarr(
obj, get_vars="variable" in names, get_time=get_time
)
elif isinstance(obj, Path) and obj.suffixes[-1] == ".nc":
- logger.info(f"Parsing attributes with netCDF4 from {obj}.")
+ msg = f"Parsing attributes with netCDF4 from {obj}."
+ logger.info(msg)
ds_attrs, variables, time = _parse_from_nc(
obj, get_vars="variable" in names, get_time=get_time
)
else:
if isinstance(obj, Path):
- logger.info(f"Parsing attributes with xarray from {obj}.")
+ msg = f"Parsing attributes with xarray from {obj}."
+ logger.info(msg)
obj = xr.open_dataset(obj, engine=get_engine(obj), **xrkwargs)
ds_attrs = obj.attrs
time = obj.indexes["time"] if "time" in obj else None
@@ -801,7 +812,8 @@ def parse_from_ds( # noqa: C901
elif name in ds_attrs:
attrs[name] = ds_attrs[name].strip()
- logger.debug(f"Got fields {attrs.keys()} from file.")
+ msg = f"Got fields {attrs.keys()} from file."
+ logger.debug(msg)
return attrs
@@ -1036,7 +1048,7 @@ def _read_schemas(schemas):
elif not isinstance(schemas, dict):
if schemas is None:
schemas = Path(__file__).parent / "data" / "file_schema.yml"
- with open(schemas) as f:
+ with Path(schemas).open(encoding="utf-8") as f:
schemas = yaml.safe_load(f)
for name, schema in schemas.items():
missing_fields = {"with", "folders", "filename"} - set(schema.keys())
@@ -1223,7 +1235,7 @@ def patterns_from_schema(
"""
if isinstance(schema, str):
schemas = Path(__file__).parent / "data" / "file_schema.yml"
- with open(schemas) as f:
+ with schemas.open(encoding="utf-8") as f:
schema = yaml.safe_load(f)[schema]
# # Base folder patterns
diff --git a/src/xscen/config.py b/src/xscen/config.py
index 36d1a021..06a4eeb7 100644
--- a/src/xscen/config.py
+++ b/src/xscen/config.py
@@ -52,7 +52,7 @@
from copy import deepcopy
from functools import wraps
from pathlib import Path
-from typing import Any
+from typing import Any, Optional
import xarray as xr
import xclim as xc
@@ -131,7 +131,10 @@ def args_as_str(*args: tuple[Any, ...]) -> tuple[str, ...]:
def load_config(
- *elements, reset: bool = False, encoding: str = None, verbose: bool = False
+ *elements,
+ reset: bool = False,
+ encoding: Optional[str] = None,
+ verbose: bool = False,
):
"""Load configuration from given files or key=value pairs.
@@ -151,12 +154,12 @@ def load_config(
If a directory is passed, all `.yml` files of this directory are added, in alphabetical order.
If a "key=value" string, "key" is a dotted name and value will be evaluated if possible.
"key=value" pairs are set last, after all files are being processed.
- reset: bool
- If True, the current config is erased before loading files.
- encoding: str, optional
+ reset : bool
+ If True, erases the current config before loading files.
+ encoding : str, optional
The encoding to use when reading files.
verbose: bool
- if True, each element triggers a INFO log line.
+        If True, each element triggers an INFO log line.
Example
-------
@@ -178,7 +181,8 @@ def load_config(
key, value = element.split("=")
CONFIG.update_from_list([(key, value)])
if verbose:
- logger.info(f"Updated the config with {element}.")
+ msg = f"Updated the config with {element}."
+ logger.info(msg)
else:
file = Path(element)
if file.is_dir():
@@ -191,7 +195,8 @@ def load_config(
with configfile.open(encoding=encoding) as f:
recursive_update(CONFIG, yaml.safe_load(f))
if verbose:
- logger.info(f"Updated the config with {configfile}.")
+ msg = f"Updated the config with {configfile}."
+ logger.info(msg)
for module, old in zip(EXTERNAL_MODULES, old_external):
if old != CONFIG.get(module, {}):
@@ -213,13 +218,14 @@ def _wrapper(*args, **kwargs):
from_config = CONFIG.get(module, {}).get(func.__name__, {})
sig = inspect.signature(func)
if CONFIG.get("print_it_all"):
- logger.debug(f"For func {func}, found config {from_config}.")
- logger.debug(f"Original kwargs : {kwargs}")
+ msg = f"For func {func}, found config {from_config}.\nOriginal kwargs : {kwargs}"
+ logger.debug(msg)
for k, v in from_config.items():
if k in sig.parameters:
kwargs.setdefault(k, v)
if CONFIG.get("print_it_all"):
- logger.debug(f"Modified kwargs : {kwargs}")
+ msg = f"Modified kwargs : {kwargs}"
+ logger.debug(msg)
return func(*args, **kwargs)
diff --git a/src/xscen/data/file_schema.yml b/src/xscen/data/file_schema.yml
index 8ebe3c6f..36330e5b 100644
--- a/src/xscen/data/file_schema.yml
+++ b/src/xscen/data/file_schema.yml
@@ -26,14 +26,14 @@
original-non-sims:
with:
- facet: type
- value: [station-obs, reconstruction, forecast]
+ value: [ station-obs, reconstruction, forecast ]
- facet: processing_level
value: raw
folders:
- type
- domain
- institution
- - [source, version]
+ - [ source, version ]
- (member)
- frequency
- variable
@@ -67,7 +67,7 @@ original-sims-ba:
folders:
- type
- processing_level
- - [bias_adjust_project, version]
+ - [ bias_adjust_project, version ]
- mip_era
- activity
- domain
@@ -92,7 +92,7 @@ original-hydro-reconstruction:
- hydrology_source
- (hydrology_member)
- institution
- - [source, version]
+ - [ source, version ]
- (member)
- frequency
- variable
@@ -132,7 +132,7 @@ original-hydro-sims-ba:
- hydrology_source
- (hydrology_member)
- processing_level
- - [bias_adjust_project, version]
+ - [ bias_adjust_project, version ]
- mip_era
- activity
- domain
@@ -160,7 +160,7 @@ derived-sims-ba:
- facet: bias_adjust_project
folders:
- type
- - [bias_adjust_project, version]
+ - [ bias_adjust_project, version ]
- mip_era
- activity
- institution
@@ -199,7 +199,7 @@ derived-reconstruction:
folders:
- type
- institution
- - [source, version]
+ - [ source, version ]
- (member)
- domain
- processing_level
@@ -216,7 +216,7 @@ derived-hydro-sims-ba:
- hydrology_project
- hydrology_source
- (hydrology_member)
- - [bias_adjust_project, version]
+ - [ bias_adjust_project, version ]
- mip_era
- activity
- institution
@@ -261,7 +261,7 @@ derived-hydro-reconstruction:
- hydrology_source
- (hydrology_member)
- institution
- - [source, version]
+ - [ source, version ]
- (member)
- domain
- processing_level
diff --git a/src/xscen/diagnostics.py b/src/xscen/diagnostics.py
index c916cf53..3af73970 100644
--- a/src/xscen/diagnostics.py
+++ b/src/xscen/diagnostics.py
@@ -270,9 +270,10 @@ def _message():
"missing",
)
else:
-            logger.info(
-                f"Variable '{v}' has no time dimension. The missing data check will be skipped.",
-            )
+            msg = (
+                f"Variable '{v}' has no time dimension. The missing data check will be skipped."
+            )
+            logger.info(msg)
if flags is not None:
if return_flags:
@@ -373,7 +374,8 @@ def properties_and_measures( # noqa: C901
except TypeError:
N = None
else:
- logger.info(f"Computing {N} properties.")
+ msg = f"Computing {N} properties."
+ logger.info(msg)
period = standardize_periods(period, multiple=False)
# select period for ds
@@ -405,7 +407,8 @@ def properties_and_measures( # noqa: C901
else:
iden = ind.identifier
# Make the call to xclim
- logger.info(f"{i} - Computing {iden}.")
+ msg = f"{i} - Computing {iden}."
+ logger.info(msg)
out = ind(ds=ds)
vname = out.name
prop[vname] = out
diff --git a/src/xscen/ensembles.py b/src/xscen/ensembles.py
index a8c747d6..6eeddc0c 100644
--- a/src/xscen/ensembles.py
+++ b/src/xscen/ensembles.py
@@ -133,9 +133,8 @@ def ensemble_stats( # noqa: C901
for stat in statistics_to_compute:
stats_kwargs = deepcopy(statistics.get(stat) or {})
- logger.info(
- f"Calculating {stat} from an ensemble of {len(ens.realization)} simulations."
- )
+ msg = f"Calculating {stat} from an ensemble of {len(ens.realization)} simulations."
+ logger.info(msg)
# Workaround for robustness_categories
real_stat = None
@@ -181,9 +180,8 @@ def ensemble_stats( # noqa: C901
f"{v} is a delta, but 'ref' was still specified."
)
if delta_kind in ["rel.", "relative", "*", "/"]:
- logging.info(
- f"Relative delta detected for {v}. Applying 'v - 1' before change_significance."
- )
+ msg = f"Relative delta detected for {v}. Applying 'v - 1' before change_significance."
+ logging.info(msg)
ens_v = ens[v] - 1
else:
ens_v = ens[v]
@@ -335,9 +333,8 @@ def generate_weights( # noqa: C901
if skipna is False:
if v_for_skipna is None:
v_for_skipna = list(datasets[list(datasets.keys())[0]].data_vars)[0]
- logger.info(
- f"Using '{v_for_skipna}' as the variable to check for missing values."
- )
+ msg = f"Using '{v_for_skipna}' as the variable to check for missing values."
+ logger.info(msg)
# Check if any dataset has dimensions that are not 'time' or 'horizon'
other_dims = {
@@ -678,12 +675,13 @@ def generate_weights( # noqa: C901
def build_partition_data(
datasets: Union[dict, list[xr.Dataset]],
partition_dim: list[str] = ["source", "experiment", "bias_adjust_project"],
- subset_kw: dict = None,
- regrid_kw: dict = None,
- indicators_kw: dict = None,
- rename_dict: dict = None,
+ subset_kw: Optional[dict] = None,
+ regrid_kw: Optional[dict] = None,
+ indicators_kw: Optional[dict] = None,
+ rename_dict: Optional[dict] = None,
):
- """Get the input for the xclim partition functions.
+ """
+ Get the input for the xclim partition functions.
From a list or dictionary of datasets, create a single dataset with
`partition_dim` dimensions (and time) to pass to one of the xclim partition functions
@@ -692,27 +690,26 @@ def build_partition_data(
they have to be subsetted and regridded to a common grid/point.
Indicators can also be computed before combining the datasets.
-
Parameters
----------
datasets : dict
List or dictionnary of Dataset objects that will be included in the ensemble.
The datasets should include the necessary ("cat:") attributes to understand their metadata.
Tip: With a project catalog, you can do: `datasets = pcat.search(**search_dict).to_dataset_dict()`.
- partition_dim: list[str]
+ partition_dim : list[str]
Components of the partition. They will become the dimension of the output.
The default is ['source', 'experiment', 'bias_adjust_project'].
For source, the dimension will actually be institution_source_member.
- subset_kw: dict
+ subset_kw : dict, optional
Arguments to pass to `xs.spatial.subset()`.
- regrid_kw:
+ regrid_kw : dict, optional
Arguments to pass to `xs.regrid_dataset()`.
- indicators_kw:
+ indicators_kw : dict, optional
Arguments to pass to `xs.indicators.compute_indicators()`.
All indicators have to be for the same frequency, in order to be put on a single time axis.
- rename_dict:
+ rename_dict : dict, optional
Dictionary to rename the dimensions from xscen names to xclim names.
- The default is {'source': 'model', 'bias_adjust_project': 'downscaling', 'experiment': 'scenario'}.
+ If None, the default is {'source': 'model', 'bias_adjust_project': 'downscaling', 'experiment': 'scenario'}.
Returns
-------
@@ -722,7 +719,6 @@ def build_partition_data(
See Also
--------
xclim.ensembles
-
"""
if isinstance(datasets, dict):
datasets = list(datasets.values())
diff --git a/src/xscen/extract.py b/src/xscen/extract.py
index 0f6f7bc6..322ea242 100644
--- a/src/xscen/extract.py
+++ b/src/xscen/extract.py
@@ -243,10 +243,8 @@ def extract_dataset( # noqa: C901
if pd.to_timedelta(
CV.xrfreq_to_timedelta(catalog[key].df["xrfreq"].iloc[0])
) < pd.to_timedelta(CV.xrfreq_to_timedelta(xrfreq)):
- logger.info(
- f"Resampling {var_name} from [{catalog[key].df['xrfreq'].iloc[0]}]"
- f" to [{xrfreq}]."
- )
+ msg = f"Resampling {var_name} from [{catalog[key].df['xrfreq'].iloc[0]}] to [{xrfreq}]."
+ logger.info(msg)
ds = ds.assign(
{
var_name: resample(
@@ -373,19 +371,20 @@ def resample( # noqa: C901
and var_name in CV.resampling_methods.dict[target_frequency]
):
method = CV.resampling_methods(target_frequency)[var_name]
- logger.info(
- f"Resampling method for {var_name}: '{method}', based on variable name and frequency."
- )
+ msg = f"Resampling method for {var_name}: '{method}', based on variable name and frequency."
+ logger.info(msg)
elif var_name in CV.resampling_methods.dict["any"]:
method = CV.resampling_methods("any")[var_name]
- logger.info(
+ msg = (
f"Resampling method for {var_name}: '{method}', based on variable name."
)
+ logger.info(msg)
else:
method = "mean"
- logger.info(f"Resampling method for {var_name} defaulted to: 'mean'.")
+ msg = f"Resampling method for {var_name} defaulted to: 'mean'."
+ logger.info(msg)
weights = None
if (
@@ -671,7 +670,8 @@ def search_data_catalogs( # noqa: C901
},
**cat_kwargs,
)
- logger.info(f"Catalog opened: {catalog} from {len(data_catalogs)} files.")
+ msg = f"Catalog opened: {catalog} from {len(data_catalogs)} files."
+ logger.info(msg)
if match_hist_and_fut:
logger.info("Dispatching historical dataset to future experiments.")
@@ -684,15 +684,15 @@ def search_data_catalogs( # noqa: C901
catalog.esmcat._df = pd.concat([catalog.df, ex.df]).drop_duplicates(
keep=False
)
- logger.info(
- f"Removing {len(ex.df)} assets based on exclusion dict '{k}': {exclusions[k]}."
- )
+ msg = f"Removing {len(ex.df)} assets based on exclusion dict '{k}': {exclusions[k]}."
+ logger.info(msg)
full_catalog = deepcopy(catalog) # Used for searching for fixed fields
if other_search_criteria:
catalog = catalog.search(**other_search_criteria)
- logger.info(
+ msg = (
f"{len(catalog.df)} assets matched the criteria : {other_search_criteria}."
)
+ logger.info(msg)
if restrict_warming_level:
if isinstance(restrict_warming_level, bool):
restrict_warming_level = {}
@@ -720,7 +720,8 @@ def search_data_catalogs( # noqa: C901
coverage_kwargs = coverage_kwargs or {}
periods = standardize_periods(periods)
- logger.info(f"Iterating over {len(catalog.unique('id'))} potential datasets.")
+ msg = f"Iterating over {len(catalog.unique('id'))} potential datasets."
+ logger.info(msg)
# Loop on each dataset to assess whether they have all required variables
# And select best freq/timedelta for each
catalogs = {}
@@ -782,9 +783,8 @@ def search_data_catalogs( # noqa: C901
varcat = scat.search(
variable=var_id, require_all_on=["id", "xrfreq"]
)
- logger.debug(
- f"At var {var_id}, after search cat has {varcat.derivedcat.keys()}"
- )
+ msg = f"At var {var_id}, after search cat has {varcat.derivedcat.keys()}"
+ logger.debug(msg)
# TODO: Temporary fix until this is changed in intake_esm
varcat._requested_variables_true = [var_id]
varcat._dependent_variables = list(
@@ -851,9 +851,8 @@ def search_data_catalogs( # noqa: C901
varcat.esmcat._df = pd.DataFrame()
if varcat.df.empty:
- logger.debug(
- f"Dataset {sim_id} doesn't have all needed variables (missing at least {var_id})."
- )
+ msg = f"Dataset {sim_id} doesn't have all needed variables (missing at least {var_id})."
+ logger.debug(msg)
break
if "timedelta" in varcat.df.columns:
varcat.df.drop(columns=["timedelta"], inplace=True)
@@ -869,9 +868,8 @@ def search_data_catalogs( # noqa: C901
catalogs[sim_id]._requested_periods = periods
if len(catalogs) > 0:
- logger.info(
- f"Found {len(catalogs)} with all variables requested and corresponding to the criteria."
- )
+ msg = f"Found {len(catalogs)} with all variables requested and corresponding to the criteria."
+ logger.info(msg)
else:
logger.warning("Found no match corresponding to the search criteria.")
@@ -1021,9 +1019,10 @@ def _get_warming_level(model):
)
tas_sel = tas.isel(simulation=candidates.argmax())
selected = "_".join([tas_sel[c].item() for c in FIELDS])
- logger.debug(
+ msg = (
f"Computing warming level +{wl}Β°C for {model} from simulation: {selected}."
)
+ logger.debug(msg)
# compute reference temperature for the warming and difference from reference
yearly_diff = tas_sel - tas_sel.sel(time=slice(*tas_baseline_period)).mean()
@@ -1039,10 +1038,11 @@ def _get_warming_level(model):
yrs = rolling_diff.where(rolling_diff >= wl, drop=True)
if yrs.size == 0:
- logger.info(
+ msg = (
f"Global warming level of +{wl}C is not reached by the last year "
f"({tas.time[-1].dt.year.item()}) of the provided 'tas_src' database for {selected}."
)
+ logger.info(msg)
return [None, None] if return_horizon else None
yr = yrs.isel(time=0).time.dt.year.item()
@@ -1428,7 +1428,8 @@ def _restrict_by_resolution(
domains = pd.unique(df_sim["domain"])
if len(domains) > 1:
- logger.info(f"Dataset {i} appears to have multiple resolutions.")
+ msg = f"Dataset {i} appears to have multiple resolutions."
+ logger.info(msg)
# For CMIP, the order is dictated by a list of grid labels
if "MIP" in pd.unique(df_sim["activity"])[0]:
@@ -1504,10 +1505,8 @@ def _restrict_by_resolution(
)
else:
- logger.warning(
- f"Dataset {i} seems to have multiple resolutions, "
- "but its activity is not yet recognized or supported."
- )
+ msg = f"Dataset {i} seems to have multiple resolutions, but its activity is not yet recognized or supported."
+ logger.warning(msg)
chosen = list(domains)
pass
@@ -1520,7 +1519,8 @@ def _restrict_by_resolution(
)
for k in to_remove:
- logger.info(f"Removing {k} from the results.")
+ msg = f"Removing {k} from the results."
+ logger.info(msg)
catalogs.pop(k)
return catalogs
@@ -1563,9 +1563,8 @@ def _restrict_multimembers(
members = pd.unique(df_sim["member"])
if len(members) > 1:
- logger.info(
- f"Dataset {i} has {len(members)} valid members. Restricting as per requested."
- )
+ msg = f"Dataset {i} has {len(members)} valid members. Restricting as per requested."
+ logger.info(msg)
if "ordered" in restrictions:
members = natural_sort(members)[0 : restrictions["ordered"]]
@@ -1583,7 +1582,8 @@ def _restrict_multimembers(
)
for k in to_remove:
- logger.info(f"Removing {k} from the results.")
+ msg = f"Removing {k} from the results."
+ logger.info(msg)
catalogs.pop(k)
return catalogs
@@ -1610,7 +1610,6 @@ def _restrict_wl(df: pd.DataFrame, restrictions: dict):
to_keep = get_warming_level(df, return_horizon=False, **restrictions).notnull()
removed = pd.unique(df[~to_keep]["id"])
df = df[to_keep]
- logger.info(
- f"Removing the following datasets because of the restriction for warming levels: {list(removed)}"
- )
+ msg = f"Removing the following datasets because of the restriction for warming levels: {list(removed)}"
+ logger.info(msg)
return df
diff --git a/src/xscen/indicators.py b/src/xscen/indicators.py
index af866800..da156beb 100644
--- a/src/xscen/indicators.py
+++ b/src/xscen/indicators.py
@@ -170,7 +170,8 @@ def compute_indicators( # noqa: C901
except TypeError:
N = None
else:
- logger.info(f"Computing {N} indicators.")
+ msg = f"Computing {N} indicators."
+ logger.info(msg)
periods = standardize_periods(periods)
in_freq = xr.infer_freq(ds.time) if "time" in ds.dims else "fx"
@@ -182,16 +183,19 @@ def compute_indicators( # noqa: C901
iden, ind = ind
else:
iden = ind.identifier
- logger.info(f"{i} - Computing {iden}.")
+ msg = f"{i} - Computing {iden}."
+ logger.info(msg)
_, freq = get_indicator_outputs(ind, in_freq)
if rechunk_input and freq not in ["fx", in_freq]:
if freq not in dss_rechunked:
- logger.debug(f"Rechunking with flox for freq {freq}")
+ msg = f"Rechunking with flox for freq {freq}."
+ logger.debug(msg)
dss_rechunked[freq] = rechunk_for_resample(ds, time=freq)
else:
- logger.debug(f"Using rechunked for freq {freq}")
+ msg = f"Using rechunked for freq {freq}"
+ logger.debug(msg)
ds_in = dss_rechunked[freq]
else:
ds_in = ds
diff --git a/src/xscen/io.py b/src/xscen/io.py
index e7350c31..a3ed9259 100644
--- a/src/xscen/io.py
+++ b/src/xscen/io.py
@@ -245,7 +245,8 @@ def subset_maxsize(
size_of_file = size_of_file + (varsize * dtype_size) / 1024**3
if size_of_file < maxsize_gb:
- logger.info(f"Dataset is already smaller than {maxsize_gb} Gb.")
+ msg = f"Dataset is already smaller than {maxsize_gb} Gb."
+ logger.info(msg)
return [ds]
elif "time" in ds:
@@ -286,7 +287,8 @@ def clean_incomplete(path: Union[str, os.PathLike], complete: Sequence[str]) ->
for fold in filter(lambda p: p.is_dir(), path.iterdir()):
if fold.name not in complete:
- logger.warning(f"Removing {fold} from disk")
+ msg = f"Removing {fold} from disk"
+ logger.warning(msg)
sh.rmtree(fold)
@@ -431,45 +433,46 @@ def save_to_zarr( # noqa: C901
itervar: bool = False,
timeout_cleanup: bool = True,
):
- """Save a Dataset to Zarr format, rechunking and compressing if requested.
+ """
+ Save a Dataset to Zarr format, rechunking and compressing if requested.
According to mode, removes variables that we don't want to re-compute in ds.
Parameters
----------
ds : xr.Dataset
- Dataset to be saved.
+ Dataset to be saved.
filename : str
- Name of the Zarr file to be saved.
+ Name of the Zarr file to be saved.
rechunk : dict, optional
- This is a mapping from dimension name to new chunks (in any format understood by dask).
- Spatial dimensions can be generalized as 'X' and 'Y' which will be mapped to the actual grid type's
- dimension names.
- Rechunking is only done on *data* variables sharing dimensions with this argument.
+ This is a mapping from dimension name to new chunks (in any format understood by dask).
+ Spatial dimensions can be generalized as 'X' and 'Y' which will be mapped to the actual grid type's
+ dimension names.
+ Rechunking is only done on *data* variables sharing dimensions with this argument.
zarr_kwargs : dict, optional
- Additional arguments to send to_zarr()
+ Additional arguments to send to_zarr()
compute : bool
- Whether to start the computation or return a delayed object.
+ Whether to start the computation or return a delayed object.
mode : {'f', 'o', 'a'}
- If 'f', fails if any variable already exists.
- if 'o', removes the existing variables.
- if 'a', skip existing variables, writes the others.
+ If 'f', fails if any variable already exists.
+ if 'o', removes the existing variables.
+ if 'a', skip existing variables, writes the others.
encoding : dict, optional
- If given, skipped variables are popped in place.
+ If given, skipped variables are popped in place.
bitround : bool or int or dict
- If not False, float variables are bit-rounded by dropping a certain number of bits from their mantissa,
- allowing for a much better compression.
- If an int, this is the number of bits to keep for all float variables.
- If a dict, a mapping from variable name to the number of bits to keep.
- If True, the number of bits to keep is guessed based on the variable's name, defaulting to 12,
- which yields a relative error of 0.012%.
+ If not False, float variables are bit-rounded by dropping a certain number of bits from their mantissa,
+ allowing for a much better compression.
+ If an int, this is the number of bits to keep for all float variables.
+ If a dict, a mapping from variable name to the number of bits to keep.
+ If True, the number of bits to keep is guessed based on the variable's name, defaulting to 12,
+ which yields a relative error of 0.012%.
itervar : bool
- If True, (data) variables are written one at a time, appending to the zarr.
- If False, this function computes, no matter what was passed to kwargs.
+ If True, (data) variables are written one at a time, appending to the zarr.
+ If False, this function computes, no matter what was passed to kwargs.
timeout_cleanup : bool
- If True (default) and a :py:class:`xscen.scripting.TimeoutException` is raised during the writing,
- the variable being written is removed from the dataset as it is incomplete.
- This does nothing if `compute` is False.
+ If True (default) and a :py:class:`xscen.scripting.TimeoutException` is raised during the writing,
+ the variable being written is removed from the dataset as it is incomplete.
+ This does nothing if `compute` is False.
Returns
-------
@@ -510,7 +513,8 @@ def _skip(var):
if mode == "o":
if exists:
var_path = path / var
- logger.warning(f"Removing {var_path} to overwrite.")
+ msg = f"Removing {var_path} to overwrite."
+ logger.warning(msg)
sh.rmtree(var_path)
return False
@@ -521,7 +525,8 @@ def _skip(var):
for var in list(ds.data_vars.keys()):
if _skip(var):
- logger.info(f"Skipping {var} in {path}.")
+ msg = f"Skipping {var} in {path}."
+ logger.info(msg)
ds = ds.drop_vars(var)
if encoding:
encoding.pop(var)
@@ -547,7 +552,8 @@ def _skip(var):
dsbase = ds.drop_vars(allvars)
dsbase.to_zarr(path, **zarr_kwargs, mode="w")
for i, (name, var) in enumerate(ds.data_vars.items()):
- logger.debug(f"Writing {name} ({i + 1} of {len(ds.data_vars)}) to {path}")
+ msg = f"Writing {name} ({i + 1} of {len(ds.data_vars)}) to {path}"
+ logger.debug(msg)
dsvar = ds.drop_vars(allvars - {name})
try:
dsvar.to_zarr(
@@ -558,21 +564,22 @@ def _skip(var):
)
except TimeoutException:
if timeout_cleanup:
- logger.info(f"Removing incomplete {name}.")
+ msg = f"Removing incomplete {name}."
+ logger.info(msg)
sh.rmtree(path / name)
raise
else:
- logger.debug(f"Writing {list(ds.data_vars.keys())} for {filename}.")
+ msg = f"Writing {list(ds.data_vars.keys())} for {filename}."
+ logger.debug(msg)
try:
return ds.to_zarr(
filename, compute=compute, mode="a", encoding=encoding, **zarr_kwargs
)
except TimeoutException:
if timeout_cleanup:
- logger.info(
- f"Removing incomplete {list(ds.data_vars.keys())} for {filename}."
- )
+ msg = f"Removing incomplete {list(ds.data_vars.keys())} for {filename}."
+ logger.info(msg)
for name in ds.data_vars:
sh.rmtree(path / name)
raise
diff --git a/src/xscen/regrid.py b/src/xscen/regrid.py
index 604ed7e7..24c680cd 100644
--- a/src/xscen/regrid.py
+++ b/src/xscen/regrid.py
@@ -5,6 +5,7 @@
import os
import warnings
from copy import deepcopy
+from pathlib import Path
from typing import Optional, Union
import cartopy.crs as ccrs
@@ -116,19 +117,19 @@ def regrid_dataset( # noqa: C901
ds = out or ds
kwargs = deepcopy(regridder_kwargs)
- # if weights_location does no exist, create it
- if not os.path.exists(weights_location):
- os.makedirs(weights_location)
+ # if weights_location does not exist, create it
+ if not Path(weights_location).exists():
+ Path(weights_location).mkdir(parents=True)
id = ds.attrs["cat:id"] if "cat:id" in ds.attrs else "weights"
# give unique name to weights file
- weights_filename = os.path.join(
+ weights_filename = Path(
weights_location,
f"{id}_{domain}_regrid{i}"
f"{'_'.join(kwargs[k] for k in kwargs if isinstance(kwargs[k], str))}.nc",
)
# Re-use existing weight file if possible
- if os.path.isfile(weights_filename) and not (
+ if Path(weights_filename).is_file() and not (
("reuse_weights" in kwargs) and (kwargs["reuse_weights"] is False)
):
kwargs["weights"] = weights_filename
@@ -350,7 +351,7 @@ def _regridder(
unmapped_to_nan=unmapped_to_nan,
**kwargs,
)
- if not os.path.isfile(filename):
+ if not Path(filename).is_file():
regridder.to_netcdf(filename)
return regridder
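The regrid.py hunks are part of the wider os.path-to-pathlib migration in this changeset; a small sketch of the equivalences used here, with placeholder paths:

import os
from pathlib import Path

weights_location = "weights"

# Old style:
if not os.path.exists(weights_location):
    os.makedirs(weights_location)
weights_filename = os.path.join(weights_location, "weights.nc")
has_weights = os.path.isfile(weights_filename)

# New style:
if not Path(weights_location).exists():
    Path(weights_location).mkdir(parents=True)
weights_filename = Path(weights_location, "weights.nc")
has_weights = Path(weights_filename).is_file()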
diff --git a/src/xscen/scripting.py b/src/xscen/scripting.py
index 7ec0b86c..532ca06b 100644
--- a/src/xscen/scripting.py
+++ b/src/xscen/scripting.py
@@ -255,7 +255,8 @@ def __init__(
def __enter__(self): # noqa: D105
self.start = time.perf_counter()
self.start_cpu = time.process_time()
- self.logger.info(f"Started process {self.name}.")
+ msg = f"Started process {self.name}."
+ self.logger.info(msg)
return
def __exit__(self, *args, **kwargs): # noqa: D105
@@ -269,7 +270,8 @@ def __exit__(self, *args, **kwargs): # noqa: D105
self.logger.info(s)
-class TimeoutException(Exception):
+# FIXME: This should be written as "TimeoutError"
+class TimeoutException(Exception): # noqa: N818
"""An exception raised with a timeout occurs."""
def __init__(self, seconds: int, task: str = "", **kwargs):
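For context, a minimal sketch of how such an exception is typically consumed downstream (the callable and paths are placeholders; this mirrors the timeout_cleanup branch of save_to_zarr above rather than adding any new behaviour):

import shutil as sh
from pathlib import Path

from xscen.scripting import TimeoutException

def write_with_cleanup(write, path, name):
    """Call `write` and drop the partially written variable if it times out."""
    try:
        write()  # placeholder callable that may raise TimeoutException
    except TimeoutException:
        incomplete = Path(path) / name
        if incomplete.exists():
            sh.rmtree(incomplete)
        raise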
@@ -424,7 +426,8 @@ def save_and_update(
# update catalog
pcat.update_from_ds(ds=ds, path=path, **update_kwargs)
- logger.info(f"File {path} has saved succesfully and the catalog was updated.")
+ msg = f"File {path} has been saved successfully and the catalog was updated."
+ logger.info(msg)
def move_and_delete(
@@ -456,7 +459,8 @@ def move_and_delete(
source, dest = files[0], files[1]
if Path(source).exists():
if copy:
- logger.info(f"Copying {source} to {dest}.")
+ msg = f"Copying {source} to {dest}."
+ logger.info(msg)
copied_files = copy_tree(source, dest)
for f in copied_files:
# copied files don't include zarr files
@@ -467,13 +471,15 @@ def move_and_delete(
ds = xr.open_dataset(f)
pcat.update_from_ds(ds=ds, path=f)
else:
- logger.info(f"Moving {source} to {dest}.")
+ msg = f"Moving {source} to {dest}."
+ logger.info(msg)
sh.move(source, dest)
if Path(dest).suffix in [".zarr", ".nc"]:
ds = xr.open_dataset(dest)
pcat.update_from_ds(ds=ds, path=dest)
else:
- logger.info(f"You are trying to move {source}, but it does not exist.")
+ msg = f"You are trying to move {source}, but it does not exist."
+ logger.info(msg)
else:
raise ValueError("`moving` should be a list of lists.")
@@ -481,9 +487,10 @@ def move_and_delete(
if isinstance(deleting, list):
for dir_to_delete in deleting:
if Path(dir_to_delete).exists() and Path(dir_to_delete).is_dir():
- logger.info(f"Deleting content inside {dir_to_delete}.")
+ msg = f"Deleting content inside {dir_to_delete}."
+ logger.info(msg)
sh.rmtree(dir_to_delete)
- os.mkdir(dir_to_delete)
+ Path(dir_to_delete).mkdir()
elif deleting is None:
pass
else:
diff --git a/src/xscen/utils.py b/src/xscen/utils.py
index b6469589..ab6e51b1 100644
--- a/src/xscen/utils.py
+++ b/src/xscen/utils.py
@@ -424,7 +424,7 @@ def stack_drop_nans(
domain = ds.attrs.get("cat:domain", "unknown")
to_file = to_file.format(domain=domain, shape=original_shape)
if not Path(to_file).parent.exists():
- os.makedirs(Path(to_file).parent, exist_ok=True)
+ Path(to_file).parent.mkdir(parents=True, exist_ok=True)
# Add all coordinates that might have been affected by the stack
mask = mask.assign_coords(
{c: ds[c] for c in ds.coords if any(d in mask.dims for d in ds[c].dims)}
@@ -512,7 +512,8 @@ def unstack_fill_nan(
original_shape = ds[c].attrs["original_shape"]
domain = ds.attrs.get("cat:domain", "unknown")
coords = coords.format(domain=domain, shape=original_shape)
- logger.info(f"Dataset unstacked using {coords}.")
+ msg = f"Dataset unstacked using {coords}."
+ logger.info(msg)
coords = xr.open_dataset(coords)
# separate coords that are dims or not
coords_and_dims = {
@@ -728,7 +729,7 @@ def maybe_unstack(
)
-def __read_CVs(cvfile):
+def __read_CVs(cvfile): # noqa: N802
with cvfile.open("r") as f:
cv = json.load(f)
is_regex = cv.pop("is_regex", False)
@@ -776,10 +777,11 @@ def cvfunc(key, default="error"):
return cvfunc
-for cvfile in (Path(__file__).parent / "CVs").glob("*.json"):
+for cvfile in Path(__file__).parent.joinpath("CVs").glob("*.json"):
try:
CV.__dict__[cvfile.stem] = __read_CVs(cvfile)
- except Exception as err:
+ # FIXME: This is a catch-all, but we should be more specific
+ except Exception as err: # noqa: BLE001
raise ValueError(f"While reading {cvfile} got {err}")
@@ -920,7 +922,8 @@ def clean_up( # noqa: C901
ds = ds.copy()
if variables_and_units:
- logger.info(f"Converting units: {variables_and_units}")
+ msg = f"Converting units: {variables_and_units}"
+ logger.info(msg)
ds = change_units(ds=ds, variables_and_units=variables_and_units)
# convert calendar
@@ -946,15 +949,17 @@ def clean_up( # noqa: C901
):
convert_calendar_kwargs["align_on"] = "random"
- logger.info(f"Converting calendar with {convert_calendar_kwargs} ")
+ msg = f"Converting calendar with {convert_calendar_kwargs}."
+ logger.info(msg)
ds = ds.convert_calendar(**convert_calendar_kwargs).where(~ocean)
# convert each variable individually
if missing_by_var:
- # remove 'missing' argument to be replace by `missing_by_var`
+ # remove 'missing' argument to be replaced by `missing_by_var`
del convert_calendar_kwargs["missing"]
for var, missing in missing_by_var.items():
- logging.info(f"Filling missing {var} with {missing}")
+ msg = f"Filling missing {var} with {missing}"
+ logging.info(msg)
if missing == "interpolate":
ds_with_nan = ds[var].where(ds[var] != -9999)
converted_var = ds_with_nan.chunk({"time": -1}).interpolate_na(
@@ -1011,7 +1016,8 @@ def clean_up( # noqa: C901
try:
ds.attrs["cat:id"] = generate_id(ds).iloc[0]
except IndexError as err:
- logger.warning(f"Unable to generate a new id for the dataset. Got {err}.")
+ msg = f"Unable to generate a new id for the dataset. Got {err}."
+ logger.warning(msg)
if to_level:
ds.attrs["cat:processing_level"] = to_level
@@ -1084,7 +1090,7 @@ def clean_up( # noqa: C901
def publish_release_notes(
style: str = "md",
file: Optional[Union[os.PathLike, StringIO, TextIO]] = None,
- changes: Union[str, os.PathLike] = None,
+ changes: Optional[Union[str, os.PathLike]] = None,
) -> Optional[str]:
"""Format release history in Markdown or ReStructuredText.
@@ -1114,7 +1120,7 @@ def publish_release_notes(
if not changes_file.exists():
raise FileNotFoundError("Changes file not found in xscen file tree.")
- with open(changes_file) as f:
+ with Path(changes_file).open(encoding="utf-8") as f:
changes = f.read()
if style == "rst":
@@ -1146,7 +1152,7 @@ def publish_release_notes(
str(grouping[0]).replace("(", r"\(").replace(")", r"\)")
)
search = rf"({fixed_grouping})\n([\{level}]{'{' + str(len(grouping[1])) + '}'})"
- replacement = f"{'##' if level=='-' else '###'} {grouping[0]}"
+ replacement = f"{'##' if level == '-' else '###'} {grouping[0]}"
changes = re.sub(search, replacement, changes)
link_expressions = r"[\`]{1}([\w\s]+)\s<(.+)>`\_"
@@ -1166,7 +1172,7 @@ def publish_release_notes(
def unstack_dates( # noqa: C901
ds: xr.Dataset,
seasons: Optional[dict[int, str]] = None,
- new_dim: str = None,
+ new_dim: Optional[str] = None,
winter_starts_year: bool = False,
):
"""Unstack a multi-season timeseries into a yearly axis and a season one.
@@ -1528,10 +1534,10 @@ def season_sort_key(idx: pd.Index, name: Optional[str] = None):
if (name or getattr(idx, "name", None)) == "month":
m = list(xr.coding.cftime_offsets._MONTH_ABBREVIATIONS.values())
return idx.map(m.index)
- except (TypeError, ValueError):
+ except (TypeError, ValueError) as err:
# ValueError if string not in seasons, or value not in months
# TypeError if season element was not a string.
- pass
+ logging.error(err)
return idx
diff --git a/templates/1-basic_workflow_with_config/config1.yml b/templates/1-basic_workflow_with_config/config1.yml
index 2699461b..e66ab33a 100644
--- a/templates/1-basic_workflow_with_config/config1.yml
+++ b/templates/1-basic_workflow_with_config/config1.yml
@@ -478,7 +478,7 @@ aggregate:
op: mean
window: 30
stride: 10
- periods: [['1951', '2100']]
+ periods: [ [ '1951', '2100' ] ]
to_level: climatology
#periods_as_dim: True
#min_periods:
@@ -549,11 +549,11 @@ logging: # general logging args
xscen:
propagate: False
level: INFO
- handlers: [console] # [file, console] could also be used to write the log to a file
+ handlers: [ console ] # [file, console] could also be used to write the log to a file
xclim: # Options for xclim
- metadata_locales: # Enable french translation for xclim indicators, but also some xscen methods.
+ metadata_locales: # Enable French translation for xclim indicators, but also some xscen methods.
- fr
diff --git a/templates/1-basic_workflow_with_config/paths1_example.yml b/templates/1-basic_workflow_with_config/paths1_example.yml
index 208f1ab5..4521a8ee 100644
--- a/templates/1-basic_workflow_with_config/paths1_example.yml
+++ b/templates/1-basic_workflow_with_config/paths1_example.yml
@@ -13,11 +13,11 @@ extract:
reconstruction:
search_data_catalogs:
data_catalogs:
- - PATH_TO_OFFICIAL_CATALOGUES/reconstruction.json
+ - PATH_TO_OFFICIAL_CATALOGUES/reconstruction.json
simulation:
search_data_catalogs:
data_catalogs:
- - PATH_TO_OFFICIAL_CATALOGUES/simulation.json
+ - PATH_TO_OFFICIAL_CATALOGUES/simulation.json
regrid:
regrid_dataset:
@@ -60,13 +60,13 @@ dask:
# filename: PATH/logger.log
utils:
- stack_drop_nans:
- to_file: &coords
- PATH/stack_coords/coords_{domain}_{shape}.nc
- unstack_fill_nan:
- coords: *coords
- maybe_unstack:
- coords: *coords
+ stack_drop_nans:
+ to_file: &coords
+ PATH/stack_coords/coords_{domain}_{shape}.nc
+ unstack_fill_nan:
+ coords: *coords
+ maybe_unstack:
+ coords: *coords
scripting:
send_mail:
diff --git a/templates/1-basic_workflow_with_config/workflow1.py b/templates/1-basic_workflow_with_config/workflow1.py
index 9cad6bb4..70824b6b 100644
--- a/templates/1-basic_workflow_with_config/workflow1.py
+++ b/templates/1-basic_workflow_with_config/workflow1.py
@@ -2,6 +2,7 @@
import atexit
import logging
+from pathlib import Path
import xarray as xr
from dask import config as dskconf
@@ -37,8 +38,10 @@
# Copy config to the top of the log file
if "logging" in CONFIG and "file" in CONFIG["logging"]["handlers"]:
- f1 = open(CONFIG["logging"]["handlers"]["file"]["filename"], "a+")
- f2 = open("config1.yml")
+ f1 = Path(CONFIG["logging"]["handlers"]["file"]["filename"]).open(
+ "a+", encoding="utf-8"
+ )
+ f2 = Path("config1.yml").open(encoding="utf-8")
f1.write(f2.read())
f1.close()
f2.close()
@@ -64,7 +67,7 @@
if "extract" in CONFIG["tasks"]:
# Iterate on types of data to extract (reconstruction, simulation)
# and get the respective dictionary from the config
- for source_type, type_dict in CONFIG["extract"].items():
+ for type_dict in CONFIG["extract"].values():
# Filter the catalog to get only the datasets that match the arguments in the config.
# Arguments are not passed automatically, because the config is different for each type of data.
# Therefore, we must manually send the 'type_dict' entry to the search_data_catalogs function.
@@ -170,7 +173,7 @@
for var, ba_dict in CONFIG["biasadjust"].items():
# Search the ProjectCatalog for the results of the previous step, then iterate over each dataset.
dict_sim = pcat.search(**ba_dict["sim_inputs"]).to_dataset_dict(**tdd)
- for id_sim, ds_sim in dict_sim.items():
+ for ds_sim in dict_sim.values():
cur = {
"id": ds_sim.attrs["cat:id"],
"xrfreq": ds_sim.attrs["cat:xrfreq"],
@@ -286,7 +289,7 @@
if "diagnostics" in CONFIG["tasks"]:
# The properties and measures that we want to compute are different for each type of data (ref, sim, scen),
# so we need to iterate over them.
- for kind, kind_dict in CONFIG["diagnostics"]["kind"].items():
+ for kind_dict in CONFIG["diagnostics"]["kind"].values():
# Search for the right datasets and iterate over them
dict_input = pcat.search(**kind_dict["inputs"]).to_dataset_dict(**tdd)
for key_input, ds_input in dict_input.items():
@@ -336,7 +339,7 @@
meas_dict = pcat.search(processing_level="diag-measures-sim").to_dataset_dict(
**tdd
)
- for id_meas, ds_meas_sim in meas_dict.items():
+ for ds_meas_sim in meas_dict.values():
cur = {
"id": ds_meas_sim.attrs["cat:id"],
"processing_level": "diag-improved",
diff --git a/templates/2-indicators_only/config2.yml b/templates/2-indicators_only/config2.yml
index 03f4427b..b65fb341 100644
--- a/templates/2-indicators_only/config2.yml
+++ b/templates/2-indicators_only/config2.yml
@@ -2,36 +2,36 @@
## Comments starting with a single # are example/suggested entries
## Descriptive comments start with ##.
dask:
- client:
- n_workers: 3
- threads_per_worker: 4
- memory_limit: 10GB
- # dashboard_address: 11111
- # silence_logs: 50 # To supress warnings about garbage collection and other inevitable stuff
+ client:
+ n_workers: 3
+ threads_per_worker: 4
+ memory_limit: 10GB
+ # dashboard_address: 11111
+ # silence_logs: 50 # To suppress warnings about garbage collection and other inevitable stuff
array.slicing.split_large_chunks: False
logging:
- formatters:
- default:
- format: '%(asctime)s %(levelname)-8s %(name)-15s %(message)s'
- datefmt: '%Y-%m-%d %H:%M:%S'
- handlers:
- console:
- class : logging.StreamHandler
- formatter: default
- level : DEBUG
- loggers:
- workflow:
- level: INFO
- propagate: False
- handlers: [console]
- xscen:
- level: INFO
- propagate: False
- handlers: [console]
- root:
- level: INFO
- handlers: [console]
+ formatters:
+ default:
+ format: '%(asctime)s %(levelname)-8s %(name)-15s %(message)s'
+ datefmt: '%Y-%m-%d %H:%M:%S'
+ handlers:
+ console:
+ class : logging.StreamHandler
+ formatter: default
+ level : DEBUG
+ loggers:
+ workflow:
+ level: INFO
+ propagate: False
+ handlers: [ console ]
+ xscen:
+ level: INFO
+ propagate: False
+ handlers: [ console ]
+ root:
+ level: INFO
+ handlers: [ console ]
xclim:
## Enable french translation for xclim indicators, but also some xscen methods.
@@ -41,54 +41,54 @@ xclim:
cf_compliance: log
main:
- ## Path to a project catalog, the workflow creates it if needed
- catalog:
- ## The template of the file name, including the parent path, valid fields are the catalog's column
- ## One zarr dataset per xrfreq is produced
- # outfilename: /DATA/{source}_indicators_{xrfreq}_{date_start:%Y}-{date_end:%Y}.zarr
- outfilename:
+ ## Path to a project catalog, the workflow creates it if needed
+ catalog:
+ ## The template of the file name, including the parent path; valid fields are the catalog's columns
+ ## One zarr dataset per xrfreq is produced
+ # outfilename: /DATA/{source}_indicators_{xrfreq}_{date_start:%Y}-{date_end:%Y}.zarr
+ outfilename:
indicators:
- ## Path (careful : this is relative to where the script is called)
- module: indicators2.yml
+ ## Path (careful: this is relative to where the script is called)
+ module: indicators2.yml
extract:
- ## Arguments to select the dataset and extract it.
- ## Example args as comments, good defaults uncommented
- search_data_catalogs:
- data_catalogs:
- # - /DATA/reconstruction.json
- variables_and_freqs:
- tas: D
- tasmax: D
- tasmin: D
- pr: D
- other_search_criteria:
- # source: ERA5-Land
- # domain: NAM
- allow_conversion: True # Mainly for (tasmax, tasmin)-> tas
- allow_resampling: True # To get from hourly to daily
+ ## Arguments to select the dataset and extract it.
+ ## Example args as comments, good defaults uncommented
+ search_data_catalogs:
+ data_catalogs:
+ # - /DATA/reconstruction.json
+ variables_and_freqs:
+ tas: D
+ tasmax: D
+ tasmin: D
+ pr: D
+ other_search_criteria:
+ # source: ERA5-Land
+ # domain: NAM
+ allow_conversion: True # Mainly for (tasmax, tasmin)-> tas
+ allow_resampling: True # To get from hourly to daily
- extract_dataset:
- ## This might not need any arguments
- ## But a region might be of interest:
- # region :
- # name: region_name
- # method: bbox
- # lat_bnds: [45, 47]
- # lon_bnds: [-75, -70]
+ extract_dataset:
+ ## This might not need any arguments
+ ## But a region might be of interest:
+ # region :
+ # name: region_name
+ # method: bbox
+ # lat_bnds: [45, 47]
+ # lon_bnds: [-75, -70]
io:
- save_to_zarr:
- # Mode o will remove existing variables in the zarr, the script already checks what was already computed through the catalog.
- # but the dataset itself and other variables are preserved.
- mode: o
- itervar: False # if the computation is too slow, write one indicator at a time by turning this to True.
- rechunk: # Normal chunking of daily data would be too small for indicators, one only usually needs to change the time chunk
- time: 40
+ save_to_zarr:
+ # Mode 'o' removes existing variables in the zarr; the script already checks through the catalog what has been computed,
+ # but the dataset itself and other variables are preserved.
+ mode: o
+ itervar: False # if the computation is too slow, write one indicator at a time by setting this to True.
+ rechunk: # Normal chunking of daily data would be too small for indicators; one usually only needs to change the time chunk
+ time: 40
scripting:
- send_mail_on_exit:
- subject: "Indicator computing terminated."
- msg_ok: "🥳 Everything went well."
- msg_err: "🔥 Something went wrong while computing the indicators. 🔥"
+ send_mail_on_exit:
+ subject: "Indicator computing terminated."
+ msg_ok: "🥳 Everything went well."
+ msg_err: "🔥 Something went wrong while computing the indicators. 🔥"
diff --git a/templates/2-indicators_only/workflow2.py b/templates/2-indicators_only/workflow2.py
index 6234656f..e31a9a62 100644
--- a/templates/2-indicators_only/workflow2.py
+++ b/templates/2-indicators_only/workflow2.py
@@ -61,12 +61,13 @@
to_compute.append((name, ind))
if not to_compute:
- logger.info(f"Everything computed for {dsid}.")
+ msg = f"Everything computed for {dsid}."
+ logger.info(msg)
continue
outd = compute_indicators(ds, indicators=to_compute, to_level="indicators")
- for freq, outds in outd.items():
+ for outds in outd.values():
outpath = CONFIG["main"]["outfilename"].format(**get_cat_attrs(outds))
save_to_zarr(outds, outpath)
pcat.update_from_ds(outds, path=outpath)
diff --git a/tests/test_catutils.py b/tests/test_catutils.py
index 3f05827c..8d5d792b 100644
--- a/tests/test_catutils.py
+++ b/tests/test_catutils.py
@@ -20,7 +20,7 @@
[{".nc", ".zarr"}, {6, 7, 8}, "*ssp126*", 2],
),
)
-def test_find_assets(exts, lens, dirglob, N):
+def test_find_assets(exts, lens, dirglob, N): # noqa: N803
finder = cu._find_assets(str(SAMPLES_DIR), exts=exts, lengths=lens, dirglob=dirglob)
assert isinstance(finder, Generator)
assert len(list(finder)) == N
diff --git a/tests/test_extract.py b/tests/test_extract.py
index fc8c0569..0b9d0eff 100644
--- a/tests/test_extract.py
+++ b/tests/test_extract.py
@@ -364,14 +364,14 @@ def test_wrong_types(self):
"CMIP6_CanESM5_ssp585_r1i1p1f1", wl=2, window=3.85, return_horizon=True
)
- def test_DataArray(self):
+ def test_DataArray(self): # noqa: N802
reals = xr.DataArray(
["CMIP6_CanESM5_ssp126_r1i1p1f1"], dims=("x",), coords={"x": [1]}
)
out = xs.get_warming_level(reals, wl=2, return_horizon=False)
xr.testing.assert_identical(out, reals.copy(data=["2026"]))
- def test_DataFrame(self):
+ def test_DataFrame(self): # noqa: N802
reals = pd.DataFrame.from_records(
[
{
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 070afad2..e003606c 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -853,7 +853,7 @@ def test_file(self, tmpdir):
file=tmpdir / "foo.md",
changes=Path(__file__).parent.parent.joinpath("CHANGELOG.rst"),
)
- with open(tmpdir / "foo.md") as f:
+ with Path(tmpdir).joinpath("foo.md").open(encoding="utf-8") as f:
assert f.read().startswith("# Changelog\n\n")
@@ -1044,7 +1044,7 @@ def test_errors(self):
def test_show_version(tmpdir):
xs.utils.show_versions(file=tmpdir / "versions.txt")
- with open(tmpdir / "versions.txt") as f:
+ with Path(tmpdir).joinpath("versions.txt").open(encoding="utf-8") as f:
out = f.read()
assert "xscen" in out
assert "xclim" in out
diff --git a/tox.ini b/tox.ini
index fd3425f9..5ed966da 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,16 +1,22 @@
[tox]
-min_version = 4.0
+min_version = 4.18.0
envlist =
lint
- py{39,310,311,312}
+ py{310,311,312}
docs-esmpy
requires =
- pip >= 23.3.0
+ pip >= 24.2.0
setuptools >= 65.0
opts =
--colored
--verbose
+[gh]
+python =
+ 3.10 = py310-coveralls
+ 3.11 = py311-coveralls
+ 3.12 = py312-esmpy-coveralls
+
[testenv:lint]
description = Check for Code Compliance and missing french translations
skip_install = True
@@ -18,14 +24,13 @@ download = true
conda_channels =
conda_env =
deps =
- babel
- black[jupyter] ==24.4.2
+ black[jupyter] ==24.8.0
blackdoc ==0.3.9
isort ==5.13.2
- flake8
- flake8-alphabetize
- flake8-rst-docstrings
- ruff >=0.3.0
+ flake8 >=7.1.1
+ flake8-rst-docstrings >=0.3.0
+ ruff >=0.5.7
+ numpydoc >=1.8.0
commands_pre =
pip list
commands =
@@ -88,6 +93,7 @@ commands_pre =
pip check
commands =
pytest {posargs}
+; Coveralls requires access to a repo token set in .coveralls.yml in order to report stats
coveralls: - coveralls
allowlist_externals =
make